summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJonathan Davila <jdavila@ansible.com>2016-01-25 17:35:39 -0500
committerJonathan Davila <jdavila@ansible.com>2016-01-25 17:35:39 -0500
commitf95652e7db0200209d7d402e0fb12268728b0729 (patch)
tree963a568a07caf9a5e63695ce6f585f1327dfa515
parent1b76a9cef2d74eba9fd786e43f1cf3364a8ac501 (diff)
parentac1d1673be1968f21f158b8a1fda6249d0fb9b1e (diff)
downloadansible-f95652e7db0200209d7d402e0fb12268728b0729.tar.gz
Merge remote-tracking branch 'upstream/devel' into ec2_util_boto3
-rw-r--r--.travis.yml1
-rw-r--r--CHANGELOG.md106
-rw-r--r--MANIFEST.in4
-rw-r--r--Makefile5
-rw-r--r--README.md1
-rw-r--r--RELEASES.txt2
-rw-r--r--VERSION2
-rw-r--r--ansible-core-sitemap.xml2716
-rwxr-xr-xbin/ansible1
-rwxr-xr-xcontrib/inventory/libvirt_lxc.py4
-rw-r--r--contrib/inventory/nsot.py341
-rw-r--r--contrib/inventory/nsot.yaml22
-rwxr-xr-xcontrib/inventory/openstack.py67
-rw-r--r--contrib/inventory/openstack.yml3
-rw-r--r--contrib/inventory/rax.ini9
-rwxr-xr-xcontrib/inventory/rax.py5
-rw-r--r--docs/man/man1/ansible-galaxy.1.asciidoc.in203
-rw-r--r--docs/man/man1/ansible-playbook.1.asciidoc.in2
-rw-r--r--docs/man/man1/ansible-pull.1.asciidoc.in4
-rw-r--r--docsite/Makefile4
-rw-r--r--docsite/_themes/srtd/footer.html11
-rw-r--r--docsite/_themes/srtd/layout.html38
-rw-r--r--docsite/_themes/srtd/layout_old.html205
-rw-r--r--docsite/_themes/srtd/searchbox.html61
-rw-r--r--docsite/_themes/srtd/static/css/theme.css21
-rw-r--r--docsite/_themes/srtd/static/images/banner_ad_1.pngbin4510 -> 0 bytes
-rw-r--r--docsite/_themes/srtd/static/images/banner_ad_2.pngbin4951 -> 0 bytes
-rwxr-xr-xdocsite/build-site.py23
-rw-r--r--docsite/rst/YAMLSyntax.rst55
-rw-r--r--docsite/rst/become.rst14
-rw-r--r--docsite/rst/developing.rst1
-rw-r--r--docsite/rst/developing_api.rst76
-rw-r--r--docsite/rst/developing_modules.rst31
-rw-r--r--docsite/rst/developing_releases.rst48
-rw-r--r--docsite/rst/developing_test_pr.rst29
-rw-r--r--docsite/rst/faq.rst2
-rw-r--r--docsite/rst/galaxy.rst340
-rw-r--r--docsite/rst/guide_cloudstack.rst4
-rw-r--r--docsite/rst/guide_vagrant.rst149
-rw-r--r--docsite/rst/index.rst1
-rw-r--r--docsite/rst/intro_adhoc.rst8
-rw-r--r--docsite/rst/intro_bsd.rst2
-rw-r--r--docsite/rst/intro_configuration.rst25
-rw-r--r--docsite/rst/intro_dynamic_inventory.rst76
-rw-r--r--docsite/rst/intro_getting_started.rst2
-rw-r--r--docsite/rst/intro_installation.rst13
-rw-r--r--docsite/rst/intro_patterns.rst2
-rw-r--r--docsite/rst/intro_windows.rst32
-rw-r--r--docsite/rst/modules_core.rst2
-rw-r--r--docsite/rst/playbooks_best_practices.rst4
-rw-r--r--docsite/rst/playbooks_conditionals.rst2
-rw-r--r--docsite/rst/playbooks_delegation.rst32
-rw-r--r--docsite/rst/playbooks_environment.rst2
-rw-r--r--docsite/rst/playbooks_filters.rst47
-rw-r--r--docsite/rst/playbooks_intro.rst3
-rw-r--r--docsite/rst/playbooks_lookups.rst106
-rw-r--r--docsite/rst/playbooks_loops.rst31
-rw-r--r--docsite/rst/playbooks_roles.rst15
-rw-r--r--docsite/rst/playbooks_variables.rst6
-rw-r--r--docsite/rst/porting_guide_2.0.rst183
-rw-r--r--examples/ansible.cfg14
-rw-r--r--examples/hosts34
-rw-r--r--hacking/env-setup4
-rwxr-xr-xhacking/module_formatter.py4
-rw-r--r--lib/ansible/__init__.py2
-rw-r--r--lib/ansible/cli/__init__.py35
-rw-r--r--lib/ansible/cli/adhoc.py22
-rw-r--r--lib/ansible/cli/doc.py8
-rw-r--r--lib/ansible/cli/galaxy.py270
-rw-r--r--lib/ansible/cli/playbook.py50
-rw-r--r--lib/ansible/cli/pull.py47
-rw-r--r--lib/ansible/cli/vault.py2
-rw-r--r--lib/ansible/constants.py29
-rw-r--r--lib/ansible/errors/__init__.py6
-rw-r--r--lib/ansible/executor/module_common.py3
-rw-r--r--lib/ansible/executor/play_iterator.py125
-rw-r--r--lib/ansible/executor/playbook_executor.py96
-rw-r--r--lib/ansible/executor/process/result.py10
-rw-r--r--lib/ansible/executor/process/worker.py117
-rw-r--r--lib/ansible/executor/task_executor.py80
-rw-r--r--lib/ansible/executor/task_queue_manager.py74
-rw-r--r--lib/ansible/galaxy/__init__.py31
-rw-r--r--lib/ansible/galaxy/api.py209
-rw-r--r--lib/ansible/galaxy/data/metadata_template.j214
-rw-r--r--lib/ansible/galaxy/data/test_playbook.j25
-rw-r--r--lib/ansible/galaxy/data/travis.j229
-rw-r--r--lib/ansible/galaxy/login.py113
-rw-r--r--lib/ansible/galaxy/role.py22
-rw-r--r--lib/ansible/galaxy/token.py67
-rw-r--r--lib/ansible/inventory/__init__.py68
-rw-r--r--lib/ansible/inventory/dir.py5
-rw-r--r--lib/ansible/inventory/host.py7
-rw-r--r--lib/ansible/inventory/ini.py12
-rw-r--r--lib/ansible/inventory/script.py27
-rw-r--r--lib/ansible/module_utils/basic.py193
-rw-r--r--lib/ansible/module_utils/cloudstack.py24
-rw-r--r--lib/ansible/module_utils/eapi.py155
-rw-r--r--lib/ansible/module_utils/ec2.py32
-rw-r--r--lib/ansible/module_utils/eos.py227
-rw-r--r--lib/ansible/module_utils/f5.py36
-rw-r--r--lib/ansible/module_utils/facts.py53
-rw-r--r--lib/ansible/module_utils/ios.py134
-rw-r--r--lib/ansible/module_utils/iosxr.py122
-rw-r--r--lib/ansible/module_utils/junos.py122
-rw-r--r--lib/ansible/module_utils/known_hosts.py24
-rw-r--r--lib/ansible/module_utils/mysql.py66
-rw-r--r--lib/ansible/module_utils/netcfg.py85
-rw-r--r--lib/ansible/module_utils/nxapi.py130
-rw-r--r--lib/ansible/module_utils/nxos.py217
-rw-r--r--lib/ansible/module_utils/openswitch.py247
-rw-r--r--lib/ansible/module_utils/shell.py196
-rw-r--r--lib/ansible/module_utils/urls.py82
-rw-r--r--lib/ansible/module_utils/vca.py9
m---------lib/ansible/modules/core10
m---------lib/ansible/modules/extras13
-rw-r--r--lib/ansible/parsing/dataloader.py4
-rw-r--r--lib/ansible/parsing/mod_args.py27
-rw-r--r--lib/ansible/parsing/splitter.py13
-rw-r--r--lib/ansible/parsing/utils/addresses.py22
-rw-r--r--lib/ansible/parsing/vault/__init__.py94
-rw-r--r--lib/ansible/parsing/yaml/dumper.py12
-rw-r--r--lib/ansible/playbook/__init__.py6
-rw-r--r--lib/ansible/playbook/attribute.py6
-rw-r--r--lib/ansible/playbook/base.py13
-rw-r--r--lib/ansible/playbook/become.py16
-rw-r--r--lib/ansible/playbook/block.py32
-rw-r--r--lib/ansible/playbook/conditional.py36
-rw-r--r--lib/ansible/playbook/included_file.py30
-rw-r--r--lib/ansible/playbook/play.py2
-rw-r--r--lib/ansible/playbook/play_context.py52
-rw-r--r--lib/ansible/playbook/playbook_include.py26
-rw-r--r--lib/ansible/playbook/role/__init__.py40
-rw-r--r--lib/ansible/playbook/role/definition.py77
-rw-r--r--lib/ansible/playbook/role/include.py3
-rw-r--r--lib/ansible/playbook/taggable.py6
-rw-r--r--lib/ansible/playbook/task.py61
-rw-r--r--lib/ansible/plugins/__init__.py18
-rw-r--r--lib/ansible/plugins/action/__init__.py92
-rw-r--r--lib/ansible/plugins/action/add_host.py10
-rw-r--r--lib/ansible/plugins/action/async.py6
-rw-r--r--lib/ansible/plugins/action/debug.py37
-rw-r--r--lib/ansible/plugins/action/fetch.py39
-rw-r--r--lib/ansible/plugins/action/group_by.py2
-rw-r--r--lib/ansible/plugins/action/normal.py11
-rw-r--r--lib/ansible/plugins/action/pause.py4
-rw-r--r--lib/ansible/plugins/action/raw.py5
-rw-r--r--lib/ansible/plugins/action/synchronize.py5
-rw-r--r--lib/ansible/plugins/action/template.py9
-rw-r--r--lib/ansible/plugins/action/unarchive.py8
-rw-r--r--lib/ansible/plugins/callback/__init__.py38
-rw-r--r--lib/ansible/plugins/callback/default.py73
-rw-r--r--lib/ansible/plugins/callback/hipchat.py4
-rw-r--r--lib/ansible/plugins/callback/log_plays.py4
-rw-r--r--lib/ansible/plugins/callback/logentries.py345
-rw-r--r--lib/ansible/plugins/callback/minimal.py18
-rw-r--r--lib/ansible/plugins/callback/oneline.py14
-rw-r--r--lib/ansible/plugins/callback/osx_say.py4
-rw-r--r--lib/ansible/plugins/callback/profile_tasks.py4
-rw-r--r--lib/ansible/plugins/callback/skippy.py134
-rw-r--r--lib/ansible/plugins/callback/syslog_json.py4
-rw-r--r--lib/ansible/plugins/callback/timer.py4
-rw-r--r--lib/ansible/plugins/callback/tree.py9
-rw-r--r--lib/ansible/plugins/connection/__init__.py11
-rw-r--r--lib/ansible/plugins/connection/chroot.py2
-rw-r--r--lib/ansible/plugins/connection/docker.py19
-rw-r--r--lib/ansible/plugins/connection/jail.py6
-rw-r--r--lib/ansible/plugins/connection/libvirt_lxc.py6
-rw-r--r--lib/ansible/plugins/connection/local.py25
-rw-r--r--lib/ansible/plugins/connection/ssh.py67
-rw-r--r--lib/ansible/plugins/connection/winrm.py163
-rw-r--r--lib/ansible/plugins/connection/zone.py35
-rw-r--r--lib/ansible/plugins/filter/core.py25
-rw-r--r--[-rwxr-xr-x]lib/ansible/plugins/lookup/consul_kv.py2
-rw-r--r--lib/ansible/plugins/lookup/inventory_hostnames.py13
-rw-r--r--lib/ansible/plugins/shell/csh.py2
-rw-r--r--lib/ansible/plugins/shell/fish.py7
-rw-r--r--lib/ansible/plugins/shell/powershell.py7
-rw-r--r--lib/ansible/plugins/shell/sh.py26
-rw-r--r--lib/ansible/plugins/strategy/__init__.py152
-rw-r--r--lib/ansible/plugins/strategy/free.py47
-rw-r--r--lib/ansible/plugins/strategy/linear.py102
-rw-r--r--lib/ansible/plugins/test/core.py10
-rw-r--r--lib/ansible/template/__init__.py18
-rw-r--r--lib/ansible/utils/color.py3
-rw-r--r--lib/ansible/utils/display.py78
-rw-r--r--lib/ansible/utils/listify.py3
-rwxr-xr-xlib/ansible/utils/module_docs.py2
-rw-r--r--lib/ansible/utils/module_docs_fragments/eos.py91
-rw-r--r--lib/ansible/utils/module_docs_fragments/ios.py74
-rw-r--r--lib/ansible/utils/module_docs_fragments/iosxr.py59
-rw-r--r--lib/ansible/utils/module_docs_fragments/junos.py59
-rw-r--r--lib/ansible/utils/module_docs_fragments/mysql.py84
-rw-r--r--lib/ansible/utils/module_docs_fragments/nxos.py76
-rw-r--r--lib/ansible/utils/module_docs_fragments/openswitch.py82
-rw-r--r--lib/ansible/utils/module_docs_fragments/vca.py83
-rw-r--r--lib/ansible/utils/module_docs_fragments/vmware.py37
-rw-r--r--lib/ansible/utils/path.py2
-rw-r--r--lib/ansible/utils/vars.py17
-rw-r--r--lib/ansible/vars/__init__.py55
-rw-r--r--lib/ansible/vars/hostvars.py72
-rw-r--r--packaging/debian/control2
-rw-r--r--packaging/port/sysutils/ansible/Makefile2
-rwxr-xr-xtest/code-smell/required-and-default-attributes.sh10
-rw-r--r--test/integration/Makefile2
-rw-r--r--test/integration/destructive.yml3
-rw-r--r--test/integration/non_destructive.yml1
-rw-r--r--test/integration/roles/ec2_elb_instance_setup/tasks/main.yml7
-rw-r--r--test/integration/roles/prepare_tests/tasks/main.yml1
-rw-r--r--test/integration/roles/setup_mysql_db/tasks/main.yml5
-rw-r--r--test/integration/roles/setup_postgresql_db/tasks/main.yml8
-rw-r--r--test/integration/roles/test_add_host/tasks/main.yml39
-rw-r--r--test/integration/roles/test_apt/tasks/main.yml1
-rw-r--r--test/integration/roles/test_apt_repository/tasks/apt.yml42
-rw-r--r--test/integration/roles/test_cs_instance/tasks/absent.yml20
-rw-r--r--test/integration/roles/test_cs_instance/tasks/absent_display_name.yml43
-rw-r--r--test/integration/roles/test_cs_instance/tasks/cleanup.yml6
-rw-r--r--test/integration/roles/test_cs_instance/tasks/main.yml5
-rw-r--r--test/integration/roles/test_cs_instance/tasks/present.yml37
-rw-r--r--test/integration/roles/test_cs_instance/tasks/present_display_name.yml176
-rw-r--r--test/integration/roles/test_cs_instance/tasks/setup.yml8
-rw-r--r--test/integration/roles/test_docker/tasks/docker-setup-rht.yml17
-rw-r--r--test/integration/roles/test_docker/tasks/docker-tests.yml31
-rw-r--r--test/integration/roles/test_docker/tasks/main.yml2
-rw-r--r--test/integration/roles/test_docker/tasks/registry-tests.yml11
-rw-r--r--test/integration/roles/test_filters/tasks/main.yml9
-rw-r--r--test/integration/roles/test_get_url/tasks/main.yml81
-rw-r--r--test/integration/roles/test_git/tasks/main.yml66
-rw-r--r--test/integration/roles/test_hg/tasks/main.yml1
-rw-r--r--test/integration/roles/test_lookups/tasks/main.yml6
-rw-r--r--test/integration/roles/test_mysql_db/tasks/main.yml5
-rw-r--r--test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml18
-rw-r--r--test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml2
-rw-r--r--test/integration/roles/test_service/tasks/main.yml2
-rw-r--r--test/integration/roles/test_subversion/tasks/main.yml2
-rw-r--r--test/integration/roles/test_template/files/foo-py26.txt1
-rw-r--r--test/integration/roles/test_template/tasks/main.yml5
-rw-r--r--test/integration/roles/test_unarchive/tasks/main.yml4
-rw-r--r--test/integration/roles/test_uri/tasks/main.yml13
-rw-r--r--test/integration/roles/test_var_precedence_dep/tasks/main.yml2
-rw-r--r--test/integration/roles/test_win_raw/tasks/main.yml13
-rw-r--r--test/integration/roles/test_win_setup/tasks/main.yml8
-rw-r--r--test/integration/roles/test_yum/tasks/main.yml2
-rw-r--r--test/integration/roles/test_zypper/files/empty.spec12
-rw-r--r--test/integration/roles/test_zypper/meta/main.yml2
-rw-r--r--test/integration/roles/test_zypper/tasks/main.yml26
-rw-r--r--test/integration/roles/test_zypper/tasks/zypper.yml194
-rw-r--r--test/integration/test_var_precedence.yml66
-rwxr-xr-xtest/integration/unicode-test-script7
-rw-r--r--test/integration/unicode.yml75
-rw-r--r--test/units/errors/test_errors.py16
-rw-r--r--test/units/inventory/test_host.py4
-rw-r--r--test/units/module_utils/basic/test_exit_json.py22
-rw-r--r--test/units/module_utils/basic/test_heuristic_log_sanitize.py1
-rw-r--r--test/units/module_utils/basic/test_known_hosts.py55
-rw-r--r--test/units/module_utils/basic/test_no_log.py9
-rw-r--r--test/units/module_utils/basic/test_run_command.py176
-rw-r--r--test/units/parsing/test_addresses.py14
-rw-r--r--test/units/playbook/test_play_context.py1
-rw-r--r--test/units/plugins/action/test_action.py4
-rw-r--r--test/units/plugins/cache/test_cache.py4
-rw-r--r--test/units/plugins/callback/test_callback.py82
-rw-r--r--test/units/plugins/strategies/test_strategy_base.py130
-rw-r--r--test/units/vars/test_variable_manager.py1
-rw-r--r--test/utils/ansible-playbook_integration_runner/ansible.cfg2
-rw-r--r--test/utils/ansible-playbook_integration_runner/ec2.yml41
-rw-r--r--test/utils/ansible-playbook_integration_runner/inventory1
-rw-r--r--test/utils/ansible-playbook_integration_runner/main.yml77
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml37
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md6
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml2
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml2
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info1
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml23
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml102
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory1
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml29
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml2
l---------test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps1
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml2
-rw-r--r--test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml43
280 files changed, 11628 insertions, 2487 deletions
diff --git a/.travis.yml b/.travis.yml
index 1ff0ca118d..603132f722 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -24,6 +24,7 @@ script:
- ./test/code-smell/replace-urlopen.sh .
- ./test/code-smell/use-compat-six.sh lib
- ./test/code-smell/boilerplate.sh
+- ./test/code-smell/required-and-default-attributes.sh
- if test x"$TOXENV" != x'py24' ; then tox ; fi
- if test x"$TOXENV" = x'py24' ; then python2.4 -V && python2.4 -m compileall -fq -x 'module_utils/(a10|rax|openstack|ec2|gce).py' lib/ansible/module_utils ; fi
#- make -C docsite all
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ed50896733..4037ea7f9f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,16 @@
Ansible Changes By Release
==========================
-## 2.0 "Over the Hills and Far Away" - ACTIVE DEVELOPMENT
+## 2.1 TBD - ACTIVE DEVELOPMENT
+
+####New Modules:
+* aws: ec2_vpc_net_facts
+* cloudstack: cs_volume
+
+####New Filters:
+* extract
+
+## 2.0 "Over the Hills and Far Away"
###Major Changes:
@@ -24,10 +33,13 @@ Ansible Changes By Release
by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable.
* Added `meta: refresh_inventory` to force rereading the inventory in a play.
This re-executes inventory scripts, but does not force them to ignore any cache they might use.
-* Now when you delegate an action that returns ansible_facts, these facts will be applied to the delegated host, unlike before when they were applied to the current host.
+* New delegate_facts directive, a boolean that allows you to apply facts to the delegated host (true/yes) instead of the inventory_hostname (no/false) which is the default and previous behaviour.
+* local connections now work with 'su' as a privilege escalation method
+* Ansible 2.0 has deprecated the “ssh” from ansible_ssh_user, ansible_ssh_host, and ansible_ssh_port to become ansible_user, ansible_host, and ansible_port.
* New ssh configuration variables(`ansible_ssh_common_args`, `ansible_ssh_extra_args`) can be used to configure a
per-group or per-host ssh ProxyCommand or set any other ssh options.
`ansible_ssh_extra_args` is used to set options that are accepted only by ssh (not sftp or scp, which have their own analogous settings).
+* ansible-pull can now verify the code it runs when using git as a source repository, using git's code signing and verification features.
* Backslashes used when specifying parameters in jinja2 expressions in YAML dicts sometimes needed to be escaped twice.
This has been fixed so that escaping once works. Here's an example of how playbooks need to be modified:
@@ -71,9 +83,31 @@ newline being stripped you can change your playbook like this:
"msg": "Testing some things"
```
+* When specifying complex args as a variable, the variable must use the full jinja2
+variable syntax ('{{var_name}}') - bare variable names there are no longer accepted.
+In fact, even specifying args with variables has been deprecated, and will not be
+allowed in future versions:
+
+ ```
+ ---
+ - hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ my_dirs:
+ - { path: /tmp/3a, state: directory, mode: 0755 }
+ - { path: /tmp/3b, state: directory, mode: 0700 }
+ tasks:
+ - file:
+ args: "{{item}}"
+ with_items: my_dirs
+ ```
+
###Plugins
* Rewritten dnf module that should be faster and less prone to encountering bugs in cornercases
+* WinRM connection plugin passes all vars named `ansible_winrm_*` to the underlying pywinrm client. This allows, for instance, `ansible_winrm_server_cert_validation=ignore` to be used with newer versions of pywinrm to disable certificate validation on Python 2.7.9+.
+* WinRM connection plugin put_file is significantly faster and no longer has file size limitations.
####Deprecated Modules (new ones in parens):
@@ -94,23 +128,31 @@ newline being stripped you can change your playbook like this:
* amazon: ec2_eni
* amazon: ec2_eni_facts
* amazon: ec2_remote_facts
+* amazon: ec2_vpc_igw
* amazon: ec2_vpc_net
+* amazon: ec2_vpc_net_facts
* amazon: ec2_vpc_route_table
* amazon: ec2_vpc_route_table_facts
* amazon: ec2_vpc_subnet
+* amazon: ec2_vpc_subnet_facts
* amazon: ec2_win_password
* amazon: ecs_cluster
* amazon: ecs_task
* amazon: ecs_taskdefinition
-* amazon: elasticache_subnet_group
+* amazon: elasticache_subnet_group_facts
* amazon: iam
+* amazon: iam_cert
* amazon: iam_policy
-* amazon: route53_zone
+* amazon: route53_facts
* amazon: route53_health_check
+* amazon: route53_zone
* amazon: sts_assume_role
* amazon: s3_bucket
* amazon: s3_lifecycle
* amazon: s3_logging
+* amazon: sqs_queue
+* amazon: sns_topic
+* amazon: sts_assume_role
* apk
* bigip_gtm_wide_ip
* bundler
@@ -151,29 +193,35 @@ newline being stripped you can change your playbook like this:
* cloudstack: cs_template
* cloudstack: cs_user
* cloudstack: cs_vmsnapshot
+* cronvar
* datadog_monitor
* deploy_helper
+* docker: docker_login
* dpkg_selections
* elasticsearch_plugin
* expect
* find
+* google: gce_tag
* hall
* ipify_facts
* iptables
* libvirt: virt_net
* libvirt: virt_pool
* maven_artifact
-* openstack: os_ironic
-* openstack: os_ironic_node
+* openstack: os_auth
* openstack: os_client_config
-* openstack: os_floating_ip
* openstack: os_image
* openstack: os_image_facts
+* openstack: os_floating_ip
+* openstack: os_ironic
+* openstack: os_ironic_node
+* openstack: os_keypair
* openstack: os_network
* openstack: os_network_facts
* openstack: os_nova_flavor
* openstack: os_object
* openstack: os_port
+* openstack: os_project
* openstack: os_router
* openstack: os_security_group
* openstack: os_security_group_rule
@@ -183,6 +231,7 @@ newline being stripped you can change your playbook like this:
* openstack: os_server_volume
* openstack: os_subnet
* openstack: os_subnet_facts
+* openstack: os_user
* openstack: os_user_group
* openstack: os_volume
* openvswitch_db.
@@ -193,14 +242,15 @@ newline being stripped you can change your playbook like this:
* profitbricks: profitbricks
* profitbricks: profitbricks_datacenter
* profitbricks: profitbricks_nic
-* profitbricks: profitbricks_snapshot
* profitbricks: profitbricks_volume
* profitbricks: profitbricks_volume_attachments
-* proxmox
-* proxmox_template
+* profitbricks: profitbricks_snapshot
+* proxmox: proxmox
+* proxmox: proxmox_template
* puppet
* pushover
* pushbullet
+* rax: rax_clb_ssl
* rax: rax_mon_alarm
* rax: rax_mon_check
* rax: rax_mon_entity
@@ -210,6 +260,7 @@ newline being stripped you can change your playbook like this:
* rabbitmq_exchange
* rabbitmq_queue
* selinux_permissive
+* sendgrid
* sensu_check
* sensu_subscription
* seport
@@ -221,21 +272,24 @@ newline being stripped you can change your playbook like this:
* vertica_role
* vertica_schema
* vertica_user
-* vmware: vmware_datacenter
+* vmware: vca_fw
+* vmware: vca_nat
* vmware: vmware_cluster
+* vmware: vmware_datacenter
* vmware: vmware_dns_config
* vmware: vmware_dvs_host
* vmware: vmware_dvs_portgroup
* vmware: vmware_dvswitch
* vmware: vmware_host
-* vmware: vmware_vmkernel_ip_config
+* vmware: vmware_migrate_vmk
* vmware: vmware_portgroup
+* vmware: vmware_target_canonical_facts
* vmware: vmware_vm_facts
+* vmware: vmware_vm_vss_dvs_migrate
* vmware: vmware_vmkernel
+* vmware: vmware_vmkernel_ip_config
* vmware: vmware_vsan_cluster
* vmware: vmware_vswitch
-* vmware: vca_fw
-* vmware: vca_nat
* vmware: vsphere_copy
* webfaction_app
* webfaction_db
@@ -243,17 +297,22 @@ newline being stripped you can change your playbook like this:
* webfaction_mailbox
* webfaction_site
* win_acl
+* win_dotnet_ngen
* win_environment
* win_firewall_rule
-* win_package
-* win_scheduled_task
* win_iis_virtualdirectory
* win_iis_webapplication
* win_iis_webapppool
* win_iis_webbinding
* win_iis_website
+* win_lineinfile
+* win_nssm
+* win_package
* win_regedit
+* win_scheduled_task
* win_unzip
+* win_updates
+* win_webpicmd
* xenserver_facts
* zabbix_host
* zabbix_hostmacro
@@ -266,6 +325,7 @@ newline being stripped you can change your playbook like this:
* fleetctl
* openvz
* nagios_ndo
+* nsot
* proxmox
* rudder
* serf
@@ -285,6 +345,11 @@ newline being stripped you can change your playbook like this:
* docker: for talking to docker containers on the ansible controller machine without using ssh.
+####New Callbacks:
+
+* logentries: plugin to send play data to logentries service
+* skippy: same as default but does not display skip messages
+
###Minor changes:
* Many more tests. The new API makes things more testable and we took advantage of it.
@@ -311,9 +376,16 @@ newline being stripped you can change your playbook like this:
* Lookup, vars and action plugin pathing has been normalized, all now follow the same sequence to find relative files.
* We do not ignore the explicitly set login user for ssh when it matches the 'current user' anymore, this allows overriding .ssh/config when it is set
explicitly. Leaving it unset will still use the same user and respect .ssh/config. This also means ansible_ssh_user can now return a None value.
-* Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour::
+* environment variables passed to remote shells now default to 'controller' settings, with fallback to en_us.UTF8 which was the previous default.
+* add_hosts is much stricter about host name and will prevent invalid names from being added.
+* ansible-pull now defaults to doing shallow checkouts with git, use `--full` to return to previous behaviour.
+* random cows are more random
+* when: now gets the registered var after the first iteration, making it possible to break out of item loops
+* Handling of undefined variables has changed. In most places they will now raise an error instead of silently injecting an empty string. Use the default filter if you want to approximate the old behaviour:
+ ```
- debug: msg="The error message was: {{error_code |default('') }}"
+ ```
## 1.9.4 "Dancing In the Street" - Oct 9, 2015
diff --git a/MANIFEST.in b/MANIFEST.in
index d8402f0297..a5e29c9a43 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,12 +4,14 @@ prune ticket_stubs
prune packaging
prune test
prune hacking
-include README.md packaging/rpm/ansible.spec COPYING
+include README.md COPYING
include examples/hosts
include examples/ansible.cfg
include lib/ansible/module_utils/powershell.ps1
recursive-include lib/ansible/modules *
+recursive-include lib/ansible/galaxy/data *
recursive-include docs *
+recursive-include packaging *
include Makefile
include VERSION
include MANIFEST.in
diff --git a/Makefile b/Makefile
index ac4c07f431..367987affc 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ GIT_HASH := $(shell git log -n 1 --format="%h")
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | sed 's/[-_.\/]//g')
GITINFO = .$(GIT_HASH).$(GIT_BRANCH)
else
-GITINFO = ''
+GITINFO = ""
endif
ifeq ($(shell echo $(OS) | egrep -c 'Darwin|FreeBSD|OpenBSD'),1)
@@ -167,6 +167,9 @@ install:
sdist: clean docs
$(PYTHON) setup.py sdist
+sdist_upload: clean docs
+ $(PYTHON) setup.py sdist upload 2>&1 |tee upload.log
+
rpmcommon: $(MANPAGES) sdist
@mkdir -p rpm-build
@cp dist/*.gz rpm-build/
diff --git a/README.md b/README.md
index cec8ccca97..2e1f15559d 100644
--- a/README.md
+++ b/README.md
@@ -55,3 +55,4 @@ Ansible was created by [Michael DeHaan](https://github.com/mpdehaan) (michael.de
Ansible is sponsored by [Ansible, Inc](http://ansible.com)
+
diff --git a/RELEASES.txt b/RELEASES.txt
index 035b81dc71..cd32b0cddb 100644
--- a/RELEASES.txt
+++ b/RELEASES.txt
@@ -4,7 +4,7 @@ Ansible Releases at a Glance
Active Development
++++++++++++++++++
-2.0 "TBD" - in progress
+2.0 "Over the Hills and Far Away" - in progress
Released
++++++++
diff --git a/VERSION b/VERSION
index d05cb3d448..7ec1d6db40 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0.0 0.5.beta3
+2.1.0
diff --git a/ansible-core-sitemap.xml b/ansible-core-sitemap.xml
new file mode 100644
index 0000000000..84a048d311
--- /dev/null
+++ b/ansible-core-sitemap.xml
@@ -0,0 +1,2716 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
+<!-- created with Integrity from http://peacockmedia.software -->
+
+ <url>
+ <loc>http://docs.ansible.com/ansible/</loc>
+ <changefreq>weekly</changefreq>
+ <priority>1.0</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_patterns.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_adhoc.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_configuration.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_getting_started.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_inventory.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_installation.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_bsd.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_dynamic_inventory.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/intro_windows.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_filters.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_conditionals.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quickstart.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_loops.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_variables.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_roles.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_intro.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_blocks.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_async.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_checkmode.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/become.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_acceleration.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_best_practices.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_delegation.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_special_topics.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_strategies.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_environment.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_error_handling.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_prompts.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modules_intro.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_tags.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_lookups.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_vault.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_startnstep.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modules_core.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modules_extra.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_commands_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/common_return_values.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modules_by_category.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_cloud_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_all_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_clustering_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_database_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_files_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_inventory_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_source_control_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_system_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_utilities_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_monitoring_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_notification_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_messaging_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_network_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_packaging_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_web_infrastructure_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_cloudstack.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_vagrant.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guides.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_gce.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/list_of_windows_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_aws.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_rax.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/guide_rolling_upgrade.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_releases.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/tower.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_inventory.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_test_pr.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_plugins.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/community.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_api.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/developing_modules.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/test_strategies.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/glossary.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/galaxy.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/faq.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/YAMLSyntax.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.5</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/index.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/command_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/shell_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/playbooks_filters_ipaddr.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/expect_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/script_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/raw_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/znode_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/xenserver_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cloudtrail_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cloudformation_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/dynamodb_table_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_ami_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_ami_copy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_elb_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_ami_find_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_eip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_elb_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_asg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_eni_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_elb_lb_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_eni_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_key_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_lc_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_tag_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_scaling_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_metric_alarm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_snapshot_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_remote_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vol_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_igw_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_subnet_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_net_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_net_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_route_table_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_win_password_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_route_table_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_vpc_subnet_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ecs_cluster_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/iam_cert_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ecs_taskdefinition_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ecs_task_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/elasticache_subnet_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/iam_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/elasticache_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/iam_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rds_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/route53_zone_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rds_subnet_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/route53_health_check_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/route53_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rds_param_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/route53_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/s3_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sts_assume_role_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/s3_bucket_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/s3_lifecycle_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sns_topic_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/s3_logging_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sqs_queue_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/azure_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_aa_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_modify_server_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_alert_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_publicip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_firewall_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_blueprint_package_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_loadbalancer_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_server_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_firewall_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_instance_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/clc_server_snapshot_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_affinitygroup_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_domain_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_account_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_instancegroup_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_iso_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_project_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_ip_address_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_securitygroup_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_loadbalancer_rule_member_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_loadbalancer_rule_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_network_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_portforward_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_securitygroup_rule_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_sshkeypair_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_template_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_staticnat_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/digital_ocean_domain_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_vmsnapshot_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/digital_ocean_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_volume_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cs_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/docker_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/digital_ocean_sshkey_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/docker_login_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_net_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_pd_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gc_storage_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/docker_image_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_lb_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_img_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gce_tag_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/linode_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/virt_net_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/virt_pool_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_auth_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ovirt_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/lxc_container_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/virt_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/proxmox_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/proxmox_template_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_client_config_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_floating_ip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_network_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_networks_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_image_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_ironic_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_image_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_ironic_node_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_keypair_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_nova_flavor_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_security_group_rule_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_server_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_server_actions_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_object_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_project_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_security_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_server_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_router_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_port_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_server_volume_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/profitbricks_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/profitbricks_datacenter_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_subnets_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_subnet_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_volume_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/os_user_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/profitbricks_nic_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_cdb_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_cdb_database_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/profitbricks_volume_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/profitbricks_volume_attachments_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_cbs_attachments_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_cdb_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_cbs_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_files_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_files_objects_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_clb_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_dns_record_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_clb_nodes_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_dns_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_identity_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_mon_entity_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_mon_notification_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_mon_notification_plan_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_clb_ssl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_meta_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_keypair_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_mon_check_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_network_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_mon_alarm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_queue_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_scaling_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vca_vapp_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vca_nat_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rax_scaling_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vca_fw_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_dvswitch_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_cluster_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_host_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_dns_config_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_datacenter_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_dvs_host_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_dvs_portgroup_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_target_canonical_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_migrate_vmk_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vmkernel_ip_config_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vswitch_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vsphere_copy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vm_shell_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vsan_cluster_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vmkernel_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vmware_vm_vss_dvs_migrate_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vsphere_guest_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zypper_repository_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/a10_server_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/a10_service_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/webfaction_db_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/webfaction_domain_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/a10_virtual_server_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/webfaction_app_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/webfaction_site_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/webfaction_mailbox_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/accelerate_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apache2_module_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apt_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/acl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/alternatives_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/airbrake_deployment_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/add_host_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apt_key_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apk_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/at_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/authorized_key_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apt_repository_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/assemble_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/apt_rpm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/assert_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/async_status_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_gtm_wide_ip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_pool_member_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_monitor_http_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigpanda_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bower_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_monitor_tcp_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_node_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bigip_pool_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bundler_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/boundary_meter_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/blockinfile_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/consul_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/consul_acl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/consul_kv_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/campfire_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/composer_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/bzr_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/circonus_annotation_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/capabilities_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/consul_session_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/datadog_event_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/datadog_monitor_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/debconf_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/copy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cronvar_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cpanm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/cron_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/debug_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/crypttab_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/deploy_helper_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/django_manage_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/dnf_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/dpkg_selections_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/dnsmadeeasy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/dnsimple_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/fail_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/easy_install_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/fetch_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ec2_ami_search_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ejabberd_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/filesystem_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/facter_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/elasticsearch_plugin_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/firewalld_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/file_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/get_url_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/find_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/flowdock_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/git_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/fireball_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gem_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/getent_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/gluster_volume_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/github_hooks_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/hall_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/hg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/glance_image_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/hipchat_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/grove_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/group_by_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/haproxy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/homebrew_cask_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/homebrew_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ini_file_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/homebrew_tap_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/irc_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/hostname_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/htpasswd_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/include_vars_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ipify_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/jabber_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/iptables_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/known_hosts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/jboss_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/librato_annotation_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/kernel_blacklist_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/keystone_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/jira_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/layman_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/lvol_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/lineinfile_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/macports_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/logentries_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/locale_gen_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/lvg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/maven_artifact_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/lldp_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mail_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mqtt_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/modprobe_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mount_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mongodb_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mysql_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/monit_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mysql_db_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mysql_replication_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/mysql_variables_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/nmcli_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/netscaler_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/nagios_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/newrelic_deployment_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/nexmo_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/nova_compute_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/openvswitch_bridge_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/openvswitch_db_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/openvswitch_port_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ohai_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/npm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/open_iscsi_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/openbsd_pkg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/opkg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/nova_keypair_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pagerduty_alert_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pam_limits_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/patch_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/osx_say_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pacman_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/osx_defaults_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/package_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pagerduty_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pause_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pkg5_publisher_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pear_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pkgin_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pingdom_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pkg5_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ping_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pkgutil_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/postgresql_lang_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pkgng_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/portage_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/postgresql_privs_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/portinstall_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pushbullet_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/postgresql_db_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/postgresql_ext_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/puppet_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/postgresql_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_network_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/pushover_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_router_interface_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_floating_ip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_floating_ip_associate_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_binding_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_router_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_router_gateway_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_policy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/quantum_subnet_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_vhost_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_plugin_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_exchange_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_queue_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_parameter_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/redhat_subscription_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rabbitmq_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/riak_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rpm_key_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/seboolean_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rollbar_deployment_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/redis_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rhn_channel_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/rhn_register_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/replace_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/selinux_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/set_fact_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/setup_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/slack_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sendgrid_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/seport_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/slackpkg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sensu_check_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/service_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/selinux_permissive_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/stat_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/subversion_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/slurp_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/supervisorctl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/solaris_zone_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sns_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/stackdriver_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/svr4pkg_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/snmp_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/svc_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/twilio_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/swdepot_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/unarchive_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/template_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/sysctl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/synchronize_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/uptimerobot_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/typetalk_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/ufw_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/uri_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vertica_role_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/urpmi_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vertica_facts_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vertica_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vertica_configuration_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/vertica_schema_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_dotnet_ngen_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/wait_for_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_feature_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_copy_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_chocolatey_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_firewall_rule_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_environment_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_acl_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_file_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_get_url_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_iis_webbinding_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_lineinfile_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_msi_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_iis_webapplication_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_iis_website_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_iis_virtualdirectory_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_iis_webapppool_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_nssm_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_stat_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_template_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_unzip_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_ping_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_package_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_regedit_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_updates_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_service_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_scheduled_task_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_user_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zabbix_group_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zabbix_hostmacro_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/win_webpicmd_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/xattr_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/yum_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/yumrepo_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zabbix_host_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zabbix_maintenance_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zypper_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zabbix_screen_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+ <url>
+ <loc>http://docs.ansible.com/ansible/zfs_module.html</loc>
+ <changefreq>weekly</changefreq>
+ <priority>0.3</priority>
+ </url>
+
+</urlset> \ No newline at end of file
diff --git a/bin/ansible b/bin/ansible
index 7e1aa01a93..627510a72e 100755
--- a/bin/ansible
+++ b/bin/ansible
@@ -60,6 +60,7 @@ if __name__ == '__main__':
try:
display = Display()
+ display.debug("starting run")
sub = None
try:
diff --git a/contrib/inventory/libvirt_lxc.py b/contrib/inventory/libvirt_lxc.py
index 1491afd577..cb34d473cd 100755
--- a/contrib/inventory/libvirt_lxc.py
+++ b/contrib/inventory/libvirt_lxc.py
@@ -27,11 +27,11 @@ result['all'] = {}
pipe = Popen(['virsh', '-q', '-c', 'lxc:///', 'list', '--name', '--all'], stdout=PIPE, universal_newlines=True)
result['all']['hosts'] = [x[:-1] for x in pipe.stdout.readlines()]
result['all']['vars'] = {}
-result['all']['vars']['ansible_connection'] = 'lxc'
+result['all']['vars']['ansible_connection'] = 'libvirt_lxc'
if len(sys.argv) == 2 and sys.argv[1] == '--list':
print(json.dumps(result))
elif len(sys.argv) == 3 and sys.argv[1] == '--host':
- print(json.dumps({'ansible_connection': 'lxc'}))
+ print(json.dumps({'ansible_connection': 'libvirt_lxc'}))
else:
print("Need an argument, either --list or --host <host>")
diff --git a/contrib/inventory/nsot.py b/contrib/inventory/nsot.py
new file mode 100644
index 0000000000..0ca1625df3
--- /dev/null
+++ b/contrib/inventory/nsot.py
@@ -0,0 +1,341 @@
+#!/bin/env python
+
+'''
+nsot
+====
+
+Ansible Dynamic Inventory to pull hosts from NSoT, a flexible CMDB by Dropbox
+
+Features
+--------
+
+* Define host groups in form of NSoT device attribute criteria
+
+* All parameters defined by the spec as of 2015-09-05 are supported.
+
+ + ``--list``: Returns JSON hash of host groups -> hosts and top-level
+ ``_meta`` -> ``hostvars`` which correspond to all device attributes.
+
+ Group vars can be specified in the YAML configuration, noted below.
+
+ + ``--host <hostname>``: Returns JSON hash where every item is a device
+ attribute.
+
+* In addition to all attributes assigned to resource being returned, script
+ will also append ``site_id`` and ``id`` as facts to utilize.
+
+
+Configuration
+-------------
+
+Since it'd be annoying and failure prone to guess where your configuration
+file is, use ``NSOT_INVENTORY_CONFIG`` to specify the path to it.
+
+This file should adhere to the YAML spec. All top-level variables must be
+desired Ansible group-name hashed with single 'query' item to define the NSoT
+attribute query.
+
+Queries follow the normal NSoT query syntax, `shown here`_
+
+.. _shown here: https://github.com/dropbox/pynsot#set-queries
+
+.. code:: yaml
+
+ routers:
+ query: 'deviceType=ROUTER'
+ vars:
+ a: b
+ c: d
+
+ juniper_fw:
+ query: 'deviceType=FIREWALL manufacturer=JUNIPER'
+
+ not_f10:
+ query: '-manufacturer=FORCE10'
+
+The inventory will automatically use your ``.pynsotrc`` like normal pynsot from
+cli would, so make sure that's configured appropriately.
+
+.. note::
+
+    Attributes I'm showing above are influenced by ones that the Trigger
+ project likes. As is the spirit of NSoT, use whichever attributes work best
+ for your workflow.
+
+If config file is blank or absent, the following default groups will be
+created:
+
+* ``routers``: deviceType=ROUTER
+* ``switches``: deviceType=SWITCH
+* ``firewalls``: deviceType=FIREWALL
+
+These are likely not useful for everyone so please use the configuration. :)
+
+.. note::
+
+ By default, resources will only be returned for what your default
+ site is set for in your ``~/.pynsotrc``.
+
+ If you want to specify, add an extra key under the group for ``site: n``.
+
+Output Examples
+---------------
+
+Here are some examples shown from just calling the command directly::
+
+ $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --list | jq '.'
+ {
+ "routers": {
+ "hosts": [
+ "test1.example.com"
+ ],
+ "vars": {
+ "cool_level": "very",
+ "group": "routers"
+ }
+ },
+ "firewalls": {
+ "hosts": [
+ "test2.example.com"
+ ],
+ "vars": {
+ "cool_level": "enough",
+ "group": "firewalls"
+ }
+ },
+ "_meta": {
+ "hostvars": {
+ "test2.example.com": {
+ "make": "SRX",
+ "site_id": 1,
+ "id": 108
+ },
+ "test1.example.com": {
+ "make": "MX80",
+ "site_id": 1,
+ "id": 107
+ }
+ }
+ },
+ "rtr_and_fw": {
+ "hosts": [
+ "test1.example.com",
+ "test2.example.com"
+ ],
+ "vars": {}
+ }
+ }
+
+
+ $ NSOT_INVENTORY_CONFIG=$PWD/test.yaml ansible_nsot --host test1 | jq '.'
+ {
+ "make": "MX80",
+ "site_id": 1,
+ "id": 107
+ }
+
+'''
+
+from __future__ import print_function
+import sys
+import os
+import pkg_resources
+import argparse
+import json
+import yaml
+from textwrap import dedent
+from pynsot.client import get_api_client
+from pynsot.app import HttpServerError
+from click.exceptions import UsageError
+
+
+def warning(*objs):
+ print("WARNING: ", *objs, file=sys.stderr)
+
+
+class NSoTInventory(object):
+    '''NSoT Client object for gathering inventory'''
+
+ def __init__(self):
+ self.config = dict()
+ config_env = os.environ.get('NSOT_INVENTORY_CONFIG')
+ if config_env:
+ try:
+ config_file = os.path.abspath(config_env)
+ except IOError: # If file non-existent, use default config
+ self._config_default()
+ except Exception as e:
+ sys.exit('%s\n' % e)
+
+ with open(config_file) as f:
+ try:
+ self.config.update(yaml.safe_load(f))
+ except TypeError: # If empty file, use default config
+ warning('Empty config file')
+ self._config_default()
+ except Exception as e:
+ sys.exit('%s\n' % e)
+ else: # Use defaults if env var missing
+ self._config_default()
+ self.groups = self.config.keys()
+ self.client = get_api_client()
+ self._meta = {'hostvars': dict()}
+
+ def _config_default(self):
+ default_yaml = '''
+ ---
+ routers:
+ query: deviceType=ROUTER
+ switches:
+ query: deviceType=SWITCH
+ firewalls:
+ query: deviceType=FIREWALL
+ '''
+ self.config = yaml.safe_load(dedent(default_yaml))
+
+ def do_list(self):
+ '''Direct callback for when ``--list`` is provided
+
+ Relies on the configuration generated from init to run
+ _inventory_group()
+ '''
+ inventory = dict()
+ for group, contents in self.config.iteritems():
+ group_response = self._inventory_group(group, contents)
+ inventory.update(group_response)
+ inventory.update({'_meta': self._meta})
+ return json.dumps(inventory)
+
+ def do_host(self, host):
+ return json.dumps(self._hostvars(host))
+
+ def _hostvars(self, host):
+ '''Return dictionary of all device attributes
+
+ Depending on number of devices in NSoT, could be rather slow since this
+ has to request every device resource to filter through
+ '''
+ device = [i for i in self.client.devices.get()['data']['devices']
+ if host in i['hostname']][0]
+ attributes = device['attributes']
+ attributes.update({'site_id': device['site_id'], 'id': device['id']})
+ return attributes
+
+ def _inventory_group(self, group, contents):
+ '''Takes a group and returns inventory for it as dict
+
+ :param group: Group name
+ :type group: str
+ :param contents: The contents of the group's YAML config
+ :type contents: dict
+
+ contents param should look like::
+
+ {
+ 'query': 'xx',
+ 'vars':
+ 'a': 'b'
+ }
+
+ Will return something like::
+
+ { group: {
+ hosts: [],
+ vars: {},
+ }
+ '''
+ query = contents.get('query')
+ hostvars = contents.get('vars', dict())
+ site = contents.get('site', dict())
+ obj = {group: dict()}
+ obj[group]['hosts'] = []
+ obj[group]['vars'] = hostvars
+ try:
+ assert isinstance(query, basestring)
+ except:
+ sys.exit('ERR: Group queries must be a single string\n'
+ ' Group: %s\n'
+ ' Query: %s\n' % (group, query)
+ )
+ try:
+ if site:
+ site = self.client.sites(site)
+ devices = site.devices.query.get(query=query)
+ else:
+ devices = self.client.devices.query.get(query=query)
+ except HttpServerError as e:
+ if '500' in str(e.response):
+ _site = 'Correct site id?'
+ _attr = 'Queried attributes actually exist?'
+ questions = _site + '\n' + _attr
+ sys.exit('ERR: 500 from server.\n%s' % questions)
+ else:
+ raise
+ except UsageError:
+ sys.exit('ERR: Could not connect to server. Running?')
+
+ # Would do a list comprehension here, but would like to save code/time
+ # and also acquire attributes in this step
+ for host in devices['data']['devices']:
+ # Iterate through each device that matches query, assign hostname
+ # to the group's hosts array and then use this single iteration as
+ # a chance to update self._meta which will be used in the final
+ # return
+ hostname = host['hostname']
+ obj[group]['hosts'].append(hostname)
+ attributes = host['attributes']
+ attributes.update({'site_id': host['site_id'], 'id': host['id']})
+ self._meta['hostvars'].update({hostname: attributes})
+
+ return obj
+
+
+def parse_args():
+ desc = __doc__.splitlines()[4] # Just to avoid being redundant
+
+ # Establish parser with options and error out if no action provided
+ parser = argparse.ArgumentParser(
+ description=desc,
+ conflict_handler='resolve',
+ )
+
+ # Arguments
+ #
+ # Currently accepting (--list | -l) and (--host | -h)
+ # These must not be allowed together
+ parser.add_argument(
+ '--list', '-l',
+ help='Print JSON object containing hosts to STDOUT',
+ action='store_true',
+ dest='list_', # Avoiding syntax highlighting for list
+ )
+
+ parser.add_argument(
+ '--host', '-h',
+ help='Print JSON object containing hostvars for <host>',
+ action='store',
+ )
+ args = parser.parse_args()
+
+ if not args.list_ and not args.host: # Require at least one option
+ parser.exit(status=1, message='No action requested')
+
+ if args.list_ and args.host: # Do not allow multiple options
+ parser.exit(status=1, message='Too many actions requested')
+
+ return args
+
+
+def main():
+ '''Set up argument handling and callback routing'''
+ args = parse_args()
+ client = NSoTInventory()
+
+ # Callback condition
+ if args.list_:
+ print(client.do_list())
+ elif args.host:
+ print(client.do_host(args.host))
+
+if __name__ == '__main__':
+ main()
diff --git a/contrib/inventory/nsot.yaml b/contrib/inventory/nsot.yaml
new file mode 100644
index 0000000000..ebddbc8234
--- /dev/null
+++ b/contrib/inventory/nsot.yaml
@@ -0,0 +1,22 @@
+---
+juniper_routers:
+ query: 'deviceType=ROUTER manufacturer=JUNIPER'
+ vars:
+ group: juniper_routers
+ netconf: true
+ os: junos
+
+cisco_asa:
+ query: 'manufacturer=CISCO deviceType=FIREWALL'
+ vars:
+ group: cisco_asa
+ routed_vpn: false
+ stateful: true
+
+old_cisco_asa:
+ query: 'manufacturer=CISCO deviceType=FIREWALL -softwareVersion=8.3+'
+ vars:
+ old_nat: true
+
+not_f10:
+ query: '-manufacturer=FORCE10'
diff --git a/contrib/inventory/openstack.py b/contrib/inventory/openstack.py
index 46b43e9221..b82a042c29 100755
--- a/contrib/inventory/openstack.py
+++ b/contrib/inventory/openstack.py
@@ -32,6 +32,13 @@
# all of them and present them as one contiguous inventory.
#
# See the adjacent openstack.yml file for an example config file
+# There are two ansible inventory specific options that can be set in
+# the inventory section.
+# expand_hostvars controls whether or not the inventory will make extra API
+# calls to fill out additional information about each server
+# use_hostnames changes the behavior from registering every host with its UUID
+# and making a group of its hostname to only doing this if the
+# hostname in question has more than one server
import argparse
import collections
@@ -51,7 +58,7 @@ import shade.inventory
CONFIG_FILES = ['/etc/ansible/openstack.yaml']
-def get_groups_from_server(server_vars):
+def get_groups_from_server(server_vars, namegroup=True):
groups = []
region = server_vars['region']
@@ -76,7 +83,8 @@ def get_groups_from_server(server_vars):
groups.append(extra_group)
groups.append('instance-%s' % server_vars['id'])
- groups.append(server_vars['name'])
+ if namegroup:
+ groups.append(server_vars['name'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
@@ -94,9 +102,9 @@ def get_groups_from_server(server_vars):
return groups
-def get_host_groups(inventory):
+def get_host_groups(inventory, refresh=False):
(cache_file, cache_expiration_time) = get_cache_settings()
- if is_cache_stale(cache_file, cache_expiration_time):
+ if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh):
groups = to_json(get_host_groups_from_cloud(inventory))
open(cache_file, 'w').write(groups)
else:
@@ -106,23 +114,44 @@ def get_host_groups(inventory):
def get_host_groups_from_cloud(inventory):
groups = collections.defaultdict(list)
+ firstpass = collections.defaultdict(list)
hostvars = {}
- for server in inventory.list_hosts():
+ list_args = {}
+ if hasattr(inventory, 'extra_config'):
+ use_hostnames = inventory.extra_config['use_hostnames']
+ list_args['expand'] = inventory.extra_config['expand_hostvars']
+ else:
+ use_hostnames = False
+
+ for server in inventory.list_hosts(**list_args):
if 'interface_ip' not in server:
continue
- for group in get_groups_from_server(server):
- groups[group].append(server['id'])
- hostvars[server['id']] = dict(
- ansible_ssh_host=server['interface_ip'],
- openstack=server,
- )
+ firstpass[server['name']].append(server)
+ for name, servers in firstpass.items():
+ if len(servers) == 1 and use_hostnames:
+ server = servers[0]
+ hostvars[name] = dict(
+ ansible_ssh_host=server['interface_ip'],
+ openstack=server)
+ for group in get_groups_from_server(server, namegroup=False):
+ groups[group].append(server['name'])
+ else:
+ for server in servers:
+ server_id = server['id']
+ hostvars[server_id] = dict(
+ ansible_ssh_host=server['interface_ip'],
+ openstack=server)
+ for group in get_groups_from_server(server, namegroup=True):
+ groups[group].append(server_id)
groups['_meta'] = {'hostvars': hostvars}
return groups
-def is_cache_stale(cache_file, cache_expiration_time):
+def is_cache_stale(cache_file, cache_expiration_time, refresh=False):
''' Determines if cache file has expired, or if it is still valid '''
+ if refresh:
+ return True
if os.path.isfile(cache_file):
mod_time = os.path.getmtime(cache_file)
current_time = time.time()
@@ -169,14 +198,24 @@ def main():
try:
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES
shade.simple_logging(debug=args.debug)
- inventory = shade.inventory.OpenStackInventory(
+ inventory_args = dict(
refresh=args.refresh,
config_files=config_files,
private=args.private,
)
+ if hasattr(shade.inventory.OpenStackInventory, 'extra_config'):
+ inventory_args.update(dict(
+ config_key='ansible',
+ config_defaults={
+ 'use_hostnames': False,
+ 'expand_hostvars': True,
+ }
+ ))
+
+ inventory = shade.inventory.OpenStackInventory(**inventory_args)
if args.list:
- output = get_host_groups(inventory)
+ output = get_host_groups(inventory, refresh=args.refresh)
elif args.host:
output = to_json(inventory.get_host(args.host))
print(output)
diff --git a/contrib/inventory/openstack.yml b/contrib/inventory/openstack.yml
index a99bb02058..1520e2937e 100644
--- a/contrib/inventory/openstack.yml
+++ b/contrib/inventory/openstack.yml
@@ -26,3 +26,6 @@ clouds:
username: stack
password: stack
project_name: stack
+ansible:
+ use_hostnames: True
+ expand_hostvars: False
diff --git a/contrib/inventory/rax.ini b/contrib/inventory/rax.ini
index 5a269e16a3..15948e7b2e 100644
--- a/contrib/inventory/rax.ini
+++ b/contrib/inventory/rax.ini
@@ -55,3 +55,12 @@
# will be ignored, and 4 will be used. Accepts a comma separated list,
# the first found wins.
# access_ip_version = 4
+
+# Environment Variable: RAX_CACHE_MAX_AGE
+# Default: 600
+#
+# A configuration that changes the behavior of the inventory cache.
+# Inventory listing performed before this value will be returned from
+# the cache instead of making a full request for all inventory. Setting
+# this value to 0 will force a full request.
+# cache_max_age = 600 \ No newline at end of file
diff --git a/contrib/inventory/rax.py b/contrib/inventory/rax.py
index 0028f54d20..4ac6b0f47e 100755
--- a/contrib/inventory/rax.py
+++ b/contrib/inventory/rax.py
@@ -355,9 +355,12 @@ def get_cache_file_path(regions):
def _list(regions, refresh_cache=True):
+ cache_max_age = int(get_config(p, 'rax', 'cache_max_age',
+ 'RAX_CACHE_MAX_AGE', 600))
+
if (not os.path.exists(get_cache_file_path(regions)) or
refresh_cache or
- (time() - os.stat(get_cache_file_path(regions))[-1]) > 600):
+ (time() - os.stat(get_cache_file_path(regions))[-1]) > cache_max_age):
# Cache file doesn't exist or older than 10m or refresh cache requested
_list_into_cache(regions)
diff --git a/docs/man/man1/ansible-galaxy.1.asciidoc.in b/docs/man/man1/ansible-galaxy.1.asciidoc.in
index e6f2d0b456..9ffe65e45a 100644
--- a/docs/man/man1/ansible-galaxy.1.asciidoc.in
+++ b/docs/man/man1/ansible-galaxy.1.asciidoc.in
@@ -12,7 +12,7 @@ ansible-galaxy - manage roles using galaxy.ansible.com
SYNOPSIS
--------
-ansible-galaxy [init|info|install|list|remove] [--help] [options] ...
+ansible-galaxy [delete|import|info|init|install|list|login|remove|search|setup] [--help] [options] ...
DESCRIPTION
@@ -20,7 +20,7 @@ DESCRIPTION
*Ansible Galaxy* is a shared repository for Ansible roles.
The ansible-galaxy command can be used to manage these roles,
-or by creating a skeleton framework for roles you'd like to upload to Galaxy.
+or for creating a skeleton framework for roles you'd like to upload to Galaxy.
COMMON OPTIONS
--------------
@@ -29,7 +29,6 @@ COMMON OPTIONS
Show a help message related to the given sub-command.
-
INSTALL
-------
@@ -145,6 +144,204 @@ The path to the directory containing your roles. The default is the *roles_path*
configured in your *ansible.cfg* file (/etc/ansible/roles if not configured)
+SEARCH
+------
+
+The *search* sub-command returns a filtered list of roles found on the remote
+server.
+
+
+USAGE
+~~~~~
+
+$ ansible-galaxy search [options] [searchterm1 searchterm2]
+
+
+OPTIONS
+~~~~~~~
+*--galaxy-tags*::
+
+Provide a comma separated list of Galaxy Tags on which to filter.
+
+*--platforms*::
+
+Provide a comma separated list of Platforms on which to filter.
+
+*--author*::
+
+Specify the username of a Galaxy contributor on which to filter.
+
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+
+INFO
+----
+
+The *info* sub-command shows detailed information for a specific role.
+Details returned about the role included information from the local copy
+as well as information from galaxy.ansible.com.
+
+USAGE
+~~~~~
+
+$ ansible-galaxy info [options] role_name[, version]
+
+OPTIONS
+~~~~~~~
+
+*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::
+
+The path to the directory containing your roles. The default is the *roles_path*
+configured in your *ansible.cfg* file (/etc/ansible/roles if not configured)
+
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+
+LOGIN
+-----
+
+The *login* sub-command is used to authenticate with galaxy.ansible.com.
+Authentication is required to use the import, delete and setup commands.
+It will authenticate the user, retrieve a token from Galaxy, and store it
+in the user's home directory.
+
+USAGE
+~~~~~
+
+$ ansible-galaxy login [options]
+
+The *login* sub-command prompts for a *GitHub* username and password. It does
+NOT send your password to Galaxy. It actually authenticates with GitHub and
+creates a personal access token. It then sends the personal access token to
+Galaxy, which in turn verifies that you are you and returns a Galaxy access
+token. After authentication completes the *GitHub* personal access token is
+destroyed.
+
+If you do not wish to use your GitHub password, or if you have two-factor
+authentication enabled with GitHub, use the *--github-token* option to pass a
+personal access token that you create. Log into GitHub, go to Settings and
+click on Personal Access Token to create a token.
+
+OPTIONS
+~~~~~~~
+
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+*--github-token*::
+
+Authenticate using a *GitHub* personal access token rather than a password.
+
+
+IMPORT
+------
+
+Import a role from *GitHub* to galaxy.ansible.com. Requires the user to first
+authenticate with galaxy.ansible.com using the *login* subcommand.
+
+USAGE
+~~~~~
+
+$ ansible-galaxy import [options] github_user github_repo
+
+OPTIONS
+~~~~~~~
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+*--branch*::
+
+Provide a specific branch to import. When a branch is not specified the
+branch found in meta/main.yml is used. If no branch is specified in
+meta/main.yml, the repo's default branch (usually master) is used.
+
+
+DELETE
+------
+
+The *delete* sub-command will delete a role from galaxy.ansible.com. Requires
+the user to first authenticate with galaxy.ansible.com using the *login* subcommand.
+
+USAGE
+~~~~~
+
+$ ansible-galaxy delete [options] github_user github_repo
+
+OPTIONS
+~~~~~~~
+
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+
+SETUP
+-----
+
+The *setup* sub-command creates an integration point for *Travis CI*, enabling
+galaxy.ansible.com to receive notifications from *Travis* on build completion.
+Requires the user to first authenticate with galaxy.ansible.com using the *login*
+subcommand.
+
+USAGE
+~~~~~
+
+$ ansible-galaxy setup [options] source github_user github_repo secret
+
+* Use *travis* as the source value. In the future additional source values may
+ be added.
+
+* Provide your *Travis* user token as the secret. The token is not stored by
+ galaxy.ansible.com. A hash is created using github_user, github_repo
+ and your token. The hash value is what actually gets stored.
+
+OPTIONS
+~~~~~~~
+
+*-c*, *--ignore-certs*::
+
+Ignore TLS certificate errors.
+
+*-s*, *--server*::
+
+Override the default server https://galaxy.ansible.com.
+
+--list::
+
+Show your configured integrations. Provides the ID of each integration
+which can be used with the remove option.
+
+--remove::
+
+Remove a specific integration. Provide the ID of the integration to
+be removed.
+
AUTHOR
------
diff --git a/docs/man/man1/ansible-playbook.1.asciidoc.in b/docs/man/man1/ansible-playbook.1.asciidoc.in
index 5686162f21..82181982fb 100644
--- a/docs/man/man1/ansible-playbook.1.asciidoc.in
+++ b/docs/man/man1/ansible-playbook.1.asciidoc.in
@@ -96,7 +96,7 @@ Show help page and exit
*-i* 'PATH', *--inventory=*'PATH'::
The 'PATH' to the inventory, which defaults to '/etc/ansible/hosts'.
-Alternatively you can use a comma separated list of hosts or single host with traling comma 'host,'.
+Alternatively, you can use a comma-separated list of hosts or a single host with a trailing comma 'host,'.
*-l* 'SUBSET', *--limit=*'SUBSET'::
diff --git a/docs/man/man1/ansible-pull.1.asciidoc.in b/docs/man/man1/ansible-pull.1.asciidoc.in
index 333b8e34e0..0afba2aeaa 100644
--- a/docs/man/man1/ansible-pull.1.asciidoc.in
+++ b/docs/man/man1/ansible-pull.1.asciidoc.in
@@ -95,6 +95,10 @@ Force running of playbook even if unable to update playbook repository. This
can be useful, for example, to enforce run-time state when a network
connection may not always be up or possible.
+*--full*::
+
+Do a full clone of the repository. By default ansible-pull will do a shallow clone based on the last revision.
+
*-h*, *--help*::
Show the help message and exit.
diff --git a/docsite/Makefile b/docsite/Makefile
index 92129f7851..2b87827c59 100644
--- a/docsite/Makefile
+++ b/docsite/Makefile
@@ -20,6 +20,8 @@ viewdocs: clean staticmin
htmldocs: staticmin
./build-site.py rst
+webdocs: htmldocs
+
clean:
-rm -rf htmlout
-rm -f .buildinfo
@@ -43,4 +45,4 @@ modules: $(FORMATTER) ../hacking/templates/rst.j2
PYTHONPATH=../lib $(FORMATTER) -t rst --template-dir=../hacking/templates --module-dir=../lib/ansible/modules -o rst/
staticmin:
- cat _themes/srtd/static/css/theme.css | sed -e 's/^[ \t]*//g; s/[ \t]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css
+ cat _themes/srtd/static/css/theme.css | sed -e 's/^[ ]*//g; s/[ ]*$$//g; s/\([:{;,]\) /\1/g; s/ {/{/g; s/\/\*.*\*\///g; /^$$/d' | sed -e :a -e '$$!N; s/\n\(.\)/\1/; ta' > _themes/srtd/static/css/theme.min.css
diff --git a/docsite/_themes/srtd/footer.html b/docsite/_themes/srtd/footer.html
index b70cfde7ad..dc1d70a4d1 100644
--- a/docsite/_themes/srtd/footer.html
+++ b/docsite/_themes/srtd/footer.html
@@ -12,8 +12,17 @@
<hr/>
+<script type="text/javascript">
+ (function(w,d,t,u,n,s,e){w['SwiftypeObject']=n;w[n]=w[n]||function(){
+ (w[n].q=w[n].q||[]).push(arguments);};s=d.createElement(t);
+ e=d.getElementsByTagName(t)[0];s.async=1;s.src=u;e.parentNode.insertBefore(s,e);
+ })(window,document,'script','//s.swiftypecdn.com/install/v2/st.js','_st');
+
+ _st('install','yABGvz2N8PwcwBxyfzUc','2.0.0');
+</script>
+
<p>
- &copy; Copyright 2015 <a href="http://ansible.com">Ansible, Inc.</a>.
+ &copy; Copyright 2016 <a href="http://ansible.com">Ansible, Inc.</a>.
{%- if last_updated %}
{% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
diff --git a/docsite/_themes/srtd/layout.html b/docsite/_themes/srtd/layout.html
index f4d7a8a536..cb532191e6 100644
--- a/docsite/_themes/srtd/layout.html
+++ b/docsite/_themes/srtd/layout.html
@@ -150,11 +150,6 @@
</a>
</div>
- <div class="wy-side-nav-search" style="background-color:#5bbdbf;height=80px;margin:'auto auto auto auto'">
- <!-- <a href="{{ pathto(master_doc) }}" class="icon icon-home"> {{ project }}</a> -->
- {% include "searchbox.html" %}
- </div>
-
<div id="menu-id" class="wy-menu wy-menu-vertical" data-spy="affix">
{% set toctree = toctree(maxdepth=2, collapse=False) %}
{% if toctree %}
@@ -166,16 +161,9 @@
<!-- changeable widget -->
<center>
<br/>
-<span class="hs-cta-wrapper" id="hs-cta-wrapper-71d47584-8ef5-4b06-87ae-8d25bc2a837e">
- <span class="hs-cta-node hs-cta-71d47584-8ef5-4b06-87ae-8d25bc2a837e" id="hs-cta-71d47584-8ef5-4b06-87ae-8d25bc2a837e">
- <!--[if lte IE 8]><div id="hs-cta-ie-element"></div><![endif]-->
- <a href="http://cta-redirect.hubspot.com/cta/redirect/330046/71d47584-8ef5-4b06-87ae-8d25bc2a837e"><img class="hs-cta-img" id="hs-cta-img-71d47584-8ef5-4b06-87ae-8d25bc2a837e" style="border-width:0px;" src="https://no-cache.hubspot.com/cta/default/330046/71d47584-8ef5-4b06-87ae-8d25bc2a837e.png" /></a>
- </span>
- <script charset="utf-8" src="https://js.hscta.net/cta/current.js"></script>
- <script type="text/javascript">
- hbspt.cta.load(330046, '71d47584-8ef5-4b06-87ae-8d25bc2a837e');
- </script>
-</span>
+<a href="http://www.ansible.com/docs-left?utm_source=docs">
+ <img style="border-width:0px;" src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-left-rail.png" />
+</a>
</center>
@@ -196,15 +184,17 @@
<div class="wy-nav-content">
<div class="rst-content">
- <!-- Tower ads -->
- <a class="DocSiteBanner" href="http://www.ansible.com/tower?utm_source=docs">
- <div class="DocSiteBanner-imgWrapper">
- <img src="{{ pathto('_static/', 1) }}images/banner_ad_1.png">
- </div>
- <div class="DocSiteBanner-imgWrapper">
- <img src="{{ pathto('_static/', 1) }}images/banner_ad_2.png">
- </div>
- </a>
+ <!-- Banner ads -->
+ <div class="DocSiteBanner">
+ <a class="DocSiteBanner-imgWrapper"
+ href="http://www.ansible.com/docs-top?utm_source=docs">
+ <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-left.png">
+ </a>
+ <a class="DocSiteBanner-imgWrapper"
+ href="http://www.ansible.com/docs-top?utm_source=docs">
+ <img src="https://cdn2.hubspot.net/hubfs/330046/docs-graphics/ASB-docs-top-right.png">
+ </a>
+ </div>
{% include "breadcrumbs.html" %}
<div id="page-content">
diff --git a/docsite/_themes/srtd/layout_old.html b/docsite/_themes/srtd/layout_old.html
deleted file mode 100644
index deb8df2a1a..0000000000
--- a/docsite/_themes/srtd/layout_old.html
+++ /dev/null
@@ -1,205 +0,0 @@
-{#
- basic/layout.html
- ~~~~~~~~~~~~~~~~~
-
- Master layout template for Sphinx themes.
-
- :copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-#}
-{%- block doctype -%}
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
- "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-{%- endblock %}
-{%- set reldelim1 = reldelim1 is not defined and ' &raquo;' or reldelim1 %}
-{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
-{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
- (sidebars != []) %}
-{%- set url_root = pathto('', 1) %}
-{# XXX necessary? #}
-{%- if url_root == '#' %}{% set url_root = '' %}{% endif %}
-{%- if not embedded and docstitle %}
- {%- set titlesuffix = " &mdash; "|safe + docstitle|e %}
-{%- else %}
- {%- set titlesuffix = "" %}
-{%- endif %}
-
-{%- macro relbar() %}
- <div class="related">
- <h3>{{ _('Navigation') }}</h3>
- <ul>
- {%- for rellink in rellinks %}
- <li class="right" {% if loop.first %}style="margin-right: 10px"{% endif %}>
- <a href="{{ pathto(rellink[0]) }}" title="{{ rellink[1]|striptags|e }}"
- {{ accesskey(rellink[2]) }}>{{ rellink[3] }}</a>
- {%- if not loop.first %}{{ reldelim2 }}{% endif %}</li>
- {%- endfor %}
- {%- block rootrellink %}
- <li><a href="{{ pathto(master_doc) }}">{{ shorttitle|e }}</a>{{ reldelim1 }}</li>
- {%- endblock %}
- {%- for parent in parents %}
- <li><a href="{{ parent.link|e }}" {% if loop.last %}{{ accesskey("U") }}{% endif %}>{{ parent.title }}</a>{{ reldelim1 }}</li>
- {%- endfor %}
- {%- block relbaritems %} {% endblock %}
- </ul>
- </div>
-{%- endmacro %}
-
-{%- macro sidebar() %}
- {%- if render_sidebar %}
- <div class="sphinxsidebar">
- <div class="sphinxsidebarwrapper">
- {%- block sidebarlogo %}
- {%- if logo %}
- <p class="logo"><a href="{{ pathto(master_doc) }}">
- <img class="logo" src="{{ pathto('_static/' + logo, 1) }}" alt="Logo"/>
- </a></p>
- {%- endif %}
- {%- endblock %}
- {%- if sidebars != None %}
- {#- new style sidebar: explicitly include/exclude templates #}
- {%- for sidebartemplate in sidebars %}
- {%- include sidebartemplate %}
- {%- endfor %}
- {%- else %}
- {#- old style sidebars: using blocks -- should be deprecated #}
- {%- block sidebartoc %}
- {%- include "localtoc.html" %}
- {%- endblock %}
- {%- block sidebarrel %}
- {%- include "relations.html" %}
- {%- endblock %}
- {%- block sidebarsourcelink %}
- {%- include "sourcelink.html" %}
- {%- endblock %}
- {%- if customsidebar %}
- {%- include customsidebar %}
- {%- endif %}
- {%- block sidebarsearch %}
- {%- include "searchbox.html" %}
- {%- endblock %}
- {%- endif %}
- </div>
- </div>
- {%- endif %}
-{%- endmacro %}
-
-{%- macro script() %}
- <script type="text/javascript">
- var DOCUMENTATION_OPTIONS = {
- URL_ROOT: '{{ url_root }}',
- VERSION: '{{ release|e }}',
- COLLAPSE_INDEX: false,
- FILE_SUFFIX: '{{ '' if no_search_suffix else file_suffix }}',
- HAS_SOURCE: {{ has_source|lower }}
- };
- </script>
- {%- for scriptfile in script_files %}
- <script type="text/javascript" src="{{ pathto(scriptfile, 1) }}"></script>
- {%- endfor %}
-{%- endmacro %}
-
-{%- macro css() %}
- <link rel="stylesheet" href="{{ pathto('_static/' + style, 1) }}" type="text/css" />
- <link rel="stylesheet" href="{{ pathto('_static/pygments.css', 1) }}" type="text/css" />
- {%- for cssfile in css_files %}
- <link rel="stylesheet" href="{{ pathto(cssfile, 1) }}" type="text/css" />
- {%- endfor %}
-{%- endmacro %}
-
-<html xmlns="http://www.w3.org/1999/xhtml">
- <head>
- <meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
- {{ metatags }}
- {%- block htmltitle %}
- <title>{{ title|striptags|e }}{{ titlesuffix }}</title>
- {%- endblock %}
- {{ css() }}
- {%- if not embedded %}
- {{ script() }}
- {%- if use_opensearch %}
- <link rel="search" type="application/opensearchdescription+xml"
- title="{% trans docstitle=docstitle|e %}Search within {{ docstitle }}{% endtrans %}"
- href="{{ pathto('_static/opensearch.xml', 1) }}"/>
- {%- endif %}
- {%- if favicon %}
- <link rel="shortcut icon" href="{{ pathto('_static/' + favicon, 1) }}"/>
- {%- endif %}
- {%- endif %}
-{%- block linktags %}
- {%- if hasdoc('about') %}
- <link rel="author" title="{{ _('About these documents') }}" href="{{ pathto('about') }}" />
- {%- endif %}
- {%- if hasdoc('genindex') %}
- <link rel="index" title="{{ _('Index') }}" href="{{ pathto('genindex') }}" />
- {%- endif %}
- {%- if hasdoc('search') %}
- <link rel="search" title="{{ _('Search') }}" href="{{ pathto('search') }}" />
- {%- endif %}
- {%- if hasdoc('copyright') %}
- <link rel="copyright" title="{{ _('Copyright') }}" href="{{ pathto('copyright') }}" />
- {%- endif %}
- <link rel="top" title="{{ docstitle|e }}" href="{{ pathto('index') }}" />
- {%- if parents %}
- <link rel="up" title="{{ parents[-1].title|striptags|e }}" href="{{ parents[-1].link|e }}" />
- {%- endif %}
- {%- if next %}
- <link rel="next" title="{{ next.title|striptags|e }}" href="{{ next.link|e }}" />
- {%- endif %}
- {%- if prev %}
- <link rel="prev" title="{{ prev.title|striptags|e }}" href="{{ prev.link|e }}" />
- {%- endif %}
-{%- endblock %}
-{%- block extrahead %} {% endblock %}
- </head>
- <body>
-{%- block header %}{% endblock %}
-
-{%- block relbar1 %}{{ relbar() }}{% endblock %}
-
-{%- block content %}
- {%- block sidebar1 %} {# possible location for sidebar #} {% endblock %}
-
- <div class="document">
- {%- block document %}
- <div class="documentwrapper">
- {%- if render_sidebar %}
- <div class="bodywrapper">
- {%- endif %}
- <div class="body">
- {% block body %} {% endblock %}
- </div>
- {%- if render_sidebar %}
- </div>
- {%- endif %}
- </div>
- {%- endblock %}
-
- {%- block sidebar2 %}{{ sidebar() }}{% endblock %}
- <div class="clearer"></div>
- </div>
-{%- endblock %}
-
-{%- block relbar2 %}{{ relbar() }}{% endblock %}
-
-{%- block footer %}
- <div class="footer">
- {%- if show_copyright %}
- {%- if hasdoc('copyright') %}
- {% trans path=pathto('copyright'), copyright=copyright|e %}&copy; <a href="{{ path }}">Copyright</a> {{ copyright }}.{% endtrans %}
- {%- else %}
- {% trans copyright=copyright|e %}&copy; Copyright {{ copyright }}.{% endtrans %}
- {%- endif %}
- {%- endif %}
- {%- if last_updated %}
- {% trans last_updated=last_updated|e %}Last updated on {{ last_updated }}.{% endtrans %}
- {%- endif %}
- {%- if show_sphinx %}
- {% trans sphinx_version=sphinx_version|e %}Created using <a href="http://sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.{% endtrans %}
- {%- endif %}
- </div>
- <p>asdf asdf asdf asdf 22</p>
-{%- endblock %}
- </body>
-</html>
-
diff --git a/docsite/_themes/srtd/searchbox.html b/docsite/_themes/srtd/searchbox.html
deleted file mode 100644
index b0310cff98..0000000000
--- a/docsite/_themes/srtd/searchbox.html
+++ /dev/null
@@ -1,61 +0,0 @@
-<!-- <form class="wy-form" action="{{ pathto('search') }}" method="get">
- <input type="text" name="q" placeholder="Search docs" />
- <input type="hidden" name="check_keywords" value="yes" />
- <input type="hidden" name="area" value="default" />
-</form> -->
-
-<script>
- (function() {
- var cx = '006019874985968165468:eu5pbnxp4po';
- var gcse = document.createElement('script');
- gcse.type = 'text/javascript';
- gcse.async = true;
- gcse.src = (document.location.protocol == 'https:' ? 'https:' : 'http:') +
- '//www.google.com/cse/cse.js?cx=' + cx;
- var s = document.getElementsByTagName('script')[0];
- s.parentNode.insertBefore(gcse, s);
- })();
-</script>
-
-<form id="search-form-id" action="">
- <input type="text" name="query" id="search-box-id" />
- <a class="search-reset-start" id="search-reset"><i class="fa fa-times"></i></a>
- <a class="search-reset-start" id="search-start"><i class="fa fa-search"></i></a>
-</form>
-
-<script type="text/javascript" src="http://www.google.com/cse/brand?form=search-form-id&inputbox=search-box-id"></script>
-
-<script>
- function executeQuery() {
- var input = document.getElementById('search-box-id');
- var element = google.search.cse.element.getElement('searchresults-only0');
- element.resultsUrl = '/htmlout/search.html'
- if (input.value == '') {
- element.clearAllResults();
- $('#page-content, .rst-footer-buttons, #search-start').show();
- $('#search-results, #search-reset').hide();
- } else {
- $('#page-content, .rst-footer-buttons, #search-start').hide();
- $('#search-results, #search-reset').show();
- element.execute(input.value);
- }
- return false;
- }
-
- $('#search-reset').hide();
-
- $('#search-box-id').css('background-position', '1em center');
-
- $('#search-box-id').on('blur', function() {
- $('#search-box-id').css('background-position', '1em center');
- });
-
- $('#search-start').click(function(e) { executeQuery(); });
- $('#search-reset').click(function(e) { $('#search-box-id').val(''); executeQuery(); });
-
- $('#search-form-id').submit(function(e) {
- console.log('submitting!');
- executeQuery();
- e.preventDefault();
- });
-</script>
diff --git a/docsite/_themes/srtd/static/css/theme.css b/docsite/_themes/srtd/static/css/theme.css
index 4f7cbc8caa..246e513b79 100644
--- a/docsite/_themes/srtd/static/css/theme.css
+++ b/docsite/_themes/srtd/static/css/theme.css
@@ -4723,33 +4723,16 @@ span[id*='MathJax-Span'] {
padding: 0.4045em 1.618em;
}
-
.DocSiteBanner {
- width: 100%;
display: flex;
display: -webkit-flex;
+ justify-content: center;
+ -webkit-justify-content: center;
flex-wrap: wrap;
-webkit-flex-wrap: wrap;
- justify-content: space-between;
- -webkit-justify-content: space-between;
- background-color: #ff5850;
margin-bottom: 25px;
}
.DocSiteBanner-imgWrapper {
max-width: 100%;
}
-
-@media screen and (max-width: 1403px) {
- .DocSiteBanner {
- width: 100%;
- display: flex;
- display: -webkit-flex;
- flex-wrap: wrap;
- -webkit-flex-wrap: wrap;
- justify-content: center;
- -webkit-justify-content: center;
- background-color: #fff;
- margin-bottom: 25px;
- }
-}
diff --git a/docsite/_themes/srtd/static/images/banner_ad_1.png b/docsite/_themes/srtd/static/images/banner_ad_1.png
deleted file mode 100644
index a6555f2567..0000000000
--- a/docsite/_themes/srtd/static/images/banner_ad_1.png
+++ /dev/null
Binary files differ
diff --git a/docsite/_themes/srtd/static/images/banner_ad_2.png b/docsite/_themes/srtd/static/images/banner_ad_2.png
deleted file mode 100644
index f9d6c6d42c..0000000000
--- a/docsite/_themes/srtd/static/images/banner_ad_2.png
+++ /dev/null
Binary files differ
diff --git a/docsite/build-site.py b/docsite/build-site.py
index 587a189f07..24f9fc9a64 100755
--- a/docsite/build-site.py
+++ b/docsite/build-site.py
@@ -15,6 +15,7 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
__docformat__ = 'restructuredtext'
@@ -24,9 +25,9 @@ import traceback
try:
from sphinx.application import Sphinx
except ImportError:
- print "#################################"
- print "Dependency missing: Python Sphinx"
- print "#################################"
+ print("#################################")
+ print("Dependency missing: Python Sphinx")
+ print("#################################")
sys.exit(1)
import os
@@ -40,7 +41,7 @@ class SphinxBuilder(object):
"""
Run the DocCommand.
"""
- print "Creating html documentation ..."
+ print("Creating html documentation ...")
try:
buildername = 'html'
@@ -69,10 +70,10 @@ class SphinxBuilder(object):
app.builder.build_all()
- except ImportError, ie:
+ except ImportError:
traceback.print_exc()
- except Exception, ex:
- print >> sys.stderr, "FAIL! exiting ... (%s)" % ex
+ except Exception as ex:
+ print("FAIL! exiting ... (%s)" % ex, file=sys.stderr)
def build_docs(self):
self.app.builder.build_all()
@@ -83,9 +84,9 @@ def build_rst_docs():
if __name__ == '__main__':
if '-h' in sys.argv or '--help' in sys.argv:
- print "This script builds the html documentation from rst/asciidoc sources.\n"
- print " Run 'make docs' to build everything."
- print " Run 'make viewdocs' to build and then preview in a web browser."
+ print("This script builds the html documentation from rst/asciidoc sources.\n")
+ print(" Run 'make docs' to build everything.")
+ print(" Run 'make viewdocs' to build and then preview in a web browser.")
sys.exit(0)
build_rst_docs()
@@ -93,4 +94,4 @@ if __name__ == '__main__':
if "view" in sys.argv:
import webbrowser
if not webbrowser.open('htmlout/index.html'):
- print >> sys.stderr, "Could not open on your webbrowser."
+ print("Could not open on your webbrowser.", file=sys.stderr)
diff --git a/docsite/rst/YAMLSyntax.rst b/docsite/rst/YAMLSyntax.rst
index 76683f6ba3..8189a6caf6 100644
--- a/docsite/rst/YAMLSyntax.rst
+++ b/docsite/rst/YAMLSyntax.rst
@@ -20,52 +20,52 @@ Each item in the list is a list of key/value pairs, commonly
called a "hash" or a "dictionary". So, we need to know how
to write lists and dictionaries in YAML.
-There's another small quirk to YAML. All YAML files (regardless of their association with
-Ansible or not) should begin with ``---``. This is part of the YAML
-format and indicates the start of a document.
+There's another small quirk to YAML. All YAML files (regardless of their association with Ansible or not) can optionally
+begin with ``---`` and end with ``...``. This is part of the YAML format and indicates the start and end of a document.
-All members of a list are lines beginning at the same indentation level starting
-with a ``"- "`` (a dash and a space)::
+All members of a list are lines beginning at the same indentation level starting with a ``"- "`` (a dash and a space)::
---
# A list of tasty fruits
- - Apple
- - Orange
- - Strawberry
- - Mango
+ fruits:
+ - Apple
+ - Orange
+ - Strawberry
+ - Mango
+ ...
A dictionary is represented in a simple ``key: value`` form (the colon must be followed by a space)::
- ---
# An employee record
- name: Example Developer
- job: Developer
- skill: Elite
+ - martin:
+ name: Martin D'vloper
+ job: Developer
+ skill: Elite
-Dictionaries can also be represented in an abbreviated form if you really want to::
+Dictionaries and lists can also be represented in an abbreviated form if you really want to::
---
- # An employee record
- {name: Example Developer, job: Developer, skill: Elite}
+ employees:
+ - martin: {name: Martin D'vloper, job: Developer, skill: Elite}
+ fruits: ['Apple', 'Orange', 'Strawberry', 'Mango']
.. _truthiness:
-Ansible doesn't really use these too much, but you can also specify a
-boolean value (true/false) in several forms::
+Ansible doesn't really use these too much, but you can also specify a boolean value (true/false) in several forms::
- ---
create_key: yes
needs_agent: no
knows_oop: True
likes_emacs: TRUE
uses_cvs: false
-Let's combine what we learned so far in an arbitrary YAML example. This really
-has nothing to do with Ansible, but will give you a feel for the format::
+
+Let's combine what we learned so far in an arbitrary YAML example.
+This really has nothing to do with Ansible, but will give you a feel for the format::
---
# An employee record
- name: Example Developer
+ name: Martin D'vloper
job: Developer
skill: Elite
employed: True
@@ -79,8 +79,7 @@ has nothing to do with Ansible, but will give you a feel for the format::
python: Elite
dotnet: Lame
-That's all you really need to know about YAML to start writing
-`Ansible` playbooks.
+That's all you really need to know about YAML to start writing `Ansible` playbooks.
Gotchas
-------
@@ -100,6 +99,14 @@ with a "{", YAML will think it is a dictionary, so you must quote it, like so::
foo: "{{ variable }}"
+The same applies for strings that start or contain any YAML special characters `` [] {} : > | `` .
+
+Boolean conversion is helpful, but this can be a problem when you want a literal `yes` or other boolean values as a string.
+In these cases just use quotes::
+
+ non_boolean: "yes"
+ other_string: "False"
+
.. seealso::
diff --git a/docsite/rst/become.rst b/docsite/rst/become.rst
index 64628515c6..7597643f88 100644
--- a/docsite/rst/become.rst
+++ b/docsite/rst/become.rst
@@ -1,5 +1,5 @@
-Ansible Privilege Escalation
-++++++++++++++++++++++++++++
+Become (Privilege Escalation)
++++++++++++++++++++++++++++++
Ansible can use existing privilege escalation systems to allow a user to execute tasks as another.
@@ -7,17 +7,17 @@ Ansible can use existing privilege escalation systems to allow a user to execute
Become
``````
-Before 1.9 Ansible mostly allowed the use of sudo and a limited use of su to allow a login/remote user to become a different user
-and execute tasks, create resources with the 2nd user's permissions. As of 1.9 'become' supersedes the old sudo/su, while still
-being backwards compatible. This new system also makes it easier to add other privilege escalation tools like pbrun (Powerbroker),
-pfexec and others.
+Before 1.9 Ansible mostly allowed the use of `sudo` and a limited use of `su` to allow a login/remote user to become a different user
+and execute tasks, create resources with the 2nd user's permissions. As of 1.9 `become` supersedes the old sudo/su, while still
+being backwards compatible. This new system also makes it easier to add other privilege escalation tools like `pbrun` (Powerbroker),
+`pfexec` and others.
New directives
--------------
become
- equivalent to adding 'sudo:' or 'su:' to a play or task, set to 'true'/'yes' to activate privilege escalation
+ equivalent to adding `sudo:` or `su:` to a play or task, set to 'true'/'yes' to activate privilege escalation
become_user
equivalent to adding 'sudo_user:' or 'su_user:' to a play or task, set to user with desired privileges
diff --git a/docsite/rst/developing.rst b/docsite/rst/developing.rst
index 2a25899301..c5a1dca061 100644
--- a/docsite/rst/developing.rst
+++ b/docsite/rst/developing.rst
@@ -11,6 +11,7 @@ Learn how to build modules of your own in any language, and also how to extend A
developing_modules
developing_plugins
developing_test_pr
+ developing_releases
Developers will also likely be interested in the fully-discoverable in :doc:`tower`. It's great for embedding Ansible in all manner of applications.
diff --git a/docsite/rst/developing_api.rst b/docsite/rst/developing_api.rst
index 76cebb64f1..96a447c05c 100644
--- a/docsite/rst/developing_api.rst
+++ b/docsite/rst/developing_api.rst
@@ -6,7 +6,7 @@ Python API
There are several interesting ways to use Ansible from an API perspective. You can use
the Ansible python API to control nodes, you can extend Ansible to respond to various python events, you can
write various plugins, and you can plug in inventory data from external data sources. This document
-covers the Runner and Playbook API at a basic level.
+covers the execution and Playbook API at a basic level.
If you are looking to use Ansible programmatically from something other than Python, trigger events asynchronously,
or have access control and logging demands, take a look at :doc:`tower`
@@ -17,11 +17,69 @@ This chapter discusses the Python API.
.. _python_api:
-Python API
-----------
+The Python API is very powerful, and is how all the ansible CLI tools are implemented.
+In version 2.0 the core ansible got rewritten and the API was mostly rewritten.
+
+.. note:: Ansible relies on forking processes, as such the API is not thread safe.
+
+.. _python_api_20:
+
+Python API 2.0
+--------------
+
+In 2.0 things get a bit more complicated to start, but you end up with much more discrete and readable classes::
+
+
+ #!/usr/bin/python2
+
+ from collections import namedtuple
+ from ansible.parsing.dataloader import DataLoader
+ from ansible.vars import VariableManager
+ from ansible.inventory import Inventory
+ from ansible.playbook.play import Play
+ from ansible.executor.task_queue_manager import TaskQueueManager
+
+ Options = namedtuple('Options', ['connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
+ # initialize needed objects
+ variable_manager = VariableManager()
+ loader = DataLoader()
+ options = Options(connection='local', module_path='/path/to/mymodules', forks=100, remote_user=None, private_key_file=None, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=None, become_method=None, become_user=None, verbosity=None, check=False)
+ passwords = dict(vault_pass='secret')
+
+ # create inventory and pass to var manager
+ inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list='localhost')
+ variable_manager.set_inventory(inventory)
+
+ # create play with tasks
+ play_source = dict(
+ name = "Ansible Play",
+ hosts = 'localhost',
+ gather_facts = 'no',
+ tasks = [ dict(action=dict(module='debug', args=dict(msg='Hello Galaxy!'))) ]
+ )
+ play = Play().load(play_source, variable_manager=variable_manager, loader=loader)
+
+ # actually run it
+ tqm = None
+ try:
+ tqm = TaskQueueManager(
+ inventory=inventory,
+ variable_manager=variable_manager,
+ loader=loader,
+ options=options,
+ passwords=passwords,
+ stdout_callback='default',
+ )
+ result = tqm.run(play)
+ finally:
+ if tqm is not None:
+ tqm.cleanup()
+
+
+.. _python_api_old:
-The Python API is very powerful, and is how the ansible CLI and ansible-playbook
-are implemented.
+Python API pre 2.0
+------------------
It's pretty simple::
@@ -51,7 +109,7 @@ expressed in the :doc:`modules` documentation.::
A module can return any type of JSON data it wants, so Ansible can
be used as a framework to rapidly build powerful applications and scripts.
-.. _detailed_api_example:
+.. _detailed_api_old_example:
Detailed API Example
````````````````````
@@ -87,9 +145,9 @@ The following script prints out the uptime information for all hosts::
for (hostname, result) in results['dark'].items():
print "%s >>> %s" % (hostname, result)
-Advanced programmers may also wish to read the source to ansible itself, for
-it uses the Runner() API (with all available options) to implement the
-command line tools ``ansible`` and ``ansible-playbook``.
+Advanced programmers may also wish to read the source to ansible itself,
+for it uses the API (with all available options) to implement the ``ansible``
+command line tools (``lib/ansible/cli/``).
.. seealso::
diff --git a/docsite/rst/developing_modules.rst b/docsite/rst/developing_modules.rst
index dd62275828..dc5b7e8f5f 100644
--- a/docsite/rst/developing_modules.rst
+++ b/docsite/rst/developing_modules.rst
@@ -191,7 +191,7 @@ a lot shorter than this::
Let's test that module::
- ansible/hacking/test-module -m ./time -a "time=\"March 14 12:23\""
+ ansible/hacking/test-module -m ./timetest.py -a "time=\"March 14 12:23\""
This should return something like::
@@ -219,7 +219,7 @@ this, just have the module return a `ansible_facts` key, like so, along with oth
}
These 'facts' will be available to all statements called after that module (but not before) in the playbook.
-A good idea might be make a module called 'site_facts' and always call it at the top of each playbook, though
+A good idea might be to make a module called 'site_facts' and always call it at the top of each playbook, though
we're always open to improving the selection of core facts in Ansible as well.
.. _common_module_boilerplate:
@@ -247,7 +247,7 @@ And instantiating the module class like::
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
- enabled = dict(required=True, choices=BOOLEANS),
+ enabled = dict(required=True, type='bool'),
something = dict(aliases=['whatever'])
)
)
@@ -335,7 +335,7 @@ and guidelines:
* If you have a company module that returns facts specific to your installations, a good name for this module is `site_facts`.
-* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "choices=BOOLEANS" and a module.boolean(value) casting function.
+* Modules accepting boolean status should generally accept 'yes', 'no', 'true', 'false', or anything else a user may likely throw at them. The AnsibleModule common code supports this with "type='bool'".
* Include a minimum of dependencies if possible. If there are dependencies, document them at the top of the module file, and have the module raise JSON error messages when the import fails.
@@ -347,7 +347,7 @@ and guidelines:
* In the event of failure, a key of 'failed' should be included, along with a string explanation in 'msg'. Modules that raise tracebacks (stacktraces) are generally considered 'poor' modules, though Ansible can deal with these returns and will automatically convert anything unparseable into a failed result. If you are using the AnsibleModule common Python code, the 'failed' element will be included for you automatically when you call 'fail_json'.
-* Return codes from modules are not actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing.
+* Return codes from modules are actually not significant, but continue on with 0=success and non-zero=failure for reasons of future proofing.
* As results from many hosts will be aggregated at once, modules should return only relevant output. Returning the entire contents of a log file is generally bad form.
@@ -479,9 +479,10 @@ Module checklist
````````````````
* The shebang should always be #!/usr/bin/python, this allows ansible_python_interpreter to work
+* Modules must be written to support Python 2.4. If this is not possible, required minimum python version and rationale should be explained in the requirements section in DOCUMENTATION.
* Documentation: Make sure it exists
* `required` should always be present, be it true or false
- * If `required` is false you need to document `default`, even if the default is 'None' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code.
+ * If `required` is false you need to document `default`, even if the default is 'null' (which is the default if no parameter is supplied). Make sure default parameter in docs matches default parameter in code.
* `default` is not needed for `required: true`
* Remove unnecessary doc like `aliases: []` or `choices: []`
* The version is not a float number and value the current development version
@@ -538,24 +539,34 @@ Windows modules checklist
#!powershell
-then::
+ then::
+
<GPL header>
-then::
+
+ then::
+
# WANT_JSON
# POWERSHELL_COMMON
-then, to parse all arguments into a variable modules generally use::
+ then, to parse all arguments into a variable modules generally use::
+
$params = Parse-Args $args
* Arguments:
* Try and use state present and state absent like other modules
* You need to check that all your mandatory args are present. You can do this using the builtin Get-AnsibleParam function.
* Required arguments::
+
$package = Get-AnsibleParam -obj $params -name name -failifempty $true
+
* Required arguments with name validation::
+
$state = Get-AnsibleParam -obj $params -name "State" -ValidateSet "Present","Absent" -resultobj $resultobj -failifempty $true
+
* Optional arguments with name validation::
+
$state = Get-AnsibleParam -obj $params -name "State" -default "Present" -ValidateSet "Present","Absent"
+
   * If "FailIfEmpty" is true, the resultobj parameter is used to specify the object returned to fail-json. You can also override the default message
using $emptyattributefailmessage (for missing required attributes) and $ValidateSetErrorMessage (for attribute validation errors)
* Look at existing modules for more examples of argument checking.
@@ -586,7 +597,7 @@ Starting in 1.8 you can deprecate modules by renaming them with a preceding _, i
_old_cloud.py, This will keep the module available but hide it from the primary docs and listing.
You can also rename modules and keep an alias to the old name by using a symlink that starts with _.
-This example allows the stat module to be called with fileinfo, making the following examples equivalent
+This example allows the stat module to be called with fileinfo, making the following examples equivalent::
EXAMPLES = '''
ln -s stat.py _fileinfo.py
diff --git a/docsite/rst/developing_releases.rst b/docsite/rst/developing_releases.rst
new file mode 100644
index 0000000000..2332459c30
--- /dev/null
+++ b/docsite/rst/developing_releases.rst
@@ -0,0 +1,48 @@
+Releases
+========
+
+.. contents:: Topics
+ :local:
+
+.. _schedule:
+
+Release Schedule
+````````````````
+Ansible is on a 'flexible' 4 month release schedule, sometimes this can be extended if there is a major change that requires a longer cycle (i.e. 2.0 core rewrite).
+Currently modules get released at the same time as the main Ansible repo, even though they are separated into ansible-modules-core and ansible-modules-extras.
+
+The major features and bugs fixed in a release should be reflected in the CHANGELOG.md, minor ones will be in the commit history (FIXME: add git example to list).
+When a fix/feature gets added to the `devel` branch it will be part of the next release, some bugfixes can be backported to previous releases and might be part of a minor point release if it is deemed necessary.
+
+Sometimes an RC can be extended by a few days if a bugfix makes a change that can have far reaching consequences, so users have enough time to find any new issues that may stem from this.
+
+.. _methods:
+
+Release methods
+````````````````
+
+Ansible normally goes through a 'release candidate', issuing an RC1 for a release, if no major bugs are discovered in it after 5 business days we'll get a final release.
+Otherwise fixes will be applied and an RC2 will be provided for testing and if no bugs after 2 days, the final release will be made, iterating this last step and incrementing the candidate number as we find major bugs.
+
+
+.. _freezing:
+
+Release feature freeze
+``````````````````````
+
+During the release candidate process, the focus will be on bugfixes that affect the RC, new features will be delayed while we try to produce a final version. Some bugfixes that are minor or don't affect the RC will also be postponed until after the release is finalized.
+
+.. seealso::
+
+ :doc:`developing_api`
+ Python API to Playbooks and Ad Hoc Task Execution
+ :doc:`developing_modules`
+ How to develop modules
+ :doc:`developing_plugins`
+ How to develop plugins
+ `Ansible Tower <http://ansible.com/ansible-tower>`_
+ REST API endpoint and GUI for Ansible, syncs with dynamic inventory
+ `Development Mailing List <http://groups.google.com/group/ansible-devel>`_
+ Mailing list for development topics
+ `irc.freenode.net <http://irc.freenode.net>`_
+ #ansible IRC chat channel
diff --git a/docsite/rst/developing_test_pr.rst b/docsite/rst/developing_test_pr.rst
index b6ec4e10ba..cf3d0d7536 100644
--- a/docsite/rst/developing_test_pr.rst
+++ b/docsite/rst/developing_test_pr.rst
@@ -81,27 +81,34 @@ and destination repositories. It will look something like this::
Someuser wants to merge 1 commit into ansible:devel from someuser:feature_branch_name
.. note::
- It is important that the PR request target be ansible:devel, as we do not accept pull requests into any other branch.
- Dot releases are cherry-picked manually by ansible staff.
+ It is important that the PR request target be ansible:devel, as we do not accept pull requests into any other branch. Dot releases are cherry-picked manually by ansible staff.
The username and branch at the end are the important parts, which will be turned into git commands as follows::
git checkout -b testing_PRXXXX devel
git pull https://github.com/someuser/ansible.git feature_branch_name
-The first command creates and switches to a new branch named testing_PRXXXX, where the XXXX is the actual issue number associated
-with the pull request (for example, 1234). This branch is based on the devel branch. The second command pulls the new code from the
-users feature branch into the newly created branch.
+The first command creates and switches to a new branch named testing_PRXXXX, where the XXXX is the actual issue number associated with the pull request (for example, 1234). This branch is based on the devel branch. The second command pulls the new code from the user's feature branch into the newly created branch.
.. note::
- If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you
- are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of
- the original pull request contributor.
+ If the GitHub user interface shows that the pull request will not merge cleanly, we do not recommend proceeding if you are not somewhat familiar with git and coding, as you will have to resolve a merge conflict. This is the responsibility of the original pull request contributor.
.. note::
- Some users do not create feature branches, which can cause problems when they have multiple, un-related commits in
- their version of `devel`. If the source looks like `someuser:devel`, make sure there is only one commit listed on
- the pull request.
+ Some users do not create feature branches, which can cause problems when they have multiple, un-related commits in their version of `devel`. If the source looks like `someuser:devel`, make sure there is only one commit listed on the pull request.
+
+Finding a Pull Request for Ansible Modules
+++++++++++++++++++++++++++++++++++++++++++
+Ansible modules are in separate repositories, which are managed as Git submodules. Here's a step by step process for checking out a PR for an Ansible extras module, for instance:
+
+1. git clone https://github.com/ansible/ansible.git
+2. cd ansible
+3. git submodule init
+4. git submodule update --recursive [ fetches the submodules ]
+5. cd lib/ansible/modules/extras
+6. git fetch origin pull/1234/head:pr/1234 [ fetches the specific PR ]
+7. git checkout pr/1234 [ do your testing here ]
+8. cd /path/to/ansible/clone
+9. git submodule update --recursive
For Those About To Test, We Salute You
++++++++++++++++++++++++++++++++++++++
diff --git a/docsite/rst/faq.rst b/docsite/rst/faq.rst
index 90b9a1cb09..e51a1751fe 100644
--- a/docsite/rst/faq.rst
+++ b/docsite/rst/faq.rst
@@ -38,7 +38,7 @@ You can also dictate the connection type to be used, if you want::
foo.example.com
bar.example.com
-You may also wish to keep these in group variables instead, or file in them in a group_vars/<groupname> file.
+You may also wish to keep these in group variables instead, or file them in a group_vars/<groupname> file.
See the rest of the documentation for more information about how to organize variables.
.. _use_ssh:
diff --git a/docsite/rst/galaxy.rst b/docsite/rst/galaxy.rst
index 8df268175a..ba841eed09 100644
--- a/docsite/rst/galaxy.rst
+++ b/docsite/rst/galaxy.rst
@@ -1,55 +1,60 @@
Ansible Galaxy
++++++++++++++
-"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool that helps work with roles.
+"Ansible Galaxy" can either refer to a website for sharing and downloading Ansible roles, or a command line tool for managing and creating roles.
.. contents:: Topics
The Website
```````````
-The website `Ansible Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, rating, and reviewing all kinds of community developed Ansible roles and can be a great way to get a jumpstart on your automation projects.
+The website `Ansible Galaxy <https://galaxy.ansible.com>`_, is a free site for finding, downloading, and sharing community developed Ansible roles. Downloading roles from Galaxy is a great way to jumpstart your automation projects.
-You can sign up with social auth and use the download client 'ansible-galaxy' which is included in Ansible 1.4.2 and later.
+Access the Galaxy web site using GitHub OAuth, and to install roles use the 'ansible-galaxy' command line tool included in Ansible 1.4.2 and later.
Read the "About" page on the Galaxy site for more information.
The ansible-galaxy command line tool
````````````````````````````````````
-The command line ansible-galaxy has many different subcommands.
+The ansible-galaxy command has many different sub-commands for managing roles both locally and at `galaxy.ansible.com <https://galaxy.ansible.com>`_.
+
+.. note::
+
+ The search, login, import, delete, and setup commands in the Ansible 2.0 version of ansible-galaxy require access to the
+ 2.0 Beta release of the Galaxy web site available at `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_.
+
+ Use the ``--server`` option to access the beta site. For example::
+
+ $ ansible-galaxy search --server https://galaxy-qa.ansible.com mysql --author geerlingguy
+
+ Additionally, you can define a server in ansible.cfg::
+
+ [galaxy]
+ server=https://galaxy-qa.ansible.com
Installing Roles
----------------
-The most obvious is downloading roles from the Ansible Galaxy website::
+The most obvious use of the ansible-galaxy command is downloading roles from `the Ansible Galaxy website <https://galaxy.ansible.com>`_::
- ansible-galaxy install username.rolename
-
-.. _galaxy_cli_roles_path:
+ $ ansible-galaxy install username.rolename
roles_path
-===============
+==========
You can specify a particular directory where you want the downloaded roles to be placed::
- ansible-galaxy install username.role -p ~/Code/ansible_roles/
+ $ ansible-galaxy install username.role -p ~/Code/ansible_roles/
This can be useful if you have a master folder that contains ansible galaxy roles shared across several projects. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured).
-Building out Role Scaffolding
------------------------------
-
-It can also be used to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires::
-
- ansible-galaxy init rolename
-
Installing Multiple Roles From A File
--------------------------------------
+=====================================
To install multiple roles, the ansible-galaxy CLI can be fed a requirements file. All versions of ansible allow the following syntax for installing roles from the Ansible Galaxy website::
- ansible-galaxy install -r requirements.txt
+ $ ansible-galaxy install -r requirements.txt
Where the requirements.txt looks like::
@@ -64,7 +69,7 @@ To request specific versions (tags) of a role, use this syntax in the roles file
Available versions will be listed on the Ansible Galaxy webpage for that role.
Advanced Control over Role Requirements Files
----------------------------------------------
+=============================================
For more advanced control over where to download roles from, including support for remote repositories, Ansible 1.8 and later support a new YAML format for the role requirements file, which must end in a 'yml' extension. It works like this::
@@ -77,14 +82,10 @@ And here's an example showing some specific version downloads from multiple sour
# from galaxy
- src: yatesr.timezone
- # from github
- - src: https://github.com/bennojoy/nginx
-
- # from github installing to a relative path
+ # from GitHub
- src: https://github.com/bennojoy/nginx
- path: vagrant/roles/
- # from github, overriding the name and specifying a specific tag
+ # from GitHub, overriding the name and specifying a specific tag
- src: https://github.com/bennojoy/nginx
version: master
name: nginx_role
@@ -93,19 +94,18 @@ And here's an example showing some specific version downloads from multiple sour
- src: https://some.webserver.example.com/files/master.tar.gz
name: http-role
- # from bitbucket, if bitbucket happens to be operational right now :)
+ # from Bitbucket
- src: git+http://bitbucket.org/willthames/git-ansible-galaxy
version: v1.4
- # from bitbucket, alternative syntax and caveats
+ # from Bitbucket, alternative syntax and caveats
- src: http://bitbucket.org/willthames/hg-ansible-galaxy
scm: hg
- # from gitlab or other git-based scm
+ # from GitLab or other git-based scm
- src: git@gitlab.company.com:mygroup/ansible-base.git
scm: git
version: 0.1.0
- path: roles/
As you can see in the above, there are a large amount of controls available
to customize where roles can be pulled from, and what to save roles as.
@@ -121,3 +121,283 @@ Roles pulled from galaxy work as with other SCM sourced roles above. To download
`irc.freenode.net <http://irc.freenode.net>`_
#ansible IRC chat channel
+Building Role Scaffolding
+-------------------------
+
+Use the init command to initialize the base structure of a new role, saving time on creating the various directories and main.yml files a role requires::
+
+ $ ansible-galaxy init rolename
+
+The above will create the following directory structure in the current working directory:
+
+::
+
+ README.md
+ .travis.yml
+ defaults/
+ main.yml
+ files/
+ handlers/
+ main.yml
+ meta/
+ main.yml
+ templates/
+ tests/
+ inventory
+ test.yml
+ vars/
+ main.yml
+
+.. note::
+
+ .travis.yml and tests/ are new in Ansible 2.0
+
+If a directory matching the name of the role already exists in the current working directory, the init command will result in an error. To ignore the error use the --force option. Force will create the above subdirectories and files, replacing anything that matches.
+
+Search for Roles
+----------------
+
+The search command provides for querying the Galaxy database, allowing for searching by tags, platforms, author and multiple keywords. For example:
+
+::
+
+ $ ansible-galaxy search elasticsearch --author geerlingguy
+
+The search command will return a list of the first 1000 results matching your search:
+
+::
+
+ Found 2 roles matching your search:
+
+ Name Description
+ ---- -----------
+ geerlingguy.elasticsearch Elasticsearch for Linux.
+ geerlingguy.elasticsearch-curator Elasticsearch curator for Linux.
+
+.. note::
+
+ The format of results pictured here is new in Ansible 2.0.
+
+Get More Information About a Role
+---------------------------------
+
+Use the info command to view more detail about a specific role:
+
+::
+
+ $ ansible-galaxy info username.role_name
+
+This returns everything found in Galaxy for the role:
+
+::
+
+ Role: username.rolename
+ description: Installs and configures a thing, a distributed, highly available NoSQL thing.
+ active: True
+ commit: c01947b7bc89ebc0b8a2e298b87ab416aed9dd57
+ commit_message: Adding travis
+ commit_url: https://github.com/username/repo_name/commit/c01947b7bc89ebc0b8a2e298b87ab
+ company: My Company, Inc.
+ created: 2015-12-08T14:17:52.773Z
+ download_count: 1
+ forks_count: 0
+ github_branch:
+ github_repo: repo_name
+ github_user: username
+ id: 6381
+ is_valid: True
+ issue_tracker_url:
+ license: Apache
+ min_ansible_version: 1.4
+ modified: 2015-12-08T18:43:49.085Z
+ namespace: username
+ open_issues_count: 0
+ path: /Users/username/projects/roles
+ scm: None
+ src: username.repo_name
+ stargazers_count: 0
+ travis_status_url: https://travis-ci.org/username/repo_name.svg?branch=master
+ version:
+ watchers_count: 1
+
+
+List Installed Roles
+--------------------
+
+The list command shows the name and version of each role installed in roles_path.
+
+::
+
+ $ ansible-galaxy list
+
+ - chouseknecht.role-install_mongod, master
+ - chouseknecht.test-role-1, v1.0.2
+ - chrismeyersfsu.role-iptables, master
+ - chrismeyersfsu.role-required_vars, master
+
+Remove an Installed Role
+------------------------
+
+The remove command will delete a role from roles_path:
+
+::
+
+ $ ansible-galaxy remove username.rolename
+
+Authenticate with Galaxy
+------------------------
+
+To use the import, delete and setup commands, authentication with Galaxy is required. The login command will authenticate the user, retrieve a token from Galaxy, and store it in the user's home directory.
+
+::
+
+ $ ansible-galaxy login
+
+ We need your Github login to identify you.
+ This information will not be sent to Galaxy, only to api.github.com.
+ The password will not be displayed.
+
+ Use --github-token if you do not want to enter your password.
+
+ Github Username: dsmith
+ Password for dsmith:
+ Succesfully logged into Galaxy as dsmith
+
+As depicted above, the login command prompts for a GitHub username and password. It does NOT send your password to Galaxy. It actually authenticates with GitHub and creates a personal access token. It then sends the personal access token to Galaxy, which in turn verifies that you are you and returns a Galaxy access token. After authentication completes the GitHub personal access token is destroyed.
+
+If you do not wish to use your GitHub password, or if you have two-factor authentication enabled with GitHub, use the --github-token option to pass a personal access token that you create. Log into GitHub, go to Settings and click on Personal Access Token to create a token.
+
+.. note::
+
+ The login command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access
+ `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy]
+ section of your ansible.cfg file.
+
+Import a Role
+-------------
+
+Roles can be imported using ansible-galaxy. The import command expects that the user previously authenticated with Galaxy using the login command.
+
+Import any GitHub repo you have access to:
+
+::
+
+ $ ansible-galaxy import github_user github_repo
+
+By default the command will wait for the role to be imported by Galaxy, displaying the results as the import progresses:
+
+::
+
+ Successfully submitted import request 41
+ Starting import 41: role_name=myrole repo=githubuser/ansible-role-repo ref=
+ Retrieving Github repo githubuser/ansible-role-repo
+ Accessing branch: master
+ Parsing and validating meta/main.yml
+ Parsing galaxy_tags
+ Parsing platforms
+ Adding dependencies
+ Parsing and validating README.md
+ Adding repo tags as role versions
+ Import completed
+ Status SUCCESS : warnings=0 errors=0
+
+Use the --branch option to import a specific branch. If not specified, the default branch for the repo will be used.
+
+If the --no-wait option is present, the command will not wait for results. Results of the most recent import for any of your roles is available on the Galaxy web site under My Imports.
+
+.. note::
+
+ The import command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access
+ `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy]
+ section of your ansible.cfg file.
+
+Delete a Role
+-------------
+
+Remove a role from the Galaxy web site using the delete command. You can delete any role that you have access to in GitHub. The delete command expects that the user previously authenticated with Galaxy using the login command.
+
+::
+
+ $ ansible-galaxy delete github_user github_repo
+
+This only removes the role from Galaxy. It does not impact the actual GitHub repo.
+
+.. note::
+
+ The delete command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access
+ `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy]
+ section of your ansible.cfg file.
+
+Setup Travis Integrations
+--------------------------
+
+Using the setup command you can enable notifications from `travis <http://travis-ci.org>`_. The setup command expects that the user previously authenticated with Galaxy using the login command.
+
+::
+
+ $ ansible-galaxy setup travis github_user github_repo xxxtravistokenxxx
+
+ Added integration for travis github_user/github_repo
+
+The setup command requires your Travis token. The Travis token is not stored in Galaxy. It is used along with the GitHub username and repo to create a hash as described in `the Travis documentation <https://docs.travis-ci.com/user/notifications/>`_. The calculated hash is stored in Galaxy and used to verify notifications received from Travis.
+
+The setup command enables Galaxy to respond to notifications. Follow the `Travis getting started guide <https://docs.travis-ci.com/user/getting-started/>`_ to enable the Travis build process for the role repository.
+
+When you create your .travis.yml file add the following to cause Travis to notify Galaxy when a build completes:
+
+::
+
+ notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/
+
+.. note::
+
+ The setup command in Ansible 2.0 requires using the Galaxy 2.0 Beta site. Use the ``--server`` option to access
+ `https://galaxy-qa.ansible.com <https://galaxy-qa.ansible.com>`_. You can also add a *server* definition in the [galaxy]
+ section of your ansible.cfg file.
+
+
+List Travis Integrations
+========================
+
+Use the --list option to display your Travis integrations:
+
+::
+
+ $ ansible-galaxy setup --list
+
+
+ ID Source Repo
+ ---------- ---------- ----------
+ 2 travis github_user/github_repo
+ 1 travis github_user/github_repo
+
+
+Remove Travis Integrations
+==========================
+
+Use the --remove option to disable and remove a Travis integration:
+
+::
+
+ $ ansible-galaxy setup --remove ID
+
+Provide the ID of the integration you want disabled. Use the --list option to get the ID.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docsite/rst/guide_cloudstack.rst b/docsite/rst/guide_cloudstack.rst
index 93b4540b09..c798b26ea1 100644
--- a/docsite/rst/guide_cloudstack.rst
+++ b/docsite/rst/guide_cloudstack.rst
@@ -178,8 +178,8 @@ Now to the fun part. We create a playbook to create our infrastructure we call i
- name: ensure firewall ports opened
cs_firewall:
- ip_address: {{ public_ip }}
- port: {{ item.port }}
+ ip_address: "{{ public_ip }}"
+ port: "{{ item.port }}"
cidr: "{{ item.cidr | default('0.0.0.0/0') }}"
with_items: cs_firewall
when: public_ip is defined
diff --git a/docsite/rst/guide_vagrant.rst b/docsite/rst/guide_vagrant.rst
index 2aad2f1a03..e5870bdd85 100644
--- a/docsite/rst/guide_vagrant.rst
+++ b/docsite/rst/guide_vagrant.rst
@@ -6,12 +6,13 @@ Using Vagrant and Ansible
Introduction
````````````
-Vagrant is a tool to manage virtual machine environments, and allows you to
-configure and use reproducible work environments on top of various
-virtualization and cloud platforms. It also has integration with Ansible as a
-provisioner for these virtual machines, and the two tools work together well.
+`Vagrant <http://vagrantup.com/>`_ is a tool to manage virtual machine
+environments, and allows you to configure and use reproducible work
+environments on top of various virtualization and cloud platforms.
+It also has integration with Ansible as a provisioner for these virtual
+machines, and the two tools work together well.
-This guide will describe how to use Vagrant and Ansible together.
+This guide will describe how to use Vagrant 1.7+ and Ansible together.
If you're not familiar with Vagrant, you should visit `the documentation
<http://docs.vagrantup.com/v2/>`_.
@@ -27,54 +28,48 @@ Vagrant Setup
The first step once you've installed Vagrant is to create a ``Vagrantfile``
and customize it to suit your needs. This is covered in detail in the Vagrant
-documentation, but here is a quick example:
+documentation, but here is a quick example that includes a section to use the
+Ansible provisioner to manage a single machine:
-.. code-block:: bash
+.. code-block:: ruby
- $ mkdir vagrant-test
- $ cd vagrant-test
- $ vagrant init precise32 http://files.vagrantup.com/precise32.box
+ # This guide is optimized for Vagrant 1.7 and above.
+ # Although versions 1.6.x should behave very similarly, it is recommended
+ # to upgrade instead of disabling the requirement below.
+ Vagrant.require_version ">= 1.7.0"
-This will create a file called Vagrantfile that you can edit to suit your
-needs. The default Vagrantfile has a lot of comments. Here is a simplified
-example that includes a section to use the Ansible provisioner:
+ Vagrant.configure(2) do |config|
-.. code-block:: ruby
+ config.vm.box = "ubuntu/trusty64"
+
+ # Disable the new default behavior introduced in Vagrant 1.7, to
+ # ensure that all Vagrant machines will use the same SSH key pair.
+ # See https://github.com/mitchellh/vagrant/issues/5005
+ config.ssh.insert_key = false
- # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
- VAGRANTFILE_API_VERSION = "2"
-
- Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "precise32"
- config.vm.box_url = "http://files.vagrantup.com/precise32.box"
-
- config.vm.network :public_network
-
- config.vm.provision "ansible" do |ansible|
- ansible.playbook = "playbook.yml"
- end
+ config.vm.provision "ansible" do |ansible|
+ ansible.verbose = "v"
+ ansible.playbook = "playbook.yml"
end
+ end
-The Vagrantfile has a lot of options, but these are the most important ones.
Notice the ``config.vm.provision`` section that refers to an Ansible playbook
-called ``playbook.yml`` in the same directory as the Vagrantfile. Vagrant runs
-the provisioner once the virtual machine has booted and is ready for SSH
+called ``playbook.yml`` in the same directory as the ``Vagrantfile``. Vagrant
+runs the provisioner once the virtual machine has booted and is ready for SSH
access.
+There are a lot of Ansible options you can configure in your ``Vagrantfile``.
+Visit the `Ansible Provisioner documentation
+<http://docs.vagrantup.com/v2/provisioning/ansible.html>`_ for more
+information.
+
.. code-block:: bash
$ vagrant up
-This will start the VM and run the provisioning playbook.
-
-There are a lot of Ansible options you can configure in your Vagrantfile. Some
-particularly useful options are ``ansible.extra_vars``, ``ansible.sudo`` and
-``ansible.sudo_user``, and ``ansible.host_key_checking`` which you can disable
-to avoid SSH connection problems to new virtual machines.
+This will start the VM, and run the provisioning playbook (on the first VM
+startup).
-Visit the `Ansible Provisioner documentation
-<http://docs.vagrantup.com/v2/provisioning/ansible.html>`_ for more
-information.
To re-run a playbook on an existing VM, just run:
@@ -82,7 +77,19 @@ To re-run a playbook on an existing VM, just run:
$ vagrant provision
-This will re-run the playbook.
+This will re-run the playbook against the existing VM.
+
+Note that having the ``ansible.verbose`` option enabled will instruct Vagrant
+to show the full ``ansible-playbook`` command used behind the scene, as
+illustrated by this example:
+
+.. code-block:: bash
+
+ $ PYTHONUNBUFFERED=1 ANSIBLE_FORCE_COLOR=true ANSIBLE_HOST_KEY_CHECKING=false ANSIBLE_SSH_ARGS='-o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=60s' ansible-playbook --private-key=/home/someone/.vagrant.d/insecure_private_key --user=vagrant --connection=ssh --limit='machine1' --inventory-file=/home/someone/coding-in-a-project/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml
+
+This information can be quite useful to debug integration issues and can also
+be used to manually execute Ansible from a shell, as explained in the next
+section.
.. _running_ansible:
@@ -90,44 +97,58 @@ Running Ansible Manually
````````````````````````
Sometimes you may want to run Ansible manually against the machines. This is
-pretty easy to do.
+faster than kicking ``vagrant provision`` and pretty easy to do.
-Vagrant automatically creates an inventory file for each Vagrant machine in
-the same directory located under ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``.
-It configures the inventory file according to the SSH tunnel that Vagrant
-automatically creates, and executes ``ansible-playbook`` with the correct
-username and SSH key options to allow access. A typical automatically-created
-inventory file may look something like this:
+With our ``Vagrantfile`` example, Vagrant automatically creates an Ansible
+inventory file in ``.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory``.
+This inventory is configured according to the SSH tunnel that Vagrant
+automatically creates. A typical automatically-created inventory file for a
+single machine environment may look something like this:
.. code-block:: none
# Generated by Vagrant
- machine ansible_host=127.0.0.1 ansible_port=2222
-
-.. include:: ansible_ssh_changes_note.rst
+ default ansible_ssh_host=127.0.0.1 ansible_ssh_port=2222
If you want to run Ansible manually, you will want to make sure to pass
-``ansible`` or ``ansible-playbook`` commands the correct arguments for the
-username (usually ``vagrant``) and the SSH key (since Vagrant 1.7.0, this will be something like
-``.vagrant/machines/[machine name]/[provider]/private_key``), and the autogenerated inventory file.
+``ansible`` or ``ansible-playbook`` commands the correct arguments, at least
+for the *username*, the *SSH private key* and the *inventory*.
+
+Here is an example using the Vagrant global insecure key (``config.ssh.insert_key``
+must be set to ``false`` in your ``Vagrantfile``):
-Here is an example:
+.. code-block:: bash
+
+ $ ansible-playbook --private-key=~/.vagrant.d/insecure_private_key -u vagrant -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml
+
+Here is a second example using the random private key that Vagrant 1.7+
+automatically configures for each new VM (each key is stored in a path like
+``.vagrant/machines/[machine name]/[provider]/private_key``):
.. code-block:: bash
-
- $ ansible-playbook -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant playbook.yml
-Note: Vagrant versions prior to 1.7.0 will use the private key located at ``~/.vagrant.d/insecure_private_key.``
+ $ ansible-playbook --private-key=.vagrant/machines/default/virtualbox/private_key -u vagrant -i .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory playbook.yml
+
+Advanced Usages
+```````````````
+
+The "Tips and Tricks" chapter of the `Ansible Provisioner documentation
+<http://docs.vagrantup.com/v2/provisioning/ansible.html>`_ provides detailed information about more advanced Ansible features like:
+
+ - how to execute a playbook in parallel in a multi-machine environment
+ - how to integrate a local ``ansible.cfg`` configuration file
.. seealso::
- `Vagrant Home <http://www.vagrantup.com/>`_
- The Vagrant homepage with downloads
- `Vagrant Documentation <http://docs.vagrantup.com/v2/>`_
- Vagrant Documentation
- `Ansible Provisioner <http://docs.vagrantup.com/v2/provisioning/ansible.html>`_
- The Vagrant documentation for the Ansible provisioner
- :doc:`playbooks`
- An introduction to playbooks
+ `Vagrant Home <http://www.vagrantup.com/>`_
+ The Vagrant homepage with downloads
+ `Vagrant Documentation <http://docs.vagrantup.com/v2/>`_
+ Vagrant Documentation
+ `Ansible Provisioner <http://docs.vagrantup.com/v2/provisioning/ansible.html>`_
+ The Vagrant documentation for the Ansible provisioner
+ `Vagrant Issue Tracker <https://github.com/mitchellh/vagrant/issues?q=is%3Aopen+is%3Aissue+label%3Aprovisioners%2Fansible>`_
+ The open issues for the Ansible provisioner in the Vagrant project
+ :doc:`playbooks`
+ An introduction to playbooks
diff --git a/docsite/rst/index.rst b/docsite/rst/index.rst
index 936a485c9e..4f77125bb9 100644
--- a/docsite/rst/index.rst
+++ b/docsite/rst/index.rst
@@ -40,4 +40,5 @@ Ansible, Inc. releases a new major release of Ansible approximately every two mo
faq
glossary
YAMLSyntax
+ porting_guide_2.0
diff --git a/docsite/rst/intro_adhoc.rst b/docsite/rst/intro_adhoc.rst
index 9e104d5836..e9abdccc95 100644
--- a/docsite/rst/intro_adhoc.rst
+++ b/docsite/rst/intro_adhoc.rst
@@ -88,7 +88,7 @@ The ``-f 10`` in the above specifies the usage of 10 simultaneous
processes to use. You can also set this in :doc:`intro_configuration` to avoid setting it again. The default is actually 5, which
is really small and conservative. You are probably going to want to talk to a lot more simultaneous hosts so feel free
to crank this up. If you have more hosts than the value set for the fork count, Ansible will talk to them, but it will
-take a little longer. Feel free to push this value as high as your system can handle it!
+take a little longer. Feel free to push this value as high as your system can handle!
You can also select what Ansible "module" you want to run. Normally commands also take a ``-m`` for module name, but
the default module name is 'command', so we didn't need to
@@ -112,7 +112,7 @@ For example, using double rather than single quotes in the above example would
evaluate the variable on the box you were on.
So far we've been demoing simple command execution, but most Ansible modules usually do not work like
-simple scripts. They make the remote system look like you state, and run the commands necessary to
+simple scripts. They make the remote system look like the state you describe, and run the commands necessary to
get it there. This is commonly referred to as 'idempotence', and is a core design goal of Ansible.
However, we also recognize that running arbitrary commands is equally important, so Ansible easily supports both.
@@ -170,7 +170,7 @@ Ensure a package is not installed::
Ansible has modules for managing packages under many platforms. If your package manager
does not have a module available for it, you can install
-for other packages using the command module or (better!) contribute a module
+packages using the command module or (better!) contribute a module
for other package managers. Stop by the mailing list for info/details.
.. _users_and_groups:
@@ -249,7 +249,7 @@ very quickly. After the time limit (in seconds) runs out (``-B``), the process o
the remote nodes will be terminated.
Typically you'll only be backgrounding long-running
-shell commands or software upgrades only. Backgrounding the copy module does not do a background file transfer. :doc:`Playbooks <playbooks>` also support polling, and have a simplified syntax for this.
+shell commands or software upgrades. Backgrounding the copy module does not do a background file transfer. :doc:`Playbooks <playbooks>` also support polling, and have a simplified syntax for this.
.. _checking_facts:
diff --git a/docsite/rst/intro_bsd.rst b/docsite/rst/intro_bsd.rst
index 17c1b8d151..ba0e07f2c8 100644
--- a/docsite/rst/intro_bsd.rst
+++ b/docsite/rst/intro_bsd.rst
@@ -30,7 +30,7 @@ Bootstrapping BSD
For Ansible to effectively manage your machine, we need to install Python along with a json library, in this case we are using Python 2.7 which already has json included.
On your control machine you can simply execute the following for most versions of FreeBSD::
- ansible -m raw -a “pkg_add -r python27” mybsdhost1
+ ansible -m raw -a “pkg install -y python27” mybsdhost1
Once this is done you can now use other Ansible modules aside from the ``raw`` module.
diff --git a/docsite/rst/intro_configuration.rst b/docsite/rst/intro_configuration.rst
index dda07fc450..51a1ad1e58 100644
--- a/docsite/rst/intro_configuration.rst
+++ b/docsite/rst/intro_configuration.rst
@@ -587,11 +587,12 @@ the sudo implementation is matching CLI flags with the standard sudo::
sudo_flags
==========
-Additional flags to pass to sudo when engaging sudo support. The default is '-H' which preserves the $HOME environment variable
-of the original user. In some situations you may wish to add or remove flags, but in general most users
-will not need to change this setting::
+Additional flags to pass to sudo when engaging sudo support. The default is '-H -S -n' which sets the HOME environment
+variable, prompts for passwords via STDIN, and avoids prompting the user for input of any kind. Note that '-n' will conflict
+with using password-less sudo auth, such as pam_ssh_agent_auth. In some situations you may wish to add or remove flags, but
+in general most users will not need to change this setting::
- sudo_flags=-H
+ sudo_flags=-H -S -n
.. _sudo_user:
@@ -897,3 +898,19 @@ The normal behaviour is for operations to copy the existing context or use the u
The default list is: nfs,vboxsf,fuse,ramfs::
special_context_filesystems = nfs,vboxsf,fuse,ramfs,myspecialfs
+
+Galaxy Settings
+---------------
+
+The following options can be set in the [galaxy] section of ansible.cfg:
+
+server
+======
+
+Override the default Galaxy server value of https://galaxy.ansible.com. Useful if you have a hosted version of the Galaxy web app or want to point to the testing site https://galaxy-qa.ansible.com. It does not work against private, hosted repos, which Galaxy can use for fetching and installing roles.
+
+ignore_certs
+============
+
+If set to *yes*, ansible-galaxy will not validate TLS certificates. Handy for testing against a server with a self-signed certificate.
diff --git a/docsite/rst/intro_dynamic_inventory.rst b/docsite/rst/intro_dynamic_inventory.rst
index 1a2bd6f72c..71f6408176 100644
--- a/docsite/rst/intro_dynamic_inventory.rst
+++ b/docsite/rst/intro_dynamic_inventory.rst
@@ -111,9 +111,8 @@ If you use boto profiles to manage multiple AWS accounts, you can pass ``--profi
aws_access_key_id = <prod access key>
aws_secret_access_key = <prod secret key>
-You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, or run playbooks with: ``ansible-playbook -i 'ec2.py --profile prod' myplaybook.yml``.
-
-Alternatively, use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml``
+You can then run ``ec2.py --profile prod`` to get the inventory for the prod account, though this option is not supported by ``ansible-playbook``.
+But you can use the ``AWS_PROFILE`` variable - e.g. ``AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml``
Since each region requires its own API call, if you are only using a small set of regions, feel free to edit ``ec2.ini`` and list only the regions you are interested in. There are other config options in ``ec2.ini`` including cache control, and destination variables.
@@ -207,6 +206,77 @@ explicitly clear the cache, you can run the ec2.py script with the ``--refresh-c
# ./ec2.py --refresh-cache
+.. _openstack_example:
+
+Example: OpenStack External Inventory Script
+````````````````````````````````````````````
+
+If you use an OpenStack based cloud, instead of manually maintaining your own inventory file, you can use the openstack.py dynamic inventory to pull information about your compute instances directly from OpenStack.
+
+You can download the latest version of the OpenStack inventory script at: https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py
+
+You can use the inventory script explicitly (by passing the `-i openstack.py` argument to Ansible) or implicitly (by placing the script at `/etc/ansible/hosts`).
+
+Explicit use of inventory script
+++++++++++++++++++++++++++++++++
+
+Download the latest version of the OpenStack dynamic inventory script and make it executable::
+
+ wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py
+ chmod +x openstack.py
+
+Source an OpenStack RC file::
+
+ source openstack.rc
+
+.. note::
+
+ An OpenStack RC file contains the environment variables required by the client tools to establish a connection with the cloud provider, such as the authentication URL, user name, password and region name. For more information on how to download, create or source an OpenStack RC file, please refer to http://docs.openstack.org/cli-reference/content/cli_openrc.html.
+
+You can confirm the file has been successfully sourced by running a simple command, such as `nova list`, and ensuring it returns no errors.
+
+.. note::
+
+ The OpenStack command line clients are required to run the `nova list` command. For more information on how to install them, please refer to http://docs.openstack.org/cli-reference/content/install_clients.html.
+
+You can test the OpenStack dynamic inventory script manually to confirm it is working as expected::
+
+ ./openstack.py --list
+
+After a few moments you should see some JSON output with information about your compute instances.
+
+Once you confirm the dynamic inventory script is working as expected, you can tell Ansible to use the `openstack.py` script as an inventory file, as illustrated below::
+
+ ansible -i openstack.py all -m ping
+
+Implicit use of inventory script
+++++++++++++++++++++++++++++++++
+
+Download the latest version of the OpenStack dynamic inventory script, make it executable and copy it to `/etc/ansible/hosts`::
+
+ wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.py
+ chmod +x openstack.py
+ sudo cp openstack.py /etc/ansible/hosts
+
+Download the sample configuration file, modify it to suit your needs and copy it to `/etc/ansible/openstack.yml`::
+
+ wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/openstack.yml
+ vi openstack.yml
+ sudo cp openstack.yml /etc/ansible/
+
+You can test the OpenStack dynamic inventory script manually to confirm it is working as expected::
+
+ /etc/ansible/hosts --list
+
+After a few moments you should see some JSON output with information about your compute instances.
+
+Refresh the cache
++++++++++++++++++
+
+Note that the OpenStack dynamic inventory script will cache results to avoid repeated API calls. To explicitly clear the cache, you can run the openstack.py (or hosts) script with the --refresh parameter:
+
+ ./openstack.py --refresh
+
.. _other_inventory_scripts:
Other inventory scripts
diff --git a/docsite/rst/intro_getting_started.rst b/docsite/rst/intro_getting_started.rst
index d6a22a8bb4..7b783209de 100644
--- a/docsite/rst/intro_getting_started.rst
+++ b/docsite/rst/intro_getting_started.rst
@@ -33,7 +33,7 @@ In releases up to and including Ansible 1.2, the default was strictly paramiko.
Occasionally you'll encounter a device that doesn't support SFTP. This is rare, but should it occur, you can switch to SCP mode in :doc:`intro_configuration`.
-When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-sudo-pass``.
+When speaking with remote machines, Ansible by default assumes you are using SSH keys. SSH keys are encouraged but password authentication can also be used where needed by supplying the option ``--ask-pass``. If using sudo features and when sudo requires a password, also supply ``--ask-become-pass`` (previously ``--ask-sudo-pass``, which has been deprecated).
While it may be common sense, it is worth sharing: Any management system benefits from being run near the machines being managed. If you are running Ansible in a cloud, consider running it from a machine inside that cloud. In most cases this will work better than on the open Internet.
diff --git a/docsite/rst/intro_installation.rst b/docsite/rst/intro_installation.rst
index 28bbd69151..99e2661226 100644
--- a/docsite/rst/intro_installation.rst
+++ b/docsite/rst/intro_installation.rst
@@ -27,12 +27,11 @@ What Version To Pick?
`````````````````````
Because it runs so easily from source and does not require any installation of software on remote
-machines, many users will actually track the development version.
+machines, many users will actually track the development version.
-Ansible's release cycles are usually about two months long. Due to this
-short release cycle, minor bugs will generally be fixed in the next release versus maintaining
-backports on the stable branch. Major bugs will still have maintenance releases when needed, though
-these are infrequent.
+Ansible's release cycles are usually about four months long. Due to this short release cycle,
+minor bugs will generally be fixed in the next release versus maintaining backports on the stable branch.
+Major bugs will still have maintenance releases when needed, though these are infrequent.
If you are wishing to run the latest released version of Ansible and you are running Red Hat Enterprise Linux (TM), CentOS, Fedora, Debian, or Ubuntu, we recommend using the OS package manager.
@@ -52,8 +51,8 @@ This includes Red Hat, Debian, CentOS, OS X, any of the BSDs, and so on.
.. note::
- As of 2.0 ansible uses a few more file handles to manage it's forks, OS X has a very low setting so if you want to use 15 or more forks
- you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 2048``. Or just any time you see a "Too many open files" error.
+ As of 2.0 ansible uses a few more file handles to manage its forks, OS X has a very low setting so if you want to use 15 or more forks
+ you'll need to raise the ulimit, like so ``sudo launchctl limit maxfiles 1024 unlimited``. Or just any time you see a "Too many open files" error.
.. _managed_node_requirements:
diff --git a/docsite/rst/intro_patterns.rst b/docsite/rst/intro_patterns.rst
index a97aed42d4..f7f7994887 100644
--- a/docsite/rst/intro_patterns.rst
+++ b/docsite/rst/intro_patterns.rst
@@ -31,7 +31,7 @@ It is also possible to address a specific host or set of hosts by name::
192.168.1.50
192.168.1.*
-The following patterns address one or more groups. Groups separated by a comma indicate an "OR" configuration.
+The following patterns address one or more groups. Groups separated by a colon indicate an "OR" configuration.
This means the host may be in either one group or the other::
webservers
diff --git a/docsite/rst/intro_windows.rst b/docsite/rst/intro_windows.rst
index 64cd64b885..b9b195643b 100644
--- a/docsite/rst/intro_windows.rst
+++ b/docsite/rst/intro_windows.rst
@@ -26,12 +26,12 @@ Installing on the Control Machine
On a Linux control machine::
- pip install https://github.com/diyan/pywinrm/archive/master.zip#egg=pywinrm
+ pip install "pywinrm>=0.1.1"
Active Directory Support
++++++++++++++++++++++++
-If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module and the MIT krb5 libraries it depends on.
+If you wish to connect to domain accounts published through Active Directory (as opposed to local accounts created on the remote host), you will need to install the "python-kerberos" module on the Ansible control host (and the MIT krb5 libraries it depends on). The Ansible control host also requires a properly configured computer account in Active Directory.
Installing python-kerberos dependencies
---------------------------------------
@@ -131,7 +131,9 @@ To test this, ping the windows host you want to control by name then use the ip
If you get different hostnames back than the name you originally pinged, speak to your active directory administrator and get them to check that DNS Scavenging is enabled and that DNS and DHCP are updating each other.
-Check your ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted.
+Ensure that the Ansible controller has a properly configured computer account in the domain.
+
+Check your Ansible controller's clock is synchronised with your domain controller. Kerberos is time sensitive and a little clock drift can cause tickets not be granted.
Check you are using the real fully qualified domain name for the domain. Sometimes domains are commonly known to users by aliases. To check this run:
@@ -165,6 +167,8 @@ In group_vars/windows.yml, define the following inventory variables::
ansible_password: SecretPasswordGoesHere
ansible_port: 5986
ansible_connection: winrm
+ # The following is necessary for Python 2.7.9+ when using default WinRM self-signed certificates:
+ ansible_winrm_server_cert_validation: ignore
Although Ansible is mostly an SSH-oriented system, Windows management will not happen over SSH (`yet <http://blogs.msdn.com/b/powershell/archive/2015/06/03/looking-forward-microsoft-support-for-secure-shell-ssh.aspx>`).
@@ -189,6 +193,7 @@ Since 2.0, the following custom inventory variables are also supported for addit
* ``ansible_winrm_path``: Specify an alternate path to the WinRM endpoint. Ansible uses ``/wsman`` by default.
* ``ansible_winrm_realm``: Specify the realm to use for Kerberos authentication. If the username contains ``@``, Ansible will use the part of the username after ``@`` by default.
* ``ansible_winrm_transport``: Specify one or more transports as a comma-separated list. By default, Ansible will use ``kerberos,plaintext`` if the ``kerberos`` module is installed and a realm is defined, otherwise ``plaintext``.
+* ``ansible_winrm_server_cert_validation``: Specify the server certificate validation mode (``ignore`` or ``validate``). Ansible defaults to ``validate`` on Python 2.7.9 and higher, which will result in certificate validation errors against the Windows self-signed certificates. Unless verifiable certificates have been configured on the WinRM listeners, this should be set to ``ignore``
* ``ansible_winrm_*``: Any additional keyword arguments supported by ``winrm.Protocol`` may be provided.
.. _windows_system_prep:
@@ -221,7 +226,7 @@ Getting to PowerShell 3.0 or higher
PowerShell 3.0 or higher is needed for most provided Ansible modules for Windows, and is also required to run the above setup script. Note that PowerShell 3.0 is only supported on Windows 7 SP1, Windows Server 2008 SP1, and later releases of Windows.
-Looking at an ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 <https://github.com/cchurch/ansible/blob/devel/examples/scripts/upgrade_to_ps3.ps1>`_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above.
+Looking at an Ansible checkout, copy the `examples/scripts/upgrade_to_ps3.ps1 <https://github.com/cchurch/ansible/blob/devel/examples/scripts/upgrade_to_ps3.ps1>`_ script onto the remote host and run a PowerShell console as an administrator. You will now be running PowerShell 3 and can try connectivity again using the win_ping technique referenced above.
.. _what_windows_modules_are_available:
@@ -248,13 +253,10 @@ Note there are a few other Ansible modules that don't start with "win" that also
Developers: Supported modules and how it works
``````````````````````````````````````````````
-Developing ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix.
-What if you want to write Windows modules for ansible though?
-
-For Windows, ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding.
+Developing Ansible modules are covered in a `later section of the documentation <http://docs.ansible.com/developing_modules.html>`_, with a focus on Linux/Unix.
+What if you want to write Windows modules for Ansible though?
-Windows modules live in a "windows/" subfolder in the Ansible "library/" subtree. For example, if a module is named
-"library/windows/win_ping", there will be embedded documentation in the "win_ping" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense.
+For Windows, Ansible modules are implemented in PowerShell. Skim those Linux/Unix module development chapters before proceeding. Windows modules in the core and extras repo live in a "windows/" subdir. Custom modules can go directly into the Ansible "library/" directories or those added in ansible.cfg. Documentation lives in a `.py` file with the same name. For example, if a module is named "win_ping", there will be embedded documentation in the "win_ping.py" file, and the actual PowerShell code will live in a "win_ping.ps1" file. Take a look at the sources and this will make more sense.
Modules (ps1 files) should start as follows::
@@ -317,6 +319,14 @@ Running individual commands uses the 'raw' module, as opposed to the shell or co
register: ipconfig
- debug: var=ipconfig
+Running common DOS commands like 'del', 'move', or 'copy' is unlikely to work on a remote Windows Server using Powershell, but they can work by prefacing the commands with "CMD /C" and enclosing the command in double quotes as in this example::
+
+ - name: another raw module example
+ hosts: windows
+ tasks:
+ - name: Move file on remote Windows Server from one location to another
+ raw: CMD /C "MOVE /Y C:\teststuff\myfile.conf C:\builds\smtp.conf"
+
And for a final example, here's how to use the win_stat module to test for file existence. Note that the data returned by the win_stat module is slightly different than what is provided by the Linux equivalent::
- name: test stat module
@@ -351,7 +361,7 @@ form of new modules, tweaks to existing modules, documentation, or something els
:doc:`developing_modules`
How to write modules
:doc:`playbooks`
- Learning ansible's configuration management language
+ Learning Ansible's configuration management language
`List of Windows Modules <http://docs.ansible.com/list_of_windows_modules.html>`_
Windows specific module list, all implemented in PowerShell
`Mailing List <http://groups.google.com/group/ansible-project>`_
diff --git a/docsite/rst/modules_core.rst b/docsite/rst/modules_core.rst
index 6364a1556f..4d692dad15 100644
--- a/docsite/rst/modules_core.rst
+++ b/docsite/rst/modules_core.rst
@@ -8,6 +8,6 @@ The source of these modules is hosted on GitHub in the `ansible-modules-core <ht
If you believe you have found a bug in a core module and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core <http://github.com/ansible/ansible-modules-core>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
+Should you have a question rather than a bug report, inquiries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
Documentation updates for these modules can also be edited directly in the module itself and by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
diff --git a/docsite/rst/playbooks_best_practices.rst b/docsite/rst/playbooks_best_practices.rst
index ecca4d75cb..653d7f8a19 100644
--- a/docsite/rst/playbooks_best_practices.rst
+++ b/docsite/rst/playbooks_best_practices.rst
@@ -254,8 +254,8 @@ What about just my webservers in Boston?::
What about just the first 10, and then the next 10?::
- ansible-playbook -i production webservers.yml --limit boston[0-10]
- ansible-playbook -i production webservers.yml --limit boston[10-20]
+ ansible-playbook -i production webservers.yml --limit boston[1-10]
+ ansible-playbook -i production webservers.yml --limit boston[11-20]
And of course just basic ad-hoc stuff is also possible.::
diff --git a/docsite/rst/playbooks_conditionals.rst b/docsite/rst/playbooks_conditionals.rst
index 15d397c7ac..47cc844f45 100644
--- a/docsite/rst/playbooks_conditionals.rst
+++ b/docsite/rst/playbooks_conditionals.rst
@@ -47,7 +47,7 @@ decide to do something conditionally based on success or failure::
- command: /bin/something
when: result|failed
- command: /bin/something_else
- when: result|success
+ when: result|succeeded
- command: /bin/still/something_else
when: result|skipped
diff --git a/docsite/rst/playbooks_delegation.rst b/docsite/rst/playbooks_delegation.rst
index 4411e4aa29..fa808abb65 100644
--- a/docsite/rst/playbooks_delegation.rst
+++ b/docsite/rst/playbooks_delegation.rst
@@ -130,6 +130,29 @@ Here is an example::
Note that you must have passphrase-less SSH keys or an ssh-agent configured for this to work, otherwise rsync
will need to ask for a passphrase.
+.. _delegate_facts:
+
+Delegated facts
+```````````````
+
+.. versionadded:: 2.0
+
+By default, any facts gathered by a delegated task are assigned to the `inventory_hostname` (the current host) instead of the host which actually produced the facts (the delegated-to host).
+In 2.0, the directive `delegate_facts` may be set to `True` to assign the task's gathered facts to the delegated host instead of the current one::
+
+
+ - hosts: app_servers
+ tasks:
+ - name: gather facts from db servers
+ setup:
+ delegate_to: "{{item}}"
+ delegate_facts: True
+ with_items: "{{groups['dbservers']}}"
+
+The above will gather facts for the machines in the dbservers group and assign the facts to those machines and not to app_servers.
+This way you can lookup `hostvars['dbhost1']['default_ipv4_addresses'][0]` even though dbservers were not part of the play, or left out by using `--limit`.
+
+
.. _run_once:
Run Once
@@ -159,13 +182,18 @@ This can be optionally paired with "delegate_to" to specify an individual host t
delegate_to: web01.example.org
When "run_once" is not used with "delegate_to" it will execute on the first host, as defined by inventory,
-in the group(s) of hosts targeted by the play. e.g. webservers[0] if the play targeted "hosts: webservers".
+in the group(s) of hosts targeted by the play - e.g. webservers[0] if the play targeted "hosts: webservers".
-This approach is similar, although more concise and cleaner than applying a conditional to a task such as::
+This approach is similar to applying a conditional to a task such as::
- command: /opt/application/upgrade_db.py
when: inventory_hostname == webservers[0]
+.. note::
+    When used together with "serial", tasks marked as "run_once" will be run on one host in *each* serial batch.
+ If it's crucial that the task is run only once regardless of "serial" mode, use
+ :code:`inventory_hostname == my_group_name[0]` construct.
+
.. _local_playbooks:
Local Playbooks
diff --git a/docsite/rst/playbooks_environment.rst b/docsite/rst/playbooks_environment.rst
index da050f007d..f909bfcd6e 100644
--- a/docsite/rst/playbooks_environment.rst
+++ b/docsite/rst/playbooks_environment.rst
@@ -31,7 +31,7 @@ The environment can also be stored in a variable, and accessed like so::
tasks:
- apt: name=cobbler state=installed
- environment: proxy_env
+ environment: "{{proxy_env}}"
You can also use it at a playbook level::
diff --git a/docsite/rst/playbooks_filters.rst b/docsite/rst/playbooks_filters.rst
index 7d4ace9c4b..05ff830e3b 100644
--- a/docsite/rst/playbooks_filters.rst
+++ b/docsite/rst/playbooks_filters.rst
@@ -58,12 +58,17 @@ The following tasks are illustrative of how filters can be used with conditional
- debug: msg="it changed"
when: result|changed
+ - debug: msg="it succeeded in Ansible >= 2.1"
+ when: result|succeeded
+
- debug: msg="it succeeded"
when: result|success
- debug: msg="it was skipped"
when: result|skipped
+.. note:: From 2.1 you can also use success, failure, change, skip so the grammar matches, for those that want to be strict about it.
+
.. _forcing_variables_to_be_defined:
Forcing Variables To Be Defined
@@ -352,6 +357,39 @@ override those in `b`, and so on.
This behaviour does not depend on the value of the `hash_behaviour`
setting in `ansible.cfg`.
+.. _extract_filter:
+
+Extracting values from containers
+---------------------------------
+
+.. versionadded:: 2.1
+
+The `extract` filter is used to map from a list of indices to a list of
+values from a container (hash or array)::
+
+ {{ [0,2]|map('extract', ['x','y','z'])|list }}
+ {{ ['x','y']|map('extract', {'x': 42, 'y': 31})|list }}
+
+The results of the above expressions would be::
+
+ ['x', 'z']
+ [42, 31]
+
+The filter can take another argument::
+
+ {{ groups['x']|map('extract', hostvars, 'ec2_ip_address')|list }}
+
+This takes the list of hosts in group 'x', looks them up in `hostvars`,
+and then looks up the `ec2_ip_address` of the result. The final result
+is a list of IP addresses for the hosts in group 'x'.
+
+The third argument to the filter can also be a list, for a recursive
+lookup inside the container::
+
+ {{ ['a']|map('extract', b, ['x','y'])|list }}
+
+This would return a list containing the value of `b['a']['x']['y']`.
+
.. _comment_filter:
Comment Filter
@@ -514,20 +552,25 @@ To match strings against a regex, use the "match" or "search" filter::
To replace text in a string with regex, use the "regex_replace" filter::
- # convert "ansible" to "able"
+ # convert "ansible" to "able"
{{ 'ansible' | regex_replace('^a.*i(.*)$', 'a\\1') }}
# convert "foobar" to "bar"
{{ 'foobar' | regex_replace('^f.*o(.*)$', '\\1') }}
+ # convert "localhost:80" to "localhost, 80" using named groups
+ {{ 'localhost:80' | regex_replace('^(?P<host>.+):(?P<port>\\d+)$', '\\g<host>, \\g<port>') }}
+
.. note:: Prior to ansible 2.0, if "regex_replace" filter was used with variables inside YAML arguments (as opposed to simpler 'key=value' arguments),
then you needed to escape backreferences (e.g. ``\\1``) with 4 backslashes (``\\\\``) instead of 2 (``\\``).
+.. versionadded:: 2.0
+
To escape special characters within a regex, use the "regex_escape" filter::
# convert '^f.*o(.*)$' to '\^f\.\*o\(\.\*\)\$'
{{ '^f.*o(.*)$' | regex_escape() }}
-
+
To make use of one attribute from each item in a list of complex variables, use the "map" filter (see the `Jinja2 map() docs`_ for more)::
# get a comma-separated list of the mount points (e.g. "/,/mnt/stuff") on a host
diff --git a/docsite/rst/playbooks_intro.rst b/docsite/rst/playbooks_intro.rst
index e0f1aec5c1..55cd3359be 100644
--- a/docsite/rst/playbooks_intro.rst
+++ b/docsite/rst/playbooks_intro.rst
@@ -41,7 +41,7 @@ Each playbook is composed of one or more 'plays' in a list.
The goal of a play is to map a group of hosts to some well defined roles, represented by
things ansible calls tasks. At a basic level, a task is nothing more than a call
-to an ansible module, which you should have learned about in earlier chapters.
+to an ansible module (see :doc:`Modules`).
By composing a playbook of multiple 'plays', it is possible to
orchestrate multi-machine deployments, running certain steps on all
@@ -386,6 +386,7 @@ won't need them for much else.
* Handler names live in a global namespace.
* If two handler tasks have the same name, only one will run.
`* <https://github.com/ansible/ansible/issues/4943>`_
+ * You cannot notify a handler that is defined inside of an include
Roles are described later on, but it's worthwhile to point out that:
diff --git a/docsite/rst/playbooks_lookups.rst b/docsite/rst/playbooks_lookups.rst
index 25560e284d..3c2222c337 100644
--- a/docsite/rst/playbooks_lookups.rst
+++ b/docsite/rst/playbooks_lookups.rst
@@ -240,6 +240,112 @@ If you're not using 2.0 yet, you can do something similar with the credstash too
debug: msg="Poor man's credstash lookup! {{ lookup('pipe', 'credstash -r us-west-1 get my-other-password') }}"
+.. _dns_lookup:
+
+The DNS Lookup (dig)
+````````````````````
+.. versionadded:: 1.9.0
+
+.. warning:: This lookup depends on the `dnspython <http://www.dnspython.org/>`_
+ library.
+
+The ``dig`` lookup runs queries against DNS servers to retrieve DNS records for
+a specific name (*FQDN* - fully qualified domain name). It is possible to lookup any DNS record in this manner.
+
+There are a couple of different syntaxes that can be used to specify what record
+should be retrieved, and for which name. It is also possible to explicitly
+specify the DNS server(s) to use for lookups.
+
+In its simplest form, the ``dig`` lookup plugin can be used to retrieve an IPv4
+address (DNS ``A`` record) associated with *FQDN*:
+
+.. note:: If you need to obtain the ``AAAA`` record (IPv6 address), you must
+ specify the record type explicitly. Syntax for specifying the record
+ type is described below.
+
+.. note:: The trailing dot in most of the examples listed is purely optional,
+ but is specified for completeness/correctness sake.
+
+::
+
+ - debug: msg="The IPv4 address for example.com. is {{ lookup('dig', 'example.com.')}}"
+
+In addition to (default) ``A`` record, it is also possible to specify a different
+record type that should be queried. This can be done by either passing-in
+additional parameter of format ``qtype=TYPE`` to the ``dig`` lookup, or by
+appending ``/TYPE`` to the *FQDN* being queried. For example::
+
+ - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com.', 'qtype=TXT') }}"
+ - debug: msg="The TXT record for gmail.com. is {{ lookup('dig', 'gmail.com./TXT') }}"
+
+If multiple values are associated with the requested record, the results will be
+returned as a comma-separated list. In such cases you may want to pass option
+``wantlist=True`` to the plugin, which will result in the record values being
+returned as a list over which you can iterate later on::
+
+ - debug: msg="One of the MX records for gmail.com. is {{ item }}"
+ with_items: "{{ lookup('dig', 'gmail.com./MX', wantlist=True) }}"
+
+In case of reverse DNS lookups (``PTR`` records), you can also use a convenience
+syntax of format ``IP_ADDRESS/PTR``. The following three lines would produce the
+same output::
+
+ - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8/PTR') }}"
+ - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa./PTR') }}"
+ - debug: msg="Reverse DNS for 8.8.8.8 is {{ lookup('dig', '8.8.8.8.in-addr.arpa.', 'qtype=PTR') }}"
+
+By default, the lookup will rely on system-wide configured DNS servers for
+performing the query. It is also possible to explicitly specify DNS servers to
+query using the ``@DNS_SERVER_1,DNS_SERVER_2,...,DNS_SERVER_N`` notation. This
+needs to be passed-in as an additional parameter to the lookup. For example::
+
+ - debug: msg="Querying 8.8.8.8 for IPv4 address for example.com. produces {{ lookup('dig', 'example.com', '@8.8.8.8') }}"
+
+In some cases the DNS records may hold a more complex data structure, or it may
+be useful to obtain the results in a form of a dictionary for future
+processing. The ``dig`` lookup supports parsing of a number of such records,
+with the result being returned as a dictionary. This way it is possible to
+easily access such nested data. This return format can be requested by
+passing-in the ``flat=0`` option to the lookup. For example::
+
+ - debug: msg="XMPP service for gmail.com. is available at {{ item.target }} on port {{ item.port }}"
+ with_items: "{{ lookup('dig', '_xmpp-server._tcp.gmail.com./SRV', 'flat=0', wantlist=True) }}"
+
+Take note that due to the way Ansible lookups work, you must pass the
+``wantlist=True`` argument to the lookup, otherwise Ansible will report errors.
+
+Currently the dictionary results are supported for the following records:
+
+.. note:: *ALL* is not a record per-se, merely the listed fields are available
+ for any record results you retrieve in the form of a dictionary.
+
+========== =============================================================================
+Record Fields
+---------- -----------------------------------------------------------------------------
+*ALL* owner, ttl, type
+A address
+AAAA address
+CNAME target
+DNAME target
+DLV algorithm, digest_type, key_tag, digest
+DNSKEY flags, algorithm, protocol, key
+DS algorithm, digest_type, key_tag, digest
+HINFO cpu, os
+LOC latitude, longitude, altitude, size, horizontal_precision, vertical_precision
+MX preference, exchange
+NAPTR order, preference, flags, service, regexp, replacement
+NS target
+NSEC3PARAM algorithm, flags, iterations, salt
+PTR target
+RP mbox, txt
+SOA mname, rname, serial, refresh, retry, expire, minimum
+SPF strings
+SRV priority, weight, port, target
+SSHFP algorithm, fp_type, fingerprint
+TLSA usage, selector, mtype, cert
+TXT strings
+========== =============================================================================
+
.. _more_lookups:
More Lookups
diff --git a/docsite/rst/playbooks_loops.rst b/docsite/rst/playbooks_loops.rst
index 9cb8083b9b..6f14922dea 100644
--- a/docsite/rst/playbooks_loops.rst
+++ b/docsite/rst/playbooks_loops.rst
@@ -96,7 +96,7 @@ And you want to print every user's name and phone number. You can loop through
Looping over Files
``````````````````
-``with_file`` iterates over a list of files, setting `item` to the content of each file in sequence. It can be used like this::
+``with_file`` iterates over the content of a list of files, `item` will be set to the content of each file in sequence. It can be used like this::
---
- hosts: all
@@ -516,10 +516,37 @@ Subsequent loops over the registered variable to inspect the results may look li
+.. _looping_over_the_inventory:
+
+Looping over the inventory
+``````````````````````````
+
+If you wish to loop over the inventory, or just a subset of it, there are multiple ways.
+One can use a regular ``with_items`` with the ``play_hosts`` or ``groups`` variables, like this::
+
+ # show all the hosts in the inventory
+ - debug: msg={{ item }}
+ with_items: "{{groups['all']}}"
+
+ # show all the hosts in the current play
+ - debug: msg={{ item }}
+ with_items: play_hosts
+
+There is also a specific lookup plugin ``inventory_hostname`` that can be used like this::
+
+ # show all the hosts in the inventory
+ - debug: msg={{ item }}
+ with_inventory_hostname: all
+
+ # show all the hosts matching the pattern, ie all but the group www
+ - debug: msg={{ item }}
+ with_inventory_hostname: all:!www
+
+More information on the patterns can be found on :doc:`intro_patterns`
+
.. _loops_and_includes:
Loops and Includes
-
``````````````````
In 2.0 you are able to use `with_` loops and task includes (but not playbook includes), this adds the ability to loop over the set of tasks in one shot.
diff --git a/docsite/rst/playbooks_roles.rst b/docsite/rst/playbooks_roles.rst
index 516403ac80..73c9710f51 100644
--- a/docsite/rst/playbooks_roles.rst
+++ b/docsite/rst/playbooks_roles.rst
@@ -132,7 +132,7 @@ Note that you cannot do variable substitution when including one playbook
inside another.
.. note::
- You can not conditionally path the location to an include file,
+ You can not conditionally pass the location to an include file,
like you can with 'vars_files'. If you find yourself needing to do
this, consider how you can restructure your playbook to be more
class/role oriented. This is to say you cannot use a 'fact' to
@@ -191,11 +191,8 @@ This designates the following behaviors, for each role 'x':
- If roles/x/handlers/main.yml exists, handlers listed therein will be added to the play
- If roles/x/vars/main.yml exists, variables listed therein will be added to the play
- If roles/x/meta/main.yml exists, any role dependencies listed therein will be added to the list of roles (1.3 and later)
-- Any copy tasks can reference files in roles/x/files/ without having to path them relatively or absolutely
-- Any script tasks can reference scripts in roles/x/files/ without having to path them relatively or absolutely
-- Any template tasks can reference files in roles/x/templates/ without having to path them relatively or absolutely
-- Any include tasks can reference files in roles/x/tasks/ without having to path them relatively or absolutely
-
+- Any copy, script, template or include tasks (in the role) can reference files in roles/x/{files,templates,tasks}/ (dir depends on task) without having to path them relatively or absolutely
+
In Ansible 1.4 and later you can configure a roles_path to search for roles. Use this to check all of your common roles out to one location, and share
them easily between multiple playbook projects. See :doc:`intro_configuration` for details about how to set this up in ansible.cfg.
@@ -216,8 +213,8 @@ Also, should you wish to parameterize roles, by adding variables, you can do so,
- hosts: webservers
roles:
- common
- - { role: foo_app_instance, dir: '/opt/a', port: 5000 }
- - { role: foo_app_instance, dir: '/opt/b', port: 5001 }
+ - { role: foo_app_instance, dir: '/opt/a', app_port: 5000 }
+ - { role: foo_app_instance, dir: '/opt/b', app_port: 5001 }
While it's probably not something you should do often, you can also conditionally apply roles like so::
@@ -287,7 +284,7 @@ a list of roles and parameters to insert before the specified role, such as the
---
dependencies:
- { role: common, some_parameter: 3 }
- - { role: apache, port: 80 }
+  - { role: apache, apache_port: 80 }
- { role: postgres, dbname: blarg, other_parameter: 12 }
Role dependencies can also be specified as a full path, just like top level roles::
diff --git a/docsite/rst/playbooks_variables.rst b/docsite/rst/playbooks_variables.rst
index 18f1e57f72..122c0ef923 100644
--- a/docsite/rst/playbooks_variables.rst
+++ b/docsite/rst/playbooks_variables.rst
@@ -793,10 +793,10 @@ Basically, anything that goes into "role defaults" (the defaults folder inside t
.. rubric:: Footnotes
-.. [1] Tasks in each role will see their own role's defaults tasks outside of roles will the last role's defaults
-.. [2] Variables defined in inventory file or provided by dynamic inventory
+.. [1] Tasks in each role will see their own role's defaults. Tasks defined outside of a role will see the last role's defaults.
+.. [2] Variables defined in inventory file or provided by dynamic inventory.
-.. note:: Within a any section, redefining a var will overwrite the previous instance.
+.. note:: Within any section, redefining a var will overwrite the previous instance.
If multiple groups have the same variable, the last one loaded wins.
If you define a variable twice in a play's vars: section, the 2nd one wins.
.. note:: the previous describes the default config `hash_behavior=replace`, switch to 'merge' to only partially overwrite.
diff --git a/docsite/rst/porting_guide_2.0.rst b/docsite/rst/porting_guide_2.0.rst
new file mode 100644
index 0000000000..a26763fc14
--- /dev/null
+++ b/docsite/rst/porting_guide_2.0.rst
@@ -0,0 +1,183 @@
+Porting Guide
+=============
+
+
+Playbook
+--------
+
+* backslash escapes When specifying parameters in jinja2 expressions in YAML
+ dicts, backslashes sometimes needed to be escaped twice. This has been fixed
+ in 2.0.x so that escaping once works. The following example shows how
+ playbooks must be modified::
+
+ # Syntax in 1.9.x
+ - debug:
+ msg: "{{ 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') }}"
+ # Syntax in 2.0.x
+ - debug:
+ msg: "{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}"
+
+ # Output:
+ "msg": "test1 1\\3"
+
+To make an escaped string that will work on all versions you have two options::
+
+- debug: msg="{{ 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') }}"
+
+uses key=value escaping which has not changed. The other option is to check for the ansible version::
+
+"{{ (ansible_version|version_compare('ge', '2.0'))|ternary( 'test1_junk 1\\3' | regex_replace('(.*)_junk (.*)', '\\1 \\2') , 'test1_junk 1\\\\3' | regex_replace('(.*)_junk (.*)', '\\\\1 \\\\2') ) }}"
+
+* trailing newline When a string with a trailing newline was specified in the
+ playbook via yaml dict format, the trailing newline was stripped. When
+ specified in key=value format, the trailing newlines were kept. In v2, both
+ methods of specifying the string will keep the trailing newlines. If you
+ relied on the trailing newline being stripped, you can change your playbook
+ using the following as an example::
+
+ # Syntax in 1.9.x
+ vars:
+ message: >
+ Testing
+ some things
+ tasks:
+ - debug:
+ msg: "{{ message }}"
+
+ # Syntax in 2.0.x
+ vars:
+ old_message: >
+ Testing
+ some things
+        message: "{{ old_message[:-1] }}"
+ - debug:
+ msg: "{{ message }}"
+ # Output
+ "msg": "Testing some things"
+
+* When specifying complex args as a variable, the variable must use the full jinja2
+ variable syntax (```{{var_name}}```) - bare variable names there are no longer accepted.
+ In fact, even specifying args with variables has been deprecated, and will not be
+ allowed in future versions::
+
+ ---
+ - hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ my_dirs:
+ - { path: /tmp/3a, state: directory, mode: 0755 }
+ - { path: /tmp/3b, state: directory, mode: 0700 }
+ tasks:
+ - file:
+ args: "{{item}}" # <- args here uses the full variable syntax
+ with_items: my_dirs
+
+* porting task includes
+* More dynamic. Corner-case formats that were not supposed to work now do not, as expected.
+* variables defined in the yaml dict format https://github.com/ansible/ansible/issues/13324
+* templating (variables in playbooks and template lookups) has improved with regard to keeping the original instead of turning everything into a string.
+ If you need the old behavior, quote the value to pass it around as a string.
+* Empty variables and variables set to null in yaml are no longer converted to empty strings. They will retain the value of `None`.
+ You can override the `null_representation` setting to an empty string in your config file by setting the `ANSIBLE_NULL_REPRESENTATION` environment variable.
+* Extras callbacks must be whitelisted in ansible.cfg. Copying is no longer necessary but whitelisting in ansible.cfg must be completed.
+* dnf module has been rewritten. Some minor changes in behavior may be observed.
+* win_updates has been rewritten and works as expected now.
+
+Deprecated
+----------
+
+While all items listed here will show a deprecation warning message, they still work as they did in 1.9.x. Please note that they will be removed in 2.2 (Ansible always waits two major releases to remove a deprecated feature).
+
+* Bare variables in `with_` loops should instead use the “{{var}}” syntax, which helps eliminate ambiguity.
+* The ansible-galaxy text format requirements file. Users should use the YAML format for requirements instead.
+* Undefined variables within a `with_` loop’s list currently do not interrupt the loop, but they do issue a warning; in the future, they will issue an error.
+* Using dictionary variables to set all task parameters is unsafe and will be removed in a future version. For example::
+
+ - hosts: localhost
+ gather_facts: no
+ vars:
+ debug_params:
+ msg: "hello there"
+ tasks:
+ # These are both deprecated:
+ - debug: "{{debug_params}}"
+ - debug:
+ args: "{{debug_params}}"
+
+ # Use this instead:
+ - debug:
+ msg: "{{debug_params['msg']}}"
+
+* Host patterns should use a comma (,) or colon (:) instead of a semicolon (;) to separate hosts/groups in the pattern.
+* Ranges specified in host patterns should use the [x:y] syntax, instead of [x-y].
+* Playbooks using privilege escalation should always use “become*” options rather than the old su*/sudo* options.
+* The “short form” for vars_prompt is no longer supported.
+ For example::
+
+ vars_prompt:
+ variable_name: "Prompt string"
+
+* Specifying variables at the top level of a task include statement is no longer supported. For example::
+
+ - include: foo.yml
+ a: 1
+
+Should now be::
+
+ - include: foo.yml
+ args:
+ a: 1
+
+* Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only.
+* Bare variables in the `environment` dictionary (for plays/tasks/etc.) are no longer supported. Variables specified there should use the full variable syntax: ‘{{foo}}’.
+* Tags should no longer be specified with other parameters in a task include. Instead, they should be specified as an option on the task.
+ For example::
+
+ - include: foo.yml tags=a,b,c
+
+ Should be::
+
+ - include: foo.yml
+ tags: [a, b, c]
+
+* The first_available_file option on tasks has been deprecated. Users should use the with_first_found option or lookup (‘first_found’, …) plugin.
+
+
+Porting plugins
+===============
+
+In ansible-1.9.x, you would generally copy an existing plugin to create a new one. Simply implementing the methods and attributes that the caller of the plugin expected made it a plugin of that type. In ansible-2.0, most plugins are implemented by subclassing a base class for each plugin type. This way the custom plugin does not need to contain methods which are not customized.
+
+
+Lookup plugins
+--------------
+* lookup plugins ; import version
+
+
+Connection plugins
+------------------
+
+* connection plugins
+
+Action plugins
+--------------
+
+* action plugins
+
+Callback plugins
+----------------
+
+* callback plugins
+
+Connection plugins
+------------------
+
+* connection plugins
+
+
+Porting custom scripts
+======================
+
+Custom scripts that used the ``ansible.runner.Runner`` API in 1.x have to be ported in 2.x. Please refer to:
+https://github.com/ansible/ansible/blob/devel/docsite/rst/developing_api.rst
diff --git a/examples/ansible.cfg b/examples/ansible.cfg
index 74aef7a024..b357738b39 100644
--- a/examples/ansible.cfg
+++ b/examples/ansible.cfg
@@ -14,7 +14,6 @@
#inventory = /etc/ansible/hosts
#library = /usr/share/my_modules/
#remote_tmp = $HOME/.ansible/tmp
-#pattern = *
#forks = 5
#poll_interval = 15
#sudo_user = root
@@ -182,7 +181,7 @@
#no_log = False
# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
-#no_target_syslog = True
+#no_target_syslog = False
# controls the compression level of variables sent to
# worker processes. At the default of 0, no compression
@@ -263,3 +262,14 @@
# the default behaviour that copies the existing context or uses the user default
# needs to be changed to use the file system dependent context.
#special_context_filesystems=nfs,vboxsf,fuse,ramfs
+
+[colors]
+#verbose = blue
+#warn = bright purple
+#error = red
+#debug = dark gray
+#deprecate = purple
+#skip = cyan
+#unreachable = red
+#ok = green
+#changed = yellow
diff --git a/examples/hosts b/examples/hosts
index ce4cbb7caa..841f4bc650 100644
--- a/examples/hosts
+++ b/examples/hosts
@@ -10,35 +10,35 @@
# Ex 1: Ungrouped hosts, specify before any group headers.
-green.example.com
-blue.example.com
-192.168.100.1
-192.168.100.10
+## green.example.com
+## blue.example.com
+## 192.168.100.1
+## 192.168.100.10
# Ex 2: A collection of hosts belonging to the 'webservers' group
-[webservers]
-alpha.example.org
-beta.example.org
-192.168.1.100
-192.168.1.110
+## [webservers]
+## alpha.example.org
+## beta.example.org
+## 192.168.1.100
+## 192.168.1.110
# If you have multiple hosts following a pattern you can specify
# them like this:
-www[001:006].example.com
+## www[001:006].example.com
# Ex 3: A collection of database servers in the 'dbservers' group
-[dbservers]
-
-db01.intranet.mydomain.net
-db02.intranet.mydomain.net
-10.25.1.56
-10.25.1.57
+## [dbservers]
+##
+## db01.intranet.mydomain.net
+## db02.intranet.mydomain.net
+## 10.25.1.56
+## 10.25.1.57
# Here's another example of host ranges, this time there are no
# leading 0s:
-db-[99:101]-node.example.com
+## db-[99:101]-node.example.com
diff --git a/hacking/env-setup b/hacking/env-setup
index 433fe7843d..c2872dcc18 100644
--- a/hacking/env-setup
+++ b/hacking/env-setup
@@ -57,10 +57,10 @@ fi
cd "$ANSIBLE_HOME"
if [ "$verbosity" = silent ] ; then
gen_egg_info > /dev/null 2>&1
- find . -type f -name "*.pyc" -delete > /dev/null 2>&1
+ find . -type f -name "*.pyc" -exec rm {} \; > /dev/null 2>&1
else
gen_egg_info
- find . -type f -name "*.pyc" -delete
+ find . -type f -name "*.pyc" -exec rm {} \;
fi
cd "$current_dir"
)
diff --git a/hacking/module_formatter.py b/hacking/module_formatter.py
index f4ab5d7d9a..4c94ca3f2c 100755
--- a/hacking/module_formatter.py
+++ b/hacking/module_formatter.py
@@ -140,7 +140,7 @@ def list_modules(module_dir, depth=0):
if os.path.isdir(d):
res = list_modules(d, depth + 1)
- for key in res.keys():
+ for key in list(res.keys()):
if key in categories:
categories[key] = merge_hash(categories[key], res[key])
res.pop(key, None)
@@ -451,7 +451,7 @@ def main():
categories = list_modules(options.module_dir)
last_category = None
- category_names = categories.keys()
+ category_names = list(categories.keys())
category_names.sort()
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index ad6193463a..a55a40daa0 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -19,5 +19,5 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-__version__ = '2.0.0'
+__version__ = '2.1.0'
__author__ = 'Ansible, Inc.'
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 012872be7c..12ba8f8900 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -32,7 +32,7 @@ import subprocess
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
-from ansible.utils.unicode import to_bytes
+from ansible.utils.unicode import to_bytes, to_unicode
try:
from __main__ import display
@@ -66,7 +66,7 @@ class CLI(object):
LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
- def __init__(self, args):
+ def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
@@ -75,6 +75,7 @@ class CLI(object):
self.options = None
self.parser = None
self.action = None
+ self.callback = callback
def set_action(self):
"""
@@ -104,9 +105,9 @@ class CLI(object):
if self.options.verbosity > 0:
if C.CONFIG_FILE:
- display.display("Using %s as config file" % C.CONFIG_FILE)
+ display.display(u"Using %s as config file" % to_unicode(C.CONFIG_FILE))
else:
- display.display("No config file found; using defaults")
+ display.display(u"No config file found; using defaults")
@staticmethod
def ask_vault_passwords(ask_new_vault_pass=False, rekey=False):
@@ -191,12 +192,9 @@ class CLI(object):
if runas_opts:
# Check for privilege escalation conflicts
- if (op.su or op.su_user or op.ask_su_pass) and \
- (op.sudo or op.sudo_user or op.ask_sudo_pass) or \
- (op.su or op.su_user or op.ask_su_pass) and \
- (op.become or op.become_user or op.become_ask_pass) or \
- (op.sudo or op.sudo_user or op.ask_sudo_pass) and \
- (op.become or op.become_user or op.become_ask_pass):
+ if (op.su or op.su_user) and (op.sudo or op.sudo_user) or \
+ (op.su or op.su_user) and (op.become or op.become_user) or \
+ (op.sudo or op.sudo_user) and (op.become or op.become_user):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
"and su arguments ('-su', '--su-user', and '--ask-su-pass') "
@@ -213,7 +211,7 @@ class CLI(object):
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False):
+ async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False):
''' create an options parser for most ansible scripts '''
# TODO: implement epilog parsing
@@ -246,14 +244,15 @@ class CLI(object):
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
- parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
+ parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file',
help="vault password file", action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('--new-vault-password-file', dest='new_vault_password_file',
help="new vault password file for rekey", action="callback", callback=CLI.expand_tilde, type=str)
parser.add_option('--output', default=None, dest='output_file',
- help='output file name for encrypt or decrypt; use - for stdout')
+ help='output file name for encrypt or decrypt; use - for stdout',
+ action="callback", callback=CLI.expand_tilde, type=str)
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default='all',
@@ -269,10 +268,6 @@ class CLI(object):
if runas_opts:
# priv user defaults to root later on to enable detecting when this option was given here
- parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password (deprecated, use become)')
- parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
- help='ask for su password (deprecated, use become)')
parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
@@ -289,6 +284,12 @@ class CLI(object):
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
parser.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
+
+ if runas_opts or runas_prompt_opts:
+ parser.add_option('-K', '--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
+ help='ask for sudo password (deprecated, use become)')
+ parser.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
+ help='ask for su password (deprecated, use become)')
parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
index 25f29fc297..97df8fcdbf 100644
--- a/lib/ansible/cli/adhoc.py
+++ b/lib/ansible/cli/adhoc.py
@@ -70,7 +70,7 @@ class AdHocCLI(CLI):
help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
default=C.DEFAULT_MODULE_NAME)
- self.options, self.args = self.parser.parse_args()
+ self.options, self.args = self.parser.parse_args(self.args[1:])
if len(self.args) != 1:
raise AnsibleOptionsError("Missing target hosts")
@@ -124,17 +124,13 @@ class AdHocCLI(CLI):
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
variable_manager.set_inventory(inventory)
- hosts = inventory.list_hosts(pattern)
- no_hosts = False
- if len(hosts) == 0:
- display.warning("provided hosts list is empty, only localhost is available")
- no_hosts = True
if self.options.subset:
inventory.subset(self.options.subset)
- if len(inventory.list_hosts(pattern)) == 0 and not no_hosts:
- # Invalid limit
- raise AnsibleError("Specified --limit does not match any hosts")
+
+ hosts = inventory.list_hosts(pattern)
+ if len(hosts) == 0:
+ raise AnsibleError("Specified hosts options do not match any hosts")
if self.options.listhosts:
display.display(' hosts (%d):' % len(hosts))
@@ -158,14 +154,18 @@ class AdHocCLI(CLI):
play_ds = self._play_ds(pattern, self.options.seconds, self.options.poll_interval)
play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)
- if self.options.one_line:
+ if self.callback:
+ cb = self.callback
+ elif self.options.one_line:
cb = 'oneline'
else:
cb = 'minimal'
+ run_tree=False
if self.options.tree:
C.DEFAULT_CALLBACK_WHITELIST.append('tree')
C.TREE_DIR = self.options.tree
+ run_tree=True
# now create a task queue manager to execute the play
self._tqm = None
@@ -177,6 +177,8 @@ class AdHocCLI(CLI):
options=self.options,
passwords=passwords,
stdout_callback=cb,
+ run_additional_callbacks=C.DEFAULT_LOAD_CALLBACK_PLUGINS,
+ run_tree=run_tree,
)
result = self._tqm.run(play)
finally:
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
index 4eef1dd5dd..265b1c9a3f 100644
--- a/lib/ansible/cli/doc.py
+++ b/lib/ansible/cli/doc.py
@@ -62,7 +62,7 @@ class DocCLI(CLI):
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified module(s)')
- self.options, self.args = self.parser.parse_args()
+ self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
def run(self):
@@ -90,7 +90,8 @@ class DocCLI(CLI):
for module in self.args:
try:
- filename = module_loader.find_plugin(module)
+ # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
continue
@@ -167,7 +168,8 @@ class DocCLI(CLI):
if module in module_docs.BLACKLIST_MODULES:
continue
- filename = module_loader.find_plugin(module)
+ # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
+ filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 31c21146fc..476a7d0f89 100644
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -22,10 +22,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import os
import os.path
import sys
import yaml
+import time
from collections import defaultdict
from jinja2 import Environment
@@ -36,6 +36,8 @@ from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
+from ansible.galaxy.login import GalaxyLogin
+from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
try:
@@ -44,14 +46,12 @@ except ImportError:
from ansible.utils.display import Display
display = Display()
-
class GalaxyCLI(CLI):
- VALID_ACTIONS = ("init", "info", "install", "list", "remove", "search")
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
-
+ VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
+
def __init__(self, args):
-
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
@@ -67,7 +67,17 @@ class GalaxyCLI(CLI):
self.set_action()
# options specific to actions
- if self.action == "info":
+ if self.action == "delete":
+ self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
+ elif self.action == "import":
+ self.parser.set_usage("usage: %prog import [options] github_user github_repo")
+ self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True,
+ help='Don\'t wait for import results.')
+ self.parser.add_option('--branch', dest='reference',
+ help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
+ self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
+ help='Check the status of the most recent import request for given github_user/github_repo.')
+ elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
@@ -88,31 +98,42 @@ class GalaxyCLI(CLI):
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
+ elif self.action == "login":
+ self.parser.set_usage("usage: %prog login [options]")
+ self.parser.add_option('--github-token', dest='token', default=None,
+ help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.add_option('--platforms', dest='platforms',
help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags',
help='list of galaxy tags to filter by')
- self.parser.set_usage("usage: %prog search [<search_term>] [--galaxy-tags <galaxy_tag1,galaxy_tag2>] [--platforms platform]")
+ self.parser.add_option('--author', dest='author',
+ help='GitHub username')
+ self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
+ elif self.action == "setup":
+ self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
+ self.parser.add_option('--remove', dest='remove_id', default=None,
+ help='Remove the integration matching the provided ID value. Use --list to see ID values.')
+ self.parser.add_option('--list', dest="setup_list", action='store_true', default=False,
+ help='List all of your integrations.')
# options that apply to more than one action
- if self.action != "init":
+ if not self.action in ("delete","import","init","login","setup"):
self.parser.add_option('-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. '
'The default is the roles_path configured in your '
'ansible.cfg file (/etc/ansible/roles if not configured)')
- if self.action in ("info","init","install","search"):
- self.parser.add_option('-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
+ if self.action in ("import","info","init","install","login","search","setup","delete"):
+ self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER,
help='The API server destination')
- self.parser.add_option('-c', '--ignore-certs', action='store_false', dest='validate_certs', default=True,
+ self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=False,
help='Ignore SSL certificate validation errors.')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False,
help='Force overwriting an existing role')
- # get options, args and galaxy object
self.options, self.args =self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
@@ -120,15 +141,13 @@ class GalaxyCLI(CLI):
return True
def run(self):
-
+
super(GalaxyCLI, self).run()
# if not offline, get connect to galaxy api
- if self.action in ("info","install", "search") or (self.action == 'init' and not self.options.offline):
- api_server = self.options.api_server
- self.api = GalaxyAPI(self.galaxy, api_server)
- if not self.api:
- raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
+ if self.action in ("import","info","install","search","login","setup","delete") or \
+ (self.action == 'init' and not self.options.offline):
+ self.api = GalaxyAPI(self.galaxy)
self.execute()
@@ -188,7 +207,7 @@ class GalaxyCLI(CLI):
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
- # create the default README.md
+ # create default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
@@ -196,9 +215,16 @@ class GalaxyCLI(CLI):
f.write(self.galaxy.default_readme)
f.close()
+ # create default .travis.yml
+ travis = Environment().from_string(self.galaxy.default_travis).render()
+ f = open(os.path.join(role_path, '.travis.yml'), 'w')
+ f.write(travis)
+ f.close()
+
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
+
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
@@ -234,6 +260,20 @@ class GalaxyCLI(CLI):
f.write(rendered_meta)
f.close()
pass
+ elif dir == "tests":
+ # create tests/test.yml
+ inject = dict(
+ role_name = role_name
+ )
+ playbook = Environment().from_string(self.galaxy.default_test).render(inject)
+ f = open(os.path.join(dir_path, 'test.yml'), 'w')
+ f.write(playbook)
+ f.close()
+
+ # create tests/inventory
+ f = open(os.path.join(dir_path, 'inventory'), 'w')
+ f.write('localhost')
+ f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
@@ -325,7 +365,7 @@ class GalaxyCLI(CLI):
for role in required_roles:
role = RoleRequirement.role_yaml_parse(role)
- display.debug('found role %s in yaml file' % str(role))
+ display.vvv('found role %s in yaml file' % str(role))
if 'name' not in role and 'scm' not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
@@ -348,7 +388,7 @@ class GalaxyCLI(CLI):
roles_left.append(GalaxyRole(self.galaxy, rname.strip()))
for role in roles_left:
- display.debug('Installing role %s ' % role.name)
+ display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None and not force:
@@ -458,21 +498,187 @@ class GalaxyCLI(CLI):
return 0
def execute_search(self):
-
+ page_size = 1000
search = None
- if len(self.args) > 1:
- raise AnsibleOptionsError("At most a single search term is allowed.")
- elif len(self.args) == 1:
- search = self.args.pop()
- response = self.api.search_roles(search, self.options.platforms, self.options.tags)
+ if len(self.args):
+ terms = []
+ for i in range(len(self.args)):
+ terms.append(self.args.pop())
+ search = '+'.join(terms[::-1])
- if 'count' in response:
- display.display("Found %d roles matching your search:\n" % response['count'])
+ if not search and not self.options.platforms and not self.options.tags and not self.options.author:
+ raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
+
+ response = self.api.search_roles(search, platforms=self.options.platforms,
+ tags=self.options.tags, author=self.options.author, page_size=page_size)
+
+ if response['count'] == 0:
+ display.display("No roles match your search.", color=C.COLOR_ERROR)
+ return True
data = ''
- if 'results' in response:
- for role in response['results']:
- data += self._display_role_info(role)
+
+ if response['count'] > page_size:
+ data += ("\nFound %d roles matching your search. Showing first %s.\n" % (response['count'], page_size))
+ else:
+ data += ("\nFound %d roles matching your search:\n" % response['count'])
+
+ max_len = []
+ for role in response['results']:
+ max_len.append(len(role['username'] + '.' + role['name']))
+ name_len = max(max_len)
+ format_str = " %%-%ds %%s\n" % name_len
+ data +='\n'
+ data += (format_str % ("Name", "Description"))
+ data += (format_str % ("----", "-----------"))
+ for role in response['results']:
+ data += (format_str % (role['username'] + '.' + role['name'],role['description']))
self.pager(data)
+
+ return True
+
+ def execute_login(self):
+ """
+ Verify user's identify via Github and retreive an auth token from Galaxy.
+ """
+ # Authenticate with github and retrieve a token
+ if self.options.token is None:
+ login = GalaxyLogin(self.galaxy)
+ github_token = login.create_github_token()
+ else:
+ github_token = self.options.token
+
+ galaxy_response = self.api.authenticate(github_token)
+
+ if self.options.token is None:
+ # Remove the token we created
+ login.remove_github_token()
+
+ # Store the Galaxy token
+ token = GalaxyToken()
+ token.set(galaxy_response['token'])
+
+ display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
+ return 0
+
+ def execute_import(self):
+ """
+ Import a role into Galaxy
+ """
+
+ colors = {
+ 'INFO': 'normal',
+ 'WARNING': C.COLOR_WARN,
+ 'ERROR': C.COLOR_ERROR,
+ 'SUCCESS': C.COLOR_OK,
+ 'FAILED': C.COLOR_ERROR,
+ }
+
+ if len(self.args) < 2:
+ raise AnsibleError("Expected a github_username and github_repository. Use --help.")
+
+ github_repo = self.args.pop()
+ github_user = self.args.pop()
+
+ if self.options.check_status:
+ task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
+ else:
+ # Submit an import request
+ task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
+
+ if len(task) > 1:
+ # found multiple roles associated with github_user/github_repo
+ display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
+ color='yellow')
+ display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
+ for t in task:
+ display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
+ display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED)
+ return 0
+ # found a single role as expected
+ display.display("Successfully submitted import request %d" % task[0]['id'])
+ if not self.options.wait:
+ display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
+ display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
+
+ if self.options.check_status or self.options.wait:
+ # Get the status of the import
+ msg_list = []
+ finished = False
+ while not finished:
+ task = self.api.get_import_task(task_id=task[0]['id'])
+ for msg in task[0]['summary_fields']['task_messages']:
+ if msg['id'] not in msg_list:
+ display.display(msg['message_text'], color=colors[msg['message_type']])
+ msg_list.append(msg['id'])
+ if task[0]['state'] in ['SUCCESS', 'FAILED']:
+ finished = True
+ else:
+ time.sleep(10)
+
+ return 0
+
+ def execute_setup(self):
+ """
+ Setup an integration from Github or Travis
+ """
+
+ if self.options.setup_list:
+ # List existing integration secrets
+ secrets = self.api.list_secrets()
+ if len(secrets) == 0:
+ # None found
+ display.display("No integrations found.")
+ return 0
+ display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
+ display.display("---------- ---------- ----------", color=C.COLOR_OK)
+ for secret in secrets:
+ display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
+ secret['github_repo']),color=C.COLOR_OK)
+ return 0
+
+ if self.options.remove_id:
+ # Remove a secret
+ self.api.remove_secret(self.options.remove_id)
+ display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
+ return 0
+
+ if len(self.args) < 4:
+ raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
+ return 0
+
+ secret = self.args.pop()
+ github_repo = self.args.pop()
+ github_user = self.args.pop()
+ source = self.args.pop()
+
+ resp = self.api.add_secret(source, github_user, github_repo, secret)
+ display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
+
+ return 0
+
+ def execute_delete(self):
+ """
+ Delete a role from galaxy.ansible.com
+ """
+
+ if len(self.args) < 2:
+ raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
+
+ github_repo = self.args.pop()
+ github_user = self.args.pop()
+ resp = self.api.delete_role(github_user, github_repo)
+
+ if len(resp['deleted_roles']) > 1:
+ display.display("Deleted the following roles:")
+ display.display("ID User Name")
+ display.display("------ --------------- ----------")
+ for role in resp['deleted_roles']:
+ display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
+
+ display.display(resp['status'])
+
+ return True
+
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
index fc81f96456..dfd06b1920 100644
--- a/lib/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -30,6 +30,7 @@ from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
+from ansible.playbook.play_context import PlayContext
from ansible.utils.vars import load_extra_vars
from ansible.vars import VariableManager
@@ -72,7 +73,7 @@ class PlaybookCLI(CLI):
parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
- self.options, self.args = parser.parse_args()
+ self.options, self.args = parser.parse_args(self.args[1:])
self.parser = parser
@@ -152,18 +153,10 @@ class PlaybookCLI(CLI):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
- i = 1
- for play in p['plays']:
- if play.name:
- playname = play.name
- else:
- playname = '#' + str(i)
-
- msg = "\n PLAY: %s" % (playname)
- mytags = set()
- if self.options.listtags and play.tags:
- mytags = mytags.union(set(play.tags))
- msg += ' TAGS: [%s]' % (','.join(mytags))
+ for idx, play in enumerate(p['plays']):
+ msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
+ mytags = set(play.tags)
+ msg += '\tTAGS: [%s]' % (','.join(mytags))
if self.options.listhosts:
playhosts = set(inventory.get_hosts(play.hosts))
@@ -173,23 +166,40 @@ class PlaybookCLI(CLI):
display.display(msg)
+ all_tags = set()
if self.options.listtags or self.options.listtasks:
- taskmsg = ' tasks:'
+ taskmsg = ''
+ if self.options.listtasks:
+ taskmsg = ' tasks:\n'
+ all_vars = variable_manager.get_vars(loader=loader, play=play)
+ play_context = PlayContext(play=play, options=self.options)
for block in play.compile():
+ block = block.filter_tagged_tasks(play_context, all_vars)
if not block.has_tasks():
continue
- j = 1
for task in block.block:
- taskmsg += "\n %s" % task
- if self.options.listtags and task.tags:
- taskmsg += " TAGS: [%s]" % ','.join(mytags.union(set(task.tags)))
- j = j + 1
+ if task.action == 'meta':
+ continue
+
+ all_tags.update(task.tags)
+ if self.options.listtasks:
+ cur_tags = list(mytags.union(set(task.tags)))
+ cur_tags.sort()
+ if task.name:
+ taskmsg += " %s" % task.get_name()
+ else:
+ taskmsg += " %s" % task.action
+ taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
+
+ if self.options.listtags:
+ cur_tags = list(mytags.union(all_tags))
+ cur_tags.sort()
+ taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
- i = i + 1
return 0
else:
return results
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
index 04586c1d0c..2571717766 100644
--- a/lib/ansible/cli/pull.py
+++ b/lib/ansible/cli/pull.py
@@ -64,18 +64,24 @@ class PullCLI(CLI):
subset_opts=True,
inventory_opts=True,
module_opts=True,
+ runas_prompt_opts=True,
)
# options unique to pull
- self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
+ self.parser.add_option('--purge', default=False, action='store_true',
+ help='purge checkout after playbook run')
self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
help='only run the playbook if the repository has been updated')
self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
help='run the playbook even if the repository could not be updated')
- self.parser.add_option('-d', '--directory', dest='dest', default='~/.ansible/pull', help='directory to checkout repository to')
- self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
+ self.parser.add_option('-d', '--directory', dest='dest', default=None,
+ help='directory to checkout repository to')
+ self.parser.add_option('-U', '--url', dest='url', default=None,
+ help='URL of the playbook repository')
+ self.parser.add_option('--full', dest='fullclone', action='store_true',
+ help='Do a full clone, instead of a shallow one.')
self.parser.add_option('-C', '--checkout', dest='checkout',
help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
@@ -86,7 +92,13 @@ class PullCLI(CLI):
help='verify GPG signature of checked out commit, if it fails abort running the playbook.'
' This needs the corresponding VCS module to support such an operation')
- self.options, self.args = self.parser.parse_args()
+ self.options, self.args = self.parser.parse_args(self.args[1:])
+
+ if not self.options.dest:
+ hostname = socket.getfqdn()
+ # use a hostname dependent directory, in case of $HOME on nfs
+ self.options.dest = os.path.join('~/.ansible/pull', hostname)
+ self.options.dest = os.path.expandvars(os.path.expanduser(self.options.dest))
if self.options.sleep:
try:
@@ -119,7 +131,7 @@ class PullCLI(CLI):
node = platform.node()
host = socket.getfqdn()
limit_opts = 'localhost,%s,127.0.0.1' % ','.join(set([host, node, host.split('.')[0], node.split('.')[0]]))
- base_opts = '-c local "%s"' % limit_opts
+ base_opts = '-c local '
if self.options.verbosity > 0:
base_opts += ' -%s' % ''.join([ "v" for x in range(0, self.options.verbosity) ])
@@ -130,7 +142,7 @@ class PullCLI(CLI):
else:
inv_opts = self.options.inventory
- #TODO: enable more repo modules hg/svn?
+ #FIXME: enable more repo modules hg/svn?
if self.options.module_name == 'git':
repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
if self.options.checkout:
@@ -145,13 +157,17 @@ class PullCLI(CLI):
if self.options.verify:
repo_opts += ' verify_commit=yes'
+ if not self.options.fullclone:
+ repo_opts += ' depth=1'
+
+
path = module_loader.find_plugin(self.options.module_name)
if path is None:
raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))
bin_path = os.path.dirname(os.path.abspath(sys.argv[0]))
- cmd = '%s/ansible -i "%s" %s -m %s -a "%s"' % (
- bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
+ cmd = '%s/ansible -i "%s" %s -m %s -a "%s" "%s"' % (
+ bin_path, inv_opts, base_opts, self.options.module_name, repo_opts, limit_opts
)
for ev in self.options.extra_vars:
@@ -163,6 +179,8 @@ class PullCLI(CLI):
time.sleep(self.options.sleep)
# RUN the Checkout command
+ display.debug("running ansible with VCS module to checkout repo")
+ display.vvvv('EXEC: %s' % cmd)
rc, out, err = run_cmd(cmd, live=True)
if rc != 0:
@@ -174,8 +192,7 @@ class PullCLI(CLI):
display.display("Repository has not changed, quitting.")
return 0
- playbook = self.select_playbook(path)
-
+ playbook = self.select_playbook(self.options.dest)
if playbook is None:
raise AnsibleOptionsError("Could not find a playbook to run.")
@@ -187,16 +204,18 @@ class PullCLI(CLI):
cmd += ' -i "%s"' % self.options.inventory
for ev in self.options.extra_vars:
cmd += ' -e "%s"' % ev
- if self.options.ask_sudo_pass:
- cmd += ' -K'
+ if self.options.ask_sudo_pass or self.options.ask_su_pass or self.options.become_ask_pass:
+ cmd += ' --ask-become-pass'
if self.options.tags:
cmd += ' -t "%s"' % self.options.tags
- if self.options.limit:
- cmd += ' -l "%s"' % self.options.limit
+ if self.options.subset:
+ cmd += ' -l "%s"' % self.options.subset
os.chdir(self.options.dest)
# RUN THE PLAYBOOK COMMAND
+ display.debug("running ansible-playbook to do actual work")
+ display.debug('EXEC: %s' % cmd)
rc, out, err = run_cmd(cmd, live=True)
if self.options.purge:
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
index ac148d4770..9908f17e57 100644
--- a/lib/ansible/cli/vault.py
+++ b/lib/ansible/cli/vault.py
@@ -69,7 +69,7 @@ class VaultCLI(CLI):
elif self.action == "rekey":
self.parser.set_usage("usage: %prog rekey [options] file_name")
- self.options, self.args = self.parser.parse_args()
+ self.options, self.args = self.parser.parse_args(self.args[1:])
display.verbosity = self.options.verbosity
can_output = ['encrypt', 'decrypt']
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 6ecaaac0b3..9b84825d6b 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -120,19 +120,23 @@ DEFAULT_COW_WHITELIST = ['bud-frogs', 'bunny', 'cheese', 'daemon', 'default', 'd
# sections in config file
DEFAULTS='defaults'
+# FIXME: add deprecation warning when these get set
+#### DEPRECATED VARS ####
+# use more sanely named 'inventory'
DEPRECATED_HOST_LIST = get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', '/etc/ansible/hosts', ispath=True)
+# this is not used since 0.5 but people might still have in config
+DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, None)
-# generally configurable things
+#### GENERALLY CONFIGURABLE THINGS ####
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', DEPRECATED_HOST_LIST, ispath=True)
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, ispath=True)
DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', ispath=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
-DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
-DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
+DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', os.getenv('LANG', 'en_US.UTF-8'))
DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', None)
@@ -159,7 +163,7 @@ DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level',
# disclosure
DEFAULT_NO_LOG = get_config(p, DEFAULTS, 'no_log', 'ANSIBLE_NO_LOG', False, boolean=True)
-DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', True, boolean=True)
+DEFAULT_NO_TARGET_SYSLOG = get_config(p, DEFAULTS, 'no_target_syslog', 'ANSIBLE_NO_TARGET_SYSLOG', False, boolean=True)
# selinux
DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf, ramfs', islist=True)
@@ -197,7 +201,7 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
-DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, yum, pkgng, zypper, dnf", islist=True)
+DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apt, dnf, package, pkgng, yum, zypper", islist=True)
# paths
DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', ispath=True)
DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', ispath=True)
@@ -255,12 +259,25 @@ ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_k
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
# galaxy related
-DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
+GALAXY_SERVER = get_config(p, 'galaxy', 'server', 'ANSIBLE_GALAXY_SERVER', 'https://galaxy.ansible.com')
+GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBLE_GALAXY_IGNORE', False, boolean=True)
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', islist=True)
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
+STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], islist=True )
+
+# colors
+COLOR_VERBOSE = get_config(p, 'colors', 'verbose', 'ANSIBLE_COLOR_VERBOSE', 'blue')
+COLOR_WARN = get_config(p, 'colors', 'warn', 'ANSIBLE_COLOR_WARN', 'bright purple')
+COLOR_ERROR = get_config(p, 'colors', 'error', 'ANSIBLE_COLOR_ERROR', 'red')
+COLOR_DEBUG = get_config(p, 'colors', 'debug', 'ANSIBLE_COLOR_DEBUG', 'dark gray')
+COLOR_DEPRECATE = get_config(p, 'colors', 'deprecate', 'ANSIBLE_COLOR_DEPRECATE', 'purple')
+COLOR_SKIP = get_config(p, 'colors', 'skip', 'ANSIBLE_COLOR_SKIP', 'cyan')
+COLOR_UNREACHABLE = get_config(p, 'colors', 'unreachable', 'ANSIBLE_COLOR_UNREACHABLE', 'bright red')
+COLOR_OK = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_OK', 'green')
+COLOR_CHANGED = get_config(p, 'colors', 'ok', 'ANSIBLE_COLOR_CHANGED', 'yellow')
# non-configurable things
MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index a2411b7bef..faf7c33416 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -44,7 +44,7 @@ class AnsibleError(Exception):
which should be returned by the DataLoader() class.
'''
- def __init__(self, message, obj=None, show_content=True):
+ def __init__(self, message="", obj=None, show_content=True):
# we import this here to prevent an import loop problem,
# since the objects code also imports ansible.errors
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
@@ -54,9 +54,9 @@ class AnsibleError(Exception):
if obj and isinstance(obj, AnsibleBaseYAMLObject):
extended_error = self._get_extended_error()
if extended_error:
- self.message = 'ERROR! %s\n\n%s' % (message, to_str(extended_error))
+ self.message = '%s\n\n%s' % (to_str(message), to_str(extended_error))
else:
- self.message = 'ERROR! %s' % message
+ self.message = '%s' % to_str(message)
def __str__(self):
return self.message
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
index 152b5dbb37..ba32273256 100644
--- a/lib/ansible/executor/module_common.py
+++ b/lib/ansible/executor/module_common.py
@@ -39,6 +39,7 @@ REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
+REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
@@ -172,6 +173,7 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
+ module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS))
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
@@ -200,4 +202,3 @@ def modify_module(module_path, module_args, task_vars=dict(), strip_comments=Fal
module_data = b"\n".join(lines)
return (module_data, module_style, shebang)
-
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index fd59478ead..09caeec2d9 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -49,6 +49,7 @@ class HostState:
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_role = None
+ self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
@@ -57,14 +58,32 @@ class HostState:
self.always_child_state = None
def __repr__(self):
- return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % (
+ def _run_state_to_string(n):
+ states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
+ try:
+ return states[n]
+ except IndexError:
+ return "UNKNOWN STATE"
+
+ def _failed_state_to_string(n):
+ states = {1:"FAILED_SETUP", 2:"FAILED_TASKS", 4:"FAILED_RESCUE", 8:"FAILED_ALWAYS"}
+ if n == 0:
+ return "FAILED_NONE"
+ else:
+ ret = []
+ for i in (1, 2, 4, 8):
+ if n & i:
+ ret.append(states[i])
+ return "|".join(ret)
+
+ return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? %s, rescue child state? %s, always child state? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
self.cur_role,
- self.run_state,
- self.fail_state,
+ _run_state_to_string(self.run_state),
+ _failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
@@ -84,6 +103,8 @@ class HostState:
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
+ if self.cur_dep_chain is not None:
+ new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
@@ -119,30 +140,35 @@ class PlayIterator:
self._blocks.append(new_block)
self._host_states = {}
+ start_at_matched = False
for host in inventory.get_hosts(self._play.hosts):
- self._host_states[host.name] = HostState(blocks=self._blocks)
- # if the host's name is in the variable manager's fact cache, then set
- # its _gathered_facts flag to true for smart gathering tests later
- if host.name in variable_manager._fact_cache:
- host._gathered_facts = True
- # if we're looking to start at a specific task, iterate through
- # the tasks for this host until we find the specified task
- if play_context.start_at_task is not None and not start_at_done:
- while True:
- (s, task) = self.get_next_task_for_host(host, peek=True)
- if s.run_state == self.ITERATING_COMPLETE:
- break
- if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
- task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
- # we have our match, so clear the start_at_task field on the
- # play context to flag that we've started at a task (and future
- # plays won't try to advance)
- play_context.start_at_task = None
- break
- else:
- self.get_next_task_for_host(host)
- # finally, reset the host's state to ITERATING_SETUP
- self._host_states[host.name].run_state = self.ITERATING_SETUP
+ self._host_states[host.name] = HostState(blocks=self._blocks)
+ # if the host's name is in the variable manager's fact cache, then set
+ # its _gathered_facts flag to true for smart gathering tests later
+ if host.name in variable_manager._fact_cache:
+ host._gathered_facts = True
+ # if we're looking to start at a specific task, iterate through
+ # the tasks for this host until we find the specified task
+ if play_context.start_at_task is not None and not start_at_done:
+ while True:
+ (s, task) = self.get_next_task_for_host(host, peek=True)
+ if s.run_state == self.ITERATING_COMPLETE:
+ break
+ if task.name == play_context.start_at_task or fnmatch.fnmatch(task.name, play_context.start_at_task) or \
+ task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
+ start_at_matched = True
+ break
+ else:
+ self.get_next_task_for_host(host)
+
+ # finally, reset the host's state to ITERATING_SETUP
+ self._host_states[host.name].run_state = self.ITERATING_SETUP
+
+ if start_at_matched:
+ # we have our match, so clear the start_at_task field on the
+ # play context to flag that we've started at a task (and future
+ # plays won't try to advance)
+ play_context.start_at_task = None
# Extend the play handlers list to include the handlers defined in roles
self._play.handlers.extend(play.compile_roles_handlers())
@@ -189,13 +215,21 @@ class PlayIterator:
s.pending_setup = False
if not task:
+ old_s = s
(s, task) = self._get_next_task_from_state(s, peek=peek)
+ def _roles_are_different(ra, rb):
+ if ra != rb:
+ return True
+ else:
+ return old_s.cur_dep_chain != task._block._dep_chain
+
if task and task._role:
# if we had a current role, mark that role as completed
- if s.cur_role and task._role != s.cur_role and host.name in s.cur_role._had_task_run and not peek:
+ if s.cur_role and _roles_are_different(task._role, s.cur_role) and host.name in s.cur_role._had_task_run and not peek:
s.cur_role._completed[host.name] = True
s.cur_role = task._role
+ s.cur_dep_chain = task._block._dep_chain
if not peek:
self._host_states[host.name] = s
@@ -324,13 +358,21 @@ class PlayIterator:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
- state.run_state = self.ITERATING_RESCUE
+ if state._blocks[state.cur_block].rescue:
+ state.run_state = self.ITERATING_RESCUE
+ elif state._blocks[state.cur_block].always:
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
- state.run_state = self.ITERATING_ALWAYS
+ if state._blocks[state.cur_block].always:
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
@@ -347,6 +389,28 @@ class PlayIterator:
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
+ def _check_failed_state(self, state):
+ if state is None:
+ return False
+ elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
+ return True
+ elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
+ return True
+ elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
+ return True
+ elif state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE:
+ if state.run_state == self.ITERATING_RESCUE and state.fail_state&self.FAILED_RESCUE == 0:
+ return False
+ elif state.run_state == self.ITERATING_ALWAYS and state.fail_state&self.FAILED_ALWAYS == 0:
+ return False
+ else:
+ return True
+ return False
+
+ def is_failed(self, host):
+ s = self.get_host_state(host)
+ return self._check_failed_state(s)
+
def get_original_task(self, host, task):
'''
Finds the task in the task list which matches the UUID of the given task.
@@ -396,7 +460,8 @@ class PlayIterator:
return None
def _insert_tasks_into_state(self, state, task_list):
- if state.fail_state != self.FAILED_NONE:
+ # if we've failed at all, or if the task list is empty, just return the current state
+ if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
index b6cbf2b1ba..bcfe1bebbe 100644
--- a/lib/ansible/executor/playbook_executor.py
+++ b/lib/ansible/executor/playbook_executor.py
@@ -31,8 +31,6 @@ from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.playbook import Playbook
from ansible.template import Templar
-from ansible.utils.color import colorize, hostcolor
-from ansible.utils.encrypt import do_encrypt
from ansible.utils.unicode import to_unicode
try:
@@ -83,6 +81,10 @@ class PlaybookExecutor:
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
+ else:
+ # make sure the tqm has callbacks loaded
+ self._tqm.load_callbacks()
+ self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
@@ -108,10 +110,12 @@ class PlaybookExecutor:
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
- if vname not in play.vars:
+ if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
- play.vars[vname] = self._do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
+ play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
+ else: # we are either in --list-<option> or syntax check
+ play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
@@ -128,8 +132,6 @@ class PlaybookExecutor:
entry['plays'].append(new_play)
else:
- # make sure the tqm has callbacks loaded
- self._tqm.load_callbacks()
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
# we are actually running plays
@@ -149,9 +151,7 @@ class PlaybookExecutor:
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts)
- if new_play.any_errors_fatal and failed_hosts_count > 0:
- break
- elif new_play.max_fail_percentage is not None and \
+ if new_play.max_fail_percentage is not None and \
int((new_play.max_fail_percentage)/100.0 * len(batch)) > int((len(batch) - failed_hosts_count) / len(batch) * 100.0):
break
elif len(batch) == failed_hosts_count:
@@ -171,6 +171,10 @@ class PlaybookExecutor:
if entry:
entrylist.append(entry) # per playbook
+ # send the stats callback for this playbook
+ if self._tqm is not None:
+ self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
+
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
@@ -186,35 +190,6 @@ class PlaybookExecutor:
display.display("No issues encountered")
return result
- # TODO: this stat summary stuff should be cleaned up and moved
- # to a new method, if it even belongs here...
- display.banner("PLAY RECAP")
-
- hosts = sorted(self._tqm._stats.processed.keys())
- for h in hosts:
- t = self._tqm._stats.summarize(h)
-
- display.display(u"%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize(u'ok', t['ok'], 'green'),
- colorize(u'changed', t['changed'], 'yellow'),
- colorize(u'unreachable', t['unreachable'], 'red'),
- colorize(u'failed', t['failures'], 'red')),
- screen_only=True
- )
-
- display.display(u"%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize(u'ok', t['ok'], None),
- colorize(u'changed', t['changed'], None),
- colorize(u'unreachable', t['unreachable'], None),
- colorize(u'failed', t['failures'], None)),
- log_only=True
- )
-
- display.display("", screen_only=True)
- # END STATS STUFF
-
return result
def _cleanup(self, signum=None, framenum=None):
@@ -258,48 +233,3 @@ class PlaybookExecutor:
return serialized_batches
- def _do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
-
- if sys.__stdin__.isatty():
- if prompt and default is not None:
- msg = "%s [%s]: " % (prompt, default)
- elif prompt:
- msg = "%s: " % prompt
- else:
- msg = 'input for %s: ' % varname
-
- def do_prompt(prompt, private):
- if sys.stdout.encoding:
- msg = prompt.encode(sys.stdout.encoding)
- else:
- # when piping the output, or at other times when stdout
- # may not be the standard file descriptor, the stdout
- # encoding may not be set, so default to something sane
- msg = prompt.encode(locale.getpreferredencoding())
- if private:
- return getpass.getpass(msg)
- return raw_input(msg)
-
- if confirm:
- while True:
- result = do_prompt(msg, private)
- second = do_prompt("confirm " + msg, private)
- if result == second:
- break
- display.display("***** VALUES ENTERED DO NOT MATCH ****")
- else:
- result = do_prompt(msg, private)
- else:
- result = None
- display.warning("Not prompting as we are not in interactive mode")
-
- # if result is false and default is not None
- if not result and default is not None:
- result = default
-
- if encrypt:
- result = do_encrypt(result, encrypt, salt_size, salt)
-
- # handle utf-8 chars
- result = to_unicode(result, errors='strict')
- return result
diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py
index 2d13aa44cd..13c91b3ba7 100644
--- a/lib/ansible/executor/process/result.py
+++ b/lib/ansible/executor/process/result.py
@@ -58,7 +58,7 @@ class ResultProcess(multiprocessing.Process):
def _send_result(self, result):
debug(u"sending result: %s" % ([text_type(x) for x in result],))
- self._final_q.put(result, block=False)
+ self._final_q.put(result)
debug("done sending result")
def _read_worker_result(self):
@@ -73,7 +73,7 @@ class ResultProcess(multiprocessing.Process):
try:
if not rslt_q.empty():
debug("worker %d has data to read" % self._cur_worker)
- result = rslt_q.get(block=False)
+ result = rslt_q.get()
debug("got a result from worker %d: %s" % (self._cur_worker, result))
break
except queue.Empty:
@@ -101,7 +101,7 @@ class ResultProcess(multiprocessing.Process):
try:
result = self._read_worker_result()
if result is None:
- time.sleep(0.01)
+ time.sleep(0.0001)
continue
clean_copy = strip_internal_keys(result._result)
@@ -110,7 +110,7 @@ class ResultProcess(multiprocessing.Process):
# if this task is registering a result, do it now
if result._task.register:
- self._send_result(('register_host_var', result._host, result._task.register, clean_copy))
+ self._send_result(('register_host_var', result._host, result._task, clean_copy))
# send callbacks, execute other options based on the result status
# TODO: this should all be cleaned up and probably moved to a sub-function.
@@ -142,8 +142,6 @@ class ResultProcess(multiprocessing.Process):
# notifies all other threads
for notify in result_item['_ansible_notify']:
self._send_result(('notify_handler', result, notify))
- # now remove the notify field from the results, as its no longer needed
- result_item.pop('_ansible_notify')
if 'add_host' in result_item:
# this task added a new host (add_host module)
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
index 1cc1f7df43..73f5faa78b 100644
--- a/lib/ansible/executor/process/worker.py
+++ b/lib/ansible/executor/process/worker.py
@@ -59,12 +59,18 @@ class WorkerProcess(multiprocessing.Process):
for reading later.
'''
- def __init__(self, tqm, main_q, rslt_q, loader):
+ def __init__(self, rslt_q, task_vars, host, task, play_context, loader, variable_manager, shared_loader_obj):
+ super(WorkerProcess, self).__init__()
# takes a task queue manager as the sole param:
- self._main_q = main_q
- self._rslt_q = rslt_q
- self._loader = loader
+ self._rslt_q = rslt_q
+ self._task_vars = task_vars
+ self._host = host
+ self._task = task
+ self._play_context = play_context
+ self._loader = loader
+ self._variable_manager = variable_manager
+ self._shared_loader_obj = shared_loader_obj
# dupe stdin, if we have one
self._new_stdin = sys.stdin
@@ -82,8 +88,6 @@ class WorkerProcess(multiprocessing.Process):
# couldn't get stdin's fileno, so we just carry on
pass
- super(WorkerProcess, self).__init__()
-
def run(self):
'''
Called when the process is started, and loops indefinitely
@@ -97,72 +101,45 @@ class WorkerProcess(multiprocessing.Process):
if HAS_ATFORK:
atfork()
- while True:
- task = None
- try:
- debug("waiting for a message...")
- (host, task, basedir, zip_vars, hostvars, compressed_vars, play_context, shared_loader_obj) = self._main_q.get()
-
- if compressed_vars:
- job_vars = json.loads(zlib.decompress(zip_vars))
- else:
- job_vars = zip_vars
- job_vars['hostvars'] = hostvars
-
- debug("there's work to be done! got a task/handler to work on: %s" % task)
-
- # because the task queue manager starts workers (forks) before the
- # playbook is loaded, set the basedir of the loader inherted by
- # this fork now so that we can find files correctly
- self._loader.set_basedir(basedir)
-
- # Serializing/deserializing tasks does not preserve the loader attribute,
- # since it is passed to the worker during the forking of the process and
- # would be wasteful to serialize. So we set it here on the task now, and
- # the task handles updating parent/child objects as needed.
- task.set_loader(self._loader)
-
- # execute the task and build a TaskResult from the result
- debug("running TaskExecutor() for %s/%s" % (host, task))
- executor_result = TaskExecutor(
- host,
- task,
- job_vars,
- play_context,
- self._new_stdin,
- self._loader,
- shared_loader_obj,
- ).run()
- debug("done running TaskExecutor() for %s/%s" % (host, task))
- task_result = TaskResult(host, task, executor_result)
-
- # put the result on the result queue
- debug("sending task result")
- self._rslt_q.put(task_result)
- debug("done sending task result")
-
- except queue.Empty:
- pass
- except AnsibleConnectionFailure:
+ try:
+ # execute the task and build a TaskResult from the result
+ debug("running TaskExecutor() for %s/%s" % (self._host, self._task))
+ executor_result = TaskExecutor(
+ self._host,
+ self._task,
+ self._task_vars,
+ self._play_context,
+ self._new_stdin,
+ self._loader,
+ self._shared_loader_obj,
+ ).run()
+
+ debug("done running TaskExecutor() for %s/%s" % (self._host, self._task))
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(self._host, self._task, executor_result)
+
+ # put the result on the result queue
+ debug("sending task result")
+ self._rslt_q.put(task_result)
+ debug("done sending task result")
+
+ except AnsibleConnectionFailure:
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(self._host, self._task, dict(unreachable=True))
+ self._rslt_q.put(task_result, block=False)
+
+ except Exception as e:
+ if not isinstance(e, (IOError, EOFError, KeyboardInterrupt)) or isinstance(e, TemplateNotFound):
try:
- if task:
- task_result = TaskResult(host, task, dict(unreachable=True))
- self._rslt_q.put(task_result, block=False)
+ self._host.vars = dict()
+ self._host.groups = []
+ task_result = TaskResult(self._host, self._task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
+ self._rslt_q.put(task_result, block=False)
except:
- break
- except Exception as e:
- if isinstance(e, (IOError, EOFError, KeyboardInterrupt)) and not isinstance(e, TemplateNotFound):
- break
- else:
- try:
- if task:
- task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
- self._rslt_q.put(task_result, block=False)
- except:
- debug("WORKER EXCEPTION: %s" % e)
- debug("WORKER EXCEPTION: %s" % traceback.format_exc())
- break
+ debug("WORKER EXCEPTION: %s" % e)
+ debug("WORKER EXCEPTION: %s" % traceback.format_exc())
debug("WORKER PROCESS EXITING")
-
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index 2dcb5f9631..1417bc9d2c 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -35,7 +35,7 @@ from ansible.template import Templar
from ansible.utils.encrypt import key_for_hostname
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
-from ansible.vars.unsafe_proxy import UnsafeProxy
+from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
try:
from __main__ import display
@@ -67,6 +67,7 @@ class TaskExecutor:
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
+ self._connection = None
def run(self):
'''
@@ -145,7 +146,7 @@ class TaskExecutor:
except AttributeError:
pass
except Exception as e:
- display.debug("error closing connection: %s" % to_unicode(e))
+ display.debug(u"error closing connection: %s" % to_unicode(e))
def _get_loop_items(self):
'''
@@ -153,16 +154,19 @@ class TaskExecutor:
and returns the items result.
'''
- # create a copy of the job vars here so that we can modify
- # them temporarily without changing them too early for other
- # parts of the code that might still need a pristine version
- #vars_copy = self._job_vars.copy()
- vars_copy = self._job_vars
+ # save the play context variables to a temporary dictionary,
+ # so that we can modify the job vars without doing a full copy
+ # and later restore them to avoid modifying things too early
+ play_context_vars = dict()
+ self._play_context.update_vars(play_context_vars)
- # now we update them with the play context vars
- self._play_context.update_vars(vars_copy)
+ old_vars = dict()
+ for k in play_context_vars.keys():
+ if k in self._job_vars:
+ old_vars[k] = self._job_vars[k]
+ self._job_vars[k] = play_context_vars[k]
- templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=vars_copy)
+ templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=self._job_vars)
items = None
if self._task.loop:
if self._task.loop in self._shared_loader_obj.lookup_loader:
@@ -179,16 +183,25 @@ class TaskExecutor:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar,
loader=self._loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
- if 'has no attribute' in str(e):
+ if u'has no attribute' in to_unicode(e):
loop_terms = []
display.deprecated("Skipping task due to undefined attribute, in the future this will be a fatal error.")
else:
raise
items = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader,
- templar=templar).run(terms=loop_terms, variables=vars_copy)
+ templar=templar).run(terms=loop_terms, variables=self._job_vars)
else:
raise AnsibleError("Unexpected failure in finding the lookup named '%s' in the available lookup plugins" % self._task.loop)
+ # now we restore any old job variables that may have been modified,
+ # and delete them if they were in the play context vars but not in
+ # the old variables dictionary
+ for k in play_context_vars.keys():
+ if k in old_vars:
+ self._job_vars[k] = old_vars[k]
+ else:
+ del self._job_vars[k]
+
if items:
from ansible.vars.unsafe_proxy import UnsafeProxy
for idx, item in enumerate(items):
@@ -218,7 +231,7 @@ class TaskExecutor:
tmp_task = self._task.copy()
tmp_play_context = self._play_context.copy()
except AnsibleParserError as e:
- results.append(dict(failed=True, msg=str(e)))
+ results.append(dict(failed=True, msg=to_unicode(e)))
continue
# now we swap the internal task and play context with their copies,
@@ -232,6 +245,7 @@ class TaskExecutor:
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
+ #TODO: send item results to callback here, instead of all at the end
results.append(res)
return results
@@ -302,6 +316,11 @@ class TaskExecutor:
# do the same kind of post validation step on it here before we use it.
self._play_context.post_validate(templar=templar)
+ # now that the play context is finalized, if the remote_addr is not set
+ # default to using the host's address field as the remote address
+ if not self._play_context.remote_addr:
+ self._play_context.remote_addr = self._host.address
+
# We also add "magic" variables back into the variables dict to make sure
# a certain subset of variables exist.
self._play_context.update_vars(variables)
@@ -348,8 +367,13 @@ class TaskExecutor:
self._task.args = variable_params
# get the connection and the handler for this execution
- self._connection = self._get_connection(variables=variables, templar=templar)
- self._connection.set_host_overrides(host=self._host)
+ if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
+ self._connection = self._get_connection(variables=variables, templar=templar)
+ self._connection.set_host_overrides(host=self._host)
+ else:
+ # if connection is reused, its _play_context is no longer valid and needs
+ # to be replaced with the one templated above, in case other data changed
+ self._connection._play_context = self._play_context
self._handler = self._get_action_handler(connection=self._connection, templar=templar)
@@ -372,30 +396,36 @@ class TaskExecutor:
# make a copy of the job vars here, in case we need to update them
# with the registered variable value later on when testing conditions
- #vars_copy = variables.copy()
vars_copy = variables.copy()
display.debug("starting attempt loop")
result = None
for attempt in range(retries):
if attempt > 0:
- display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color="dark gray")
+ display.display("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result), color=C.COLOR_DEBUG)
result['attempts'] = attempt + 1
display.debug("running the handler")
try:
result = self._handler.run(task_vars=variables)
except AnsibleConnectionFailure as e:
- return dict(unreachable=True, msg=str(e))
+ return dict(unreachable=True, msg=to_unicode(e))
display.debug("handler run complete")
+ # update the local copy of vars with the registered value, if specified,
+ # or any facts which may have been generated by the module execution
+ if self._task.register:
+ vars_copy[self._task.register] = wrap_var(result.copy())
+
if self._task.async > 0:
# the async_wrapper module returns dumped JSON via its stdout
# response, so we parse it here and replace the result
try:
+ if 'skipped' in result and result['skipped'] or 'failed' in result and result['failed']:
+ return result
result = json.loads(result.get('stdout'))
except (TypeError, ValueError) as e:
- return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
+ return dict(failed=True, msg=u"The async task did not return valid JSON: %s" % to_unicode(e))
if self._task.poll > 0:
result = self._poll_async_result(result=result, templar=templar)
@@ -416,11 +446,6 @@ class TaskExecutor:
return failed_when_result
return False
- # update the local copy of vars with the registered value, if specified,
- # or any facts which may have been generated by the module execution
- if self._task.register:
- vars_copy[self._task.register] = result
-
if 'ansible_facts' in result:
vars_copy.update(result['ansible_facts'])
@@ -437,7 +462,7 @@ class TaskExecutor:
if attempt < retries - 1:
cond = Conditional(loader=self._loader)
- cond.when = self._task.until
+ cond.when = [ self._task.until ]
if cond.evaluate_conditional(templar, vars_copy):
break
@@ -450,7 +475,7 @@ class TaskExecutor:
# do the final update of the local variables here, for both registered
# values and any facts which may have been created
if self._task.register:
- variables[self._task.register] = result
+ variables[self._task.register] = wrap_var(result)
if 'ansible_facts' in result:
variables.update(result['ansible_facts'])
@@ -528,9 +553,6 @@ class TaskExecutor:
correct connection object from the list of connection plugins
'''
- if not self._play_context.remote_addr:
- self._play_context.remote_addr = self._host.address
-
if self._task.delegate_to is not None:
# since we're delegating, we don't want to use interpreter values
# which would have been set for the original target host
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
index 001d71e9e0..b5260c1f41 100644
--- a/lib/ansible/executor/task_queue_manager.py
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from multiprocessing.managers import SyncManager, DictProxy
import multiprocessing
import os
import tempfile
@@ -32,6 +33,8 @@ from ansible.executor.stats import AggregateStats
from ansible.playbook.play_context import PlayContext
from ansible.plugins import callback_loader, strategy_loader, module_loader
from ansible.template import Templar
+from ansible.vars.hostvars import HostVars
+from ansible.plugins.callback import CallbackBase
try:
from __main__ import display
@@ -54,7 +57,7 @@ class TaskQueueManager:
which dispatches the Play's tasks to hosts.
'''
- def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None):
+ def __init__(self, inventory, variable_manager, loader, options, passwords, stdout_callback=None, run_additional_callbacks=True, run_tree=False):
self._inventory = inventory
self._variable_manager = variable_manager
@@ -63,6 +66,8 @@ class TaskQueueManager:
self._stats = AggregateStats()
self.passwords = passwords
self._stdout_callback = stdout_callback
+ self._run_additional_callbacks = run_additional_callbacks
+ self._run_tree = run_tree
self._callbacks_loaded = False
self._callback_plugins = []
@@ -94,14 +99,10 @@ class TaskQueueManager:
def _initialize_processes(self, num):
self._workers = []
- for i in xrange(num):
+ for i in range(num):
main_q = multiprocessing.Queue()
rslt_q = multiprocessing.Queue()
-
- prc = WorkerProcess(self, main_q, rslt_q, self._loader)
- prc.start()
-
- self._workers.append((prc, main_q, rslt_q))
+ self._workers.append([None, main_q, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
@@ -142,8 +143,16 @@ class TaskQueueManager:
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
- if self._stdout_callback not in callback_loader:
- raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
+ if isinstance(self._stdout_callback, CallbackBase):
+ stdout_callback_loaded = True
+ elif isinstance(self._stdout_callback, basestring):
+ if self._stdout_callback not in callback_loader:
+ raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
+ else:
+ self._stdout_callback = callback_loader.get(self._stdout_callback)
+ stdout_callback_loaded = True
+ else:
+ raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
@@ -157,7 +166,9 @@ class TaskQueueManager:
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
- elif callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST):
+ elif callback_name == 'tree' and self._run_tree:
+ pass
+ elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
@@ -173,11 +184,6 @@ class TaskQueueManager:
are done with the current task).
'''
- # Fork # of forks, # of hosts or serial, whichever is lowest
- contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(play.hosts))]
- contenders = [ v for v in contenders if v is not None and v > 0 ]
- self._initialize_processes(min(contenders))
-
if not self._callbacks_loaded:
self.load_callbacks()
@@ -187,6 +193,17 @@ class TaskQueueManager:
new_play = play.copy()
new_play.post_validate(templar)
+ self.hostvars = HostVars(
+ inventory=self._inventory,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ )
+
+ # Fork # of forks, # of hosts or serial, whichever is lowest
+ contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
+ contenders = [ v for v in contenders if v is not None and v > 0 ]
+ self._initialize_processes(min(contenders))
+
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
@@ -236,7 +253,8 @@ class TaskQueueManager:
for (worker_prc, main_q, rslt_q) in self._workers:
rslt_q.close()
main_q.close()
- worker_prc.terminate()
+ if worker_prc and worker_prc.is_alive():
+ worker_prc.terminate()
def clear_failed_hosts(self):
self._failed_hosts = dict()
@@ -260,7 +278,7 @@ class TaskQueueManager:
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
- for callback_plugin in self._callback_plugins:
+ for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
@@ -272,10 +290,28 @@ class TaskQueueManager:
for method in methods:
if method is not None:
try:
- method(*args, **kwargs)
+ # temporary hack, required due to a change in the callback API, so
+ # we don't break backwards compatibility with callbacks which were
+ # designed to use the original API
+ # FIXME: target for removal and revert to the original code here
+ # after a year (2017-01-14)
+ if method_name == 'v2_playbook_on_start':
+ import inspect
+ (f_args, f_varargs, f_keywords, f_defaults) = inspect.getargspec(method)
+ if 'playbook' in f_args:
+ method(*args, **kwargs)
+ else:
+ method()
+ else:
+ method(*args, **kwargs)
except Exception as e:
+ import traceback
+ orig_tb = traceback.format_exc()
try:
v1_method = method.replace('v2_','')
v1_method(*args, **kwargs)
except Exception:
- display.warning('Error when using %s: %s' % (method, str(e)))
+ if display.verbosity >= 3:
+ display.warning(orig_tb, formatted=True)
+ else:
+ display.warning('Error when using %s: %s' % (method, str(e)))
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
index 00d8c25aec..e526b0aa87 100644
--- a/lib/ansible/galaxy/__init__.py
+++ b/lib/ansible/galaxy/__init__.py
@@ -49,9 +49,34 @@ class Galaxy(object):
this_dir, this_filename = os.path.split(__file__)
self.DATA_PATH = os.path.join(this_dir, "data")
- #TODO: move to getter for lazy loading
- self.default_readme = self._str_from_data_file('readme')
- self.default_meta = self._str_from_data_file('metadata_template.j2')
+ self._default_readme = None
+ self._default_meta = None
+ self._default_test = None
+ self._default_travis = None
+
+ @property
+ def default_readme(self):
+ if self._default_readme is None:
+ self._default_readme = self._str_from_data_file('readme')
+ return self._default_readme
+
+ @property
+ def default_meta(self):
+ if self._default_meta is None:
+ self._default_meta = self._str_from_data_file('metadata_template.j2')
+ return self._default_meta
+
+ @property
+ def default_test(self):
+ if self._default_test is None:
+ self._default_test = self._str_from_data_file('test_playbook.j2')
+ return self._default_test
+
+ @property
+ def default_travis(self):
+ if self._default_travis is None:
+ self._default_travis = self._str_from_data_file('travis.j2')
+ return self._default_travis
def add_role(self, role):
self.roles[role.name] = role
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
index 2918688406..eec9ee932e 100644
--- a/lib/ansible/galaxy/api.py
+++ b/lib/ansible/galaxy/api.py
@@ -25,11 +25,15 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
+import urllib
+
from urllib2 import quote as urlquote, HTTPError
from urlparse import urlparse
+import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
+from ansible.galaxy.token import GalaxyToken
try:
from __main__ import display
@@ -43,45 +47,111 @@ class GalaxyAPI(object):
SUPPORTED_VERSIONS = ['v1']
- def __init__(self, galaxy, api_server):
-
+ def __init__(self, galaxy):
self.galaxy = galaxy
+ self.token = GalaxyToken()
+ self._api_server = C.GALAXY_SERVER
+ self._validate_certs = not C.GALAXY_IGNORE_CERTS
+
+ # set validate_certs
+ if galaxy.options.ignore_certs:
+ self._validate_certs = False
+ display.vvv('Validate TLS certificates: %s' % self._validate_certs)
+
+ # set the API server
+ if galaxy.options.api_server != C.GALAXY_SERVER:
+ self._api_server = galaxy.options.api_server
+ display.vvv("Connecting to galaxy_server: %s" % self._api_server)
+
+ server_version = self.get_server_api_version()
+ if not server_version in self.SUPPORTED_VERSIONS:
+ raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
- try:
- urlparse(api_server, scheme='https')
- except:
- raise AnsibleError("Invalid server API url passed: %s" % api_server)
+ self.baseurl = '%s/api/%s' % (self._api_server, server_version)
+ self.version = server_version # for future use
+ display.vvv("Base API: %s" % self.baseurl)
- server_version = self.get_server_api_version('%s/api/' % (api_server))
- if not server_version:
- raise AnsibleError("Could not retrieve server API version: %s" % api_server)
+ def __auth_header(self):
+ token = self.token.get()
+ if token is None:
+ raise AnsibleError("No access token. You must first use login to authenticate and obtain an access token.")
+ return {'Authorization': 'Token ' + token}
- if server_version in self.SUPPORTED_VERSIONS:
- self.baseurl = '%s/api/%s' % (api_server, server_version)
- self.version = server_version # for future use
- display.vvvvv("Base API: %s" % self.baseurl)
- else:
- raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
+ def __call_galaxy(self, url, args=None, headers=None, method=None):
+ if args and not headers:
+ headers = self.__auth_header()
+ try:
+ display.vvv(url)
+ resp = open_url(url, data=args, validate_certs=self._validate_certs, headers=headers, method=method)
+ data = json.load(resp)
+ except HTTPError as e:
+ res = json.load(e)
+ raise AnsibleError(res['detail'])
+ return data
- def get_server_api_version(self, api_server):
+ @property
+ def api_server(self):
+ return self._api_server
+
+ @property
+ def validate_certs(self):
+ return self._validate_certs
+
+ def get_server_api_version(self):
"""
Fetches the Galaxy API current version to ensure
the API server is up and reachable.
"""
- #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
- # also should set baseurl using supported_versions which has path
- return 'v1'
-
try:
- data = json.load(open_url(api_server, validate_certs=self.galaxy.options.validate_certs))
- return data.get("current_version", 'v1')
- except Exception:
- # TODO: report error
- return None
+ url = '%s/api/' % self._api_server
+ data = json.load(open_url(url, validate_certs=self._validate_certs))
+ return data['current_version']
+ except Exception as e:
+ raise AnsibleError("The API server (%s) is not responding, please try again later." % url)
+
+ def authenticate(self, github_token):
+ """
+ Retrieve an authentication token
+ """
+ url = '%s/tokens/' % self.baseurl
+ args = urllib.urlencode({"github_token": github_token})
+ resp = open_url(url, data=args, validate_certs=self._validate_certs, method="POST")
+ data = json.load(resp)
+ return data
+ def create_import_task(self, github_user, github_repo, reference=None):
+ """
+ Post an import request
+ """
+ url = '%s/imports/' % self.baseurl
+ args = urllib.urlencode({
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "github_reference": reference if reference else ""
+ })
+ data = self.__call_galaxy(url, args=args)
+ if data.get('results', None):
+ return data['results']
+ return data
+
+ def get_import_task(self, task_id=None, github_user=None, github_repo=None):
+ """
+ Check the status of an import task.
+ """
+ url = '%s/imports/' % self.baseurl
+ if not task_id is None:
+ url = "%s?id=%d" % (url,task_id)
+ elif not github_user is None and not github_repo is None:
+ url = "%s?github_user=%s&github_repo=%s" % (url,github_user,github_repo)
+ else:
+ raise AnsibleError("Expected task_id or github_user and github_repo")
+
+ data = self.__call_galaxy(url)
+ return data['results']
+
def lookup_role_by_name(self, role_name, notify=True):
"""
- Find a role by name
+ Find a role by name.
"""
role_name = urlquote(role_name)
@@ -92,18 +162,12 @@ class GalaxyAPI(object):
if notify:
display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
except:
- raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
+ raise AnsibleError("Invalid role name (%s). Specify role as format: username.rolename" % role_name)
url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
- display.vvvv("- %s" % (url))
- try:
- data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
- if len(data["results"]) != 0:
- return data["results"][0]
- except:
- # TODO: report on connection/availability errors
- pass
-
+ data = self.__call_galaxy(url)
+ if len(data["results"]) != 0:
+ return data["results"][0]
return None
def fetch_role_related(self, related, role_id):
@@ -114,13 +178,12 @@ class GalaxyAPI(object):
try:
url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
- data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
+ data = self.__call_galaxy(url)
results = data['results']
done = (data.get('next', None) is None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
- display.display(url)
- data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
+ data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next', None) is None)
return results
@@ -131,10 +194,9 @@ class GalaxyAPI(object):
"""
Fetch the list of items specified.
"""
-
try:
url = '%s/%s/?page_size' % (self.baseurl, what)
- data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
+ data = self.__call_galaxy(url)
if "results" in data:
results = data['results']
else:
@@ -144,41 +206,64 @@ class GalaxyAPI(object):
done = (data.get('next', None) is None)
while not done:
url = '%s%s' % (self.baseurl, data['next'])
- display.display(url)
- data = json.load(open_url(url, validate_certs=self.galaxy.options.validate_certs))
+ data = self.__call_galaxy(url)
results += data['results']
done = (data.get('next', None) is None)
return results
except Exception as error:
raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
- def search_roles(self, search, platforms=None, tags=None):
+ def search_roles(self, search, **kwargs):
- search_url = self.baseurl + '/roles/?page=1'
+ search_url = self.baseurl + '/search/roles/?'
if search:
- search_url += '&search=' + urlquote(search)
+ search_url += '&autocomplete=' + urlquote(search)
+
+ tags = kwargs.get('tags',None)
+ platforms = kwargs.get('platforms', None)
+ page_size = kwargs.get('page_size', None)
+ author = kwargs.get('author', None)
- if tags is None:
- tags = []
- elif isinstance(tags, basestring):
+ if tags and isinstance(tags, basestring):
tags = tags.split(',')
+ search_url += '&tags_autocomplete=' + '+'.join(tags)
+
+ if platforms and isinstance(platforms, basestring):
+ platforms = platforms.split(',')
+ search_url += '&platforms_autocomplete=' + '+'.join(platforms)
- for tag in tags:
- search_url += '&chain__tags__name=' + urlquote(tag)
+ if page_size:
+ search_url += '&page_size=%s' % page_size
- if platforms is None:
- platforms = []
- elif isinstance(platforms, basestring):
- platforms = platforms.split(',')
+ if author:
+ search_url += '&username_autocomplete=%s' % author
+
+ data = self.__call_galaxy(search_url)
+ return data
- for plat in platforms:
- search_url += '&chain__platforms__name=' + urlquote(plat)
+ def add_secret(self, source, github_user, github_repo, secret):
+ url = "%s/notification_secrets/" % self.baseurl
+ args = urllib.urlencode({
+ "source": source,
+ "github_user": github_user,
+ "github_repo": github_repo,
+ "secret": secret
+ })
+ data = self.__call_galaxy(url, args=args)
+ return data
- display.debug("Executing query: %s" % search_url)
- try:
- data = json.load(open_url(search_url, validate_certs=self.galaxy.options.validate_certs))
- except HTTPError as e:
- raise AnsibleError("Unsuccessful request to server: %s" % str(e))
+ def list_secrets(self):
+ url = "%s/notification_secrets" % self.baseurl
+ data = self.__call_galaxy(url, headers=self.__auth_header())
+ return data
+
+ def remove_secret(self, secret_id):
+ url = "%s/notification_secrets/%s/" % (self.baseurl, secret_id)
+ data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
+ return data
+ def delete_role(self, github_user, github_repo):
+ url = "%s/removerole/?github_user=%s&github_repo=%s" % (self.baseurl,github_user,github_repo)
+ data = self.__call_galaxy(url, headers=self.__auth_header(), method='DELETE')
return data
diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2
index c618adb3d4..1054c64bdf 100644
--- a/lib/ansible/galaxy/data/metadata_template.j2
+++ b/lib/ansible/galaxy/data/metadata_template.j2
@@ -2,9 +2,11 @@ galaxy_info:
author: {{ author }}
description: {{description}}
company: {{ company }}
+
# If the issue tracker for your role is not on github, uncomment the
# next line and provide a value
# issue_tracker_url: {{ issue_tracker_url }}
+
# Some suggested licenses:
# - BSD (default)
# - MIT
@@ -13,7 +15,17 @@ galaxy_info:
# - Apache
# - CC-BY
license: {{ license }}
+
min_ansible_version: {{ min_ansible_version }}
+
+ # Optionally specify the branch Galaxy will use when accessing the GitHub
+ # repo for this role. During role install, if no tags are available,
+ # Galaxy will use this branch. During import Galaxy will access files on
+ # this branch. If travis integration is configured, only notification for this
+ # branch will be accepted. Otherwise, in all cases, the repo's default branch
+ # (usually master) will be used.
+ #github_branch:
+
#
# Below are all platforms currently available. Just uncomment
# the ones that apply to your role. If you don't see your
@@ -28,6 +40,7 @@ galaxy_info:
# - {{ version }}
{%- endfor %}
{%- endfor %}
+
galaxy_tags: []
# List tags for your role here, one per line. A tag is
# a keyword that describes and categorizes the role.
@@ -36,6 +49,7 @@ galaxy_info:
#
# NOTE: A tag is limited to a single word comprised of
# alphanumeric characters. Maximum 20 tags per role.
+
dependencies: []
# List your role dependencies here, one per line.
# Be sure to remove the '[]' above if you add dependencies
diff --git a/lib/ansible/galaxy/data/test_playbook.j2 b/lib/ansible/galaxy/data/test_playbook.j2
new file mode 100644
index 0000000000..45824f6051
--- /dev/null
+++ b/lib/ansible/galaxy/data/test_playbook.j2
@@ -0,0 +1,5 @@
+---
+- hosts: localhost
+ remote_user: root
+ roles:
+ - {{ role_name }} \ No newline at end of file
diff --git a/lib/ansible/galaxy/data/travis.j2 b/lib/ansible/galaxy/data/travis.j2
new file mode 100644
index 0000000000..36bbf6208c
--- /dev/null
+++ b/lib/ansible/galaxy/data/travis.j2
@@ -0,0 +1,29 @@
+---
+language: python
+python: "2.7"
+
+# Use the new container infrastructure
+sudo: false
+
+# Install ansible
+addons:
+ apt:
+ packages:
+ - python-pip
+
+install:
+ # Install ansible
+ - pip install ansible
+
+ # Check ansible version
+ - ansible --version
+
+ # Create ansible.cfg with correct roles_path
+ - printf '[defaults]\nroles_path=../' >ansible.cfg
+
+script:
+ # Basic role syntax check
+ - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
+
+notifications:
+ webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file
diff --git a/lib/ansible/galaxy/login.py b/lib/ansible/galaxy/login.py
new file mode 100644
index 0000000000..3edaed7bc7
--- /dev/null
+++ b/lib/ansible/galaxy/login.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+
+########################################################################
+#
+# (C) 2015, Chris Houseknecht <chouse@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import getpass
+import json
+import urllib
+
+from urllib2 import quote as urlquote, HTTPError
+from urlparse import urlparse
+
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.module_utils.urls import open_url
+from ansible.utils.color import stringc
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+class GalaxyLogin(object):
+ ''' Class to handle authenticating a user with the Galaxy API prior to performing CUD operations '''
+
+ GITHUB_AUTH = 'https://api.github.com/authorizations'
+
+ def __init__(self, galaxy, github_token=None):
+ self.galaxy = galaxy
+ self.github_username = None
+ self.github_password = None
+
+ if github_token == None:
+ self.get_credentials()
+
+ def get_credentials(self):
+ display.display(u'\n\n' + "We need your " + stringc("Github login",'bright cyan') +
+ " to identify you.", screen_only=True)
+ display.display("This information will " + stringc("not be sent to Galaxy",'bright cyan') +
+ ", only to " + stringc("api.github.com.","yellow"), screen_only=True)
+ display.display("The password will not be displayed." + u'\n\n', screen_only=True)
+ display.display("Use " + stringc("--github-token",'yellow') +
+ " if you do not want to enter your password." + u'\n\n', screen_only=True)
+
+ try:
+ self.github_username = raw_input("Github Username: ")
+ except:
+ pass
+
+ try:
+ self.github_password = getpass.getpass("Password for %s: " % self.github_username)
+ except:
+ pass
+
+ if not self.github_username or not self.github_password:
+ raise AnsibleError("Invalid Github credentials. Username and password are required.")
+
+ def remove_github_token(self):
+ '''
+ If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot
+ retrieve the token after creation, so we are forced to create a new one.
+ '''
+ try:
+ tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
+ url_password=self.github_password, force_basic_auth=True,))
+ except HTTPError as e:
+ res = json.load(e)
+ raise AnsibleError(res['message'])
+
+ for token in tokens:
+ if token['note'] == 'ansible-galaxy login':
+ display.vvvvv('removing token: %s' % token['token_last_eight'])
+ try:
+ open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username,
+ url_password=self.github_password, method='DELETE', force_basic_auth=True,)
+ except HTTPError as e:
+ res = json.load(e)
+ raise AnsibleError(res['message'])
+
+ def create_github_token(self):
+ '''
+ Create a personal authorization token with a note of 'ansible-galaxy login'
+ '''
+ self.remove_github_token()
+ args = json.dumps({"scopes":["public_repo"], "note":"ansible-galaxy login"})
+ try:
+ data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username,
+ url_password=self.github_password, force_basic_auth=True, data=args))
+ except HTTPError as e:
+ res = json.load(e)
+ raise AnsibleError(res['message'])
+ return data['token']
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index dc9da5d79c..700664c4cd 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -46,7 +46,7 @@ class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
- ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
+ ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
@@ -130,13 +130,11 @@ class GalaxyRole(object):
install_date=datetime.datetime.utcnow().strftime("%c"),
)
info_path = os.path.join(self.path, self.META_INSTALL)
- try:
- f = open(info_path, 'w+')
- self._install_info = yaml.safe_dump(info, f)
- except:
- return False
- finally:
- f.close()
+ with open(info_path, 'w+') as f:
+ try:
+ self._install_info = yaml.safe_dump(info, f)
+ except:
+ return False
return True
@@ -198,10 +196,10 @@ class GalaxyRole(object):
role_data = self.src
tmp_file = self.fetch(role_data)
else:
- api = GalaxyAPI(self.galaxy, self.options.api_server)
+ api = GalaxyAPI(self.galaxy)
role_data = api.lookup_role_by_name(self.src)
if not role_data:
- raise AnsibleError("- sorry, %s was not found on %s." % (self.src, self.options.api_server))
+ raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))
role_versions = api.fetch_role_related('versions', role_data['id'])
if not self.version:
@@ -213,8 +211,10 @@ class GalaxyRole(object):
loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
loose_versions.sort()
self.version = str(loose_versions[-1])
+ elif role_data.get('github_branch', None):
+ self.version = role_data['github_branch']
else:
- self.version = 'master'
+ self.version = 'master'
elif self.version != 'master':
if role_versions and self.version not in [a.get('name', None) for a in role_versions]:
raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
diff --git a/lib/ansible/galaxy/token.py b/lib/ansible/galaxy/token.py
new file mode 100644
index 0000000000..02ca833069
--- /dev/null
+++ b/lib/ansible/galaxy/token.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+########################################################################
+#
+# (C) 2015, Chris Houseknecht <chouse@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import yaml
+from stat import *
+
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
+
+class GalaxyToken(object):
+ ''' Class for storing and retrieving the token in ~/.ansible_galaxy '''
+
+ def __init__(self):
+ self.file = os.path.expanduser("~") + '/.ansible_galaxy'
+ self.config = yaml.safe_load(self.__open_config_for_read())
+ if not self.config:
+ self.config = {}
+
+ def __open_config_for_read(self):
+ if os.path.isfile(self.file):
+ display.vvv('Opened %s' % self.file)
+ return open(self.file, 'r')
+ # config.yml not found, create and chmod u+rw
+ f = open(self.file,'w')
+ f.close()
+ os.chmod(self.file,S_IRUSR|S_IWUSR) # owner has +rw
+ display.vvv('Created %s' % self.file)
+ return open(self.file, 'r')
+
+ def set(self, token):
+ self.config['token'] = token
+ self.save()
+
+ def get(self):
+ return self.config.get('token', None)
+
+ def save(self):
+ with open(self.file,'w') as f:
+ yaml.safe_dump(self.config,f,default_flow_style=False)
+ \ No newline at end of file
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index 4d866587da..3d9ad3516d 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -78,6 +78,10 @@ class Inventory(object):
self._restriction = None
self._subset = None
+ # clear the cache here, which is only useful if more than
+ # one Inventory objects are created when using the API directly
+ self.clear_pattern_cache()
+
self.parse_inventory(host_list)
def serialize(self):
@@ -109,7 +113,12 @@ class Inventory(object):
pass
elif isinstance(host_list, list):
for h in host_list:
- (host, port) = parse_address(h, allow_ranges=False)
+ try:
+ (host, port) = parse_address(h, allow_ranges=False)
+ except AnsibleError as e:
+ display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_unicode(e))
+ host = h
+ port = None
all.add_host(Host(host, port))
elif self._loader.path_exists(host_list):
#TODO: switch this to a plugin loader and a 'condition' per plugin on which it should be tried, restoring 'inventory pllugins'
@@ -178,25 +187,26 @@ class Inventory(object):
if self._restriction:
pattern_hash += u":%s" % to_unicode(self._restriction)
- if pattern_hash in HOSTS_PATTERNS_CACHE:
- return HOSTS_PATTERNS_CACHE[pattern_hash][:]
+ if pattern_hash not in HOSTS_PATTERNS_CACHE:
- patterns = Inventory.split_host_pattern(pattern)
- hosts = self._evaluate_patterns(patterns)
+ patterns = Inventory.split_host_pattern(pattern)
+ hosts = self._evaluate_patterns(patterns)
- # mainly useful for hostvars[host] access
- if not ignore_limits_and_restrictions:
- # exclude hosts not in a subset, if defined
- if self._subset:
- subset = self._evaluate_patterns(self._subset)
- hosts = [ h for h in hosts if h in subset ]
+ # mainly useful for hostvars[host] access
+ if not ignore_limits_and_restrictions:
+ # exclude hosts not in a subset, if defined
+ if self._subset:
+ subset = self._evaluate_patterns(self._subset)
+ hosts = [ h for h in hosts if h in subset ]
- # exclude hosts mentioned in any restriction (ex: failed hosts)
- if self._restriction is not None:
- hosts = [ h for h in hosts if h in self._restriction ]
+ # exclude hosts mentioned in any restriction (ex: failed hosts)
+ if self._restriction is not None:
+ hosts = [ h for h in hosts if h in self._restriction ]
- HOSTS_PATTERNS_CACHE[pattern_hash] = hosts[:]
- return hosts
+ seen = set()
+ HOSTS_PATTERNS_CACHE[pattern_hash] = [x for x in hosts if x not in seen and not seen.add(x)]
+
+ return HOSTS_PATTERNS_CACHE[pattern_hash][:]
@classmethod
def split_host_pattern(cls, pattern):
@@ -227,15 +237,13 @@ class Inventory(object):
# If it doesn't, it could still be a single pattern. This accounts for
# non-separator uses of colons: IPv6 addresses and [x:y] host ranges.
else:
- (base, port) = parse_address(pattern, allow_ranges=True)
- if base:
+ try:
+ (base, port) = parse_address(pattern, allow_ranges=True)
patterns = [pattern]
-
- # The only other case we accept is a ':'-separated list of patterns.
- # This mishandles IPv6 addresses, and is retained only for backwards
- # compatibility.
-
- else:
+ except:
+ # The only other case we accept is a ':'-separated list of patterns.
+ # This mishandles IPv6 addresses, and is retained only for backwards
+ # compatibility.
patterns = re.findall(
r'''(?: # We want to match something comprising:
[^\s:\[\]] # (anything other than whitespace or ':[]'
@@ -388,7 +396,7 @@ class Inventory(object):
end = -1
subscript = (int(start), int(end))
if sep == '-':
- display.deprecated("Use [x:y] inclusive subscripts instead of [x-y]", version=2.0, removed=True)
+ display.warning("Use [x:y] inclusive subscripts instead of [x-y] which has been removed")
return (pattern, subscript)
@@ -455,6 +463,8 @@ class Inventory(object):
def clear_pattern_cache(self):
''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
+ global HOSTS_PATTERNS_CACHE
+ HOSTS_PATTERNS_CACHE = {}
self._pattern_cache = {}
def groups_for_host(self, host):
@@ -729,12 +739,12 @@ class Inventory(object):
if group and host is None:
# load vars in dir/group_vars/name_of_group
- base_path = os.path.realpath(os.path.join(basedir, "group_vars/%s" % group.name))
- results = self._variable_manager.add_group_vars_file(base_path, self._loader)
+ base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "group_vars/%s" % group.name))
+ results = combine_vars(results, self._variable_manager.add_group_vars_file(base_path, self._loader))
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
- base_path = os.path.realpath(os.path.join(basedir, "host_vars/%s" % host.name))
- results = self._variable_manager.add_host_vars_file(base_path, self._loader)
+ base_path = os.path.realpath(os.path.join(to_unicode(basedir, errors='strict'), "host_vars/%s" % host.name))
+ results = combine_vars(results, self._variable_manager.add_host_vars_file(base_path, self._loader))
# all done, results is a dictionary of variables for this particular host.
return results
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index 7ae9611ddf..e716987fd5 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -192,6 +192,8 @@ class InventoryDirectory(object):
if group.name not in self.groups:
# it's brand new, add him!
self.groups[group.name] = group
+ # the Group class does not (yet) implement __eq__/__ne__,
+ # so unlike Host we do a regular comparison here
if self.groups[group.name] != group:
# different object, merge
self._merge_groups(self.groups[group.name], group)
@@ -200,6 +202,9 @@ class InventoryDirectory(object):
if host.name not in self.hosts:
# Papa's got a brand new host
self.hosts[host.name] = host
+ # because the __eq__/__ne__ methods in Host() compare the
+ # name fields rather than references, we use id() here to
+ # do the object comparison for merges
if self.hosts[host.name] != host:
# different object, merge
self._merge_hosts(self.hosts[host.name], host)
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index a561b951b4..6263dcbc80 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+import uuid
+
from ansible.inventory.group import Group
from ansible.utils.vars import combine_vars
@@ -38,7 +40,7 @@ class Host:
def __eq__(self, other):
if not isinstance(other, Host):
return False
- return self.name == other.name
+ return self._uuid == other._uuid
def __ne__(self, other):
return not self.__eq__(other)
@@ -55,6 +57,7 @@ class Host:
name=self.name,
vars=self.vars.copy(),
address=self.address,
+ uuid=self._uuid,
gathered_facts=self._gathered_facts,
groups=groups,
)
@@ -65,6 +68,7 @@ class Host:
self.name = data.get('name')
self.vars = data.get('vars', dict())
self.address = data.get('address', '')
+ self._uuid = data.get('uuid', uuid.uuid4())
groups = data.get('groups', [])
for group_data in groups:
@@ -84,6 +88,7 @@ class Host:
self.set_variable('ansible_port', int(port))
self._gathered_facts = False
+ self._uuid = uuid.uuid4()
def __repr__(self):
return self.get_name()
diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index 537fde1ef9..4d43977004 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -124,6 +124,9 @@ class InventoryParser(object):
del pending_declarations[groupname]
continue
+ elif line.startswith('['):
+ self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + \
+ " in the section entry, and that there are no other invalid characters")
# It's not a section, so the current state tells us what kind of
# definition it must be. The individual parsers will raise an
@@ -264,9 +267,12 @@ class InventoryParser(object):
# Can the given hostpattern be parsed as a host with an optional port
# specification?
- (pattern, port) = parse_address(hostpattern, allow_ranges=True)
- if not pattern:
- self._raise_error("Can't parse '%s' as host[:port]" % hostpattern)
+ try:
+ (pattern, port) = parse_address(hostpattern, allow_ranges=True)
+ except:
+ # not a recognizable host pattern
+ pattern = hostpattern
+ port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index 6dfb1d2af0..95e48eff58 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -31,6 +31,7 @@ from ansible.errors import AnsibleError
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
+from ansible.utils.unicode import to_str, to_unicode
class InventoryScript:
@@ -57,12 +58,17 @@ class InventoryScript:
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
- self.data = stdout
+ # make sure script output is unicode so that json loader will output
+ # unicode strings itself
+ try:
+ self.data = to_unicode(stdout, errors="strict")
+ except Exception as e:
+ raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_str(self.filename), to_str(e)))
+
# see comment about _meta below
self.host_vars_from_top = None
self._parse(stderr)
-
def _parse(self, err):
all_hosts = {}
@@ -72,13 +78,11 @@ class InventoryScript:
self.raw = self._loader.load(self.data)
except Exception as e:
sys.stderr.write(err + "\n")
- raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(self.filename, str(e)))
+ raise AnsibleError("failed to parse executable inventory script results from {0}: {1}".format(to_str(self.filename), to_str(e)))
if not isinstance(self.raw, Mapping):
sys.stderr.write(err + "\n")
- raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(self.filename))
-
- self.raw = json_dict_bytes_to_unicode(self.raw)
+ raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_str(self.filename)))
group = None
for (group_name, data) in self.raw.items():
@@ -103,7 +107,7 @@ class InventoryScript:
if not isinstance(data, dict):
data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars')):
+ elif not any(k in data for k in ('hosts','vars','children')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
@@ -112,7 +116,7 @@ class InventoryScript:
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
- if not hostname in all_hosts:
+ if hostname not in all_hosts:
all_hosts[hostname] = Host(hostname)
host = all_hosts[hostname]
group.add_host(host)
@@ -145,10 +149,12 @@ class InventoryScript:
def get_host_variables(self, host):
""" Runs <script> --host <hostname> to determine additional host variables """
if self.host_vars_from_top is not None:
- got = self.host_vars_from_top.get(host.name, {})
+ try:
+ got = self.host_vars_from_top.get(host.name, {})
+ except AttributeError as e:
+ raise AnsibleError("Improperly formatted host information for %s: %s" % (host.name,to_str(e)))
return got
-
cmd = [self.filename, "--host", host.name]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -161,4 +167,3 @@ class InventoryScript:
return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
-
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index e14ebfa680..42ea8e7906 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -34,8 +34,8 @@ ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
-BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
-BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
+BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1, True]
+BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0, False]
BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
@@ -213,7 +213,7 @@ except ImportError:
elif isinstance(node, ast.List):
return list(map(_convert, node.nodes))
elif isinstance(node, ast.Dict):
- return dict((_convert(k), _convert(v)) for k, v in node.items)
+ return dict((_convert(k), _convert(v)) for k, v in node.items())
elif isinstance(node, ast.Name):
if node.name in _safe_names:
return _safe_names[node.name]
@@ -369,7 +369,12 @@ def return_values(obj):
sensitive values pre-jsonification."""
if isinstance(obj, basestring):
if obj:
- yield obj
+ if isinstance(obj, bytes):
+ yield obj
+ else:
+ # Unicode objects should all convert to utf-8
+ # (still must deal with surrogateescape on python3)
+ yield obj.encode('utf-8')
return
elif isinstance(obj, Sequence):
for element in obj:
@@ -391,10 +396,22 @@ def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
if isinstance(value, basestring):
- if value in no_log_strings:
+ if isinstance(value, unicode):
+ # This should work everywhere on python2. Need to check
+ # surrogateescape on python3
+ bytes_value = value.encode('utf-8')
+ value_is_unicode = True
+ else:
+ bytes_value = value
+ value_is_unicode = False
+ if bytes_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
- value = value.replace(omit_me, '*' * 8)
+ bytes_value = bytes_value.replace(omit_me, '*' * 8)
+ if value_is_unicode:
+ value = unicode(bytes_value, 'utf-8', errors='replace')
+ else:
+ value = bytes_value
elif isinstance(value, Sequence):
return [remove_values(elem, no_log_strings) for elem in value]
elif isinstance(value, Mapping):
@@ -497,8 +514,11 @@ class AnsibleModule(object):
self.no_log = no_log
self.cleanup_files = []
self._debug = False
+ self._diff = False
+ self._verbosity = 0
self.aliases = {}
+ self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
@@ -507,6 +527,15 @@ class AnsibleModule(object):
self.params = self._load_params()
+ # append to legal_inputs and then possibly check against them
+ try:
+ self.aliases = self._handle_aliases()
+ except Exception:
+ e = get_exception()
+ # use exceptions here because it's not safe to call fail_json until no_log is processed
+ print('{"failed": true, "msg": "Module alias error: %s"}' % str(e))
+ sys.exit(1)
+
# Save parameter values that should never be logged
self.no_log_values = set()
# Use the argspec to determine which args are no_log
@@ -517,15 +546,10 @@ class AnsibleModule(object):
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
- # check the locale as set by the current environment, and
- # reset to LANG=C if it's an invalid/unavailable locale
+ # check the locale as set by the current environment, and reset to
+ # a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
- self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug']
-
- # append to legal_inputs and then possibly check against them
- self.aliases = self._handle_aliases()
-
self._check_arguments(check_invalid_arguments)
# check exclusive early
@@ -554,7 +578,7 @@ class AnsibleModule(object):
self._set_defaults(pre=False)
- if not self.no_log:
+ if not self.no_log and self._verbosity >= 3:
self._log_invocation()
# finally, make sure we're in a sane working dir
@@ -728,7 +752,7 @@ class AnsibleModule(object):
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
- def set_context_if_different(self, path, context, changed):
+ def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
@@ -749,6 +773,14 @@ class AnsibleModule(object):
new_context[i] = cur_context[i]
if cur_context != new_context:
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['secontext'] = cur_context
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['secontext'] = new_context
+
try:
if self.check_mode:
return True
@@ -762,7 +794,7 @@ class AnsibleModule(object):
changed = True
return changed
- def set_owner_if_different(self, path, owner, changed):
+ def set_owner_if_different(self, path, owner, changed, diff=None):
path = os.path.expanduser(path)
if owner is None:
return changed
@@ -775,6 +807,15 @@ class AnsibleModule(object):
except KeyError:
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['owner'] = orig_uid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['owner'] = uid
+
if self.check_mode:
return True
try:
@@ -784,7 +825,7 @@ class AnsibleModule(object):
changed = True
return changed
- def set_group_if_different(self, path, group, changed):
+ def set_group_if_different(self, path, group, changed, diff=None):
path = os.path.expanduser(path)
if group is None:
return changed
@@ -797,6 +838,15 @@ class AnsibleModule(object):
except KeyError:
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['group'] = orig_gid
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['group'] = gid
+
if self.check_mode:
return True
try:
@@ -806,7 +856,7 @@ class AnsibleModule(object):
changed = True
return changed
- def set_mode_if_different(self, path, mode, changed):
+ def set_mode_if_different(self, path, mode, changed, diff=None):
path = os.path.expanduser(path)
path_stat = os.lstat(path)
@@ -828,6 +878,15 @@ class AnsibleModule(object):
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
+
+ if diff is not None:
+ if 'before' not in diff:
+ diff['before'] = {}
+ diff['before']['mode'] = oct(prev_mode)
+ if 'after' not in diff:
+ diff['after'] = {}
+ diff['after']['mode'] = oct(mode)
+
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
@@ -961,27 +1020,27 @@ class AnsibleModule(object):
or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
- def set_fs_attributes_if_different(self, file_args, changed):
+ def set_fs_attributes_if_different(self, file_args, changed, diff=None):
# set modes owners and context as needed
changed = self.set_context_if_different(
- file_args['path'], file_args['secontext'], changed
+ file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
- file_args['path'], file_args['owner'], changed
+ file_args['path'], file_args['owner'], changed, diff
)
changed = self.set_group_if_different(
- file_args['path'], file_args['group'], changed
+ file_args['path'], file_args['group'], changed, diff
)
changed = self.set_mode_if_different(
- file_args['path'], file_args['mode'], changed
+ file_args['path'], file_args['mode'], changed, diff
)
return changed
- def set_directory_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
+ def set_directory_attributes_if_different(self, file_args, changed, diff=None):
+ return self.set_fs_attributes_if_different(file_args, changed, diff)
- def set_file_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
+ def set_file_attributes_if_different(self, file_args, changed, diff=None):
+ return self.set_fs_attributes_if_different(file_args, changed, diff)
def add_path_info(self, kwargs):
'''
@@ -1034,7 +1093,6 @@ class AnsibleModule(object):
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
- e = get_exception()
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
@@ -1047,6 +1105,7 @@ class AnsibleModule(object):
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
def _handle_aliases(self):
+ # this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} #alias:canon
for (k,v) in self.argument_spec.items():
self._legal_inputs.append(k)
@@ -1055,11 +1114,11 @@ class AnsibleModule(object):
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
- self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
+ raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if type(aliases) != list:
- self.fail_json(msg='internal error: aliases must be a list')
+ raise Exception('internal error: aliases must be a list')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
@@ -1082,6 +1141,12 @@ class AnsibleModule(object):
elif k == '_ansible_debug':
self._debug = self.boolean(v)
+ elif k == '_ansible_diff':
+ self._diff = self.boolean(v)
+
+ elif k == '_ansible_verbosity':
+ self._verbosity = v
+
elif check_invalid_arguments and k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -1257,7 +1322,7 @@ class AnsibleModule(object):
if isinstance(value, bool):
return value
- if isinstance(value, basestring):
+ if isinstance(value, basestring) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
@@ -1414,7 +1479,6 @@ class AnsibleModule(object):
self.log(msg, log_args=log_args)
-
def _set_cwd(self):
try:
cwd = os.getcwd()
@@ -1507,6 +1571,8 @@ class AnsibleModule(object):
self.add_path_info(kwargs)
if not 'changed' in kwargs:
kwargs['changed'] = False
+ if 'invocation' not in kwargs:
+ kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print(self.jsonify(kwargs))
@@ -1517,6 +1583,8 @@ class AnsibleModule(object):
self.add_path_info(kwargs)
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
+ if 'invocation' not in kwargs:
+ kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
self.do_cleanup_files()
print(self.jsonify(kwargs))
@@ -1687,25 +1755,29 @@ class AnsibleModule(object):
# rename might not preserve context
self.set_context_if_different(dest, context, False)
- def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
+ def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None):
'''
Execute a command, returns rc, stdout, and stderr.
- args is the command to run
- If args is a list, the command will be run with shell=False.
- If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
- If args is a string and use_unsafe_shell=True it run with shell=True.
- Other arguments:
- - check_rc (boolean) Whether to call fail_json in case of
- non zero RC. Default is False.
- - close_fds (boolean) See documentation for subprocess.Popen().
- Default is True.
- - executable (string) See documentation for subprocess.Popen().
- Default is None.
- - prompt_regex (string) A regex string (not a compiled regex) which
- can be used to detect prompts in the stdout
- which would otherwise cause the execution
- to hang (especially if no input data is
- specified)
+
+ :arg args: is the command to run
+ * If args is a list, the command will be run with shell=False.
+ * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
+ * If args is a string and use_unsafe_shell=True it runs with shell=True.
+ :kw check_rc: Whether to call fail_json in case of non zero RC.
+ Default False
+ :kw close_fds: See documentation for subprocess.Popen(). Default True
+ :kw executable: See documentation for subprocess.Popen(). Default None
+ :kw data: If given, information to write to the stdin of the command
+ :kw binary_data: If False, append a newline to the data. Default False
+ :kw path_prefix: If given, additional path to find the command in.
+ This adds to the PATH environment variable so helper commands in
+ the same directory can also be found
+ :kw cwd: If given, working directory to run the command inside
+ :kw use_unsafe_shell: See `args` parameter. Default False
+ :kw prompt_regex: Regex string (not a compiled regex) which can be
+ used to detect prompts in the stdout which would otherwise cause
+ the execution to hang (especially if no input data is specified)
+ :kw environ_update: dictionary to *update* os.environ with
'''
shell = False
@@ -1736,10 +1808,15 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporary env path if a prefix is passed
- env=os.environ
+ # Manipulate the environ we'll send to the new process
+ old_env_vals = {}
+ if environ_update:
+ for key, val in environ_update.items():
+ old_env_vals[key] = os.environ.get(key, None)
+ os.environ[key] = val
if path_prefix:
- env['PATH']="%s:%s" % (path_prefix, env['PATH'])
+ old_env_vals['PATH'] = os.environ['PATH']
+ os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# create a printable version of the command for use
# in reporting later, which strips out things like
@@ -1781,11 +1858,10 @@ class AnsibleModule(object):
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
+ stderr=subprocess.PIPE,
+ env=os.environ,
)
- if path_prefix:
- kwargs['env'] = env
if cwd and os.path.isdir(cwd):
kwargs['cwd'] = cwd
@@ -1864,6 +1940,13 @@ class AnsibleModule(object):
except:
self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
+ # Restore env settings
+ for key, val in old_env_vals.items():
+ if val is None:
+ del os.environ[key]
+ else:
+ os.environ[key] = val
+
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
diff --git a/lib/ansible/module_utils/cloudstack.py b/lib/ansible/module_utils/cloudstack.py
index 53d142847d..d9b29fefe7 100644
--- a/lib/ansible/module_utils/cloudstack.py
+++ b/lib/ansible/module_utils/cloudstack.py
@@ -78,6 +78,10 @@ class AnsibleCloudStack(object):
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
+ # these keys will be compared case sensitive in self.has_changed()
+ self.case_sensitive_keys = [
+ 'id',
+ ]
self.module = module
self._connect()
@@ -138,16 +142,14 @@ class AnsibleCloudStack(object):
continue
if key in current_dict:
-
- # API returns string for int in some cases, just to make sure
- if isinstance(value, int):
- current_dict[key] = int(current_dict[key])
- elif isinstance(value, str):
- current_dict[key] = str(current_dict[key])
-
- # Only need to detect a singe change, not every item
- if value != current_dict[key]:
+ if self.case_sensitive_keys and key in self.case_sensitive_keys:
+ if str(value) != str(current_dict[key]):
+ return True
+ # Test for diff in case insensitive way
+ elif str(value).lower() != str(current_dict[key]).lower():
return True
+ else:
+ return True
return False
@@ -218,7 +220,7 @@ class AnsibleCloudStack(object):
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
- if vm in [ v['name'], v['displayname'], v['id'] ]:
+ if vm.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
@@ -238,7 +240,7 @@ class AnsibleCloudStack(object):
if zones:
for z in zones['zone']:
- if zone in [ z['name'], z['id'] ]:
+ if zone.lower() in [ z['name'].lower(), z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
diff --git a/lib/ansible/module_utils/eapi.py b/lib/ansible/module_utils/eapi.py
deleted file mode 100644
index f858cafdca..0000000000
--- a/lib/ansible/module_utils/eapi.py
+++ /dev/null
@@ -1,155 +0,0 @@
-#
-# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-This module adds shared support for Arista EOS devices using eAPI over
-HTTP/S transport. It is built on module_utils/urls.py which is required
-for proper operation.
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
-** Note: The order of the import statements does matter. **
-
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.eapi import *
-
-The eapi module provides the following common argument spec:
-
- * host (str) - [Required] The IPv4 address or FQDN of the network device
-
- * port (str) - Overrides the default port to use for the HTTP/S
- connection. The default values are 80 for HTTP and
- 443 for HTTPS
-
- * url_username (str) - [Required] The username to use to authenticate
- the HTTP/S connection. Aliases: username
-
- * url_password (str) - [Required] The password to use to authenticate
- the HTTP/S connection. Aliases: password
-
- * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS)
- connection or not. The default value is False.
-
- * enable_mode (bool) - Specifies whether or not to enter `enable` mode
- prior to executing the command list. The default value is True
-
- * enable_password (str) - The password for entering `enable` mode
- on the switch if configured.
-
-In order to communicate with Arista EOS devices, the eAPI feature
-must be enabled and configured on the device.
-
-"""
-def eapi_argument_spec(spec=None):
- """Creates an argument spec for working with eAPI
- """
- arg_spec = url_argument_spec()
- arg_spec.update(dict(
- host=dict(required=True),
- port=dict(),
- url_username=dict(required=True, aliases=['username']),
- url_password=dict(required=True, aliases=['password']),
- use_ssl=dict(default=True, type='bool'),
- enable_mode=dict(default=True, type='bool'),
- enable_password=dict()
- ))
- if spec:
- arg_spec.update(spec)
- return arg_spec
-
-def eapi_url(module):
- """Construct a valid Arist eAPI URL
- """
- if module.params['use_ssl']:
- proto = 'https'
- else:
- proto = 'http'
- host = module.params['host']
- url = '{}://{}'.format(proto, host)
- if module.params['port']:
- url = '{}:{}'.format(url, module.params['port'])
- return '{}/command-api'.format(url)
-
-def to_list(arg):
- """Convert the argument to a list object
- """
- if isinstance(arg, (list, tuple)):
- return list(arg)
- elif arg is not None:
- return [arg]
- else:
- return []
-
-def eapi_body(commands, encoding, reqid=None):
- """Create a valid eAPI JSON-RPC request message
- """
- params = dict(version=1, cmds=to_list(commands), format=encoding)
- return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
-
-def eapi_enable_mode(module):
- """Build commands for entering `enable` mode on the switch
- """
- if module.params['enable_mode']:
- passwd = module.params['enable_password']
- if passwd:
- return dict(cmd='enable', input=passwd)
- else:
- return 'enable'
-
-def eapi_command(module, commands, encoding='json'):
- """Send an ordered list of commands to the device over eAPI
- """
- commands = to_list(commands)
- url = eapi_url(module)
-
- enable = eapi_enable_mode(module)
- if enable:
- commands.insert(0, enable)
-
- data = eapi_body(commands, encoding)
- data = module.jsonify(data)
-
- headers = {'Content-Type': 'application/json-rpc'}
-
- response, headers = fetch_url(module, url, data=data, headers=headers,
- method='POST')
-
- if headers['status'] != 200:
- module.fail_json(**headers)
-
- response = module.from_json(response.read())
- if 'error' in response:
- err = response['error']
- module.fail_json(msg='json-rpc error', **err)
-
- if enable:
- response['result'].pop(0)
-
- return response['result'], headers
-
-def eapi_configure(module, commands):
- """Send configuration commands to the device over eAPI
- """
- commands.insert(0, 'configure')
- response, headers = eapi_command(module, commands)
- response.pop(0)
- return response, headers
-
-
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index 2edfd9e5d8..fdb326a7f1 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -41,21 +41,30 @@ except:
HAS_LOOSE_VERSION = False
+class AnsibleAWSError(Exception):
+ pass
+
+
def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params):
+ profile = params.pop('profile_name', None)
+ params['aws_session_token'] = params.pop('security_token', None)
+ params['verify'] = params.pop('validate_certs', None)
+
if conn_type not in ['both', 'resource', 'client']:
module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
if conn_type == 'resource':
- resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
return resource
elif conn_type == 'client':
- client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
+ client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
return client
else:
- resource = boto3.session.Session().resource(resource, region_name=region, endpoint_url=endpoint, **params)
- client = boto3.session.Session().client(resource, region_name=region, endpoint_url=endpoint, **params)
+ resource = boto3.session.Session(profile_name=profile).resource(resource, region_name=region, endpoint_url=endpoint, **params)
+ client = boto3.session.Session(profile_name=profile).client(resource, region_name=region, endpoint_url=endpoint, **params)
return client, resource
+
def aws_common_argument_spec():
return dict(
ec2_url=dict(),
@@ -158,13 +167,12 @@ def get_aws_connection_info(module, boto3=False):
if profile_name:
boto_params['profile_name'] = profile_name
-
else:
boto_params = dict(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
security_token=security_token)
- # profile_name only works as a key in boto >= 2.24
+ # profile_name only works as a key in boto >= 2.24
# so only set profile_name if passed as an argument
if profile_name:
if not boto_supports_profile_name():
@@ -174,6 +182,10 @@ def get_aws_connection_info(module, boto3=False):
if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
boto_params['validate_certs'] = validate_certs
+ for param, value in boto_params.items():
+ if isinstance(value, str):
+ boto_params[param] = unicode(value, 'utf-8', 'strict')
+
return region, ec2_url, boto_params
@@ -196,9 +208,9 @@ def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
- raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
+ raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
return conn
@@ -214,13 +226,13 @@ def ec2_connect(module):
if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
# Otherwise, no region so we fallback to the old connection method
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
diff --git a/lib/ansible/module_utils/eos.py b/lib/ansible/module_utils/eos.py
new file mode 100644
index 0000000000..a89869dced
--- /dev/null
+++ b/lib/ansible/module_utils/eos.py
@@ -0,0 +1,227 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
+
+NET_COMMON_ARGS = dict(
+ host=dict(required=True),
+ port=dict(type='int'),
+ username=dict(required=True),
+ password=dict(no_log=True),
+ authorize=dict(default=False, type='bool'),
+ auth_pass=dict(no_log=True),
+ transport=dict(choices=['cli', 'eapi']),
+ use_ssl=dict(default=True, type='bool'),
+ provider=dict()
+)
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class Eapi(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # sets the module_utils/urls.py req parameters
+ self.module.params['url_username'] = module.params['username']
+ self.module.params['url_password'] = module.params['password']
+
+ self.url = None
+ self.enable = None
+
+ def _get_body(self, commands, encoding, reqid=None):
+ """Create a valid eAPI JSON-RPC request message
+ """
+ params = dict(version=1, cmds=commands, format=encoding)
+ return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)
+
+ def connect(self):
+ host = self.module.params['host']
+ port = self.module.params['port']
+
+ if self.module.params['use_ssl']:
+ proto = 'https'
+ if not port:
+ port = 443
+ else:
+ proto = 'http'
+ if not port:
+ port = 80
+
+ self.url = '%s://%s:%s/command-api' % (proto, host, port)
+
+ def authorize(self):
+ if self.module.params['auth_pass']:
+ passwd = self.module.params['auth_pass']
+ self.enable = dict(cmd='enable', input=passwd)
+ else:
+ self.enable = 'enable'
+
+ def send(self, commands, encoding='json'):
+ """Send commands to the device.
+ """
+ clist = to_list(commands)
+
+ if self.enable is not None:
+ clist.insert(0, self.enable)
+
+ data = self._get_body(clist, encoding)
+ data = self.module.jsonify(data)
+
+ headers = {'Content-Type': 'application/json-rpc'}
+
+ response, headers = fetch_url(self.module, self.url, data=data,
+ headers=headers, method='POST')
+
+ if headers['status'] != 200:
+ self.module.fail_json(**headers)
+
+ response = self.module.from_json(response.read())
+ if 'error' in response:
+ err = response['error']
+ self.module.fail_json(msg='json-rpc error', **err)
+
+ if self.enable:
+ response['result'].pop(0)
+
+ return response['result']
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def authorize(self):
+ passwd = self.module.params['auth_pass']
+ self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd))
+
+ def send(self, commands, encoding='text'):
+ return self.shell.send(commands)
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ if self.params['transport'] == 'eapi':
+ self.connection = Eapi(self)
+ else:
+ self.connection = Cli(self)
+
+ try:
+ self.connection.connect()
+ self.execute('terminal length 0')
+
+ if self.params['authorize']:
+ self.connection.authorize()
+
+ except Exception, exc:
+ self.fail_json(msg=exc.message)
+
+ def configure(self, commands):
+ commands = to_list(commands)
+ commands.insert(0, 'configure terminal')
+ responses = self.execute(commands)
+ responses.pop(0)
+
+ return responses
+
+ def config_replace(self, commands):
+ if self.params['transport'] == 'cli':
+ self.fail_json(msg='config replace only supported over eapi')
+
+ cmd = 'configure replace terminal:'
+ commands = '\n'.join(to_list(commands))
+ command = dict(cmd=cmd, input=commands)
+ self.execute(command)
+
+ def execute(self, commands, **kwargs):
+ try:
+ return self.connection.send(commands, **kwargs)
+ except Exception, exc:
+ self.fail_json(msg=exc.message, commands=commands)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=3)
+
+ def get_config(self):
+ cmd = 'show running-config'
+ if self.params.get('include_defaults'):
+ cmd += ' all'
+ if self.params['transport'] == 'cli':
+ return self.execute(cmd)[0]
+ else:
+ resp = self.execute(cmd, encoding='text')
+ return resp[0]
+
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+
+ module = NetworkModule(**kwargs)
+
+ # HAS_PARAMIKO is set by module_utils/shell.py
+ if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ module.connect()
+
+ return module
+
diff --git a/lib/ansible/module_utils/f5.py b/lib/ansible/module_utils/f5.py
index e04e6b2f1e..ba336377e7 100644
--- a/lib/ansible/module_utils/f5.py
+++ b/lib/ansible/module_utils/f5.py
@@ -51,19 +51,35 @@ def f5_argument_spec():
def f5_parse_arguments(module):
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
- if not module.params['validate_certs']:
- disable_ssl_cert_validation()
+
+ if module.params['validate_certs']:
+ import ssl
+ if not hasattr(ssl, 'SSLContext'):
+ module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+
return (module.params['server'],module.params['user'],module.params['password'],module.params['state'],module.params['partition'],module.params['validate_certs'])
-def bigip_api(bigip, user, password):
- api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
- return api
+def bigip_api(bigip, user, password, validate_certs):
+ try:
+ # bigsuds >= 1.0.3
+ api = bigsuds.BIGIP(hostname=bigip, username=user, password=password, verify=validate_certs)
+ except TypeError:
+ # bigsuds < 1.0.3, no verify param
+ if validate_certs:
+ # Note: verified we have SSLContext when we parsed params
+ api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
+ else:
+ import ssl
+ if hasattr(ssl, 'SSLContext'):
+ # Really, you should never do this. It disables certificate
+ # verification *globally*. But since older bigip libraries
+ # don't give us a way to toggle verification we need to
+ # disable it at the global level.
+ # From https://www.python.org/dev/peps/pep-0476/#id29
+ ssl._create_default_https_context = ssl._create_unverified_context
+ api = bigsuds.BIGIP(hostname=bigip, username=user, password=password)
-def disable_ssl_cert_validation():
- # You probably only want to do this for testing and never in production.
- # From https://www.python.org/dev/peps/pep-0476/#id29
- import ssl
- ssl._create_default_https_context = ssl._create_unverified_context
+ return api
# Fully Qualified name (with the partition)
def fq_name(partition,name):
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 4120a51fb5..18fa26332b 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -119,6 +119,7 @@ class Facts(object):
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva'),
+ ('/etc/altlinux-release', 'Altlinux'),
('/etc/os-release', 'NA'),
)
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
@@ -270,7 +271,7 @@ class Facts(object):
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
+ Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
@@ -323,7 +324,7 @@ class Facts(object):
for (path, name) in Facts.OSDIST_LIST:
if os.path.exists(path):
if os.path.getsize(path) > 0:
- if self.facts['distribution'] in ('Fedora', ):
+ if self.facts['distribution'] in ('Fedora', 'Altlinux', ):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
@@ -356,6 +357,13 @@ class Facts(object):
else:
self.facts['distribution'] = data.split()[0]
break
+ elif name == 'Altlinux':
+ data = get_file_content(path)
+ if 'ALT Linux' in data:
+ self.facts['distribution'] = name
+ else:
+ self.facts['distribution'] = data.split()[0]
+ break
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
@@ -524,7 +532,10 @@ class Facts(object):
keytypes = ('dsa', 'rsa', 'ecdsa', 'ed25519')
if self.facts['system'] == 'Darwin':
- keydir = '/etc'
+ if self.facts['distribution'] == 'MacOSX' and LooseVersion(self.facts['distribution_version']) >= LooseVersion('10.11') :
+ keydir = '/etc/ssh'
+ else:
+ keydir = '/etc'
else:
keydir = '/etc/ssh'
@@ -544,21 +555,23 @@ class Facts(object):
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_service_mgr_facts(self):
- #TODO: detect more custom init setups like bootscripts, dmd, s6, etc
+ #TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, runit, etc
# also other OSs other than linux might need to check across several possible candidates
# try various forms of querying pid 1
proc_1 = get_file_content('/proc/1/comm')
if proc_1 is None:
rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)
+ else:
+ proc_1 = os.path.basename(proc_1)
- if proc_1 in ['init', '/sbin/init']:
- # many systems return init, so this cannot be trusted
+ if proc_1 == 'init' or proc_1.endswith('sh'):
+ # many systems return init, so this cannot be trusted, if it ends in 'sh' it probalby is a shell in a container
proc_1 = None
# if not init/None it should be an identifiable or custom init, so we are done!
if proc_1 is not None:
- self.facts['service_mgr'] = proc_1
+ self.facts['service_mgr'] = proc_1.strip()
# start with the easy ones
elif self.facts['distribution'] == 'MacOSX':
@@ -567,7 +580,7 @@ class Facts(object):
self.facts['service_mgr'] = 'launchd'
else:
self.facts['service_mgr'] = 'systemstarter'
- elif self.facts['system'].endswith('BSD') or self.facts['system'] in ['Bitrig', 'DragonFly']:
+ elif 'BSD' in self.facts['system'] or self.facts['system'] in ['Bitrig', 'DragonFly']:
#FIXME: we might want to break out to individual BSDs
self.facts['service_mgr'] = 'bsdinit'
elif self.facts['system'] == 'AIX':
@@ -576,12 +589,11 @@ class Facts(object):
#FIXME: smf?
self.facts['service_mgr'] = 'svcs'
elif self.facts['system'] == 'Linux':
-
if self._check_systemd():
self.facts['service_mgr'] = 'systemd'
elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
self.facts['service_mgr'] = 'upstart'
- elif module.get_bin_path('rc-service'):
+ elif os.path.realpath('/sbin/rc') == '/sbin/openrc':
self.facts['service_mgr'] = 'openrc'
elif os.path.exists('/etc/init.d/'):
self.facts['service_mgr'] = 'sysvinit'
@@ -2971,14 +2983,19 @@ def get_file_content(path, default=None, strip=True):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
try:
- datafile = open(path)
- data = datafile.read()
- if strip:
- data = data.strip()
- if len(data) == 0:
- data = default
- finally:
- datafile.close()
+ try:
+ datafile = open(path)
+ data = datafile.read()
+ if strip:
+ data = data.strip()
+ if len(data) == 0:
+ data = default
+ finally:
+ datafile.close()
+ except:
+ # ignore errors as some jails/containers might have readable permissions but not allow reads to proc
+ # done in 2 blocks for 2.4 compat
+ pass
return data
def get_file_lines(path):
diff --git a/lib/ansible/module_utils/ios.py b/lib/ansible/module_utils/ios.py
new file mode 100644
index 0000000000..95937ca219
--- /dev/null
+++ b/lib/ansible/module_utils/ios.py
@@ -0,0 +1,134 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
+
+NET_COMMON_ARGS = dict(
+ host=dict(required=True),
+ port=dict(default=22, type='int'),
+ username=dict(required=True),
+ password=dict(no_log=True),
+ authorize=dict(default=False, type='bool'),
+ auth_pass=dict(no_log=True),
+ provider=dict()
+)
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def authorize(self):
+ passwd = self.module.params['auth_pass']
+ self.send(Command('enable', prompt=NET_PASSWD_RE, response=passwd))
+
+ def send(self, commands):
+ return self.shell.send(commands)
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ try:
+ self.connection = Cli(self)
+ self.connection.connect()
+ self.execute('terminal length 0')
+
+ if self.params['authorize']:
+ self.connection.authorize()
+
+ except Exception, exc:
+ self.fail_json(msg=exc.message)
+
+ def configure(self, commands):
+ commands = to_list(commands)
+ commands.insert(0, 'configure terminal')
+ responses = self.execute(commands)
+ responses.pop(0)
+ return responses
+
+ def execute(self, commands, **kwargs):
+ return self.connection.send(commands)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=1)
+
+ def get_config(self):
+ cmd = 'show running-config'
+ if self.params.get('include_defaults'):
+ cmd += ' all'
+ return self.execute(cmd)[0]
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+
+ module = NetworkModule(**kwargs)
+
+ # HAS_PARAMIKO is set by module_utils/shell.py
+ if not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ module.connect()
+ return module
+
diff --git a/lib/ansible/module_utils/iosxr.py b/lib/ansible/module_utils/iosxr.py
new file mode 100644
index 0000000000..7ca360c5ef
--- /dev/null
+++ b/lib/ansible/module_utils/iosxr.py
@@ -0,0 +1,122 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
+
+NET_COMMON_ARGS = dict(
+ host=dict(required=True),
+ port=dict(default=22, type='int'),
+ username=dict(required=True),
+ password=dict(no_log=True),
+ provider=dict()
+)
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def send(self, commands):
+ return self.shell.send(commands)
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ try:
+ self.connection = Cli(self)
+ self.connection.connect()
+ self.execute('terminal length 0')
+ except Exception, exc:
+ self.fail_json(msg=exc.message)
+
+ def configure(self, commands):
+ commands = to_list(commands)
+ commands.insert(0, 'configure terminal')
+ commands.append('commit')
+ responses = self.execute(commands)
+ responses.pop(0)
+ responses.pop()
+ return responses
+
+ def execute(self, commands, **kwargs):
+ return self.connection.send(commands)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=1)
+
+ def get_config(self):
+ return self.execute('show running-config')[0]
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+
+ module = NetworkModule(**kwargs)
+
+ if not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ module.connect()
+ return module
+
diff --git a/lib/ansible/module_utils/junos.py b/lib/ansible/module_utils/junos.py
new file mode 100644
index 0000000000..33af9266e7
--- /dev/null
+++ b/lib/ansible/module_utils/junos.py
@@ -0,0 +1,122 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+NET_COMMON_ARGS = dict(
+ host=dict(required=True),
+ port=dict(default=22, type='int'),
+ username=dict(required=True),
+ password=dict(no_log=True),
+ provider=dict()
+)
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def send(self, commands):
+ return self.shell.send(commands)
+
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ self.connection = Cli(self)
+ self.connection.connect()
+ self.execute('cli')
+ self.execute('set cli screen-length 0')
+
+ def configure(self, commands):
+ commands = to_list(commands)
+ commands.insert(0, 'configure')
+ commands.append('commit and-quit')
+ responses = self.execute(commands)
+ responses.pop(0)
+ responses.pop()
+ return responses
+
+ def execute(self, commands, **kwargs):
+ return self.connection.send(commands)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=4)
+
+ def get_config(self):
+ cmd = 'show configuration'
+ return self.execute(cmd)[0]
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+ kwargs['check_invalid_arguments'] = False
+
+ module = NetworkModule(**kwargs)
+
+ # HAS_PARAMIKO is set by module_utils/shell.py
+ if not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ module.connect()
+ return module
+
diff --git a/lib/ansible/module_utils/known_hosts.py b/lib/ansible/module_utils/known_hosts.py
index d2644d9766..52b0bb74b0 100644
--- a/lib/ansible/module_utils/known_hosts.py
+++ b/lib/ansible/module_utils/known_hosts.py
@@ -28,7 +28,11 @@
import os
import hmac
-import urlparse
+
+try:
+ import urlparse
+except ImportError:
+ import urllib.parse as urlparse
try:
from hashlib import sha1
@@ -74,12 +78,12 @@ def get_fqdn(repo_url):
if "@" in repo_url and "://" not in repo_url:
# most likely an user@host:path or user@host/path type URL
repo_url = repo_url.split("@", 1)[1]
- if ":" in repo_url:
- repo_url = repo_url.split(":")[0]
- result = repo_url
+ if repo_url.startswith('['):
+ result = repo_url.split(']', 1)[0] + ']'
+ elif ":" in repo_url:
+ result = repo_url.split(":")[0]
elif "/" in repo_url:
- repo_url = repo_url.split("/")[0]
- result = repo_url
+ result = repo_url.split("/")[0]
elif "://" in repo_url:
# this should be something we can parse with urlparse
parts = urlparse.urlparse(repo_url)
@@ -87,11 +91,13 @@ def get_fqdn(repo_url):
# ensure we actually have a parts[1] before continuing.
if parts[1] != '':
result = parts[1]
- if ":" in result:
- result = result.split(":")[0]
if "@" in result:
result = result.split("@", 1)[1]
+ if result[0].startswith('['):
+ result = result.split(']', 1)[0] + ']'
+ elif ":" in result:
+ result = result.split(":")[0]
return result
def check_hostkey(module, fqdn):
@@ -169,7 +175,7 @@ def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
if not os.path.exists(user_ssh_dir):
if create_dir:
try:
- os.makedirs(user_ssh_dir, 0700)
+ os.makedirs(user_ssh_dir, int('700', 8))
except:
module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
else:
diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py
new file mode 100644
index 0000000000..48e00adfd9
--- /dev/null
+++ b/lib/ansible/module_utils/mysql.py
@@ -0,0 +1,66 @@
+# This code is part of Ansible, but is an independent component.
+# This particular file snippet, and this file snippet only, is BSD licensed.
+# Modules you write using this snippet, which is embedded dynamically by Ansible
+# still belong to the author of the module, and may assign their own license
+# to the complete work.
+#
+# Copyright (c), Jonathan Mainguy <jon@soh.re>, 2015
+# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification,
+# are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None):
+ config = {
+ 'host': module.params['login_host'],
+ 'ssl': {
+ }
+ }
+
+ if module.params['login_unix_socket']:
+ config['unix_socket'] = module.params['login_unix_socket']
+ else:
+ config['port'] = module.params['login_port']
+
+ if os.path.exists(config_file):
+ config['read_default_file'] = config_file
+
+ # If login_user or login_password are given, they should override the
+ # config file
+ if login_user is not None:
+ config['user'] = login_user
+ if login_password is not None:
+ config['passwd'] = login_password
+ if ssl_cert is not None:
+ config['ssl']['cert'] = ssl_cert
+ if ssl_key is not None:
+ config['ssl']['key'] = ssl_key
+ if ssl_ca is not None:
+ config['ssl']['ca'] = ssl_ca
+ if db is not None:
+ config['db'] = db
+
+ db_connection = MySQLdb.connect(**config)
+ if cursor_class is not None:
+ return db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
+ else:
+ return db_connection.cursor()
diff --git a/lib/ansible/module_utils/netcfg.py b/lib/ansible/module_utils/netcfg.py
new file mode 100644
index 0000000000..afd8be3a56
--- /dev/null
+++ b/lib/ansible/module_utils/netcfg.py
@@ -0,0 +1,85 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+import collections
+
+class ConfigLine(object):
+
+ def __init__(self, text):
+ self.text = text
+ self.children = list()
+ self.parents = list()
+ self.raw = None
+
+ def __str__(self):
+ return self.raw
+
+ def __eq__(self, other):
+ if self.text == other.text:
+ return self.parents == other.parents
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+def parse(lines, indent):
+ toplevel = re.compile(r'\S')
+ childline = re.compile(r'^\s*(.+)$')
+ repl = r'([{|}|;])'
+
+ ancestors = list()
+ config = list()
+
+ for line in str(lines).split('\n'):
+ text = str(re.sub(repl, '', line)).strip()
+
+ cfg = ConfigLine(text)
+ cfg.raw = line
+
+ if not text or text[0] in ['!', '#']:
+ continue
+
+ # handle top level commands
+ if toplevel.match(line):
+ ancestors = [cfg]
+
+ # handle sub level commands
+ else:
+ match = childline.match(line)
+ line_indent = match.start(1)
+ level = int(line_indent / indent)
+ parent_level = level - 1
+
+ cfg.parents = ancestors[:level]
+
+ if level > len(ancestors):
+ config.append(cfg)
+ continue
+
+ for i in range(level, len(ancestors)):
+ ancestors.pop()
+
+ ancestors.append(cfg)
+ ancestors[parent_level].children.append(cfg)
+
+ config.append(cfg)
+
+ return config
+
+
diff --git a/lib/ansible/module_utils/nxapi.py b/lib/ansible/module_utils/nxapi.py
deleted file mode 100644
index 0589b9a50c..0000000000
--- a/lib/ansible/module_utils/nxapi.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-"""
-This module adds support for Cisco NXAPI to Ansible shared
-module_utils. It builds on module_utils/urls.py to provide
-NXAPI support over HTTP/S which is required for proper operation.
-
-In order to use this module, include it as part of a custom
-module as shown below.
-
-** Note: The order of the import statements does matter. **
-
-from ansible.module_utils.basic import *
-from ansible.module_utils.urls import *
-from ansible.module_utils.nxapi import *
-
-The nxapi module provides the following common argument spec:
-
- * host (str) - [Required] The IPv4 address or FQDN of the network device
-
- * port (str) - Overrides the default port to use for the HTTP/S
- connection. The default values are 80 for HTTP and
- 443 for HTTPS
-
- * url_username (str) - [Required] The username to use to authenticate
- the HTTP/S connection. Aliases: username
-
- * url_password (str) - [Required] The password to use to authenticate
- the HTTP/S connection. Aliases: password
-
- * use_ssl (bool) - Specifies whether or not to use an encrypted (HTTPS)
- connection or not. The default value is False.
-
- * command_type (str) - The type of command to send to the remote
- device. Valid values in `cli_show`, `cli_show_ascii`, 'cli_conf`
- and `bash`. The default value is `cli_show_ascii`
-
-In order to communicate with Cisco NXOS devices, the NXAPI feature
-must be enabled and configured on the device.
-
-"""
-
-NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
-
-def nxapi_argument_spec(spec=None):
- """Creates an argument spec for working with NXAPI
- """
- arg_spec = url_argument_spec()
- arg_spec.update(dict(
- host=dict(required=True),
- port=dict(),
- url_username=dict(required=True, aliases=['username']),
- url_password=dict(required=True, aliases=['password']),
- use_ssl=dict(default=False, type='bool'),
- command_type=dict(default='cli_show_ascii', choices=NXAPI_COMMAND_TYPES)
- ))
- if spec:
- arg_spec.update(spec)
- return arg_spec
-
-def nxapi_url(module):
- """Constructs a valid NXAPI url
- """
- if module.params['use_ssl']:
- proto = 'https'
- else:
- proto = 'http'
- host = module.params['host']
- url = '{}://{}'.format(proto, host)
- port = module.params['port']
- if module.params['port']:
- url = '{}:{}'.format(url, module.params['port'])
- url = '{}/ins'.format(url)
- return url
-
-def nxapi_body(commands, command_type, **kwargs):
- """Encodes a NXAPI JSON request message
- """
- if isinstance(commands, (list, set, tuple)):
- commands = ' ;'.join(commands)
-
- msg = {
- 'version': kwargs.get('version') or '1.2',
- 'type': command_type,
- 'chunk': kwargs.get('chunk') or '0',
- 'sid': kwargs.get('sid'),
- 'input': commands,
- 'output_format': 'json'
- }
-
- return dict(ins_api=msg)
-
-def nxapi_command(module, commands, command_type=None, **kwargs):
- """Sends the list of commands to the device over NXAPI
- """
- url = nxapi_url(module)
-
- command_type = command_type or module.params['command_type']
-
- data = nxapi_body(commands, command_type)
- data = module.jsonify(data)
-
- headers = {'Content-Type': 'text/json'}
-
- response, headers = fetch_url(module, url, data=data, headers=headers,
- method='POST')
-
- status = kwargs.get('status') or 200
- if headers['status'] != status:
- module.fail_json(**headers)
-
- response = module.from_json(response.read())
- return response, headers
-
diff --git a/lib/ansible/module_utils/nxos.py b/lib/ansible/module_utils/nxos.py
new file mode 100644
index 0000000000..d8eb0f97de
--- /dev/null
+++ b/lib/ansible/module_utils/nxos.py
@@ -0,0 +1,217 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
+
+NET_COMMON_ARGS = dict(
+ host=dict(required=True),
+ port=dict(type='int'),
+ username=dict(required=True),
+ password=dict(no_log=True),
+ transport=dict(choices=['cli', 'nxapi']),
+ use_ssl=dict(default=False, type='bool'),
+ provider=dict()
+)
+
+NXAPI_COMMAND_TYPES = ['cli_show', 'cli_show_ascii', 'cli_conf', 'bash']
+NXAPI_ENCODINGS = ['json', 'xml']
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class Nxapi(object):
+
+ def __init__(self, module):
+ self.module = module
+
+ # sets the module_utils/urls.py req parameters
+ self.module.params['url_username'] = module.params['username']
+ self.module.params['url_password'] = module.params['password']
+
+ self.url = None
+ self.enable = None
+
+ def _get_body(self, commands, command_type, encoding, version='1.2', chunk='0', sid=None):
+ """Encodes a NXAPI JSON request message
+ """
+ if isinstance(commands, (list, set, tuple)):
+ commands = ' ;'.join(commands)
+
+ if encoding not in NXAPI_ENCODINGS:
+            self.module.fail_json(msg="Invalid encoding. Received %s. Expected one of %s" %
+ (encoding, ','.join(NXAPI_ENCODINGS)))
+
+ msg = {
+ 'version': version,
+ 'type': command_type,
+ 'chunk': chunk,
+ 'sid': sid,
+ 'input': commands,
+ 'output_format': encoding
+ }
+ return dict(ins_api=msg)
+
+ def connect(self):
+ host = self.module.params['host']
+ port = self.module.params['port']
+
+ if self.module.params['use_ssl']:
+ proto = 'https'
+ if not port:
+ port = 443
+ else:
+ proto = 'http'
+ if not port:
+ port = 80
+
+ self.url = '%s://%s:%s/ins' % (proto, host, port)
+
+ def send(self, commands, command_type='cli_show_ascii', encoding='json'):
+ """Send commands to the device.
+ """
+ clist = to_list(commands)
+
+ if command_type not in NXAPI_COMMAND_TYPES:
+ self.module.fail_json(msg="Invalid command_type. Received %s. Expected one of %s." %
+ (command_type, ','.join(NXAPI_COMMAND_TYPES)))
+
+ data = self._get_body(clist, command_type, encoding)
+ data = self.module.jsonify(data)
+
+ headers = {'Content-Type': 'application/json'}
+
+ response, headers = fetch_url(self.module, self.url, data=data, headers=headers,
+ method='POST')
+
+ if headers['status'] != 200:
+ self.module.fail_json(**headers)
+
+ response = self.module.from_json(response.read())
+ if 'error' in response:
+ err = response['error']
+            self.module.fail_json(msg='json-rpc error %s' % str(err))
+
+ return response
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def send(self, commands, encoding='text'):
+ return self.shell.send(commands)
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ if self.params['transport'] == 'nxapi':
+ self.connection = Nxapi(self)
+ else:
+ self.connection = Cli(self)
+
+ try:
+ self.connection.connect()
+ self.execute('terminal length 0')
+ except Exception, exc:
+ self.fail_json(msg=exc.message)
+
+ def configure(self, commands):
+ commands = to_list(commands)
+ if self.params['transport'] == 'cli':
+ commands.insert(0, 'configure terminal')
+ responses = self.execute(commands)
+ responses.pop(0)
+ else:
+ responses = self.execute(commands, command_type='cli_conf')
+ return responses
+
+ def execute(self, commands, **kwargs):
+ try:
+ return self.connection.send(commands, **kwargs)
+ except Exception, exc:
+ self.fail_json(msg=exc.message)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=2)
+
+ def get_config(self):
+ cmd = 'show running-config'
+ if self.params.get('include_defaults'):
+ cmd += ' all'
+ if self.params['transport'] == 'cli':
+ return self.execute(cmd)[0]
+ else:
+ resp = self.execute(cmd)
+ if not resp.get('ins_api').get('outputs').get('output').get('body'):
+ self.fail_json(msg="Unrecognized response: %s" % str(resp))
+ return resp['ins_api']['outputs']['output']['body']
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+
+ module = NetworkModule(**kwargs)
+
+ # HAS_PARAMIKO is set by module_utils/shell.py
+ if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ module.connect()
+ return module
diff --git a/lib/ansible/module_utils/openswitch.py b/lib/ansible/module_utils/openswitch.py
new file mode 100644
index 0000000000..ba3eb7b44a
--- /dev/null
+++ b/lib/ansible/module_utils/openswitch.py
@@ -0,0 +1,247 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import time
+import json
+
+try:
+ from runconfig import runconfig
+ from opsrest.settings import settings
+ from opsrest.manager import OvsdbConnectionManager
+ from opslib import restparser
+ HAS_OPS = True
+except ImportError:
+ HAS_OPS = False
+
+NET_PASSWD_RE = re.compile(r"[\r\n]?password: $", re.I)
+
+NET_COMMON_ARGS = dict(
+ host=dict(),
+ port=dict(type='int'),
+ username=dict(),
+ password=dict(no_log=True),
+    use_ssl=dict(default=True, type='bool'),
+ transport=dict(default='ssh', choices=['ssh', 'cli', 'rest']),
+ provider=dict()
+)
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+def get_idl():
+ manager = OvsdbConnectionManager(settings.get('ovs_remote'),
+ settings.get('ovs_schema'))
+ manager.start()
+ idl = manager.idl
+
+ init_seq_no = 0
+ while (init_seq_no == idl.change_seqno):
+ idl.run()
+ time.sleep(1)
+
+ return idl
+
+def get_schema():
+ return restparser.parseSchema(settings.get('ext_schema'))
+
+def get_runconfig():
+ idl = get_idl()
+ schema = get_schema()
+ return runconfig.RunConfigUtil(idl, schema)
+
+class Response(object):
+
+ def __init__(self, resp, hdrs):
+ self.body = resp.read()
+ self.headers = hdrs
+
+ @property
+ def json(self):
+ try:
+ return json.loads(self.body)
+ except ValueError:
+ return None
+
+class Rest(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.baseurl = None
+
+ def connect(self):
+ host = self.module.params['host']
+ port = self.module.params['port']
+
+ if self.module.params['use_ssl']:
+ proto = 'https'
+ if not port:
+ port = 443
+ else:
+ proto = 'http'
+ if not port:
+ port = 80
+
+ self.baseurl = '%s://%s:%s/rest/v1' % (proto, host, port)
+
+ def _url_builder(self, path):
+ if path[0] == '/':
+ path = path[1:]
+ return '%s/%s' % (self.baseurl, path)
+
+ def send(self, method, path, data=None, headers=None):
+ url = self._url_builder(path)
+ data = self.module.jsonify(data)
+
+ if headers is None:
+ headers = dict()
+ headers.update({'Content-Type': 'application/json'})
+
+ resp, hdrs = fetch_url(self.module, url, data=data, headers=headers,
+ method=method)
+
+ return Response(resp, hdrs)
+
+ def get(self, path, data=None, headers=None):
+ return self.send('GET', path, data, headers)
+
+ def put(self, path, data=None, headers=None):
+ return self.send('PUT', path, data, headers)
+
+ def post(self, path, data=None, headers=None):
+ return self.send('POST', path, data, headers)
+
+ def delete(self, path, data=None, headers=None):
+ return self.send('DELETE', path, data, headers)
+
+class Cli(object):
+
+ def __init__(self, module):
+ self.module = module
+ self.shell = None
+
+ def connect(self, **kwargs):
+ host = self.module.params['host']
+ port = self.module.params['port'] or 22
+
+ username = self.module.params['username']
+ password = self.module.params['password']
+
+ self.shell = Shell()
+ self.shell.open(host, port=port, username=username, password=password)
+
+ def send(self, commands, encoding='text'):
+ return self.shell.send(commands)
+
+class NetworkModule(AnsibleModule):
+
+ def __init__(self, *args, **kwargs):
+ super(NetworkModule, self).__init__(*args, **kwargs)
+ self.connection = None
+ self._config = None
+ self._runconfig = None
+
+ @property
+ def config(self):
+ if not self._config:
+ self._config = self.get_config()
+ return self._config
+
+ def _load_params(self):
+ params = super(NetworkModule, self)._load_params()
+ provider = params.get('provider') or dict()
+ for key, value in provider.items():
+ if key in NET_COMMON_ARGS.keys():
+ params[key] = value
+ return params
+
+ def connect(self):
+ if self.params['transport'] == 'rest':
+ self.connection = Rest(self)
+ elif self.params['transport'] == 'cli':
+ self.connection = Cli(self)
+
+ self.connection.connect()
+
+ def configure(self, config):
+ if self.params['transport'] == 'cli':
+ commands = to_list(config)
+ commands.insert(0, 'configure terminal')
+ responses = self.execute(commands)
+ responses.pop(0)
+ return responses
+ elif self.params['transport'] == 'rest':
+ path = '/system/full-configuration'
+ return self.connection.put(path, data=config)
+ else:
+ if not self._runconfig:
+ self._runconfig = get_runconfig()
+ self._runconfig.write_config_to_db(config)
+
+ def execute(self, commands, **kwargs):
+ try:
+ return self.connection.send(commands, **kwargs)
+ except Exception, exc:
+ self.fail_json(msg=exc.message, commands=commands)
+
+ def disconnect(self):
+ self.connection.close()
+
+ def parse_config(self, cfg):
+ return parse(cfg, indent=4)
+
+ def get_config(self):
+ if self.params['transport'] == 'cli':
+ return self.execute('show running-config')[0]
+
+ elif self.params['transport'] == 'rest':
+ resp = self.connection.get('/system/full-configuration')
+ return resp.json
+
+ else:
+ if not self._runconfig:
+ self._runconfig = get_runconfig()
+ return self._runconfig.get_running_config()
+
+
+def get_module(**kwargs):
+ """Return instance of NetworkModule
+ """
+ argument_spec = NET_COMMON_ARGS.copy()
+ if kwargs.get('argument_spec'):
+ argument_spec.update(kwargs['argument_spec'])
+ kwargs['argument_spec'] = argument_spec
+
+ module = NetworkModule(**kwargs)
+
+ if not HAS_OPS and module.params['transport'] == 'ssh':
+ module.fail_json(msg='could not import ops library')
+
+ # HAS_PARAMIKO is set by module_utils/shell.py
+ if module.params['transport'] == 'cli' and not HAS_PARAMIKO:
+ module.fail_json(msg='paramiko is required but does not appear to be installed')
+
+ if module.params['transport'] in ['cli', 'rest']:
+ module.connect()
+
+ return module
+
diff --git a/lib/ansible/module_utils/shell.py b/lib/ansible/module_utils/shell.py
new file mode 100644
index 0000000000..13506c4322
--- /dev/null
+++ b/lib/ansible/module_utils/shell.py
@@ -0,0 +1,196 @@
+#
+# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+import re
+import socket
+
+from StringIO import StringIO
+
+try:
+ import paramiko
+ HAS_PARAMIKO = True
+except ImportError:
+ HAS_PARAMIKO = False
+
+
+ANSI_RE = re.compile(r'(\x1b\[\?1h\x1b=)')
+
+CLI_PROMPTS_RE = [
+ re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>|#|%](?:\s*)$'),
+ re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$')
+]
+
+CLI_ERRORS_RE = [
+ re.compile(r"% ?Error"),
+ re.compile(r"^% \w+", re.M),
+ re.compile(r"% ?Bad secret"),
+ re.compile(r"invalid input", re.I),
+ re.compile(r"(?:incomplete|ambiguous) command", re.I),
+ re.compile(r"connection timed out", re.I),
+ re.compile(r"[^\r\n]+ not found", re.I),
+ re.compile(r"'[^']' +returned error code: ?\d+"),
+]
+
+def to_list(val):
+ if isinstance(val, (list, tuple)):
+ return list(val)
+ elif val is not None:
+ return [val]
+ else:
+ return list()
+
+class ShellError(Exception):
+
+ def __init__(self, msg, command=None):
+ super(ShellError, self).__init__(msg)
+ self.message = msg
+ self.command = command
+
+class Command(object):
+
+ def __init__(self, command, prompt=None, response=None):
+ self.command = command
+ self.prompt = prompt
+ self.response = response
+
+ def __str__(self):
+ return self.command
+
+class Shell(object):
+
+ def __init__(self):
+ self.ssh = None
+ self.shell = None
+
+ self.prompts = list()
+ self.prompts.extend(CLI_PROMPTS_RE)
+
+ self.errors = list()
+ self.errors.extend(CLI_ERRORS_RE)
+
+ def open(self, host, port=22, username=None, password=None,
+ timeout=10, key_filename=None, pkey=None, look_for_keys=None):
+
+ self.ssh = paramiko.SSHClient()
+ self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+ # unless explicitly set, disable look for keys if a password is
+ # present. this changes the default search order paramiko implements
+ if not look_for_keys:
+ look_for_keys = password is None
+
+ self.ssh.connect(host, port=port, username=username, password=password,
+ timeout=timeout, look_for_keys=look_for_keys, pkey=pkey,
+ key_filename=key_filename)
+
+ self.shell = self.ssh.invoke_shell()
+ self.shell.settimeout(10)
+ self.receive()
+
+ def strip(self, data):
+ return ANSI_RE.sub('', data)
+
+ def receive(self, cmd=None):
+ recv = StringIO()
+
+ while True:
+ data = self.shell.recv(200)
+
+ recv.write(data)
+ recv.seek(recv.tell() - 200)
+
+ window = self.strip(recv.read())
+
+ if isinstance(cmd, Command):
+ self.handle_input(window, prompt=cmd.prompt,
+ response=cmd.response)
+
+ try:
+ if self.read(window):
+ resp = self.strip(recv.getvalue())
+ return self.sanitize(cmd, resp)
+ except ShellError, exc:
+ exc.command = cmd
+ raise
+
+ def send(self, commands):
+ responses = list()
+ try:
+ for command in to_list(commands):
+ cmd = '%s\r' % str(command)
+ self.shell.sendall(cmd)
+ responses.append(self.receive(command))
+ except socket.timeout, exc:
+ raise ShellError("timeout trying to send command", cmd)
+ return responses
+
+ def close(self):
+ self.shell.close()
+
+ def handle_input(self, resp, prompt, response):
+ if not prompt or not response:
+ return
+
+ prompt = to_list(prompt)
+ response = to_list(response)
+
+ for pr, ans in zip(prompt, response):
+ match = pr.search(resp)
+ if match:
+ cmd = '%s\r' % ans
+ self.shell.sendall(cmd)
+
+ def sanitize(self, cmd, resp):
+ cleaned = []
+ for line in resp.splitlines():
+ if line.startswith(str(cmd)) or self.read(line):
+ continue
+ cleaned.append(line)
+ return "\n".join(cleaned)
+
+ def read(self, response):
+ for regex in self.errors:
+ if regex.search(response):
+ raise ShellError('%s' % response)
+
+ for regex in self.prompts:
+ if regex.search(response):
+ return True
+
+def get_cli_connection(module):
+ host = module.params['host']
+ port = module.params['port']
+ if not port:
+ port = 22
+
+ username = module.params['username']
+ password = module.params['password']
+
+ try:
+ cli = Cli()
+ cli.open(host, port=port, username=username, password=password)
+ except paramiko.ssh_exception.AuthenticationException, exc:
+ module.fail_json(msg=exc.message)
+ except socket.error, exc:
+ host = '%s:%s' % (host, port)
+ module.fail_json(msg=exc.strerror, errno=exc.errno, host=host)
+ except socket.timeout:
+ module.fail_json(msg='socket timed out')
+
+ return cli
+
diff --git a/lib/ansible/module_utils/urls.py b/lib/ansible/module_utils/urls.py
index 979d5943dd..41613f6cb6 100644
--- a/lib/ansible/module_utils/urls.py
+++ b/lib/ansible/module_utils/urls.py
@@ -310,36 +310,45 @@ class NoSSLError(SSLValidationError):
"""Needed to connect to an HTTPS url but no ssl library available to verify the certificate"""
pass
+# Some environments (Google Compute Engine's CoreOS deploys) do not compile
+# against openssl and thus do not have any HTTPS support.
+CustomHTTPSConnection = CustomHTTPSHandler = None
+if hasattr(httplib, 'HTTPSConnection') and hasattr(urllib2, 'HTTPSHandler'):
+ class CustomHTTPSConnection(httplib.HTTPSConnection):
+ def __init__(self, *args, **kwargs):
+ httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+ if HAS_SSLCONTEXT:
+ self.context = create_default_context()
+ if self.cert_file:
+ self.context.load_cert_chain(self.cert_file, self.key_file)
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ if hasattr(self, 'source_address'):
+ sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
+ else:
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+
+ server_hostname = self.host
+ # Note: self._tunnel_host is not available on py < 2.6 but this code
+ # isn't used on py < 2.6 (lack of create_connection)
+ if self._tunnel_host:
+ self.sock = sock
+ self._tunnel()
+ server_hostname = self._tunnel_host
+
+ if HAS_SSLCONTEXT:
+ self.sock = self.context.wrap_socket(sock, server_hostname=server_hostname)
+ else:
+ self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
-class CustomHTTPSConnection(httplib.HTTPSConnection):
- def __init__(self, *args, **kwargs):
- httplib.HTTPSConnection.__init__(self, *args, **kwargs)
- if HAS_SSLCONTEXT:
- self.context = create_default_context()
- if self.cert_file:
- self.context.load_cert_chain(self.cert_file, self.key_file)
-
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- if hasattr(self, 'source_address'):
- sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
- else:
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
- if HAS_SSLCONTEXT:
- self.sock = self.context.wrap_socket(sock, server_hostname=self.host)
- else:
- self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=PROTOCOL)
-
-class CustomHTTPSHandler(urllib2.HTTPSHandler):
+ class CustomHTTPSHandler(urllib2.HTTPSHandler):
- def https_open(self, req):
- return self.do_open(CustomHTTPSConnection, req)
+ def https_open(self, req):
+ return self.do_open(CustomHTTPSConnection, req)
- https_request = urllib2.AbstractHTTPHandler.do_request_
+ https_request = urllib2.AbstractHTTPHandler.do_request_
def generic_urlparse(parts):
'''
@@ -373,7 +382,10 @@ def generic_urlparse(parts):
# get the username, password, etc.
try:
netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
- (auth, hostname, port) = netloc_re.match(parts[1])
+ match = netloc_re.match(parts[1])
+ auth = match.group(1)
+ hostname = match.group(2)
+ port = match.group(3)
if port:
# the capture group for the port will include the ':',
# so remove it and convert the port to an integer
@@ -383,6 +395,8 @@ def generic_urlparse(parts):
# and then split it up based on the first ':' found
auth = auth[:-1]
username, password = auth.split(':', 1)
+ else:
+ username = password = None
generic_parts['username'] = username
generic_parts['password'] = password
generic_parts['hostname'] = hostname
@@ -390,7 +404,7 @@ def generic_urlparse(parts):
except:
generic_parts['username'] = None
generic_parts['password'] = None
- generic_parts['hostname'] = None
+ generic_parts['hostname'] = parts[1]
generic_parts['port'] = None
return generic_parts
@@ -532,7 +546,8 @@ class SSLValidationHandler(urllib2.BaseHandler):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if https_proxy:
proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
- s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
+ port = proxy_parts.get('port') or 443
+ s.connect((proxy_parts.get('hostname'), port))
if proxy_parts.get('scheme') == 'http':
s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
if proxy_parts.get('username'):
@@ -542,7 +557,7 @@ class SSLValidationHandler(urllib2.BaseHandler):
connect_result = s.recv(4096)
self.validate_proxy_response(connect_result)
if context:
- ssl_s = context.wrap_socket(s, server_hostname=proxy_parts.get('hostname'))
+ ssl_s = context.wrap_socket(s, server_hostname=self.hostname)
else:
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED, ssl_version=PROTOCOL)
match_hostname(ssl_s.getpeercert(), self.hostname)
@@ -661,8 +676,9 @@ def open_url(url, data=None, headers=None, method=None, use_proxy=True,
handlers.append(proxyhandler)
# pre-2.6 versions of python cannot use the custom https
- # handler, since the socket class is lacking this method
- if hasattr(socket, 'create_connection'):
+ # handler, since the socket class is lacking create_connection.
+ # Some python builds lack HTTPS support.
+ if hasattr(socket, 'create_connection') and CustomHTTPSHandler:
handlers.append(CustomHTTPSHandler)
opener = urllib2.build_opener(*handlers)
diff --git a/lib/ansible/module_utils/vca.py b/lib/ansible/module_utils/vca.py
index 56341ec555..9737cca8b4 100644
--- a/lib/ansible/module_utils/vca.py
+++ b/lib/ansible/module_utils/vca.py
@@ -35,8 +35,8 @@ class VcaError(Exception):
def vca_argument_spec():
return dict(
- username=dict(),
- password=dict(),
+ username=dict(type='str', aliases=['user'], required=True),
+ password=dict(type='str', aliases=['pass','passwd'], required=True, no_log=True),
org=dict(),
service_id=dict(),
instance_id=dict(),
@@ -108,7 +108,10 @@ class VcaAnsibleModule(AnsibleModule):
def create_instance(self):
service_type = self.params.get('service_type', DEFAULT_SERVICE_TYPE)
- host = self.params.get('host', LOGIN_HOST.get('service_type'))
+ if service_type == 'vcd':
+ host = self.params['host']
+ else:
+ host = LOGIN_HOST[service_type]
username = self.params['username']
version = self.params.get('api_version')
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
-Subproject 572771d0b1eb6d94ea9a596b7a719d3a2d0b651
+Subproject 09e2457eb0e811ac293065dd77cd31597ceb2da
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
-Subproject e5362cc76a25a734ddacf4d8ac496d9127c4a46
+Subproject e8427cb32a07ebaa4682192675a075fc336f656
diff --git a/lib/ansible/parsing/dataloader.py b/lib/ansible/parsing/dataloader.py
index aaa878bb5f..c54ba78f1f 100644
--- a/lib/ansible/parsing/dataloader.py
+++ b/lib/ansible/parsing/dataloader.py
@@ -52,9 +52,7 @@ class DataLoader():
Usage:
dl = DataLoader()
- (or)
- dl = DataLoader(vault_password='foo')
-
+ # optionally: dl.set_vault_password('foo')
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
'''
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
index 3d158202ff..fbf5e1c3d6 100644
--- a/lib/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -21,7 +21,7 @@ __metaclass__ = type
from ansible.compat.six import iteritems, string_types
-from ansible.errors import AnsibleParserError
+from ansible.errors import AnsibleParserError,AnsibleError
from ansible.plugins import module_loader
from ansible.parsing.splitter import parse_kv, split_args
from ansible.template import Templar
@@ -137,7 +137,16 @@ class ModuleArgsParser:
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
- final_args.update(additional_args)
+ if isinstance(additional_args, string_types):
+ templar = Templar(loader=None)
+ if templar._contains_vars(additional_args):
+ final_args['_variable_params'] = additional_args
+ else:
+ raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')")
+ elif isinstance(additional_args, dict):
+ final_args.update(additional_args)
+ else:
+ raise AnsibleParserError('Complex args must be a dictionary or variable string ("{{var}}").')
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's an 'old style' invocation.
@@ -155,6 +164,13 @@ class ModuleArgsParser:
tmp_args = parse_kv(tmp_args)
args.update(tmp_args)
+ # only internal variables can start with an underscore, so
+ # we don't allow users to set them directy in arguments
+ if args and action not in ('command', 'shell', 'script', 'raw'):
+ for arg in args:
+ if arg.startswith('_ansible_'):
+ raise AnsibleError("invalid parameter specified for action '%s': '%s'" % (action, arg))
+
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
@@ -206,18 +222,21 @@ class ModuleArgsParser:
action = None
args = None
+ actions_allowing_raw = ('command', 'shell', 'script', 'raw')
if isinstance(thing, dict):
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
thing = thing.copy()
if 'module' in thing:
- action = thing['module']
+ action, module_args = self._split_module_string(thing['module'])
args = thing.copy()
+ check_raw = action in actions_allowing_raw
+ args.update(parse_kv(module_args, check_raw=check_raw))
del args['module']
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
- check_raw = action in ('command', 'shell', 'script', 'raw')
+ check_raw = action in actions_allowing_raw
args = parse_kv(args, check_raw=check_raw)
else:
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
index 3bbf6e7e9c..feb0cd2b34 100644
--- a/lib/ansible/parsing/splitter.py
+++ b/lib/ansible/parsing/splitter.py
@@ -65,8 +65,8 @@ def parse_kv(args, check_raw=False):
raise
raw_params = []
- for x in vargs:
- x = _decode_escapes(x)
+ for orig_x in vargs:
+ x = _decode_escapes(orig_x)
if "=" in x:
pos = 0
try:
@@ -83,19 +83,14 @@ def parse_kv(args, check_raw=False):
k = x[:pos]
v = x[pos + 1:]
- # only internal variables can start with an underscore, so
- # we don't allow users to set them directy in arguments
- if k.startswith('_'):
- raise AnsibleError("invalid parameter specified: '%s'" % k)
-
# FIXME: make the retrieval of this list of shell/command
# options a function, so the list is centralized
if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
- raw_params.append(x)
+ raw_params.append(orig_x)
else:
options[k.strip()] = unquote(v.strip())
else:
- raw_params.append(x)
+ raw_params.append(orig_x)
# recombine the free-form params, if any were found, and assign
# them to a special option for use later by the shell/command module
diff --git a/lib/ansible/parsing/utils/addresses.py b/lib/ansible/parsing/utils/addresses.py
index 387f05c627..ebfd850ac6 100644
--- a/lib/ansible/parsing/utils/addresses.py
+++ b/lib/ansible/parsing/utils/addresses.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
+from ansible.errors import AnsibleParserError, AnsibleError
# Components that match a numeric or alphanumeric begin:end or begin:end:step
# range expression inside square brackets.
@@ -162,6 +163,7 @@ patterns = {
$
'''.format(label=label), re.X|re.I|re.UNICODE
),
+
}
def parse_address(address, allow_ranges=False):
@@ -183,8 +185,8 @@ def parse_address(address, allow_ranges=False):
# First, we extract the port number if one is specified.
port = None
- for type in ['bracketed_hostport', 'hostport']:
- m = patterns[type].match(address)
+ for matching in ['bracketed_hostport', 'hostport']:
+ m = patterns[matching].match(address)
if m:
(address, port) = m.groups()
port = int(port)
@@ -194,22 +196,20 @@ def parse_address(address, allow_ranges=False):
# numeric ranges, or a hostname with alphanumeric ranges.
host = None
- for type in ['ipv4', 'ipv6', 'hostname']:
- m = patterns[type].match(address)
+ for matching in ['ipv4', 'ipv6', 'hostname']:
+ m = patterns[matching].match(address)
if m:
host = address
continue
# If it isn't any of the above, we don't understand it.
-
if not host:
- return (None, None)
-
- # If we get to this point, we know that any included ranges are valid. If
- # the caller is prepared to handle them, all is well. Otherwise we treat
- # it as a parse failure.
+ raise AnsibleError("Not a valid network hostname: %s" % address)
+ # If we get to this point, we know that any included ranges are valid.
+ # If the caller is prepared to handle them, all is well.
+ # Otherwise we treat it as a parse failure.
if not allow_ranges and '[' in host:
- return (None, None)
+ raise AnsibleParserError("Detected range in host but was asked to ignore ranges")
return (host, port)
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
index d8cf66feca..dc30dd0ffb 100644
--- a/lib/ansible/parsing/vault/__init__.py
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -22,6 +22,7 @@ import shlex
import shutil
import sys
import tempfile
+import random
from io import BytesIO
from subprocess import call
from ansible.errors import AnsibleError
@@ -70,7 +71,7 @@ try:
except ImportError:
pass
-from ansible.compat.six import PY3, byte2int
+from ansible.compat.six import PY3
from ansible.utils.unicode import to_unicode, to_bytes
HAS_ANY_PBKDF2HMAC = HAS_PBKDF2 or HAS_PBKDF2HMAC
@@ -220,21 +221,91 @@ class VaultEditor:
def __init__(self, password):
self.vault = VaultLib(password)
+ def _shred_file_custom(self, tmp_path):
+ """Destroy a file, when shred (core-utils) is not available
+
+ Unix `shred' destroys files "so that they can be recovered only with great difficulty with
+ specialised hardware, if at all". It is based on the method from the paper
+ "Secure Deletion of Data from Magnetic and Solid-State Memory",
+ Proceedings of the Sixth USENIX Security Symposium (San Jose, California, July 22-25, 1996).
+
+ We do not go to that length to re-implement shred in Python; instead, overwriting with a block
+ of random data should suffice.
+
+ See https://github.com/ansible/ansible/pull/13700 .
+ """
+
+ file_len = os.path.getsize(tmp_path)
+
+ if file_len > 0: # avoid work when file was empty
+ max_chunk_len = min(1024*1024*2, file_len)
+
+ passes = 3
+ with open(tmp_path, "wb") as fh:
+ for _ in range(passes):
+ fh.seek(0, 0)
+ # get a random chunk of data, each pass with other length
+ chunk_len = random.randint(max_chunk_len//2, max_chunk_len)
+ data = os.urandom(chunk_len)
+
+ for _ in range(0, file_len // chunk_len):
+ fh.write(data)
+ fh.write(data[:file_len % chunk_len])
+
+ assert(fh.tell() == file_len) # FIXME remove this assert once we have unittests to check its accuracy
+ os.fsync(fh)
+
+
+ def _shred_file(self, tmp_path):
+ """Securely destroy a decrypted file
+
+ Note standard limitations of GNU shred apply (For flash, overwriting would have no effect
+ due to wear leveling; for other storage systems, the async kernel->filesystem->disk calls never
+ guarantee data hits the disk; etc). Furthermore, if your tmp dir is on tmpfs (ramdisks),
+ it is a non-issue.
+
+ Nevertheless, some form of overwriting the data (instead of just removing the fs index entry) is
+ a good idea. If shred is not available (e.g. on windows, or no core-utils installed), fall back on
+ a custom shredding method.
+ """
+
+ if not os.path.isfile(tmp_path):
+ # file is already gone
+ return
+
+ try:
+ r = call(['shred', tmp_path])
+ except OSError:
+ # shred is not available on this system, or some other error occurred.
+ r = 1
+
+ if r != 0:
+ # we could not successfully execute unix shred; therefore, do custom shred.
+ self._shred_file_custom(tmp_path)
+
+ os.remove(tmp_path)
+
def _edit_file_helper(self, filename, existing_data=None, force_save=False):
# Create a tempfile
_, tmp_path = tempfile.mkstemp()
if existing_data:
- self.write_data(existing_data, tmp_path)
+ self.write_data(existing_data, tmp_path, shred=False)
# drop the user into an editor on the tmp file
- call(self._editor_shell_command(tmp_path))
+ try:
+ call(self._editor_shell_command(tmp_path))
+ except:
+ # whatever happens, destroy the decrypted file
+ self._shred_file(tmp_path)
+ raise
+
tmpdata = self.read_data(tmp_path)
# Do nothing if the content has not changed
if existing_data == tmpdata and not force_save:
- os.remove(tmp_path)
+ self._shred_file(tmp_path)
return
# encrypt new data and write out to tmp
@@ -258,7 +329,7 @@ class VaultEditor:
ciphertext = self.read_data(filename)
plaintext = self.vault.decrypt(ciphertext)
- self.write_data(plaintext, output_file or filename)
+ self.write_data(plaintext, output_file or filename, shred=False)
def create_file(self, filename):
""" create a new encrypted file """
@@ -323,13 +394,21 @@ class VaultEditor:
return data
- def write_data(self, data, filename):
+ def write_data(self, data, filename, shred=True):
+ """write data to given path
+
+ if shred==True, make sure that the original data is first shredded so
+ that it cannot be recovered
+ """
bytes = to_bytes(data, errors='strict')
if filename == '-':
sys.stdout.write(bytes)
else:
if os.path.isfile(filename):
- os.remove(filename)
+ if shred:
+ self._shred_file(filename)
+ else:
+ os.remove(filename)
with open(filename, "wb") as fh:
fh.write(bytes)
@@ -338,6 +417,7 @@ class VaultEditor:
# overwrite dest with src
if os.path.isfile(dest):
prev = os.stat(dest)
+ # old file 'dest' was encrypted, no need to _shred_file
os.remove(dest)
shutil.move(src, dest)
diff --git a/lib/ansible/parsing/yaml/dumper.py b/lib/ansible/parsing/yaml/dumper.py
index a51289b09b..a8a5015b8e 100644
--- a/lib/ansible/parsing/yaml/dumper.py
+++ b/lib/ansible/parsing/yaml/dumper.py
@@ -22,7 +22,7 @@ __metaclass__ = type
import yaml
from ansible.compat.six import PY3
-from ansible.parsing.yaml.objects import AnsibleUnicode
+from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping
from ansible.vars.hostvars import HostVars
class AnsibleDumper(yaml.SafeDumper):
@@ -50,3 +50,13 @@ AnsibleDumper.add_representer(
represent_hostvars,
)
+AnsibleDumper.add_representer(
+ AnsibleSequence,
+ yaml.representer.SafeRepresenter.represent_list,
+)
+
+AnsibleDumper.add_representer(
+ AnsibleMapping,
+ yaml.representer.SafeRepresenter.represent_dict,
+)
+
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 888299e1d9..947224d61f 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -25,6 +25,7 @@ from ansible.errors import AnsibleParserError
from ansible.playbook.play import Play
from ansible.playbook.playbook_include import PlaybookInclude
from ansible.plugins import get_all_plugin_loaders
+from ansible import constants as C
try:
from __main__ import display
@@ -44,6 +45,7 @@ class Playbook:
self._entries = []
self._basedir = os.getcwd()
self._loader = loader
+ self._file_name = None
@staticmethod
def load(file_name, variable_manager=None, loader=None):
@@ -61,6 +63,8 @@ class Playbook:
# set the loaders basedir
self._loader.set_basedir(self._basedir)
+ self._file_name = file_name
+
# dynamically load any plugins from the playbook directory
for name, obj in get_all_plugin_loaders():
if obj.subdir:
@@ -84,7 +88,7 @@ class Playbook:
if pb is not None:
self._entries.extend(pb._entries)
else:
- display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color='cyan')
+ display.display("skipping playbook include '%s' due to conditional test failure" % entry.get('include', entry), color=C.COLOR_SKIP)
else:
entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
self._entries.append(entry_obj)
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
index 703d9dbca1..0befb9d80d 100644
--- a/lib/ansible/playbook/attribute.py
+++ b/lib/ansible/playbook/attribute.py
@@ -19,6 +19,7 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from copy import deepcopy
class Attribute:
@@ -32,6 +33,11 @@ class Attribute:
self.priority = priority
self.always_post_validate = always_post_validate
+ if default is not None and self.isa in ('list', 'dict', 'set'):
+ self.default = deepcopy(default)
+ else:
+ self.default = default
+
def __eq__(self, other):
return other.priority == self.priority
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
index c41cef8d30..7725b5c3c9 100644
--- a/lib/ansible/playbook/base.py
+++ b/lib/ansible/playbook/base.py
@@ -27,7 +27,7 @@ import uuid
from functools import partial
from inspect import getmembers
-from ansible.compat.six import iteritems, string_types, text_type
+from ansible.compat.six import iteritems, string_types
from jinja2.exceptions import UndefinedError
@@ -36,6 +36,7 @@ from ansible.parsing.dataloader import DataLoader
from ansible.playbook.attribute import Attribute, FieldAttribute
from ansible.utils.boolean import boolean
from ansible.utils.vars import combine_vars, isidentifier
+from ansible.utils.unicode import to_unicode
BASE_ATTRIBUTES = {}
@@ -48,7 +49,7 @@ class Base:
_remote_user = FieldAttribute(isa='string')
# variables
- _vars = FieldAttribute(isa='dict', default=dict(), priority=100)
+ _vars = FieldAttribute(isa='dict', priority=100)
# flags and misc. settings
_environment = FieldAttribute(isa='list')
@@ -76,6 +77,10 @@ class Base:
# and initialize the base attributes
self._initialize_base_attributes()
+ # and init vars, avoid using defaults in field declaration as it lives across plays
+ self.vars = dict()
+
+
# The following three functions are used to programatically define data
# descriptors (aka properties) for the Attributes of all of the playbook
# objects (tasks, blocks, plays, etc).
@@ -148,7 +153,7 @@ class Base:
setattr(Base, name, property(getter, setter, deleter))
# Place the value into the instance so that the property can
- # process and hold that value/
+ # process and hold that value.
setattr(self, name, value.default)
def preprocess_data(self, ds):
@@ -310,7 +315,7 @@ class Base:
# and make sure the attribute is of the type it should be
if value is not None:
if attribute.isa == 'string':
- value = text_type(value)
+ value = to_unicode(value)
elif attribute.isa == 'int':
value = int(value)
elif attribute.isa == 'float':
diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py
index 643f2b555d..1e579751d4 100644
--- a/lib/ansible/playbook/become.py
+++ b/lib/ansible/playbook/become.py
@@ -90,16 +90,18 @@ class Become:
display.deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
- # if we are becoming someone else, but some fields are unset,
- # make sure they're initialized to the default config values
- if ds.get('become', False):
- if ds.get('become_method', None) is None:
- ds['become_method'] = C.DEFAULT_BECOME_METHOD
- if ds.get('become_user', None) is None:
- ds['become_user'] = C.DEFAULT_BECOME_USER
return ds
+ def set_become_defaults(self, become, become_method, become_user):
+ ''' if we are becoming someone else, but some fields are unset,
+ make sure they're initialized to the default config values '''
+ if become:
+ if become_method is None:
+ become_method = C.DEFAULT_BECOME_METHOD
+ if become_user is None:
+ become_user = C.DEFAULT_BECOME_USER
+
def _get_attr_become(self):
'''
Override for the 'become' getattr fetcher, used from Base.
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
index 201e881ef4..b31ffbcfe8 100644
--- a/lib/ansible/playbook/block.py
+++ b/lib/ansible/playbook/block.py
@@ -34,6 +34,8 @@ class Block(Base, Become, Conditional, Taggable):
_rescue = FieldAttribute(isa='list', default=[])
_always = FieldAttribute(isa='list', default=[])
_delegate_to = FieldAttribute(isa='list')
+ _delegate_facts = FieldAttribute(isa='bool', default=False)
+ _any_errors_fatal = FieldAttribute(isa='bool')
# for future consideration? this would be functionally
# similar to the 'else' clause for exceptions
@@ -42,11 +44,20 @@ class Block(Base, Become, Conditional, Taggable):
def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False, implicit=False):
self._play = play
self._role = role
- self._task_include = task_include
- self._parent_block = parent_block
+ self._task_include = None
+ self._parent_block = None
self._use_handlers = use_handlers
self._implicit = implicit
- self._dep_chain = []
+
+ if task_include:
+ self._task_include = task_include
+ elif parent_block:
+ self._parent_block = parent_block
+
+ if parent_block:
+ self._dep_chain = parent_block._dep_chain[:]
+ else:
+ self._dep_chain = []
super(Block, self).__init__()
@@ -329,6 +340,16 @@ class Block(Base, Become, Conditional, Taggable):
return environment
+ def _get_attr_any_errors_fatal(self):
+ '''
+ Override for the 'any_errors_fatal' getattr fetcher, used from Base.
+ '''
+ any_errors_fatal = self._attributes['any_errors_fatal']
+ if hasattr(self, '_get_parent_attribute'):
+ if self._get_parent_attribute('any_errors_fatal'):
+ any_errors_fatal = True
+ return any_errors_fatal
+
def filter_tagged_tasks(self, play_context, all_vars):
'''
Creates a new block, with task lists filtered based on the tags contained
@@ -340,7 +361,9 @@ class Block(Base, Become, Conditional, Taggable):
for task in target:
if isinstance(task, Block):
tmp_list.append(evaluate_block(task))
- elif task.action in ('meta', 'include') or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
+ elif task.action == 'meta' \
+ or (task.action == 'include' and task.evaluate_tags([], play_context.skip_tags, all_vars=all_vars)) \
+ or task.evaluate_tags(play_context.only_tags, play_context.skip_tags, all_vars=all_vars):
tmp_list.append(task)
return tmp_list
@@ -355,3 +378,4 @@ class Block(Base, Become, Conditional, Taggable):
def has_tasks(self):
return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
+
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
index fc178e2fa1..c8c6a9359e 100644
--- a/lib/ansible/playbook/conditional.py
+++ b/lib/ansible/playbook/conditional.py
@@ -22,7 +22,7 @@ __metaclass__ = type
from jinja2.exceptions import UndefinedError
from ansible.compat.six import text_type
-from ansible.errors import AnsibleError
+from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
@@ -89,16 +89,22 @@ class Conditional:
# make sure the templar is using the variables specifed to this method
templar.set_available_variables(variables=all_vars)
- conditional = templar.template(conditional)
- if not isinstance(conditional, basestring) or conditional == "":
- return conditional
-
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = templar.template(presented, fail_on_undefined=False)
-
- val = conditional.strip()
- if val == presented:
+ try:
+ conditional = templar.template(conditional)
+ if not isinstance(conditional, text_type) or conditional == "":
+ return conditional
+
+ # a Jinja2 evaluation that results in something Python can eval!
+ presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+ conditional = templar.template(presented)
+ val = conditional.strip()
+ if val == "True":
+ return True
+ elif val == "False":
+ return False
+ else:
+ raise AnsibleError("unable to evaluate conditional: %s" % original)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
# the templating failed, meaning most likely a
# variable was undefined. If we happened to be
# looking for an undefined variable, return True,
@@ -108,11 +114,5 @@ class Conditional:
elif "is defined" in original:
return False
else:
- raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented))
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise AnsibleError("unable to evaluate conditional: %s" % original)
+ raise AnsibleError("error while evaluating conditional (%s): %s" % (original, e))
diff --git a/lib/ansible/playbook/included_file.py b/lib/ansible/playbook/included_file.py
index 6fc3bd5cbf..cc756a75a9 100644
--- a/lib/ansible/playbook/included_file.py
+++ b/lib/ansible/playbook/included_file.py
@@ -24,6 +24,12 @@ import os
from ansible.errors import AnsibleError
from ansible.template import Templar
+try:
+ from __main__ import display
+except ImportError:
+ from ansible.utils.display import Display
+ display = Display()
+
class IncludedFile:
def __init__(self, filename, args, task):
@@ -43,9 +49,15 @@ class IncludedFile:
return "%s (%s): %s" % (self._filename, self._args, self._hosts)
@staticmethod
- def process_include_results(results, tqm, iterator, loader, variable_manager):
+ def process_include_results(results, tqm, iterator, inventory, loader, variable_manager):
included_files = []
+ def get_original_host(host):
+ if host.name in inventory._hosts_cache:
+ return inventory._hosts_cache[host.name]
+ else:
+ return inventory.get_host(host.name)
+
for res in results:
if res._task.action == 'include':
@@ -61,9 +73,10 @@ class IncludedFile:
if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
continue
- original_task = iterator.get_original_task(res._host, res._task)
+ original_host = get_original_host(res._host)
+ original_task = iterator.get_original_task(original_host, res._task)
- task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=res._host, task=original_task)
+ task_vars = variable_manager.get_vars(loader=loader, play=iterator._play, host=original_host, task=original_task)
templar = Templar(loader=loader, variables=task_vars)
include_variables = include_result.get('include_variables', dict())
@@ -75,14 +88,19 @@ class IncludedFile:
# handle relative includes by walking up the list of parent include
# tasks and checking the relative result to see if it exists
parent_include = original_task._task_include
+ cumulative_path = None
while parent_include is not None:
parent_include_dir = templar.template(os.path.dirname(parent_include.args.get('_raw_params')))
+ if cumulative_path is None:
+ cumulative_path = parent_include_dir
+ elif not os.path.isabs(cumulative_path):
+ cumulative_path = os.path.join(parent_include_dir, cumulative_path)
include_target = templar.template(include_result['include'])
if original_task._role:
- new_basedir = os.path.join(original_task._role._role_path, 'tasks', parent_include_dir)
+ new_basedir = os.path.join(original_task._role._role_path, 'tasks', cumulative_path)
include_file = loader.path_dwim_relative(new_basedir, 'tasks', include_target)
else:
- include_file = loader.path_dwim_relative(loader.get_basedir(), parent_include_dir, include_target)
+ include_file = loader.path_dwim_relative(loader.get_basedir(), cumulative_path, include_target)
if os.path.exists(include_file):
break
@@ -105,6 +123,6 @@ class IncludedFile:
except ValueError:
included_files.append(inc_file)
- inc_file.add_host(res._host)
+ inc_file.add_host(original_host)
return included_files
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index ed61416e95..bc03314864 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -64,7 +64,7 @@ class Play(Base, Taggable, Become):
# Connection
_gather_facts = FieldAttribute(isa='bool', default=None, always_post_validate=True)
- _hosts = FieldAttribute(isa='list', default=[], required=True, listof=string_types, always_post_validate=True)
+ _hosts = FieldAttribute(isa='list', required=True, listof=string_types, always_post_validate=True)
_name = FieldAttribute(isa='string', default='', always_post_validate=True)
# Variable Attributes
diff --git a/lib/ansible/playbook/play_context.py b/lib/ansible/playbook/play_context.py
index d8c1d4cfb9..409f9661b8 100644
--- a/lib/ansible/playbook/play_context.py
+++ b/lib/ansible/playbook/play_context.py
@@ -125,6 +125,18 @@ TASK_ATTRIBUTE_OVERRIDES = (
'remote_user',
)
+RESET_VARS = (
+ 'ansible_connection',
+ 'ansible_ssh_host',
+ 'ansible_ssh_pass',
+ 'ansible_ssh_port',
+ 'ansible_ssh_user',
+ 'ansible_ssh_private_key_file',
+ 'ansible_ssh_pipelining',
+ 'ansible_user',
+ 'ansible_host',
+ 'ansible_port',
+)
class PlayContext(Base):
@@ -316,6 +328,13 @@ class PlayContext(Base):
# the host name in the delegated variable dictionary here
delegated_host_name = templar.template(task.delegate_to)
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(delegated_host_name, dict())
+
+ delegated_transport = C.DEFAULT_TRANSPORT
+ for transport_var in MAGIC_VARIABLE_MAPPING.get('connection'):
+ if transport_var in delegated_vars:
+ delegated_transport = delegated_vars[transport_var]
+ break
+
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
@@ -326,15 +345,38 @@ class PlayContext(Base):
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % delegated_host_name)
delegated_vars['ansible_host'] = delegated_host_name
+
+ # reset the port back to the default if none was specified, to prevent
+ # the delegated host from inheriting the original host's setting
+ for port_var in MAGIC_VARIABLE_MAPPING.get('port'):
+ if port_var in delegated_vars:
+ break
+ else:
+ if delegated_transport == 'winrm':
+ delegated_vars['ansible_port'] = 5986
+ else:
+ delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
+
+ # and likewise for the remote user
+ for user_var in MAGIC_VARIABLE_MAPPING.get('remote_user'):
+ if user_var in delegated_vars:
+ break
+ else:
+ delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
+ attrs_considered = []
for (attr, variable_names) in iteritems(MAGIC_VARIABLE_MAPPING):
for variable_name in variable_names:
+ if attr in attrs_considered:
+ continue
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
+ attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
+ attrs_considered.append(attr)
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
@@ -367,6 +409,13 @@ class PlayContext(Base):
if new_info.no_log is None:
new_info.no_log = C.DEFAULT_NO_LOG
+ # set become defaults if not previously set
+ task.set_become_defaults(new_info.become, new_info.become_method, new_info.become_user)
+
+ # have always_run override check mode
+ if task.always_run:
+ new_info.check_mode = False
+
return new_info
def make_become_cmd(self, cmd, executable=None):
@@ -473,7 +522,8 @@ class PlayContext(Base):
# TODO: should we be setting the more generic values here rather than
# the more specific _ssh_ ones?
- for special_var in ['ansible_connection', 'ansible_ssh_host', 'ansible_ssh_pass', 'ansible_ssh_port', 'ansible_ssh_user', 'ansible_ssh_private_key_file', 'ansible_ssh_pipelining']:
+ for special_var in RESET_VARS:
+
if special_var not in variables:
for prop, varnames in MAGIC_VARIABLE_MAPPING.items():
if special_var in varnames:
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
index 0f505bd3a9..2ce076edb1 100644
--- a/lib/ansible/playbook/playbook_include.py
+++ b/lib/ansible/playbook/playbook_include.py
@@ -22,7 +22,7 @@ __metaclass__ = type
import os
from ansible.compat.six import iteritems
-from ansible.errors import AnsibleParserError
+from ansible.errors import AnsibleParserError, AnsibleError
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
@@ -55,18 +55,25 @@ class PlaybookInclude(Base, Conditional, Taggable):
# playbook objects
new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
- all_vars = dict()
+ all_vars = self.vars.copy()
if variable_manager:
- all_vars = variable_manager.get_vars(loader=loader)
+ all_vars.update(variable_manager.get_vars(loader=loader))
templar = Templar(loader=loader, variables=all_vars)
- if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
- return None
+
+ try:
+ forward_conditional = False
+ if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
+ return None
+ except AnsibleError:
+ # conditional evaluation raised an error, so we set a flag to indicate
+ # we need to forward the conditionals on to the included play(s)
+ forward_conditional = True
# then we use the object to load a Playbook
pb = Playbook(loader=loader)
- file_name = new_obj.include
+ file_name = templar.template(new_obj.include)
if not os.path.isabs(file_name):
file_name = os.path.join(basedir, file_name)
@@ -85,6 +92,13 @@ class PlaybookInclude(Base, Conditional, Taggable):
if entry._included_path is None:
entry._included_path = os.path.dirname(file_name)
+ # Check to see if we need to forward the conditionals on to the included
+ # plays. If so, we can take a shortcut here and simply prepend them to
+ # those attached to each block (if any)
+ if forward_conditional:
+ for task_block in entry.tasks:
+ task_block.when = self.when[:] + task_block.when
+
return pb
def preprocess_data(self, ds):
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
index 9d65639785..f192ea6c94 100644
--- a/lib/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -43,7 +43,10 @@ __all__ = ['Role', 'hash_params']
# strategies (ansible/plugins/strategy/__init__.py)
def hash_params(params):
if not isinstance(params, dict):
- return params
+ if isinstance(params, list):
+ return frozenset(params)
+ else:
+ return params
else:
s = set()
for k,v in iteritems(params):
@@ -61,6 +64,7 @@ def hash_params(params):
class Role(Base, Become, Conditional, Taggable):
_delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None):
self._role_name = None
@@ -149,7 +153,7 @@ class Role(Base, Become, Conditional, Taggable):
current_when = getattr(self, 'when')[:]
current_when.extend(role_include.when)
setattr(self, 'when', current_when)
-
+
current_tags = getattr(self, 'tags')[:]
current_tags.extend(role_include.tags)
setattr(self, 'tags', current_tags)
@@ -171,11 +175,17 @@ class Role(Base, Become, Conditional, Taggable):
task_data = self._load_role_yaml('tasks')
if task_data:
- self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader)
+ try:
+ self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader)
+ except AssertionError:
+ raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data)
handler_data = self._load_role_yaml('handlers')
if handler_data:
- self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader)
+ try:
+ self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader)
+ except:
+ raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=task_data)
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars')
@@ -258,6 +268,12 @@ class Role(Base, Become, Conditional, Taggable):
inherited_vars = combine_vars(inherited_vars, parent._role_params)
return inherited_vars
+ def get_role_params(self):
+ params = {}
+ for dep in self.get_all_dependencies():
+ params = combine_vars(params, dep._role_params)
+ return params
+
def get_vars(self, dep_chain=[], include_params=True):
all_vars = self.get_inherited_vars(dep_chain, include_params=include_params)
@@ -307,7 +323,7 @@ class Role(Base, Become, Conditional, Taggable):
return host.name in self._completed and not self._metadata.allow_duplicates
- def compile(self, play, dep_chain=[]):
+ def compile(self, play, dep_chain=None):
'''
Returns the task list for this role, which is created by first
recursively compiling the tasks for all direct dependencies, and
@@ -321,18 +337,20 @@ class Role(Base, Become, Conditional, Taggable):
block_list = []
# update the dependency chain here
+ if dep_chain is None:
+ dep_chain = []
new_dep_chain = dep_chain + [self]
deps = self.get_direct_dependencies()
for dep in deps:
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
- for dep_block in dep_blocks:
- new_dep_block = dep_block.copy()
- new_dep_block._dep_chain = new_dep_chain
- new_dep_block._play = play
- block_list.append(new_dep_block)
+ block_list.extend(dep_blocks)
- block_list.extend(self._task_blocks)
+ for task_block in self._task_blocks:
+ new_task_block = task_block.copy()
+ new_task_block._dep_chain = new_dep_chain
+ new_task_block._play = play
+ block_list.append(new_task_block)
return block_list
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
index 7e8f47e9be..ac7f40050c 100644
--- a/lib/ansible/playbook/role/definition.py
+++ b/lib/ansible/playbook/role/definition.py
@@ -135,46 +135,44 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
append it to the default role path
'''
- role_path = unfrackpath(role_name)
+ # we always start the search for roles in the base directory of the playbook
+ role_search_paths = [
+ os.path.join(self._loader.get_basedir(), u'roles'),
+ self._loader.get_basedir(),
+ ]
+
+ # also search in the configured roles path
+ if C.DEFAULT_ROLES_PATH:
+ configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
+ role_search_paths.extend(configured_paths)
+
+ # finally, append the roles basedir, if it was set, so we can
+ # search relative to that directory for dependent roles
+ if self._role_basedir:
+ role_search_paths.append(self._role_basedir)
+
+ # create a templar class to template the dependency names, in
+ # case they contain variables
+ if self._variable_manager is not None:
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
+ else:
+ all_vars = dict()
+ templar = Templar(loader=self._loader, variables=all_vars)
+ role_name = templar.template(role_name)
+
+ # now iterate through the possible paths and return the first one we find
+ for path in role_search_paths:
+ path = templar.template(path)
+ role_path = unfrackpath(os.path.join(path, role_name))
+ if self._loader.path_exists(role_path):
+ return (role_name, role_path)
+
+ # if not found elsewhere try to extract path from name
+ role_path = unfrackpath(role_name)
if self._loader.path_exists(role_path):
role_name = os.path.basename(role_name)
return (role_name, role_path)
- else:
- # we always start the search for roles in the base directory of the playbook
- role_search_paths = [
- os.path.join(self._loader.get_basedir(), u'roles'),
- u'./roles',
- self._loader.get_basedir(),
- u'./'
- ]
-
- # also search in the configured roles path
- if C.DEFAULT_ROLES_PATH:
- configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- role_search_paths.extend(configured_paths)
-
- # finally, append the roles basedir, if it was set, so we can
- # search relative to that directory for dependent roles
- if self._role_basedir:
- role_search_paths.append(self._role_basedir)
-
- # create a templar class to template the dependency names, in
- # case they contain variables
- if self._variable_manager is not None:
- all_vars = self._variable_manager.get_vars(loader=self._loader, play=self._play)
- else:
- all_vars = dict()
-
- templar = Templar(loader=self._loader, variables=all_vars)
- role_name = templar.template(role_name)
-
- # now iterate through the possible paths and return the first one we find
- for path in role_search_paths:
- path = templar.template(path)
- role_path = unfrackpath(os.path.join(path, role_name))
- if self._loader.path_exists(role_path):
- return (role_name, role_path)
raise AnsibleError("the role '%s' was not found in %s" % (role_name, ":".join(role_search_paths)), obj=self._ds)
@@ -190,7 +188,12 @@ class RoleDefinition(Base, Become, Conditional, Taggable):
for (key, value) in iteritems(ds):
# use the list of FieldAttribute values to determine what is and is not
# an extra parameter for this role (or sub-class of this role)
- if key not in base_attribute_names:
+ # FIXME: hard-coded list of exception key names here corresponds to the
+ # connection fields in the Base class. There may need to be some
+ # other mechanism where we exclude certain kinds of field attributes,
+ # or make this list more automatic in some way so we don't have to
+ # remember to update it manually.
+ if key not in base_attribute_names or key in ('connection', 'port', 'remote_user'):
# this key does not match a field attribute, so it must be a role param
role_params[key] = value
else:
diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
index 67949e2e12..43e2d9e4fc 100644
--- a/lib/ansible/playbook/role/include.py
+++ b/lib/ansible/playbook/role/include.py
@@ -40,7 +40,8 @@ class RoleInclude(RoleDefinition):
is included for execution in a play.
"""
- _delegate_to = FieldAttribute(isa='string')
+ _delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, role_basedir=None, variable_manager=None, loader=None):
super(RoleInclude, self).__init__(play=play, role_basedir=role_basedir, variable_manager=variable_manager, loader=loader)
diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
index 8f5cfa0934..54ca377d08 100644
--- a/lib/ansible/playbook/taggable.py
+++ b/lib/ansible/playbook/taggable.py
@@ -38,7 +38,11 @@ class Taggable:
if isinstance(ds, list):
return ds
elif isinstance(ds, basestring):
- return [ ds ]
+ value = ds.split(',')
+ if isinstance(value, list):
+ return [ x.strip() for x in value ]
+ else:
+ return [ ds ]
else:
raise AnsibleError('tags must be specified as a list', obj=ds)
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 38cca4b3a2..154ff53d5e 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -72,6 +72,7 @@ class Task(Base, Conditional, Taggable, Become):
_changed_when = FieldAttribute(isa='string')
_delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
+ _delegate_facts = FieldAttribute(isa='bool', default=False)
_failed_when = FieldAttribute(isa='string')
_first_available_file = FieldAttribute(isa='list')
_loop = FieldAttribute(isa='string', private=True)
@@ -81,7 +82,7 @@ class Task(Base, Conditional, Taggable, Become):
_poll = FieldAttribute(isa='int')
_register = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int', default=3)
- _until = FieldAttribute(isa='list')
+ _until = FieldAttribute(isa='string')
def __init__(self, block=None, role=None, task_include=None):
''' constructors a task, without the Task.load classmethod, it will be pretty blank '''
@@ -106,11 +107,10 @@ class Task(Base, Conditional, Taggable, Become):
elif self.name:
return self.name
else:
- flattened_args = self._merge_kv(self.args)
if self._role:
- return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
+ return "%s : %s" % (self._role.get_name(), self.action)
else:
- return "%s %s" % (self.action, flattened_args)
+ return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
@@ -133,7 +133,10 @@ class Task(Base, Conditional, Taggable, Become):
def __repr__(self):
''' returns a human readable representation of the task '''
- return "TASK: %s" % self.get_name()
+ if self.get_name() == 'meta ':
+ return "TASK: meta (%s)" % self.args['_raw_params']
+ else:
+ return "TASK: %s" % self.get_name()
def _preprocess_loop(self, ds, new_ds, k, v):
''' take a lookup plugin name and store it correctly '''
@@ -213,14 +216,6 @@ class Task(Base, Conditional, Taggable, Become):
return super(Task, self).preprocess_data(new_ds)
- def _load_any_errors_fatal(self, attr, value):
- '''
- Exists only to show a deprecation warning, as this attribute is not valid
- at the task level.
- '''
- display.deprecated("Setting any_errors_fatal on a task is no longer supported. This should be set at the play level only")
- return None
-
def post_validate(self, templar):
'''
Override of base class post_validate, to also do final validation on
@@ -256,6 +251,27 @@ class Task(Base, Conditional, Taggable, Become):
break
return templar.template(value, convert_bare=True)
+ def _post_validate_changed_when(self, attr, value, templar):
+ '''
+ changed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_failed_when(self, attr, value, templar):
+ '''
+ failed_when is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
+ def _post_validate_until(self, attr, value, templar):
+ '''
+ until is evaluated after the execution of the task is complete,
+ and should not be templated during the regular post_validate step.
+ '''
+ return value
+
def get_vars(self):
all_vars = dict()
if self._block:
@@ -272,6 +288,14 @@ class Task(Base, Conditional, Taggable, Become):
return all_vars
+ def get_include_params(self):
+ all_vars = dict()
+ if self._task_include:
+ all_vars.update(self._task_include.get_include_params())
+ if self.action == 'include':
+ all_vars.update(self.vars)
+ return all_vars
+
def copy(self, exclude_block=False):
new_me = super(Task, self).copy()
@@ -390,3 +414,14 @@ class Task(Base, Conditional, Taggable, Become):
if parent_environment is not None:
environment = self._extend_value(environment, parent_environment)
return environment
+
+ def _get_attr_any_errors_fatal(self):
+ '''
+ Override for the 'tags' getattr fetcher, used from Base.
+ '''
+ any_errors_fatal = self._attributes['any_errors_fatal']
+ if hasattr(self, '_get_parent_attribute'):
+ if self._get_parent_attribute('any_errors_fatal'):
+ any_errors_fatal = True
+ return any_errors_fatal
+
diff --git a/lib/ansible/plugins/__init__.py b/lib/ansible/plugins/__init__.py
index 87de300e3c..139e5a7d61 100644
--- a/lib/ansible/plugins/__init__.py
+++ b/lib/ansible/plugins/__init__.py
@@ -213,15 +213,6 @@ class PluginLoader:
def find_plugin(self, name, mod_type=''):
''' Find a plugin named name '''
- # The particular cache to look for modules within. This matches the
- # requested mod_type
- pull_cache = self._plugin_path_cache[mod_type]
- try:
- return pull_cache[name]
- except KeyError:
- # Cache miss. Now let's find the plugin
- pass
-
if mod_type:
suffix = mod_type
elif self.class_name:
@@ -232,6 +223,15 @@ class PluginLoader:
# they can have any suffix
suffix = ''
+ # The particular cache to look for modules within. This matches the
+ # requested mod_type
+ pull_cache = self._plugin_path_cache[suffix]
+ try:
+ return pull_cache[name]
+ except KeyError:
+ # Cache miss. Now let's find the plugin
+ pass
+
# TODO: Instead of using the self._paths cache (PATH_CACHE) and
# self._searched_paths we could use an iterator. Before enabling that
# we need to make sure we don't want to add additional directories
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
index 64e9a18fa7..62a2e7806f 100644
--- a/lib/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -24,6 +24,7 @@ import json
import os
import pipes
import random
+import re
import stat
import tempfile
import time
@@ -119,7 +120,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, mod_type)
if module_path:
break
- else:
+ else: # This is a for-else: http://bit.ly/1ElPkyg
# Use Windows version of ping module to check module paths when
# using a connection that supports .ps1 suffixes. We check specifically
# for win_ping here, otherwise the code would look for ping.ps1
@@ -151,15 +152,21 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if not isinstance(environments, list):
environments = [ environments ]
+ # the environments as inherited need to be reversed, to make
+ # sure we merge in the parent's values first so those in the
+ # block then task 'win' in precedence
+ environments.reverse()
for environment in environments:
if environment is None:
continue
- if not isinstance(environment, dict):
- raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
+ temp_environment = self._templar.template(environment)
+ if not isinstance(temp_environment, dict):
+ raise AnsibleError("environment must be a dictionary, received %s (%s)" % (temp_environment, type(temp_environment)))
# very deliberately using update here instead of combine_vars, as
# these environment settings should not need to merge sub-dicts
- final_environment.update(environment)
+ final_environment.update(temp_environment)
+ final_environment = self._templar.template(final_environment)
return self._connection._shell.env_prefix(**final_environment)
def _early_needs_tmp_path(self):
@@ -201,9 +208,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
tmp_mode = 0o755
cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
- display.debug("executing _low_level_execute_command to create the tmp path")
result = self._low_level_execute_command(cmd, sudoable=False)
- display.debug("done with creation of tmp path")
# error handling on this seems a little aggressive?
if result['rc'] != 0:
@@ -228,7 +233,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
output = output + u": %s" % result['stdout']
raise AnsibleConnectionFailure(output)
- rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
+ try:
+ rc = self._connection._shell.join_path(result['stdout'].strip(), u'').splitlines()[-1]
+ except IndexError:
+ # stdout was empty or just space, set to / to trigger error in next if
+ rc = '/'
# Catch failure conditions, files should never be
# written to locations in /.
@@ -244,9 +253,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
cmd = self._connection._shell.remove(tmp_path, recurse=True)
# If we have gotten here we have a working ssh configuration.
# If ssh breaks we could leave tmp directories out on the remote system.
- display.debug("calling _low_level_execute_command to remove the tmp path")
self._low_level_execute_command(cmd, sudoable=False)
- display.debug("done removing the tmp path")
def _transfer_data(self, remote_path, data):
'''
@@ -281,9 +288,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
'''
cmd = self._connection._shell.chmod(mode, path)
- display.debug("calling _low_level_execute_command to chmod the remote path")
res = self._low_level_execute_command(cmd, sudoable=sudoable)
- display.debug("done with chmod call")
return res
def _remote_checksum(self, path, all_vars):
@@ -294,9 +299,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
python_interp = all_vars.get('ansible_python_interpreter', 'python')
cmd = self._connection._shell.checksum(path, python_interp)
- display.debug("calling _low_level_execute_command to get the remote checksum")
data = self._low_level_execute_command(cmd, sudoable=True)
- display.debug("done getting the remote checksum")
try:
data2 = data['stdout'].strip().splitlines()[-1]
if data2 == u'':
@@ -324,9 +327,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
expand_path = '~%s' % self._play_context.become_user
cmd = self._connection._shell.expand_user(expand_path)
- display.debug("calling _low_level_execute_command to expand the remote user path")
data = self._low_level_execute_command(cmd, sudoable=False)
- display.debug("done expanding the remote user path")
#initial_fragment = utils.last_non_blank_line(data['stdout'])
initial_fragment = data['stdout'].strip().splitlines()[-1]
@@ -356,6 +357,14 @@ class ActionBase(with_metaclass(ABCMeta, object)):
return data[idx:]
+ def _strip_success_message(self, data):
+ '''
+ Removes the BECOME-SUCCESS message from the data.
+ '''
+ if data.strip().startswith('BECOME-SUCCESS-'):
+ data = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', data)
+ return data
+
def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=None, persist_files=False, delete_remote_tmp=True):
'''
Transfer and run a module along with its arguments.
@@ -371,22 +380,28 @@ class ActionBase(with_metaclass(ABCMeta, object)):
module_args = self._task.args
# set check mode in the module arguments, if required
- if self._play_context.check_mode and not self._task.always_run:
+ if self._play_context.check_mode:
if not self._supports_check_mode:
raise AnsibleError("check mode is not supported for this operation")
module_args['_ansible_check_mode'] = True
+ else:
+ module_args['_ansible_check_mode'] = False
# set no log in the module arguments, if required
- if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
- module_args['_ansible_no_log'] = True
+ module_args['_ansible_no_log'] = self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG
# set debug in the module arguments, if required
- if C.DEFAULT_DEBUG:
- module_args['_ansible_debug'] = True
+ module_args['_ansible_debug'] = C.DEFAULT_DEBUG
+
+ # let module know we are in diff mode
+ module_args['_ansible_diff'] = self._play_context.diff
+
+ # let module know our verbosity
+ module_args['_ansible_verbosity'] = self._display.verbosity
(module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
if not shebang:
- raise AnsibleError("module is missing interpreter line")
+ raise AnsibleError("module (%s) is missing interpreter line" % module_name)
# a remote tmp path may be necessary and not already created
remote_module_path = None
@@ -395,8 +410,9 @@ class ActionBase(with_metaclass(ABCMeta, object)):
tmp = self._make_tmp_path()
if tmp:
- remote_module_path = self._connection._shell.join_path(tmp, module_name)
- if module_style == 'old':
+ remote_module_filename = self._connection._shell.get_remote_filename(module_name)
+ remote_module_path = self._connection._shell.join_path(tmp, remote_module_filename)
+ if module_style in ['old', 'non_native_want_json']:
# we'll also need a temp file to hold our module arguments
args_file_path = self._connection._shell.join_path(tmp, 'args')
@@ -408,8 +424,10 @@ class ActionBase(with_metaclass(ABCMeta, object)):
# the remote system, which can be read and parsed by the module
args_data = ""
for k,v in iteritems(module_args):
- args_data += '%s="%s" ' % (k, pipes.quote(v))
+ args_data += '%s="%s" ' % (k, pipes.quote(text_type(v)))
self._transfer_data(args_file_path, args_data)
+ elif module_style == 'non_native_want_json':
+ self._transfer_data(args_file_path, json.dumps(module_args))
display.debug("done transferring module to remote")
environment_string = self._compute_environment_string()
@@ -421,7 +439,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
cmd = ""
in_data = None
- if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES:
+ if self._connection.has_pipelining and self._play_context.pipelining and not C.DEFAULT_KEEP_REMOTE_FILES and module_style == 'new':
in_data = module_data
else:
if remote_module_path:
@@ -442,9 +460,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
# specified in the play, not the sudo_user
sudoable = False
- display.debug("calling _low_level_execute_command() for command %s" % cmd)
res = self._low_level_execute_command(cmd, sudoable=sudoable, in_data=in_data)
- display.debug("_low_level_execute_command returned ok")
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
if self._play_context.become and self._play_context.become_user != 'root':
@@ -461,9 +477,10 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if 'stderr' in res and res['stderr'].startswith(u'Traceback'):
data['exception'] = res['stderr']
else:
- data['msg'] = res.get('stdout', u'')
+ data['msg'] = "MODULE FAILURE"
+ data['module_stdout'] = res.get('stdout', u'')
if 'stderr' in res:
- data['msg'] += res['stderr']
+ data['module_stderr'] = res['stderr']
# pre-split stdout into lines, if stdout is in the data and there
# isn't already a stdout_lines value there
@@ -473,8 +490,7 @@ class ActionBase(with_metaclass(ABCMeta, object)):
display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
return data
- def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
- executable=None, encoding_errors='replace'):
+ def _low_level_execute_command(self, cmd, sudoable=True, in_data=None, executable=C.DEFAULT_EXECUTABLE, encoding_errors='replace'):
'''
This is the function which executes the low level shell command, which
may be commands to create/remove directories for temporary files, or to
@@ -490,23 +506,22 @@ class ActionBase(with_metaclass(ABCMeta, object)):
'''
if executable is not None:
- cmd = executable + ' -c ' + cmd
+ cmd = executable + ' -c ' + pipes.quote(cmd)
- display.debug("in _low_level_execute_command() (%s)" % (cmd,))
+ display.debug("_low_level_execute_command(): starting")
if not cmd:
# this can happen with powershell modules when there is no analog to a Windows command (like chmod)
- display.debug("no command, exiting _low_level_execute_command()")
+ display.debug("_low_level_execute_command(): no command, exiting")
return dict(stdout='', stderr='')
allow_same_user = C.BECOME_ALLOW_SAME_USER
same_user = self._play_context.become_user == self._play_context.remote_user
if sudoable and self._play_context.become and (allow_same_user or not same_user):
- display.debug("using become for this command")
+ display.debug("_low_level_execute_command(): using become for this command")
cmd = self._play_context.make_become_cmd(cmd, executable=executable)
- display.debug("executing the command %s through the connection" % cmd)
+ display.debug("_low_level_execute_command(): executing: %s" % (cmd,))
rc, stdout, stderr = self._connection.exec_command(cmd, in_data=in_data, sudoable=sudoable)
- display.debug("command execution done")
# stdout and stderr may be either a file-like or a bytes object.
# Convert either one to a text type
@@ -524,10 +539,11 @@ class ActionBase(with_metaclass(ABCMeta, object)):
else:
err = stderr
- display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
if rc is None:
rc = 0
+ display.debug("_low_level_execute_command() done: rc=%d, stdout=%s, stderr=%s" % (rc, stdout, stderr))
+
return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)
def _get_first_available_file(self, faf, of=None, searchdir='files'):
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
index 4bf43f1400..b3aec20437 100644
--- a/lib/ansible/plugins/action/add_host.py
+++ b/lib/ansible/plugins/action/add_host.py
@@ -53,9 +53,13 @@ class ActionModule(ActionBase):
new_name = self._task.args.get('name', self._task.args.get('hostname', None))
display.vv("creating host via 'add_host': hostname=%s" % new_name)
- name, port = parse_address(new_name, allow_ranges=False)
- if not name:
- raise AnsibleError("Invalid inventory hostname: %s" % new_name)
+ try:
+ name, port = parse_address(new_name, allow_ranges=False)
+ except:
+ # not a parsable hostname, but might still be usable
+ name = new_name
+ port = None
+
if port:
self._task.args['ansible_ssh_port'] = port
diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py
index 51e2413af2..5e04f37ff1 100644
--- a/lib/ansible/plugins/action/async.py
+++ b/lib/ansible/plugins/action/async.py
@@ -48,7 +48,7 @@ class ActionModule(ActionBase):
env_string = self._compute_environment_string()
module_args = self._task.args.copy()
- if self._play_context.no_log or not C.DEFAULT_NO_TARGET_SYSLOG:
+ if self._play_context.no_log or C.DEFAULT_NO_TARGET_SYSLOG:
module_args['_ansible_no_log'] = True
# configure, upload, and chmod the target module
@@ -75,4 +75,8 @@ class ActionModule(ActionBase):
result['changed'] = True
+ # be sure to strip out the BECOME-SUCCESS message, which may
+ # be there depending on the output of the module
+ result['stdout'] = self._strip_success_message(result.get('stdout', ''))
+
return result
diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
index 5a5a805e74..2af20eddfc 100644
--- a/lib/ansible/plugins/action/debug.py
+++ b/lib/ansible/plugins/action/debug.py
@@ -19,33 +19,46 @@ __metaclass__ = type
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
-
+from ansible.utils.unicode import to_unicode
+from ansible.errors import AnsibleUndefinedVariable
class ActionModule(ActionBase):
''' Print statements during execution '''
TRANSFERS_FILES = False
+ VALID_ARGS = set(['msg', 'var'])
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
+ for arg in self._task.args:
+ if arg not in self.VALID_ARGS:
+ return {"failed": True, "msg": "'%s' is not a valid option in debug" % arg}
+
+ if 'msg' in self._task.args and 'var' in self._task.args:
+ return {"failed": True, "msg": "'msg' and 'var' are incompatible options"}
+
result = super(ActionModule, self).run(tmp, task_vars)
if 'msg' in self._task.args:
- if 'fail' in self._task.args and boolean(self._task.args['fail']):
- result['failed'] = True
- result['msg'] = self._task.args['msg']
- else:
- result['msg'] = self._task.args['msg']
- # FIXME: move the LOOKUP_REGEX somewhere else
- elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
- results = self._templar.template(self._task.args['var'], convert_bare=True)
- if results == self._task.args['var']:
+ result['msg'] = self._task.args['msg']
+
+ elif 'var' in self._task.args:
+ try:
+ results = self._templar.template(self._task.args['var'], convert_bare=True, fail_on_undefined=True)
+ if results == self._task.args['var']:
+ raise AnsibleUndefinedVariable
+ except AnsibleUndefinedVariable:
results = "VARIABLE IS NOT DEFINED!"
- result[self._task.args['var']] = results
+
+ if type(self._task.args['var']) in (list, dict):
+ # If var is a list or dict, use the type as key to display
+ result[to_unicode(type(self._task.args['var']))] = results
+ else:
+ result[self._task.args['var']] = results
else:
- result['msg'] = 'here we are'
+ result['msg'] = 'Hello world!'
# force flag to make debug output module always verbose
result['_ansible_verbose_always'] = True
diff --git a/lib/ansible/plugins/action/fetch.py b/lib/ansible/plugins/action/fetch.py
index 478eac3f82..0dacd02145 100644
--- a/lib/ansible/plugins/action/fetch.py
+++ b/lib/ansible/plugins/action/fetch.py
@@ -60,20 +60,22 @@ class ActionModule(ActionBase):
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source)
- # calculate checksum for the remote file
- remote_checksum = self._remote_checksum(source, all_vars=task_vars)
+ remote_checksum = None
+ if not self._play_context.become:
+ # calculate checksum for the remote file, don't bother if using become as slurp will be used
+ remote_checksum = self._remote_checksum(source, all_vars=task_vars)
- # use slurp if sudo and permissions are lacking
+ # use slurp if permissions are lacking or privilege escalation is needed
remote_data = None
- if remote_checksum in ('1', '2') or self._play_context.become:
+ if remote_checksum in ('1', '2', None):
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('failed'):
- if remote_checksum == '1' and not fail_on_missing:
+ if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
result['msg'] = "the remote file does not exist, not transferring, ignored"
result['file'] = source
result['changed'] = False
- return result
- result.update(slurpres)
+ else:
+ result.update(slurpres)
return result
else:
if slurpres['encoding'] == 'base64':
@@ -115,8 +117,8 @@ class ActionModule(ActionBase):
dest = dest.replace("//","/")
if remote_checksum in ('0', '1', '2', '3', '4'):
- # these don't fail because you may want to transfer a log file that possibly MAY exist
- # but keep going to fetch other log files
+ # these don't fail because you may want to transfer a log file that
+ # possibly MAY exist but keep going to fetch other log files
if remote_checksum == '0':
result['msg'] = "unable to calculate the checksum of the remote file"
result['file'] = source
@@ -162,25 +164,24 @@ class ActionModule(ActionBase):
except (IOError, OSError) as e:
raise AnsibleError("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
+ # For backwards compatibility. We'll return None on FIPS enabled systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
- result.update(dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
- return result
- result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
- return result
+ result.update(dict(failed=True, md5sum=new_md5,
+ msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
+ checksum=new_checksum, remote_checksum=remote_checksum))
+ else:
+ result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
else:
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
+ # For backwards compatibility. We'll return None on FIPS enabled systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
-
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
- return result
+
+ return result
diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
index a891d3c70d..99f9db2a88 100644
--- a/lib/ansible/plugins/action/group_by.py
+++ b/lib/ansible/plugins/action/group_by.py
@@ -40,6 +40,6 @@ class ActionModule(ActionBase):
group_name = self._task.args.get('key')
group_name = group_name.replace(' ','-')
- result['changed'] = True
+ result['changed'] = False
result['add_group'] = group_name
return result
diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
index bf93fdad2d..932ad8309c 100644
--- a/lib/ansible/plugins/action/normal.py
+++ b/lib/ansible/plugins/action/normal.py
@@ -18,6 +18,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
@@ -27,12 +28,16 @@ class ActionModule(ActionBase):
task_vars = dict()
results = super(ActionModule, self).run(tmp, task_vars)
- results.update(self._execute_module(tmp=tmp, task_vars=task_vars))
-
+ # remove as modules might hide due to nolog
+ del results['invocation']['module_args']
+ results = merge_hash(results, self._execute_module(tmp=tmp, task_vars=task_vars))
# Remove special fields from the result, which can only be set
# internally by the executor engine. We do this only here in
# the 'normal' action, as other action plugins may set this.
- for field in ('ansible_notify',):
+ #
+ # We don't want modules to determine that running the module fires
+ # notify handlers. That's for the playbook to decide.
+ for field in ('_ansible_notify',):
if field in results:
results.pop(field)
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
index f3a70ed7e3..97fa9ac320 100644
--- a/lib/ansible/plugins/action/pause.py
+++ b/lib/ansible/plugins/action/pause.py
@@ -105,6 +105,8 @@ class ActionModule(ActionBase):
result['start'] = str(datetime.datetime.now())
result['user_input'] = ''
+ fd = None
+ old_settings = None
try:
if seconds is not None:
# setup the alarm handler
@@ -159,7 +161,7 @@ class ActionModule(ActionBase):
finally:
# cleanup and save some information
# restore the old settings for the duped stdin fd
- if isatty(fd):
+ if not(None in (fd, old_settings)) and isatty(fd):
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
duration = time.time() - start
diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
index d6fa2f3559..c9718db413 100644
--- a/lib/ansible/plugins/action/raw.py
+++ b/lib/ansible/plugins/action/raw.py
@@ -19,8 +19,6 @@ __metaclass__ = type
from ansible.plugins.action import ActionBase
-import re
-
class ActionModule(ActionBase):
TRANSFERS_FILES = False
@@ -42,7 +40,6 @@ class ActionModule(ActionBase):
# for some modules (script, raw), the sudo success key
# may leak into the stdout due to the way the sudo/su
# command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
+ result['stdout'] = self._strip_success_message(result.get('stdout', ''))
return result
diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
index 2670cc9290..45004d5ed4 100644
--- a/lib/ansible/plugins/action/synchronize.py
+++ b/lib/ansible/plugins/action/synchronize.py
@@ -131,7 +131,10 @@ class ActionModule(ActionBase):
src_host = '127.0.0.1'
inventory_hostname = task_vars.get('inventory_hostname')
dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname)
- dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)
+ try:
+ dest_host = dest_host_inventory_vars['ansible_host']
+ except KeyError:
+ dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)
dest_is_local = dest_host in C.LOCALHOST
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
index 109f3e80c0..d8339e57b9 100644
--- a/lib/ansible/plugins/action/template.py
+++ b/lib/ansible/plugins/action/template.py
@@ -63,8 +63,13 @@ class ActionModule(ActionBase):
dest = self._task.args.get('dest', None)
faf = self._task.first_available_file
force = boolean(self._task.args.get('force', True))
+ state = self._task.args.get('state', None)
- if (source is None and faf is not None) or dest is None:
+ if state is not None:
+ result['failed'] = True
+ result['msg'] = "'state' cannot be specified on a template"
+ return result
+ elif (source is None and faf is not None) or dest is None:
result['failed'] = True
result['msg'] = "src and dest are required"
return result
@@ -150,7 +155,7 @@ class ActionModule(ActionBase):
diff = {}
new_module_args = self._task.args.copy()
- if force and local_checksum != remote_checksum:
+ if (remote_checksum == '1') or (force and local_checksum != remote_checksum):
result['changed'] = True
# if showing diffs, we need to get the remote value
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
index cd89b936fe..b6c43a3c59 100644
--- a/lib/ansible/plugins/action/unarchive.py
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -69,13 +69,13 @@ class ActionModule(ActionBase):
source = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', source)
remote_checksum = self._remote_checksum(dest, all_vars=task_vars)
- if remote_checksum != '3':
+ if remote_checksum == '4':
result['failed'] = True
- result['msg'] = "dest '%s' must be an existing dir" % dest
+ result['msg'] = "python isn't present on the system. Unable to compute checksum"
return result
- elif remote_checksum == '4':
+ elif remote_checksum != '3':
result['failed'] = True
- result['msg'] = "python isn't present on the system. Unable to compute checksum"
+ result['msg'] = "dest '%s' must be an existing dir" % dest
return result
if copy:
diff --git a/lib/ansible/plugins/callback/__init__.py b/lib/ansible/plugins/callback/__init__.py
index 2b6875ae20..faf04b1180 100644
--- a/lib/ansible/plugins/callback/__init__.py
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -59,6 +59,21 @@ class CallbackBase:
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))
+ ''' helper for callbacks, so they don't all have to include deepcopy '''
+ _copy_result = deepcopy
+
+ def _copy_result_exclude(self, result, exclude):
+ values = []
+ for e in exclude:
+ values.append(getattr(result, e))
+ setattr(result, e, None)
+
+ result_copy = deepcopy(result)
+ for i,e in enumerate(exclude):
+ setattr(result, e, values[i])
+
+ return result_copy
+
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
@@ -101,6 +116,10 @@ class CallbackBase:
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
+ # format complex structures into 'files'
+ for x in ['before', 'after']:
+ if isinstance(diff[x], dict):
+ diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
@@ -126,7 +145,7 @@ class CallbackBase:
def _process_items(self, result):
for res in result._result['results']:
- newres = deepcopy(result)
+ newres = self._copy_result_exclude(result, ['_result'])
res['item'] = self._get_item(res)
newres._result = res
if 'failed' in res and res['failed']:
@@ -136,6 +155,12 @@ class CallbackBase:
else:
self.v2_playbook_item_on_ok(newres)
+ def _clean_results(self, result, task_name):
+ if 'changed' in result and task_name in ['debug']:
+ del result['changed']
+ if 'invocation' in result and task_name in ['debug']:
+ del result['invocation']
+
def set_play_context(self, play_context):
pass
@@ -246,7 +271,7 @@ class CallbackBase:
def v2_runner_on_file_diff(self, result, diff):
pass #no v1 correspondance
- def v2_playbook_on_start(self):
+ def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, result, handler):
@@ -304,3 +329,12 @@ class CallbackBase:
def v2_playbook_on_include(self, included_file):
pass #no v1 correspondance
+
+ def v2_playbook_item_on_ok(self, result):
+ pass
+
+ def v2_playbook_item_on_failed(self, result):
+ pass
+
+ def v2_playbook_item_on_skipped(self, result):
+ pass
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
index 611244760d..dfad657934 100644
--- a/lib/ansible/plugins/callback/default.py
+++ b/lib/ansible/plugins/callback/default.py
@@ -21,6 +21,7 @@ __metaclass__ = type
from ansible import constants as C
from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):
@@ -43,7 +44,7 @@ class CallbackModule(CallbackBase):
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
- self._display.display(msg, color='red')
+ self._display.display(msg, color=C.COLOR_ERROR)
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
@@ -52,15 +53,16 @@ class CallbackModule(CallbackBase):
self._process_items(result)
else:
if delegated_vars:
- self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
- self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+ self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if result._task.ignore_errors:
- self._display.display("...ignoring", color='cyan')
+ self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if result._task.action == 'include':
return
@@ -69,13 +71,13 @@ class CallbackModule(CallbackBase):
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
- color = 'yellow'
+ color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
- color = 'green'
+ color = C.COLOR_OK
if result._task.loop and 'results' in result._result:
self._process_items(result)
@@ -95,17 +97,17 @@ class CallbackModule(CallbackBase):
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % self._dump_results(result._result)
- self._display.display(msg, color='cyan')
+ self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color='red')
+ self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
- self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
+ self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
def v2_playbook_on_no_hosts_matched(self):
- self._display.display("skipping: no hosts matched", color='cyan')
+ self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
@@ -115,7 +117,7 @@ class CallbackModule(CallbackBase):
if self._display.verbosity > 2:
path = task.get_path()
if path:
- self._display.display("task path: %s" % path, color='dark gray')
+ self._display.display("task path: %s" % path, color=C.COLOR_DEBUG)
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
@@ -133,7 +135,11 @@ class CallbackModule(CallbackBase):
self._display.banner(msg)
def v2_on_file_diff(self, result):
- if 'diff' in result._result and result._result['diff']:
+ if result._task.loop and 'results' in result._result:
+ for res in result._result['results']:
+ if 'diff' in res and res['diff']:
+ self._display.display(self._get_diff(res['diff']))
+ elif 'diff' in result._result and result._result['diff']:
self._display.display(self._get_diff(result._result['diff']))
def v2_playbook_item_on_ok(self, result):
@@ -146,13 +152,13 @@ class CallbackModule(CallbackBase):
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
- color = 'yellow'
+ color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
- color = 'green'
+ color = C.COLOR_OK
msg += " => (item=%s)" % (result._result['item'],)
@@ -170,15 +176,15 @@ class CallbackModule(CallbackBase):
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
- self._display.display(msg, color='red')
+ self._display.display(msg, color=C.COLOR_ERROR)
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
if delegated_vars:
- self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color='red')
+ self._display.display("failed: [%s -> %s] => (item=%s) => %s" % (result._host.get_name(), delegated_vars['ansible_host'], result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
- self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
+ self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color=C.COLOR_ERROR)
self._handle_warnings(result._result)
@@ -186,10 +192,37 @@ class CallbackModule(CallbackBase):
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), result._result['item'])
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result:
msg += " => %s" % self._dump_results(result._result)
- self._display.display(msg, color='cyan')
+ self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
- color = 'cyan'
- self._display.display(msg, color='cyan')
+ color = C.COLOR_SKIP
+ self._display.display(msg, color=C.COLOR_SKIP)
+
+ def v2_playbook_on_stats(self, stats):
+ self._display.banner("PLAY RECAP")
+
+ hosts = sorted(stats.processed.keys())
+ for h in hosts:
+ t = stats.summarize(h)
+
+ self._display.display(u"%s : %s %s %s %s" % (
+ hostcolor(h, t),
+ colorize(u'ok', t['ok'], C.COLOR_OK),
+ colorize(u'changed', t['changed'], C.COLOR_CHANGED),
+ colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
+ colorize(u'failed', t['failures'], C.COLOR_ERROR)),
+ screen_only=True
+ )
+
+ self._display.display(u"%s : %s %s %s %s" % (
+ hostcolor(h, t, False),
+ colorize(u'ok', t['ok'], None),
+ colorize(u'changed', t['changed'], None),
+ colorize(u'unreachable', t['unreachable'], None),
+ colorize(u'failed', t['failures'], None)),
+ log_only=True
+ )
+
+ self._display.display("", screen_only=True)
diff --git a/lib/ansible/plugins/callback/hipchat.py b/lib/ansible/plugins/callback/hipchat.py
index 5c2e3c6060..b31140128b 100644
--- a/lib/ansible/plugins/callback/hipchat.py
+++ b/lib/ansible/plugins/callback/hipchat.py
@@ -50,9 +50,9 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'hipchat'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
+ def __init__(self):
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
if not HAS_PRETTYTABLE:
self.disabled = True
diff --git a/lib/ansible/plugins/callback/log_plays.py b/lib/ansible/plugins/callback/log_plays.py
index 5d342a94f5..7b708a74ba 100644
--- a/lib/ansible/plugins/callback/log_plays.py
+++ b/lib/ansible/plugins/callback/log_plays.py
@@ -45,9 +45,9 @@ class CallbackModule(CallbackBase):
TIME_FORMAT="%b %d %Y %H:%M:%S"
MSG_FORMAT="%(now)s - %(category)s - %(data)s\n\n"
- def __init__(self, display):
+ def __init__(self):
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
if not os.path.exists("/var/log/ansible/hosts"):
os.makedirs("/var/log/ansible/hosts")
diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py
new file mode 100644
index 0000000000..281ca044c5
--- /dev/null
+++ b/lib/ansible/plugins/callback/logentries.py
@@ -0,0 +1,345 @@
+""" (c) 2015, Logentries.com, Jimmy Tang <jimmy.tang@logentries.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+This callback plugin will generate json objects to be sent to logentries
+for auditing/debugging purposes.
+
+Todo:
+
+* Better formatting of output before sending out to logentries data/api nodes.
+
+To use:
+
+Add this to your ansible.cfg file in the defaults block
+
+ [defaults]
+ callback_plugins = ./callback_plugins
+ callback_stdout = logentries
+ callback_whitelist = logentries
+
+Copy the callback plugin into the callback_plugins directory
+
+Either set the environment variables
+
+ export LOGENTRIES_API=data.logentries.com
+ export LOGENTRIES_PORT=10000
+ export LOGENTRIES_ANSIBLE_TOKEN=dd21fc88-f00a-43ff-b977-e3a4233c53af
+
+Or create a logentries.ini config file that sites next to the plugin with the following contents
+
+ [logentries]
+ api = data.logentries.com
+ port = 10000
+ tls_port = 20000
+ use_tls = no
+ token = dd21fc88-f00a-43ff-b977-e3a4233c53af
+ flatten = False
+
+
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import socket
+import random
+import time
+import codecs
+import ConfigParser
+import uuid
+try:
+ import certifi
+ HAS_CERTIFI = True
+except ImportError:
+ HAS_CERTIFI = False
+
+try:
+ import flatdict
+ HAS_FLATDICT = True
+except ImportError:
+ HAS_FLATDICT = False
+
+from ansible.plugins.callback import CallbackBase
+
+
+def to_unicode(ch):
+ return codecs.unicode_escape_decode(ch)[0]
+
+
+def is_unicode(ch):
+ return isinstance(ch, unicode)
+
+
+def create_unicode(ch):
+ return unicode(ch, 'utf-8')
+
+
+class PlainTextSocketAppender(object):
+ def __init__(self,
+ verbose=True,
+ LE_API='data.logentries.com',
+ LE_PORT=80,
+ LE_TLS_PORT=443):
+
+ self.LE_API = LE_API
+ self.LE_PORT = LE_PORT
+ self.LE_TLS_PORT = LE_TLS_PORT
+ self.MIN_DELAY = 0.1
+ self.MAX_DELAY = 10
+ # Error message displayed when an incorrect Token has been detected
+ self.INVALID_TOKEN = ("\n\nIt appears the LOGENTRIES_TOKEN "
+ "parameter you entered is incorrect!\n\n")
+ # Unicode Line separator character \u2028
+ self.LINE_SEP = to_unicode('\u2028')
+
+ self.verbose = verbose
+ self._conn = None
+
+ def open_connection(self):
+ self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self._conn.connect((self.LE_API, self.LE_PORT))
+
+ def reopen_connection(self):
+ self.close_connection()
+
+ root_delay = self.MIN_DELAY
+ while True:
+ try:
+ self.open_connection()
+ return
+ except Exception:
+ if self.verbose:
+ self._display.warning("Unable to connect to Logentries")
+
+ root_delay *= 2
+ if (root_delay > self.MAX_DELAY):
+ root_delay = self.MAX_DELAY
+
+ wait_for = root_delay + random.uniform(0, root_delay)
+
+ try:
+ time.sleep(wait_for)
+ except KeyboardInterrupt:
+ raise
+
+ def close_connection(self):
+ if self._conn is not None:
+ self._conn.close()
+
+ def put(self, data):
+ # Replace newlines with Unicode line separator
+ # for multi-line events
+ if not is_unicode(data):
+ multiline = create_unicode(data).replace('\n', self.LINE_SEP)
+ else:
+ multiline = data.replace('\n', self.LINE_SEP)
+ multiline += "\n"
+ # Send data, reconnect if needed
+ while True:
+ try:
+ self._conn.send(multiline.encode('utf-8'))
+ except socket.error:
+ self.reopen_connection()
+ continue
+ break
+
+ self.close_connection()
+
+
+try:
+ import ssl
+ HAS_SSL=True
+except ImportError: # for systems without TLS support.
+ SocketAppender = PlainTextSocketAppender
+ HAS_SSL=False
+else:
+
+ class TLSSocketAppender(PlainTextSocketAppender):
+ def open_connection(self):
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock = ssl.wrap_socket(
+ sock=sock,
+ keyfile=None,
+ certfile=None,
+ server_side=False,
+ cert_reqs=ssl.CERT_REQUIRED,
+ ssl_version=getattr(
+ ssl, 'PROTOCOL_TLSv1_2', ssl.PROTOCOL_TLSv1),
+ ca_certs=certifi.where(),
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, )
+ sock.connect((self.LE_API, self.LE_TLS_PORT))
+ self._conn = sock
+
+ SocketAppender = TLSSocketAppender
+
+
+class CallbackModule(CallbackBase):
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'notification'
+ CALLBACK_NAME = 'logentries'
+ CALLBACK_NEEDS_WHITELIST = True
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+
+ if not HAS_SSL:
+ self._display.warning("Unable to import ssl module. Will send over port 80.")
+
+ if not HAS_CERTIFI:
+ self.disabled =True
+ self._display.warning('The `certifi` python module is not installed. '
+ 'Disabling the Logentries callback plugin.')
+
+ if not HAS_FLATDICT:
+ self.disabled =True
+ self._display.warning('The `flatdict` python module is not installed. '
+ 'Disabling the Logentries callback plugin.')
+
+ config_path = os.path.abspath(os.path.dirname(__file__))
+ config = ConfigParser.ConfigParser()
+ try:
+ config.readfp(open(os.path.join(config_path, 'logentries.ini')))
+ if config.has_option('logentries', 'api'):
+ self.api_uri = config.get('logentries', 'api')
+ if config.has_option('logentries', 'port'):
+ self.api_port = config.getint('logentries', 'port')
+ if config.has_option('logentries', 'tls_port'):
+ self.api_tls_port = config.getint('logentries', 'tls_port')
+ if config.has_option('logentries', 'use_tls'):
+ self.use_tls = config.getboolean('logentries', 'use_tls')
+ if config.has_option('logentries', 'token'):
+ self.token = config.get('logentries', 'token')
+ if config.has_option('logentries', 'flatten'):
+ self.flatten = config.getboolean('logentries', 'flatten')
+
+ except:
+ self.api_uri = os.getenv('LOGENTRIES_API')
+ if self.api_uri is None:
+ self.api_uri = 'data.logentries.com'
+
+ try:
+ self.api_port = int(os.getenv('LOGENTRIES_PORT'))
+ if self.api_port is None:
+ self.api_port = 80
+ except TypeError:
+ self.api_port = 80
+
+ try:
+ self.api_tls_port = int(os.getenv('LOGENTRIES_TLS_PORT'))
+ if self.api_tls_port is None:
+ self.api_tls_port = 443
+ except TypeError:
+ self.api_tls_port = 443
+
+ # this just needs to be set to use TLS
+ self.use_tls = os.getenv('LOGENTRIES_USE_TLS')
+ if self.use_tls is None:
+ self.use_tls = False
+ elif self.use_tls.lower() in ['yes', 'true']:
+ self.use_tls = True
+
+ self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN')
+ if self.token is None:
+ self.disabled = True
+ self._display.warning('Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable')
+
+ self.flatten = os.getenv('LOGENTRIES_FLATTEN')
+ if self.flatten is None:
+ self.flatten = False
+ elif self.flatten.lower() in ['yes', 'true']:
+ self.flatten = True
+
+ self.verbose = False
+ self.timeout = 10
+ self.le_jobid = str(uuid.uuid4())
+
+ if self.use_tls:
+ self._appender = TLSSocketAppender(verbose=self.verbose,
+ LE_API=self.api_uri,
+ LE_TLS_PORT=self.api_tls_port)
+ else:
+ self._appender = PlainTextSocketAppender(verbose=self.verbose,
+ LE_API=self.api_uri,
+ LE_PORT=self.api_port)
+ self._appender.reopen_connection()
+
+ def emit_formatted(self, record):
+ if self.flatten:
+ results = flatdict.FlatDict(record)
+ self.emit(self._dump_results(results))
+ else:
+ self.emit(self._dump_results(record))
+
+ def emit(self, record):
+ msg = record.rstrip('\n')
+ msg = "{} {}".format(self.token, msg)
+ self._appender.put(msg)
+
+ def runner_on_ok(self, host, res):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['hostname'] = host
+ results['results'] = res
+ results['status'] = 'OK'
+ self.emit_formatted(results)
+
+ def runner_on_failed(self, host, res, ignore_errors=False):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['hostname'] = host
+ results['results'] = res
+ results['status'] = 'FAILED'
+ self.emit_formatted(results)
+
+ def runner_on_skipped(self, host, item=None):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['hostname'] = host
+ results['status'] = 'SKIPPED'
+ self.emit_formatted(results)
+
+ def runner_on_unreachable(self, host, res):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['hostname'] = host
+ results['results'] = res
+ results['status'] = 'UNREACHABLE'
+ self.emit_formatted(results)
+
+ def runner_on_async_failed(self, host, res, jid):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['hostname'] = host
+ results['results'] = res
+ results['jid'] = jid
+ results['status'] = 'ASYNC_FAILED'
+ self.emit_formatted(results)
+
+ def v2_playbook_on_play_start(self, play):
+ results = {}
+ results['le_jobid'] = self.le_jobid
+ results['started_by'] = os.getlogin()
+ if play.name:
+ results['play'] = play.name
+ results['hosts'] = play.hosts
+ self.emit_formatted(results)
+
+ def playbook_on_stats(self, stats):
+ """ close connection """
+ self._appender.close_connection()
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
index f855c1a6e5..9fa257af74 100644
--- a/lib/ansible/plugins/callback/minimal.py
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -53,28 +53,32 @@ class CallbackModule(CallbackBase):
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
- self._display.display(msg, color='red')
+ self._display.display(msg, color=C.COLOR_ERROR)
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
if result._task.action in C.MODULE_NO_JSON:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color='red')
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "FAILED"), color=C.COLOR_ERROR)
else:
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red')
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
+ self._clean_results(result._result, result._task.action)
if result._task.action in C.MODULE_NO_JSON:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color='green')
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result, "SUCCESS"), color=C.COLOR_OK)
else:
- self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green')
+ if 'changed' in result._result and result._result['changed']:
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_CHANGED)
+ else:
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_OK)
self._handle_warnings(result._result)
def v2_runner_on_skipped(self, result):
- self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
- self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='yellow')
+ self._display.display("%s | UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color=C.COLOR_UNREACHABLE)
def v2_on_file_diff(self, result):
if 'diff' in result._result and result._result['diff']:
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
index a99b680c05..0f6283fd44 100644
--- a/lib/ansible/plugins/callback/oneline.py
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -52,24 +52,24 @@ class CallbackModule(CallbackBase):
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
if result._task.action in C.MODULE_NO_JSON:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color='red')
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR)
else:
- self._display.display(msg, color='red')
+ self._display.display(msg, color=C.COLOR_ERROR)
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='red')
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
- self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color='green')
+ self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK)
else:
- self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color='green')
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK)
def v2_runner_on_unreachable(self, result):
- self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
+ self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE)
def v2_runner_on_skipped(self, result):
- self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
+ self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
diff --git a/lib/ansible/plugins/callback/osx_say.py b/lib/ansible/plugins/callback/osx_say.py
index da588434a8..2434077870 100644
--- a/lib/ansible/plugins/callback/osx_say.py
+++ b/lib/ansible/plugins/callback/osx_say.py
@@ -40,9 +40,9 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'osx_say'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
+ def __init__(self):
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
# plugin disable itself if say is not present
# ansible will not call any callback if disabled is set to True
diff --git a/lib/ansible/plugins/callback/profile_tasks.py b/lib/ansible/plugins/callback/profile_tasks.py
index b23f84092b..e4004c97d4 100644
--- a/lib/ansible/plugins/callback/profile_tasks.py
+++ b/lib/ansible/plugins/callback/profile_tasks.py
@@ -71,11 +71,11 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'profile_tasks'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
+ def __init__(self):
self.stats = {}
self.current = None
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
def _record_task(self, name):
"""
diff --git a/lib/ansible/plugins/callback/skippy.py b/lib/ansible/plugins/callback/skippy.py
index 5ef2b6f3c1..306d1a534e 100644
--- a/lib/ansible/plugins/callback/skippy.py
+++ b/lib/ansible/plugins/callback/skippy.py
@@ -19,9 +19,9 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-from ansible.plugins.callback import CallbackBase
+from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
-class CallbackModule(CallbackBase):
+class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
@@ -32,130 +32,8 @@ class CallbackModule(CallbackBase):
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'skippy'
- def v2_runner_on_failed(self, result, ignore_errors=False):
- if 'exception' in result._result:
- if self._display.verbosity < 3:
- # extract just the actual error message from the exception text
- error = result._result['exception'].strip().split('\n')[-1]
- msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
- else:
- msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
-
- self._display.display(msg, color='red')
-
- # finally, remove the exception from the result so it's not shown every time
- del result._result['exception']
-
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
- else:
- if result._task.delegate_to:
- self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red')
- else:
- self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
-
- if result._task.ignore_errors:
- self._display.display("...ignoring", color='cyan')
-
- def v2_runner_on_ok(self, result):
-
- if result._task.action == 'include':
- msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
- color = 'cyan'
- elif result._result.get('changed', False):
- if result._task.delegate_to is not None:
- msg = "changed: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
- else:
- msg = "changed: [%s]" % result._host.get_name()
- color = 'yellow'
- else:
- if result._task.delegate_to is not None:
- msg = "ok: [%s -> %s]" % (result._host.get_name(), result._task.delegate_to)
- else:
- msg = "ok: [%s]" % result._host.get_name()
- color = 'green'
-
- if result._task.loop and 'results' in result._result:
- self._process_items(result)
- else:
-
- if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
- msg += " => %s" % (self._dump_results(result._result),)
- self._display.display(msg, color=color)
-
- self._handle_warnings(result._result)
-
-
- def v2_runner_on_unreachable(self, result):
- if result._task.delegate_to:
- self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), result._task.delegate_to, self._dump_results(result._result)), color='red')
- else:
- self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color='red')
-
- def v2_playbook_on_no_hosts_matched(self):
- self._display.display("skipping: no hosts matched", color='cyan')
-
- def v2_playbook_on_no_hosts_remaining(self):
- self._display.banner("NO MORE HOSTS LEFT")
-
- def v2_playbook_on_task_start(self, task, is_conditional):
- self._display.banner("TASK [%s]" % task.get_name().strip())
- if self._display.verbosity > 2:
- path = task.get_path()
- if path:
- self._display.display("task path: %s" % path, color='dark gray')
-
- def v2_playbook_on_cleanup_task_start(self, task):
- self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
-
- def v2_playbook_on_handler_task_start(self, task):
- self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
-
- def v2_playbook_on_play_start(self, play):
- name = play.get_name().strip()
- if not name:
- msg = "PLAY"
- else:
- msg = "PLAY [%s]" % name
-
- self._display.banner(msg)
-
- def v2_on_file_diff(self, result):
- if 'diff' in result._result and result._result['diff']:
- self._display.display(self._get_diff(result._result['diff']))
-
- def v2_playbook_item_on_ok(self, result):
-
- if result._task.action == 'include':
- msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
- color = 'cyan'
- elif result._result.get('changed', False):
- msg = "changed: [%s]" % result._host.get_name()
- color = 'yellow'
- else:
- msg = "ok: [%s]" % result._host.get_name()
- color = 'green'
-
- msg += " => (item=%s)" % (result._result['item'],)
-
- if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and not '_ansible_verbose_override' in result._result and result._task.action != 'include':
- msg += " => %s" % self._dump_results(result._result)
- self._display.display(msg, color=color)
-
- def v2_playbook_item_on_failed(self, result):
- if 'exception' in result._result:
- if self._display.verbosity < 3:
- # extract just the actual error message from the exception text
- error = result._result['exception'].strip().split('\n')[-1]
- msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
- else:
- msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
-
- self._display.display(msg, color='red')
-
- # finally, remove the exception from the result so it's not shown every time
- del result._result['exception']
-
- self._display.display("failed: [%s] => (item=%s) => %s" % (result._host.get_name(), result._result['item'], self._dump_results(result._result)), color='red')
- self._handle_warnings(result._result)
+ def v2_runner_on_skipped(self, result):
+ pass
+ def v2_playbook_item_on_skipped(self, result):
+ pass
diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py
index 6ba9c6f3cd..6b5fc60bdc 100644
--- a/lib/ansible/plugins/callback/syslog_json.py
+++ b/lib/ansible/plugins/callback/syslog_json.py
@@ -28,9 +28,9 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'syslog_json'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
+ def __init__(self):
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
self.logger = logging.getLogger('ansible logger')
self.logger.setLevel(logging.DEBUG)
diff --git a/lib/ansible/plugins/callback/timer.py b/lib/ansible/plugins/callback/timer.py
index 9d976ea18d..0c9829aea2 100644
--- a/lib/ansible/plugins/callback/timer.py
+++ b/lib/ansible/plugins/callback/timer.py
@@ -16,9 +16,9 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'timer'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
+ def __init__(self):
- super(CallbackModule, self).__init__(display)
+ super(CallbackModule, self).__init__()
self.start_time = datetime.now()
diff --git a/lib/ansible/plugins/callback/tree.py b/lib/ansible/plugins/callback/tree.py
index 5c976cc6f2..ee710a6dfd 100644
--- a/lib/ansible/plugins/callback/tree.py
+++ b/lib/ansible/plugins/callback/tree.py
@@ -36,12 +36,13 @@ class CallbackModule(CallbackBase):
CALLBACK_NAME = 'tree'
CALLBACK_NEEDS_WHITELIST = True
- def __init__(self, display):
- super(CallbackModule, self).__init__(display)
+ def __init__(self):
+ super(CallbackModule, self).__init__()
self.tree = TREE_DIR
if not self.tree:
- self._display.warnings("Disabling tree callback, invalid directory provided to tree option: %s" % self.tree)
+ self.tree = os.path.expanduser("~/.ansible/tree")
+ self._display.warning("The tree callback is defaulting to ~/.ansible/tree, as an invalid directory was provided: %s" % self.tree)
def write_tree_file(self, hostname, buf):
''' write something into treedir/hostname '''
@@ -53,7 +54,7 @@ class CallbackModule(CallbackBase):
with open(path, 'wb+') as fd:
fd.write(buf)
except (OSError, IOError) as e:
- self._display.warnings("Unable to write to %s's file: %s" % (hostname, str(e)))
+ self._display.warning("Unable to write to %s's file: %s" % (hostname, str(e)))
def result_to_tree(self, result):
if self.tree:
diff --git a/lib/ansible/plugins/connection/__init__.py b/lib/ansible/plugins/connection/__init__.py
index 1ff5d8f30b..bea8e5b426 100644
--- a/lib/ansible/plugins/connection/__init__.py
+++ b/lib/ansible/plugins/connection/__init__.py
@@ -75,6 +75,7 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
self.success_key = None
self.prompt = None
+ self._connected = False
# load the shell plugin for this action/connection
if play_context.shell:
@@ -88,6 +89,11 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
if not self._shell:
raise AnsibleError("Invalid shell type specified (%s), or the plugin for that shell type is missing." % shell_type)
+ @property
+ def connected(self):
+ '''Read-only property holding whether the connection to the remote host is active or closed.'''
+ return self._connected
+
def _become_method_supported(self):
''' Checks if the current class supports this privilege escalation method '''
@@ -200,7 +206,10 @@ class ConnectionBase(with_metaclass(ABCMeta, object)):
pass
def check_become_success(self, output):
- return self._play_context.success_key == output.rstrip()
+ for line in output.splitlines(True):
+ if self._play_context.success_key == line.rstrip():
+ return True
+ return False
def check_password_prompt(self, output):
if self._play_context.prompt is None:
diff --git a/lib/ansible/plugins/connection/chroot.py b/lib/ansible/plugins/connection/chroot.py
index c86ea1fc35..ba41ffb5d8 100644
--- a/lib/ansible/plugins/connection/chroot.py
+++ b/lib/ansible/plugins/connection/chroot.py
@@ -30,6 +30,7 @@ from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
from ansible.module_utils.basic import is_executable
+from ansible.utils.unicode import to_bytes
try:
from __main__ import display
@@ -90,6 +91,7 @@ class Connection(ConnectionBase):
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
+ local_cmd = map(to_bytes, local_cmd)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/lib/ansible/plugins/connection/docker.py b/lib/ansible/plugins/connection/docker.py
index 308dea0edc..130317f24a 100644
--- a/lib/ansible/plugins/connection/docker.py
+++ b/lib/ansible/plugins/connection/docker.py
@@ -36,6 +36,7 @@ from distutils.version import LooseVersion
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
+from ansible.utils.unicode import to_bytes
try:
from __main__ import display
@@ -80,8 +81,10 @@ class Connection(ConnectionBase):
docker_version = self._get_docker_version()
if LooseVersion(docker_version) < LooseVersion('1.3'):
raise AnsibleError('docker connection type requires docker 1.3 or higher')
- if LooseVersion(docker_version) >= LooseVersion('1.8.0'):
- self.can_copy_bothways = True
+ # Docker cp in 1.8.0 sets the owner and group to root rather than the
+ # user that the docker container is set to use by default.
+ #if LooseVersion(docker_version) >= LooseVersion('1.8.0'):
+ # self.can_copy_bothways = True
@staticmethod
def _sanitize_version(version):
@@ -93,7 +96,7 @@ class Connection(ConnectionBase):
cmd_output = subprocess.check_output(cmd)
for line in cmd_output.split('\n'):
- if line.startswith('Server version:'): # old docker versions
+ if line.startswith('Server version:'): # old docker versions
return self._sanitize_version(line.split()[2])
# no result yet, must be newer Docker version
@@ -110,7 +113,7 @@ class Connection(ConnectionBase):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
- display.vvv("ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
self._play_context.remote_user, host=self._play_context.remote_addr)
)
self._connected = True
@@ -123,7 +126,8 @@ class Connection(ConnectionBase):
# -i is needed to keep stdin open which allows pipelining to work
local_cmd = [self.docker_cmd, "exec", '-i', self._play_context.remote_addr, executable, '-c', cmd]
- display.vvv("EXEC %s" % (local_cmd), host=self._play_context.remote_addr)
+ display.vvv("EXEC %s" % (local_cmd,), host=self._play_context.remote_addr)
+ local_cmd = map(to_bytes, local_cmd)
p = subprocess.Popen(local_cmd, shell=False, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -157,6 +161,7 @@ class Connection(ConnectionBase):
if self.can_copy_bothways:
# only docker >= 1.8.1 can do this natively
args = [ self.docker_cmd, "cp", in_path, "%s:%s" % (self._play_context.remote_addr, out_path) ]
+ args = map(to_bytes, args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
@@ -167,7 +172,8 @@ class Connection(ConnectionBase):
# running containers, so we use docker exec to implement this
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
args = [self.docker_cmd, "exec", "-i", self._play_context.remote_addr, executable, "-c",
- "dd of={0} bs={1}".format(out_path, BUFSIZE)]
+ "dd of=%s bs=%s" % (out_path, BUFSIZE)]
+ args = map(to_bytes, args)
with open(in_path, 'rb') as in_file:
try:
p = subprocess.Popen(args, stdin=in_file,
@@ -190,6 +196,7 @@ class Connection(ConnectionBase):
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
+ args = map(to_bytes, args)
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/lib/ansible/plugins/connection/jail.py b/lib/ansible/plugins/connection/jail.py
index e665692543..8f88b6ad28 100644
--- a/lib/ansible/plugins/connection/jail.py
+++ b/lib/ansible/plugins/connection/jail.py
@@ -30,6 +30,7 @@ import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
+from ansible.utils.unicode import to_bytes
try:
from __main__ import display
@@ -83,7 +84,7 @@ class Connection(ConnectionBase):
return stdout.split()
def get_jail_path(self):
- p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
+ p = subprocess.Popen([self.jls_cmd, '-j', to_bytes(self.jail), '-q', 'path'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -109,7 +110,8 @@ class Connection(ConnectionBase):
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
- display.vvv("EXEC %s" % (local_cmd), host=self.jail)
+ display.vvv("EXEC %s" % (local_cmd,), host=self.jail)
+ local_cmd = map(to_bytes, local_cmd)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/lib/ansible/plugins/connection/libvirt_lxc.py b/lib/ansible/plugins/connection/libvirt_lxc.py
index dc82d98404..3bfff8b1c3 100644
--- a/lib/ansible/plugins/connection/libvirt_lxc.py
+++ b/lib/ansible/plugins/connection/libvirt_lxc.py
@@ -30,6 +30,7 @@ import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
+from ansible.utils.unicode import to_bytes
try:
from __main__ import display
@@ -65,7 +66,7 @@ class Connection(ConnectionBase):
return cmd
def _check_domain(self, domain):
- p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', domain],
+ p = subprocess.Popen([self.virsh, '-q', '-c', 'lxc:///', 'dominfo', to_bytes(domain)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode:
@@ -89,7 +90,8 @@ class Connection(ConnectionBase):
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
local_cmd = [self.virsh, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
- display.vvv("EXEC %s" % (local_cmd), host=self.lxc)
+ display.vvv("EXEC %s" % (local_cmd,), host=self.lxc)
+ local_cmd = map(to_bytes, local_cmd)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff --git a/lib/ansible/plugins/connection/local.py b/lib/ansible/plugins/connection/local.py
index 0244f90d59..5004c3698d 100644
--- a/lib/ansible/plugins/connection/local.py
+++ b/lib/ansible/plugins/connection/local.py
@@ -25,10 +25,13 @@ import select
import fcntl
import getpass
+from ansible.compat.six import text_type, binary_type
+
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
+from ansible.utils.unicode import to_bytes, to_str
try:
from __main__ import display
@@ -45,7 +48,7 @@ class Connection(ConnectionBase):
''' used to identify this connection object '''
return 'local'
- def _connect(self, port=None):
+ def _connect(self):
''' connect to the local host; nothing to do here '''
# Because we haven't made any remote connection we're running as
@@ -54,7 +57,7 @@ class Connection(ConnectionBase):
self._play_context.remote_user = getpass.getuser()
if not self._connected:
- display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
+ display.vvv(u"ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._play_context.remote_user, host=self._play_context.remote_addr))
self._connected = True
return self
@@ -69,9 +72,15 @@ class Connection(ConnectionBase):
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
- display.vvv("{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
+ display.vvv(u"{0} EXEC {1}".format(self._play_context.remote_addr, cmd))
# FIXME: cwd= needs to be set to the basedir of the playbook
display.debug("opening command with Popen()")
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = map(to_bytes, cmd)
+
p = subprocess.Popen(
cmd,
shell=isinstance(cmd, basestring),
@@ -117,22 +126,22 @@ class Connection(ConnectionBase):
super(Connection, self).put_file(in_path, out_path)
- display.vvv("{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
+ display.vvv(u"{0} PUT {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))
try:
shutil.copyfile(in_path, out_path)
except shutil.Error:
- raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
+ raise AnsibleError("failed to copy: {0} and {1} are the same".format(to_str(in_path), to_str(out_path)))
except IOError as e:
- raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, e))
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(to_str(out_path), to_str(e)))
def fetch_file(self, in_path, out_path):
''' fetch a file from local to local -- for copatibility '''
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
+ display.vvv(u"{0} FETCH {1} TO {2}".format(self._play_context.remote_addr, in_path, out_path))
self.put_file(in_path, out_path)
def close(self):
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
index 8bbc031271..0a0b2bb04b 100644
--- a/lib/ansible/plugins/connection/ssh.py
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -32,7 +32,8 @@ from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import unfrackpath, makedirs_safe
-from ansible.utils.unicode import to_bytes, to_unicode
+from ansible.utils.unicode import to_bytes, to_unicode, to_str
+from ansible.compat.six import text_type, binary_type
try:
from __main__ import display
@@ -60,7 +61,6 @@ class Connection(ConnectionBase):
# management here.
def _connect(self):
- self._connected = True
return self
@staticmethod
@@ -197,7 +197,7 @@ class Connection(ConnectionBase):
if user:
self._add_args(
"ANSIBLE_REMOTE_USER/remote_user/ansible_user/user/-u set",
- ("-o", "User={0}".format(self._play_context.remote_user))
+ ("-o", "User={0}".format(to_bytes(self._play_context.remote_user)))
)
self._add_args(
@@ -231,7 +231,7 @@ class Connection(ConnectionBase):
raise AnsibleError("Cannot write to ControlPath %s" % cpdir)
args = ("-o", "ControlPath={0}".format(
- C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir))
+ to_bytes(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=cpdir)))
)
self._add_args("found only ControlPersist; added ControlPath", args)
@@ -284,7 +284,7 @@ class Connection(ConnectionBase):
for l in chunk.splitlines(True):
suppress_output = False
- # display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
+ #display.debug("Examining line (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
if self._play_context.prompt and self.check_password_prompt(l):
display.debug("become_prompt: (source=%s, state=%s): '%s'" % (source, state, l.rstrip('\r\n')))
self._flags['become_prompt'] = True
@@ -320,8 +320,8 @@ class Connection(ConnectionBase):
Starts the command and communicates with it until it ends.
'''
- display_cmd = map(pipes.quote, cmd[:-1]) + [cmd[-1]]
- display.vvv('SSH: EXEC {0}'.format(' '.join(display_cmd)), host=self.host)
+ display_cmd = map(to_unicode, map(pipes.quote, cmd))
+ display.vvv(u'SSH: EXEC {0}'.format(u' '.join(display_cmd)), host=self.host)
# Start the given command. If we don't need to pipeline data, we can try
# to use a pseudo-tty (ssh will have been invoked with -tt). If we are
@@ -329,6 +329,12 @@ class Connection(ConnectionBase):
# old pipes.
p = None
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd = to_bytes(cmd)
+ else:
+ cmd = map(to_bytes, cmd)
+
if not in_data:
try:
# Make sure stdin is a proper pty to avoid tcgetattr errors
@@ -348,7 +354,7 @@ class Connection(ConnectionBase):
if self._play_context.password:
os.close(self.sshpass_pipe[0])
- os.write(self.sshpass_pipe[1], "{0}\n".format(self._play_context.password))
+ os.write(self.sshpass_pipe[1], "{0}\n".format(to_bytes(self._play_context.password)))
os.close(self.sshpass_pipe[1])
## SSH state machine
@@ -366,7 +372,7 @@ class Connection(ConnectionBase):
# only when using ssh. Otherwise we can send initial data straightaway.
state = states.index('ready_to_send')
- if 'ssh' in cmd:
+ if b'ssh' in cmd:
if self._play_context.prompt:
# We're requesting escalation with a password, so we have to
# wait for a password prompt.
@@ -458,12 +464,17 @@ class Connection(ConnectionBase):
tmp_stdout = tmp_stderr = ''
# If we see a privilege escalation prompt, we send the password.
+ # (If we're expecting a prompt but the escalation succeeds, we
+ # didn't need the password and can carry on regardless.)
- if states[state] == 'awaiting_prompt' and self._flags['become_prompt']:
- display.debug('Sending become_pass in response to prompt')
- stdin.write(self._play_context.become_pass + '\n')
- self._flags['become_prompt'] = False
- state += 1
+ if states[state] == 'awaiting_prompt':
+ if self._flags['become_prompt']:
+ display.debug('Sending become_pass in response to prompt')
+ stdin.write('{0}\n'.format(to_bytes(self._play_context.become_pass )))
+ self._flags['become_prompt'] = False
+ state += 1
+ elif self._flags['become_success']:
+ state += 1
# We've requested escalation (with or without a password), now we
# wait for an error message or a successful escalation.
@@ -534,7 +545,7 @@ class Connection(ConnectionBase):
stdin.close()
if C.HOST_KEY_CHECKING:
- if cmd[0] == "sshpass" and p.returncode == 6:
+ if cmd[0] == b"sshpass" and p.returncode == 6:
raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr
@@ -551,7 +562,7 @@ class Connection(ConnectionBase):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
+ display.vvv(u"ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._play_context.remote_user), host=self._play_context.remote_addr)
# we can only use tty when we are not pipelining the modules. piping
# data into /usr/bin/python inside a tty automatically invokes the
@@ -596,7 +607,7 @@ class Connection(ConnectionBase):
raise AnsibleConnectionFailure("Failed to connect to the host via ssh.")
except (AnsibleConnectionFailure, Exception) as e:
if attempt == remaining_tries - 1:
- raise e
+ raise
else:
pause = 2 ** attempt - 1
if pause > 30:
@@ -619,44 +630,46 @@ class Connection(ConnectionBase):
super(Connection, self).put_file(in_path, out_path)
- display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
+ display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self.host)
if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(to_str(in_path)))
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
- cmd = self._build_command('scp', in_path, '{0}:{1}'.format(host, pipes.quote(out_path)))
+ cmd = self._build_command('scp', in_path, u'{0}:{1}'.format(host, pipes.quote(out_path)))
in_data = None
else:
- cmd = self._build_command('sftp', host)
- in_data = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
+ cmd = self._build_command('sftp', to_bytes(host))
+ in_data = u"put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
+ in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
- raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr))
+ raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(to_str(out_path), to_str(stdout), to_str(stderr)))
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
- display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
# scp and sftp require square brackets for IPv6 addresses, but
# accept them for hostnames and IPv4 addresses too.
host = '[%s]' % self.host
if C.DEFAULT_SCP_IF_SSH:
- cmd = self._build_command('scp', '{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
+ cmd = self._build_command('scp', u'{0}:{1}'.format(host, pipes.quote(in_path)), out_path)
in_data = None
else:
cmd = self._build_command('sftp', host)
- in_data = "get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
+ in_data = u"get {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
+ in_data = to_bytes(in_data, nonstring='passthru')
(returncode, stdout, stderr) = self._run(cmd, in_data)
if returncode != 0:
@@ -670,6 +683,8 @@ class Connection(ConnectionBase):
# temporarily disabled as we are forced to currently close connections after every task because of winrm
# if self._connected and self._persistent:
# cmd = self._build_command('ssh', '-O', 'stop', self.host)
+ #
+ # cmd = map(to_bytes, cmd)
# p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# stdout, stderr = p.communicate()
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
index f75c7a7092..dec4878740 100644
--- a/lib/ansible/plugins/connection/winrm.py
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -24,6 +24,8 @@ import os
import re
import shlex
import traceback
+import json
+import xmltodict
from ansible.compat.six.moves.urllib.parse import urlunsplit
@@ -44,8 +46,10 @@ except ImportError:
from ansible.errors import AnsibleFileNotFound
from ansible.plugins.connection import ConnectionBase
+from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
from ansible.utils.unicode import to_bytes, to_unicode, to_str
+from ansible.utils.vars import combine_vars
try:
from __main__ import display
@@ -58,6 +62,7 @@ class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
module_implementation_preferences = ('.ps1', '')
+ become_methods = []
def __init__(self, *args, **kwargs):
@@ -68,7 +73,6 @@ class Connection(ConnectionBase):
self._shell_type = 'powershell'
# TODO: Add runas support
- self.become_methods_supported=[]
super(Connection, self).__init__(*args, **kwargs)
@@ -81,7 +85,7 @@ class Connection(ConnectionBase):
'''
Override WinRM-specific options from host variables.
'''
- host_vars = host.get_vars()
+ host_vars = combine_vars(host.get_group_vars(), host.get_vars())
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
@@ -96,10 +100,12 @@ class Connection(ConnectionBase):
self._winrm_realm = None
self._winrm_realm = host_vars.get('ansible_winrm_realm', self._winrm_realm) or None
+ transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
+
if HAVE_KERBEROS and ('@' in self._winrm_user or self._winrm_realm):
- self._winrm_transport = 'kerberos,plaintext'
+ self._winrm_transport = 'kerberos,%s' % transport_selector
else:
- self._winrm_transport = 'plaintext'
+ self._winrm_transport = transport_selector
self._winrm_transport = host_vars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, basestring):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
@@ -148,18 +154,46 @@ class Connection(ConnectionBase):
else:
raise AnsibleError('No transport found for WinRM connection')
- def _winrm_exec(self, command, args=(), from_exec=False):
- if from_exec:
- display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
- else:
- display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
+ rq = {'env:Envelope': protocol._get_soap_header(
+ resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
+ action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
+ shell_id=shell_id)}
+ stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
+ .setdefault('rsp:Stream', {})
+ stream['@Name'] = 'stdin'
+ stream['@CommandId'] = command_id
+ stream['#text'] = base64.b64encode(to_bytes(stdin))
+ if eof:
+ stream['@End'] = 'true'
+ rs = protocol.send_message(xmltodict.unparse(rq))
+
+ def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
+ self._connected = True
if not self.shell_id:
self.shell_id = self.protocol.open_shell(codepage=65001) # UTF-8
+ display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
+ if from_exec:
+ display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
+ else:
+ display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
- command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args))
+ stdin_push_failed = False
+ command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator == None))
+
+ # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that comes from this)
+ try:
+ if stdin_iterator:
+ for (data, is_last) in stdin_iterator:
+ self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
+ except:
+ stdin_push_failed = True
+
+ # NB: this could hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
+ # FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
@@ -167,6 +201,10 @@ class Connection(ConnectionBase):
display.vvvvvv('WINRM RESULT %r' % to_unicode(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_unicode(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_unicode(response.std_err), host=self._winrm_host)
+
+ if stdin_push_failed:
+ raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
+
return response
finally:
if command_id:
@@ -175,6 +213,7 @@ class Connection(ConnectionBase):
def _connect(self):
if not self.protocol:
self.protocol = self._winrm_connect()
+ self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
@@ -209,45 +248,78 @@ class Connection(ConnectionBase):
result.std_err = to_bytes(result.std_err)
return (result.status_code, result.std_out, result.std_err)
+ # FUTURE: determine buffer size at runtime via remote winrm config?
+ def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
+ in_size = os.path.getsize(in_path)
+ offset = 0
+ with open(in_path, 'rb') as in_file:
+ for out_data in iter((lambda:in_file.read(buffer_size)), ''):
+ offset += len(out_data)
+ self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
+ # yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
+ b64_data = base64.b64encode(out_data) + '\r\n'
+ # cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
+ yield b64_data, (in_file.tell() == in_size)
+
+ if offset == 0: # empty file, return an empty buffer + eof to close it
+ yield "", True
+
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(in_path):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
- with open(in_path) as in_file:
- in_size = os.path.getsize(in_path)
- script_template = '''
- $s = [System.IO.File]::OpenWrite("%s");
- [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
- $b = [System.Convert]::FromBase64String("%s");
- [void]$s.Write($b, 0, $b.length);
- [void]$s.SetLength(%d);
- [void]$s.Close();
- '''
- # Determine max size of data we can pass per command.
- script = script_template % (self._shell._escape(out_path), in_size, '', in_size)
- cmd = self._shell._encode_script(script)
- # Encode script with no data, subtract its length from 8190 (max
- # windows command length), divide by 2.67 (UTF16LE base64 command
- # encoding), then by 1.35 again (data base64 encoding).
- buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
- for offset in xrange(0, in_size or 1, buffer_size):
- try:
- out_data = in_file.read(buffer_size)
- if offset == 0:
- if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
- out_path = out_path + '.ps1'
- b64_data = base64.b64encode(out_data)
- script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
- display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
- cmd_parts = self._shell._encode_script(script, as_list=True)
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
- if result.status_code != 0:
- raise IOError(to_str(result.std_err))
- except Exception:
- traceback.print_exc()
- raise AnsibleError('failed to transfer file to "%s"' % out_path)
+
+ script_template = u'''
+ begin {{
+ $path = "{0}"
+
+ $DebugPreference = "Continue"
+ $ErrorActionPreference = "Stop"
+ Set-StrictMode -Version 2
+
+ $fd = [System.IO.File]::Create($path)
+
+ $sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
+
+ $bytes = @() #initialize for empty file case
+ }}
+ process {{
+ $bytes = [System.Convert]::FromBase64String($input)
+ $sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
+ $fd.Write($bytes, 0, $bytes.Length)
+ }}
+ end {{
+ $sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
+
+ $hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
+
+ $fd.Close()
+
+ Write-Output "{{""sha1"":""$hash""}}"
+ }}
+ '''
+
+ script = script_template.format(self._shell._escape(out_path))
+ cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
+
+ result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
+ # TODO: improve error handling
+ if result.status_code != 0:
+ raise AnsibleError(to_str(result.std_err))
+
+ put_output = json.loads(result.std_out)
+ remote_sha1 = put_output.get("sha1")
+
+ if not remote_sha1:
+ raise AnsibleError("Remote sha1 was not returned")
+
+ local_sha1 = secure_hash(in_path)
+
+ if not remote_sha1 == local_sha1:
+ raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_str(remote_sha1), to_str(local_sha1)))
+
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
@@ -313,5 +385,8 @@ class Connection(ConnectionBase):
def close(self):
if self.protocol and self.shell_id:
+ display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
- self.shell_id = None
+ self.shell_id = None
+ self.protocol = None
+ self._connected = False
diff --git a/lib/ansible/plugins/connection/zone.py b/lib/ansible/plugins/connection/zone.py
index f0001d3c9e..b65c80b73f 100644
--- a/lib/ansible/plugins/connection/zone.py
+++ b/lib/ansible/plugins/connection/zone.py
@@ -31,6 +31,7 @@ import traceback
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.connection import ConnectionBase
+from ansible.utils import to_bytes
try:
from __main__ import display
@@ -45,13 +46,8 @@ class Connection(ConnectionBase):
''' Local zone based connections '''
transport = 'zone'
- # Pipelining may work. Someone needs to test by setting this to True and
- # having pipelining=True in their ansible.cfg
- has_pipelining = False
- # Some become_methods may work in v2 (sudo works for other chroot-based
- # plugins while su seems to be failing). If some work, check chroot.py to
- # see how to disable just some methods.
- become_methods = frozenset()
+ has_pipelining = True
+ become_methods = frozenset(C.BECOME_METHODS).difference(('su',))
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
@@ -61,8 +57,8 @@ class Connection(ConnectionBase):
if os.geteuid() != 0:
raise AnsibleError("zone connection requires running as root")
- self.zoneadm_cmd = self._search_executable('zoneadm')
- self.zlogin_cmd = self._search_executable('zlogin')
+ self.zoneadm_cmd = to_bytes(self._search_executable('zoneadm'))
+ self.zlogin_cmd = to_bytes(self._search_executable('zlogin'))
if self.zone not in self.list_zones():
raise AnsibleError("incorrect zone name %s" % self.zone)
@@ -91,7 +87,7 @@ class Connection(ConnectionBase):
def get_zone_path(self):
#solaris10vm# zoneadm -z cswbuild list -p
#-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
- process = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'],
+ process = subprocess.Popen([self.zoneadm_cmd, '-z', to_bytes(self.zone), 'list', '-p'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -114,13 +110,11 @@ class Connection(ConnectionBase):
compared to exec_command() it looses some niceties like being able to
return the process's exit code immediately.
'''
- # FIXME: previous code took pains not to invoke /bin/sh and left out
- # -c. Not sure why as cmd could contain shell metachars (like
- # cmd = "mkdir -p $HOME/pathname && echo $HOME/pathname") which
- # probably wouldn't work without a shell. Get someone to test that
- # this connection plugin works and then we can remove this note
- executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else '/bin/sh'
- local_cmd = [self.zlogin_cmd, self.zone, executable, '-c', cmd]
+ # Note: zlogin invokes a shell (just like ssh does) so we do not pass
+ # this through /bin/sh -c here. Instead it goes through the shell
+ # that zlogin selects.
+ local_cmd = [self.zlogin_cmd, self.zone, cmd]
+ local_cmd = map(to_bytes, local_cmd)
display.vvv("EXEC %s" % (local_cmd), host=self.zone)
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
@@ -132,13 +126,6 @@ class Connection(ConnectionBase):
''' run a command on the zone '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
- # TODO: Check whether we can send the command to stdin via
- # p.communicate(in_data)
- # If we can, then we can change this plugin to has_pipelining=True and
- # remove the error if in_data is given.
- if in_data:
- raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
p = self._buffered_exec_command(cmd)
stdout, stderr = p.communicate(in_data)
diff --git a/lib/ansible/plugins/filter/core.py b/lib/ansible/plugins/filter/core.py
index d5e1a12e53..fed5097d91 100644
--- a/lib/ansible/plugins/filter/core.py
+++ b/lib/ansible/plugins/filter/core.py
@@ -100,9 +100,11 @@ def to_nice_json(a, *args, **kw):
else:
if major >= 2:
return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
+ try:
+ return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
+ except:
# Fallback to the to_json filter
return to_json(a, *args, **kw)
- return json.dumps(a, indent=4, sort_keys=True, cls=AnsibleJSONEncoder, *args, **kw)
def bool(a):
''' return a bool for the arg '''
@@ -223,7 +225,11 @@ def get_encrypted_password(password, hashtype='sha512', salt=None):
if hashtype in cryptmethod:
if salt is None:
r = SystemRandom()
- salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
+ if hashtype in ['md5']:
+ saltsize = 8
+ else:
+ saltsize = 16
+ salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(saltsize)])
if not HAS_PASSLIB:
if sys.platform.startswith('darwin'):
@@ -339,6 +345,18 @@ def comment(text, style='plain', **kw):
str_postfix,
str_end)
+def extract(item, container, morekeys=None):
+ from jinja2.runtime import Undefined
+
+ value = container[item]
+
+ if value is not Undefined and morekeys is not None:
+ if not isinstance(morekeys, list):
+ morekeys = [morekeys]
+
+ value = reduce(lambda d, k: d[k], morekeys, value)
+
+ return value
class FilterModule(object):
''' Ansible core jinja2 filters '''
@@ -415,4 +433,7 @@ class FilterModule(object):
# comment-style decoration
'comment': comment,
+
+ # array and dict lookups
+ 'extract': extract,
}
diff --git a/lib/ansible/plugins/lookup/consul_kv.py b/lib/ansible/plugins/lookup/consul_kv.py
index 47eaa71bc8..27cf3dbef3 100755..100644
--- a/lib/ansible/plugins/lookup/consul_kv.py
+++ b/lib/ansible/plugins/lookup/consul_kv.py
@@ -75,7 +75,7 @@ class LookupModule(LookupBase):
def __init__(self, loader=None, templar=None, **kwargs):
- super(LookupBase, self).__init__(loader, templar, **kwargs)
+ super(LookupModule, self).__init__(loader, templar, **kwargs)
self.agent_url = 'http://localhost:8500'
if os.getenv('ANSIBLE_CONSUL_URL') is not None:
diff --git a/lib/ansible/plugins/lookup/inventory_hostnames.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
index a86d2270bb..651055b6f7 100644
--- a/lib/ansible/plugins/lookup/inventory_hostnames.py
+++ b/lib/ansible/plugins/lookup/inventory_hostnames.py
@@ -26,10 +26,15 @@ class LookupModule(LookupBase):
def get_hosts(self, variables, pattern):
hosts = []
- if pattern in variables['groups']:
- hosts = variables['groups'][pattern]
- elif pattern in variables['groups']['all']:
- hosts = [pattern]
+ if pattern[0] in ('!','&'):
+ obj = pattern[1:]
+ else:
+ obj = pattern
+
+ if obj in variables['groups']:
+ hosts = variables['groups'][obj]
+ elif obj in variables['groups']['all']:
+ hosts = [obj]
return hosts
def run(self, terms, variables=None, **kwargs):
diff --git a/lib/ansible/plugins/shell/csh.py b/lib/ansible/plugins/shell/csh.py
index 1c383d133c..bd210f12fe 100644
--- a/lib/ansible/plugins/shell/csh.py
+++ b/lib/ansible/plugins/shell/csh.py
@@ -24,6 +24,8 @@ class ShellModule(ShModule):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\\\n'
_SHELL_REDIRECT_ALLNULL = '>& /dev/null'
+ _SHELL_SUB_LEFT = '"`'
+ _SHELL_SUB_RIGHT = '`"'
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py
index ff78941e19..342de99e5f 100644
--- a/lib/ansible/plugins/shell/fish.py
+++ b/lib/ansible/plugins/shell/fish.py
@@ -21,5 +21,12 @@ from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
+ _SHELL_AND = '; and'
+ _SHELL_OR = '; or'
+ _SHELL_SUB_LEFT = '('
+ _SHELL_SUB_RIGHT = ')'
+ _SHELL_GROUP_LEFT = ''
+ _SHELL_GROUP_RIGHT = ''
+
def env_prefix(self, **kwargs):
return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/lib/ansible/plugins/shell/powershell.py b/lib/ansible/plugins/shell/powershell.py
index ead0c02fa6..096a0cf95d 100644
--- a/lib/ansible/plugins/shell/powershell.py
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -49,6 +49,13 @@ class ShellModule(object):
return path
return '"%s"' % path
+ # powershell requires that script files end with .ps1
+ def get_remote_filename(self, base_name):
+ if not base_name.strip().lower().endswith('.ps1'):
+ return base_name.strip() + '.ps1'
+
+ return base_name.strip()
+
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
path = self._unquote(path)
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
index 54d2b57c8f..8b20338a60 100644
--- a/lib/ansible/plugins/shell/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -33,6 +33,12 @@ class ShellModule(object):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
_SHELL_REDIRECT_ALLNULL = '> /dev/null 2>&1'
+ _SHELL_AND = '&&'
+ _SHELL_OR = '||'
+ _SHELL_SUB_LEFT = '"$('
+ _SHELL_SUB_RIGHT = ')"'
+ _SHELL_GROUP_LEFT = '('
+ _SHELL_GROUP_RIGHT = ')'
def env_prefix(self, **kwargs):
'''Build command prefix with environment variables.'''
@@ -47,6 +53,10 @@ class ShellModule(object):
def join_path(self, *args):
return os.path.join(*args)
+ # some shells (eg, powershell) are snooty about filenames/extensions, this lets the shell plugin have a say
+ def get_remote_filename(self, base_name):
+ return base_name.strip()
+
def path_has_trailing_slash(self, path):
return path.endswith('/')
@@ -67,14 +77,14 @@ class ShellModule(object):
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
- cmd = 'mkdir -p "`echo %s`"' % basetmp
- cmd += ' && echo "`echo %s`"' % basetmp
+ cmd = 'mkdir -p %s echo %s %s' % (self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
+ cmd += ' %s echo %s echo %s %s' % (self._SHELL_AND, self._SHELL_SUB_LEFT, basetmp, self._SHELL_SUB_RIGHT)
# change the umask in a subshell to achieve the desired mode
# also for directories created with `mkdir -p`
if mode:
tmp_umask = 0o777 & ~mode
- cmd = '(umask %o && %s)' % (tmp_umask, cmd)
+ cmd = '%s umask %o %s %s %s' % (self._SHELL_GROUP_LEFT, tmp_umask, self._SHELL_AND, cmd, self._SHELL_GROUP_RIGHT)
return cmd
@@ -124,14 +134,14 @@ class ShellModule(object):
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = pipes.quote(path)
- test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
+ test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)
csums = [
- "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
- "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
+ u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
+ u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
]
- cmd = " || ".join(csums)
- cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
+ cmd = (" %s " % self._SHELL_OR).join(csums)
+ cmd = "%s; %s %s (echo \'0 \'%s)" % (test, cmd, self._SHELL_OR, shell_escaped_path)
return cmd
def build_module_command(self, env_string, shebang, cmd, arg_path=None, rm_tmp=None):
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
index 3cdec5b573..27e7b2f974 100644
--- a/lib/ansible/plugins/strategy/__init__.py
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -30,6 +30,8 @@ from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
+from ansible.executor.play_iterator import PlayIterator
+from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
@@ -37,7 +39,9 @@ from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
+from ansible.utils.unicode import to_unicode
from ansible.vars.unsafe_proxy import wrap_var
+from ansible.vars import combine_vars
try:
from __main__ import display
@@ -107,9 +111,7 @@ class StrategyBase:
failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
- # send the stats callback
- self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
-
+ # return the appropriate code, depending on the status hosts after the run
if len(unreachable_hosts) > 0:
return 3
elif len(failed_hosts) > 0:
@@ -139,42 +141,32 @@ class StrategyBase:
display.debug("entering _queue_task() for %s/%s" % (host, task))
+ task_vars['hostvars'] = self._tqm.hostvars
# and then queue the new task
display.debug("%s - putting task (%s) in queue" % (host, task))
try:
display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
- (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
- self._cur_worker += 1
- if self._cur_worker >= len(self._workers):
- self._cur_worker = 0
-
# create a dummy object with plugin loaders set as an easier
# way to share them with the forked processes
shared_loader_obj = SharedPluginLoaderObj()
- # compress (and convert) the data if so configured, which can
- # help a lot when the variable dictionary is huge. We pop the
- # hostvars out of the task variables right now, due to the fact
- # that they're not JSON serializable
- compressed_vars = False
- hostvars = task_vars.pop('hostvars', None)
- if C.DEFAULT_VAR_COMPRESSION_LEVEL > 0:
- zip_vars = zlib.compress(json.dumps(task_vars), C.DEFAULT_VAR_COMPRESSION_LEVEL)
- compressed_vars = True
- # we're done with the original dict now, so delete it to
- # try and reclaim some memory space, which is helpful if the
- # data contained in the dict is very large
- del task_vars
- else:
- zip_vars = task_vars # noqa (pyflakes false positive because task_vars is deleted in the conditional above)
-
- # and queue the task
- main_q.put((host, task, self._loader.get_basedir(), zip_vars, hostvars, compressed_vars, play_context, shared_loader_obj), block=False)
-
- # nuke the hostvars object too, as its no longer needed
- del hostvars
+ queued = False
+ while True:
+ (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
+ if worker_prc is None or not worker_prc.is_alive():
+ worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
+ self._workers[self._cur_worker][0] = worker_prc
+ worker_prc.start()
+ queued = True
+ self._cur_worker += 1
+ if self._cur_worker >= len(self._workers):
+ self._cur_worker = 0
+ time.sleep(0.0001)
+ if queued:
+ break
+ del task_vars
self._pending_results += 1
except (EOFError, IOError, AssertionError) as e:
# most likely an abort
@@ -182,7 +174,7 @@ class StrategyBase:
return
display.debug("exiting _queue_task() for %s/%s" % (host, task))
- def _process_pending_results(self, iterator):
+ def _process_pending_results(self, iterator, one_pass=False):
'''
Reads results off the final queue and takes appropriate action
based on the result (executing callbacks, updating state, etc.).
@@ -192,13 +184,23 @@ class StrategyBase:
while not self._final_q.empty() and not self._tqm._terminated:
try:
- result = self._final_q.get(block=False)
+ result = self._final_q.get()
display.debug("got result from result worker: %s" % ([text_type(x) for x in result],))
+ # helper method, used to find the original host from the one
+ # returned in the result/message, which has been serialized and
+ # thus had some information stripped from it to speed up the
+ # serialization process
+ def get_original_host(host):
+ if host.name in self._inventory._hosts_cache:
+ return self._inventory._hosts_cache[host.name]
+ else:
+ return self._inventory.get_host(host.name)
+
# all host status messages contain 2 entries: (msg, task_result)
if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
task_result = result[1]
- host = task_result._host
+ host = get_original_host(task_result._host)
task = task_result._task
if result[0] == 'host_task_failed' or task_result.is_failed():
if not task.ignore_errors:
@@ -208,8 +210,10 @@ class StrategyBase:
[iterator.mark_host_failed(h) for h in self._inventory.get_hosts(iterator._play.hosts) if h.name not in self._tqm._unreachable_hosts]
else:
iterator.mark_host_failed(host)
- self._tqm._failed_hosts[host.name] = True
- self._tqm._stats.increment('failures', host.name)
+ (state, tmp_task) = iterator.get_next_task_for_host(host, peek=True)
+ if not state or state.run_state != PlayIterator.ITERATING_RESCUE:
+ self._tqm._failed_hosts[host.name] = True
+ self._tqm._stats.increment('failures', host.name)
else:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
@@ -227,7 +231,7 @@ class StrategyBase:
self._tqm._stats.increment('changed', host.name)
self._tqm.send_callback('v2_runner_on_ok', task_result)
- if self._diff and 'diff' in task_result._result:
+ if self._diff:
self._tqm.send_callback('v2_on_file_diff', task_result)
self._pending_results -= 1
@@ -252,7 +256,7 @@ class StrategyBase:
self._add_host(new_host_info, iterator)
elif result[0] == 'add_group':
- host = result[1]
+ host = get_original_host(result[1])
result_item = result[2]
self._add_group(host, result_item)
@@ -260,58 +264,78 @@ class StrategyBase:
task_result = result[1]
handler_name = result[2]
- original_task = iterator.get_original_task(task_result._host, task_result._task)
+ original_host = get_original_host(task_result._host)
+ original_task = iterator.get_original_task(original_host, task_result._task)
if handler_name not in self._notified_handlers:
self._notified_handlers[handler_name] = []
- if task_result._host not in self._notified_handlers[handler_name]:
- self._notified_handlers[handler_name].append(task_result._host)
+ if original_host not in self._notified_handlers[handler_name]:
+ self._notified_handlers[handler_name].append(original_host)
display.vv("NOTIFIED HANDLER %s" % (handler_name,))
elif result[0] == 'register_host_var':
# essentially the same as 'set_host_var' below, however we
# never follow the delegate_to value for registered vars and
# the variable goes in the fact_cache
- host = result[1]
- var_name = result[2]
+ host = get_original_host(result[1])
+ task = result[2]
var_value = wrap_var(result[3])
+ var_name = task.register
- self._variable_manager.set_nonpersistent_facts(host, {var_name: var_value})
+ if task.run_once:
+ host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
+ else:
+ host_list = [host]
+
+ for target_host in host_list:
+ self._variable_manager.set_nonpersistent_facts(target_host, {var_name: var_value})
elif result[0] in ('set_host_var', 'set_host_facts'):
- host = result[1]
+ host = get_original_host(result[1])
task = result[2]
item = result[3]
- if task.delegate_to is not None:
+ # find the host we're actually refering too here, which may
+ # be a host that is not really in inventory at all
+ if task.delegate_to is not None and task.delegate_facts:
task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
self.add_tqm_variables(task_vars, play=iterator._play)
if item is not None:
task_vars['item'] = item
templar = Templar(loader=self._loader, variables=task_vars)
host_name = templar.template(task.delegate_to)
- target_host = self._inventory.get_host(host_name)
- if target_host is None:
- target_host = Host(name=host_name)
+ actual_host = self._inventory.get_host(host_name)
+ if actual_host is None:
+ actual_host = Host(name=host_name)
+ else:
+ actual_host = host
+
+ if task.run_once:
+ host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
- target_host = host
+ host_list = [actual_host]
if result[0] == 'set_host_var':
var_name = result[4]
var_value = result[5]
-
- self._variable_manager.set_host_variable(target_host, var_name, var_value)
+ for target_host in host_list:
+ self._variable_manager.set_host_variable(target_host, var_name, var_value)
elif result[0] == 'set_host_facts':
facts = result[4]
- if task.action == 'set_fact':
- self._variable_manager.set_nonpersistent_facts(target_host, facts)
- else:
- self._variable_manager.set_host_facts(target_host, facts)
+ for target_host in host_list:
+ if task.action == 'set_fact':
+ self._variable_manager.set_nonpersistent_facts(target_host, facts)
+ else:
+ self._variable_manager.set_host_facts(target_host, facts)
else:
raise AnsibleError("unknown result message received: %s" % result[0])
+
except Queue.Empty:
- time.sleep(0.01)
+ time.sleep(0.0001)
+
+ if one_pass:
+ break
return ret_results
@@ -327,7 +351,7 @@ class StrategyBase:
while self._pending_results > 0 and not self._tqm._terminated:
results = self._process_pending_results(iterator)
ret_results.extend(results)
- time.sleep(0.01)
+ time.sleep(0.0001)
display.debug("no more pending results, returning what we have")
return ret_results
@@ -350,9 +374,8 @@ class StrategyBase:
allgroup.add_host(new_host)
# Set/update the vars for this host
- new_vars = host_info.get('host_vars', dict())
- new_host.vars = self._inventory.get_host_vars(new_host)
- new_host.vars.update(new_vars)
+ new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host))
+ new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict()))
new_groups = host_info.get('groups', [])
for group_name in new_groups:
@@ -416,6 +439,7 @@ class StrategyBase:
Loads an included YAML file of tasks, applying the optional set of variables.
'''
+ display.debug("loading included file: %s" % included_file._filename)
try:
data = self._loader.load_from_file(included_file._filename)
if data is None:
@@ -442,7 +466,7 @@ class StrategyBase:
# mark all of the hosts including this file as failed, send callbacks,
# and increment the stats for this host
for host in included_file._hosts:
- tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
+ tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_unicode(e)))
iterator.mark_host_failed(host)
self._tqm._failed_hosts[host.name] = True
self._tqm._stats.increment('failures', host.name)
@@ -468,7 +492,7 @@ class StrategyBase:
tags = [ tags ]
if len(tags) > 0:
if len(b._task_include.tags) > 0:
- raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task)",
+ raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
b._task_include.tags = tags
@@ -476,6 +500,7 @@ class StrategyBase:
# finally, send the callback and return the list of blocks loaded
self._tqm.send_callback('v2_playbook_on_include', included_file)
+ display.debug("done processing included file")
return block_list
def run_handlers(self, iterator, play_context):
@@ -520,7 +545,10 @@ class StrategyBase:
# self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
# result = False
# break
+ saved_name = handler.name
+ handler.name = handler_name
self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
+ handler.name = saved_name
if notified_hosts is None:
notified_hosts = self._notified_handlers[handler_name]
@@ -552,6 +580,7 @@ class StrategyBase:
host_results,
self._tqm,
iterator=iterator,
+ inventory=self._inventory,
loader=self._loader,
variable_manager=self._variable_manager
)
@@ -570,6 +599,7 @@ class StrategyBase:
for task in block.block:
result = self._do_handler_run(
handler=task,
+ handler_name=None,
iterator=iterator,
play_context=play_context,
notified_hosts=included_file._hosts[:],
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py
index 2d3c184a8c..da123ce3b7 100644
--- a/lib/ansible/plugins/strategy/free.py
+++ b/lib/ansible/plugins/strategy/free.py
@@ -78,7 +78,7 @@ class StrategyModule(StrategyBase):
(state, task) = iterator.get_next_task_for_host(host, peek=True)
display.debug("free host state: %s" % state)
display.debug("free host task: %s" % task)
- if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
+ if not iterator.is_failed(host) and host_name not in self._tqm._unreachable_hosts and task:
# set the flag so the outer loop knows we've still found
# some work which needs to be done
@@ -122,6 +122,8 @@ class StrategyModule(StrategyBase):
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
+ if task.any_errors_fatal:
+ display.warning("Using any_errors_fatal with the free strategy is not supported, as tasks are executed independently on each host")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
@@ -135,19 +137,25 @@ class StrategyModule(StrategyBase):
if last_host == starting_host:
break
- results = self._process_pending_results(iterator)
+ results = self._wait_on_pending_results(iterator)
host_results.extend(results)
try:
- included_files = IncludedFile.process_include_results(host_results, self._tqm, iterator=iterator,
- loader=self._loader, variable_manager=self._variable_manager)
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ self._tqm,
+ iterator=iterator,
+ inventory=self._inventory,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
except AnsibleError as e:
return False
if len(included_files) > 0:
+ all_blocks = dict((host, []) for host in hosts_left)
for included_file in included_files:
- # included hosts get the task list while those excluded get an equal-length
- # list of noop tasks, to make sure that they continue running in lock-step
+ display.debug("collecting new blocks for %s" % included_file)
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
except AnsibleError as e:
@@ -156,22 +164,21 @@ class StrategyModule(StrategyBase):
display.warning(str(e))
continue
- for host in hosts_left:
- if host in included_file._hosts:
- task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
- final_blocks = []
- for new_block in new_blocks:
- final_blocks.append(new_block.filter_tagged_tasks(play_context, task_vars))
- iterator.add_tasks(host, final_blocks)
+ for new_block in new_blocks:
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=included_file._task)
+ final_block = new_block.filter_tagged_tasks(play_context, task_vars)
+ for host in hosts_left:
+ if host in included_file._hosts:
+ all_blocks[host].append(final_block)
+ display.debug("done collecting new blocks for %s" % included_file)
- # pause briefly so we don't spin lock
- time.sleep(0.05)
+ display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+ display.debug("done adding collected blocks to iterator")
- try:
- results = self._wait_on_pending_results(iterator)
- host_results.extend(results)
- except Exception as e:
- pass
+ # pause briefly so we don't spin lock
+ time.sleep(0.001)
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
index 65240ef8fa..804cfadc77 100644
--- a/lib/ansible/plugins/strategy/linear.py
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -54,7 +54,8 @@ class StrategyModule(StrategyBase):
host_tasks = {}
display.debug("building list of next tasks for hosts")
for host in hosts:
- host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
+ if not iterator.is_failed(host):
+ host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
display.debug("done building task lists")
num_setups = 0
@@ -62,19 +63,26 @@ class StrategyModule(StrategyBase):
num_rescue = 0
num_always = 0
- lowest_cur_block = len(iterator._blocks)
-
display.debug("counting tasks in each state of execution")
- for (k, v) in iteritems(host_tasks):
- if v is None:
- continue
-
+ host_tasks_to_run = [(host, state_task)
+ for host, state_task in iteritems(host_tasks)
+ if state_task and state_task[1]]
+
+ if host_tasks_to_run:
+ lowest_cur_block = min(
+ (s.cur_block for h, (s, t) in host_tasks_to_run
+ if s.run_state != PlayIterator.ITERATING_COMPLETE))
+ else:
+ # empty host_tasks_to_run will just run till the end of the function
+ # without ever touching lowest_cur_block
+ lowest_cur_block = None
+
+ for (k, v) in host_tasks_to_run:
(s, t) = v
- if t is None:
- continue
- if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
- lowest_cur_block = s.cur_block
+ if s.cur_block > lowest_cur_block:
+ # Not the current block, ignore it
+ continue
if s.run_state == PlayIterator.ITERATING_SETUP:
num_setups += 1
@@ -98,7 +106,7 @@ class StrategyModule(StrategyBase):
rvals = []
display.debug("starting to advance hosts")
for host in hosts:
- host_state_task = host_tasks[host.name]
+ host_state_task = host_tasks.get(host.name)
if host_state_task is None:
continue
(s, t) = host_state_task
@@ -169,6 +177,10 @@ class StrategyModule(StrategyBase):
skip_rest = False
choose_step = True
+ # flag set if task is set to any_errors_fatal
+ any_errors_fatal = False
+
+ results = []
for (host, task) in host_tasks:
if not task:
continue
@@ -179,14 +191,15 @@ class StrategyModule(StrategyBase):
run_once = False
work_to_do = True
+ if task.any_errors_fatal:
+ any_errors_fatal = True
+
# test to see if the task across all hosts points to an action plugin which
# sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
# will only send this task to the first host in the list.
try:
action = action_loader.get(task.action, class_only=True)
- if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
- run_once = True
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
@@ -218,6 +231,8 @@ class StrategyModule(StrategyBase):
templar = Templar(loader=self._loader, variables=task_vars)
display.debug("done getting variables")
+ run_once = templar.template(task.run_once)
+
if not callback_sent:
display.debug("sending task start callback, copying the task so we can template it temporarily")
saved_name = task.name
@@ -240,15 +255,17 @@ class StrategyModule(StrategyBase):
self._queue_task(host, task, task_vars, play_context)
# if we're bypassing the host loop, break out now
- if run_once:
+ if run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
break
+ results += self._process_pending_results(iterator, one_pass=True)
+
# go to next host/task group
if skip_rest:
continue
display.debug("done queuing things up, now waiting for results queue to drain")
- results = self._wait_on_pending_results(iterator)
+ results += self._wait_on_pending_results(iterator)
host_results.extend(results)
if not work_to_do and len(iterator.get_failed_hosts()) > 0:
@@ -258,51 +275,94 @@ class StrategyModule(StrategyBase):
break
try:
- included_files = IncludedFile.process_include_results(host_results, self._tqm,
- iterator=iterator, loader=self._loader, variable_manager=self._variable_manager)
+ included_files = IncludedFile.process_include_results(
+ host_results,
+ self._tqm,
+ iterator=iterator,
+ inventory=self._inventory,
+ loader=self._loader,
+ variable_manager=self._variable_manager
+ )
except AnsibleError as e:
return False
+ include_failure = False
if len(included_files) > 0:
+ display.debug("we have included files to process")
noop_task = Task()
noop_task.action = 'meta'
noop_task.args['_raw_params'] = 'noop'
noop_task.set_loader(iterator._play._loader)
+ display.debug("generating all_blocks data")
all_blocks = dict((host, []) for host in hosts_left)
+ display.debug("done generating all_blocks data")
for included_file in included_files:
+ display.debug("processing included file: %s" % included_file._filename)
# included hosts get the task list while those excluded get an equal-length
# list of noop tasks, to make sure that they continue running in lock-step
try:
new_blocks = self._load_included_file(included_file, iterator=iterator)
+ display.debug("iterating over new_blocks loaded from include file")
for new_block in new_blocks:
+ task_vars = self._variable_manager.get_vars(
+ loader=self._loader,
+ play=iterator._play,
+ task=included_file._task,
+ )
+ display.debug("filtering new block on tags")
+ final_block = new_block.filter_tagged_tasks(play_context, task_vars)
+ display.debug("done filtering new block on tags")
+
noop_block = Block(parent_block=task._block)
noop_block.block = [noop_task for t in new_block.block]
noop_block.always = [noop_task for t in new_block.always]
noop_block.rescue = [noop_task for t in new_block.rescue]
+
for host in hosts_left:
if host in included_file._hosts:
- task_vars = self._variable_manager.get_vars(loader=self._loader,
- play=iterator._play, host=host, task=included_file._task)
- final_block = new_block.filter_tagged_tasks(play_context, task_vars)
all_blocks[host].append(final_block)
else:
all_blocks[host].append(noop_block)
+ display.debug("done iterating over new_blocks loaded from include file")
except AnsibleError as e:
for host in included_file._hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.error(e, wrap_text=False)
+ include_failure = True
continue
# finally go through all of the hosts and append the
# accumulated blocks to their list of tasks
+ display.debug("extending task lists for all hosts with included blocks")
+
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
+ display.debug("done extending task lists")
+ display.debug("done processing included files")
+
display.debug("results queue empty")
+
+ display.debug("checking for any_errors_fatal")
+ failed_hosts = []
+ for res in results:
+ if res.is_failed() or res.is_unreachable():
+ failed_hosts.append(res._host.name)
+
+ # if any_errors_fatal and we had an error, mark all hosts as failed
+ if any_errors_fatal and len(failed_hosts) > 0:
+ for host in hosts_left:
+ # don't double-mark hosts, or the iterator will potentially
+ # fail them out of the rescue/always states
+ if host.name not in failed_hosts:
+ self._tqm._failed_hosts[host.name] = True
+ iterator.mark_host_failed(host)
+ display.debug("done checking for any_errors_fatal")
+
except (IOError, EOFError) as e:
display.debug("got IOError/EOFError in task loop: %s" % e)
# most likely an abort, return failed
diff --git a/lib/ansible/plugins/test/core.py b/lib/ansible/plugins/test/core.py
index 06fa687e24..fb9e0fb86e 100644
--- a/lib/ansible/plugins/test/core.py
+++ b/lib/ansible/plugins/test/core.py
@@ -36,7 +36,7 @@ def failed(*a, **kw):
def success(*a, **kw):
''' Test if task result yields success '''
- return not failed(*a, **kw) and not skipped(*a, **kw)
+ return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
@@ -89,14 +89,18 @@ class TestModule(object):
def tests(self):
return {
# failure testing
- 'failed' : failed,
- 'success' : success,
+ 'failed' : failed,
+ 'failure' : failed,
+ 'success' : success,
+ 'succeeded' : success,
# changed testing
'changed' : changed,
+ 'change' : changed,
# skip testing
'skipped' : skipped,
+ 'skip' : skipped,
# regex
'match': match,
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
index bed1c832c8..8ce2358eb1 100644
--- a/lib/ansible/template/__init__.py
+++ b/lib/ansible/template/__init__.py
@@ -164,7 +164,8 @@ class Templar:
self.block_end = self.environment.block_end_string
self.variable_start = self.environment.variable_start_string
self.variable_end = self.environment.variable_end_string
- self._clean_regex = re.compile(r'(?:%s[%s%s]|[%s%s]%s)' % (self.variable_start[0], self.variable_start[1], self.block_start[1], self.block_end[0], self.variable_end[0], self.variable_end[1]))
+ self._clean_regex = re.compile(r'(?:%s|%s|%s|%s)' % (self.variable_start, self.block_start, self.block_end, self.variable_end))
+ self._no_type_regex = re.compile(r'.*\|(?:%s)\s*(?:%s)?$' % ('|'.join(C.STRING_TYPE_FILTERS), self.variable_end))
def _get_filters(self):
'''
@@ -278,8 +279,7 @@ class Templar:
if fail_on_undefined is None:
fail_on_undefined = self._fail_on_undefined_errors
- # Don't template unsafe variables, instead drop them back down to
- # their constituent type.
+ # Don't template unsafe variables, instead drop them back down to their constituent type.
if hasattr(variable, '__UNSAFE__'):
if isinstance(variable, text_type):
return self._clean_data(text_type(variable))
@@ -294,6 +294,7 @@ class Templar:
if isinstance(variable, string_types):
result = variable
+
if self._contains_vars(variable):
# Check to see if the string we are trying to render is just referencing a single
@@ -319,7 +320,7 @@ class Templar:
result = self._cached_result[sha1_hash]
else:
result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines, escape_backslashes=escape_backslashes, fail_on_undefined=fail_on_undefined, overrides=overrides)
- if convert_data:
+ if convert_data and not self._no_type_regex.match(variable):
# if this looks like a dictionary or list, convert it to such using the safe_eval method
if (result.startswith("{") and not result.startswith(self.environment.variable_start_string)) or \
result.startswith("[") or result in ("True", "False"):
@@ -391,6 +392,8 @@ class Templar:
instance = self._lookup_loader.get(name.lower(), loader=self._loader, templar=self)
if instance is not None:
+ wantlist = kwargs.pop('wantlist', False)
+
from ansible.utils.listify import listify_lookup_plugin_terms
loop_terms = listify_lookup_plugin_terms(terms=args, templar=self, loader=self._loader, fail_on_undefined=True, convert_bare=False)
# safely catch run failures per #5059
@@ -404,8 +407,11 @@ class Templar:
ran = None
if ran:
- from ansible.vars.unsafe_proxy import UnsafeProxy
- ran = UnsafeProxy(",".join(ran))
+ from ansible.vars.unsafe_proxy import UnsafeProxy, wrap_var
+ if wantlist:
+ ran = wrap_var(ran)
+ else:
+ ran = UnsafeProxy(",".join(ran))
return ran
else:
diff --git a/lib/ansible/utils/color.py b/lib/ansible/utils/color.py
index 55060ace04..81a05d749e 100644
--- a/lib/ansible/utils/color.py
+++ b/lib/ansible/utils/color.py
@@ -62,7 +62,8 @@ codeCodes = {
'purple': u'0;35', 'bright red': u'1;31',
'yellow': u'0;33', 'bright purple': u'1;35',
'dark gray': u'1;30', 'bright yellow': u'1;33',
- 'normal': u'0'
+ 'magenta': u'0;35', 'bright magenta': u'1;35',
+ 'normal': u'0' ,
}
def stringc(text, color):
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
index 3d51f17de4..3703c15540 100644
--- a/lib/ansible/utils/display.py
+++ b/lib/ansible/utils/display.py
@@ -145,7 +145,7 @@ class Display:
# characters that are invalid in the user's locale
msg2 = to_unicode(msg2, self._output_encoding(stderr=stderr))
- if color == 'red':
+ if color == C.COLOR_ERROR:
logger.error(msg2)
else:
logger.info(msg2)
@@ -168,7 +168,7 @@ class Display:
def debug(self, msg):
if C.DEFAULT_DEBUG:
debug_lock.acquire()
- self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color='dark gray')
+ self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
debug_lock.release()
def verbose(self, msg, host=None, caplevel=2):
@@ -176,9 +176,9 @@ class Display:
#msg = utils.sanitize_output(msg)
if self.verbosity > caplevel:
if host is None:
- self.display(msg, color='blue')
+ self.display(msg, color=C.COLOR_VERBOSE)
else:
- self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
+ self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, screen_only=True)
def deprecated(self, msg, version=None, removed=False):
''' used to print out a deprecation message.'''
@@ -199,15 +199,20 @@ class Display:
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._deprecations:
- self.display(new_msg.strip(), color='purple', stderr=True)
+ self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[new_msg] = 1
- def warning(self, msg):
- new_msg = "\n[WARNING]: %s" % msg
- wrapped = textwrap.wrap(new_msg, self.columns)
- new_msg = "\n".join(wrapped) + "\n"
+ def warning(self, msg, formatted=False):
+
+ if not formatted:
+ new_msg = "\n[WARNING]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, self.columns)
+ new_msg = "\n".join(wrapped) + "\n"
+ else:
+ new_msg = "\n[WARNING]: \n%s" % msg
+
if new_msg not in self._warns:
- self.display(new_msg, color='bright purple', stderr=True)
+ self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
@@ -256,19 +261,64 @@ class Display:
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
- new_msg = msg
+ new_msg = u"ERROR! " + msg
if new_msg not in self._errors:
- self.display(new_msg, color='red', stderr=True)
+ self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
- def prompt(msg):
+ def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_unicode(prompt_string)
- return input(prompt_string)
+
+ if private:
+ return getpass.getpass(msg)
+ else:
+ return input(prompt_string)
+
+ @classmethod
+ def do_var_prompt(cls, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+
+ result = None
+ if sys.__stdin__.isatty():
+
+ do_prompt = cls.prompt
+
+ if prompt and default is not None:
+ msg = "%s [%s]: " % (prompt, default)
+ elif prompt:
+ msg = "%s: " % prompt
+ else:
+ msg = 'input for %s: ' % varname
+
+ if confirm:
+ while True:
+ result = do_prompt(msg, private)
+ second = do_prompt("confirm " + msg, private)
+ if result == second:
+ break
+ display.display("***** VALUES ENTERED DO NOT MATCH ****")
+ else:
+ result = do_prompt(msg, private)
+ else:
+ result = None
+ display.warning("Not prompting as we are not in interactive mode")
+
+ # if result is false and default is not None
+ if not result and default is not None:
+ result = default
+
+ if encrypt:
+ # Circular import because encrypt needs a display class
+ from ansible.utils.encrypt import do_encrypt
+ result = do_encrypt(result, encrypt, salt_size, salt)
+
+ # handle utf-8 chars
+ result = to_unicode(result, errors='strict')
+ return result
@staticmethod
def _output_encoding(stderr=False):
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
index 7fe83a8fa0..d834737ab5 100644
--- a/lib/ansible/utils/listify.py
+++ b/lib/ansible/utils/listify.py
@@ -31,9 +31,8 @@ __all__ = ['listify_lookup_plugin_terms']
def listify_lookup_plugin_terms(terms, templar, loader, fail_on_undefined=False, convert_bare=True):
if isinstance(terms, string_types):
- stripped = terms.strip()
# TODO: warn/deprecation on bare vars in with_ so we can eventually remove fail on undefined override
- terms = templar.template(terms, convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
+ terms = templar.template(terms.strip(), convert_bare=convert_bare, fail_on_undefined=fail_on_undefined)
else:
terms = templar.template(terms, fail_on_undefined=fail_on_undefined)
diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py
index 4a90c3caca..14a5d03056 100755
--- a/lib/ansible/utils/module_docs.py
+++ b/lib/ansible/utils/module_docs.py
@@ -67,7 +67,7 @@ def get_docstring(filename, verbose=False):
theid = t.id
except AttributeError as e:
# skip errors can happen when trying to use the normal code
- display.warning("Failed to assign id for %t on %s, skipping" % (t, filename))
+ display.warning("Failed to assign id for %s on %s, skipping" % (t, filename))
continue
if 'DOCUMENTATION' in theid:
diff --git a/lib/ansible/utils/module_docs_fragments/eos.py b/lib/ansible/utils/module_docs_fragments/eos.py
new file mode 100644
index 0000000000..bd8d3f510e
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/eos.py
@@ -0,0 +1,91 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(eapi). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task. (cli=22, http=80, https=443).
+ required: false
+ default: 0 (use common port)
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ either the CLI login or the eAPI authentication depending on which
+ transport is used.
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(eapi) transports.
+ required: false
+ default: null
+ authorize:
+ description:
+ - Instructs the module to enter privileged mode on the remote device
+ before sending any commands. If not specified, the device will
+ attempt to execute all commands in non-privileged mode.
+ required: false
+ default: false
+ choices: BOOLEANS
+ auth_pass:
+ description:
+ - Specifies the password to use if required to enter privileged mode
+ on the remote device. If I(authorize) is false, then this argument
+ does nothing
+ required: false
+ default: none
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over cli (ssh) or eapi.
+ required: true
+ default: cli
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL if set to true only when the
+ I(transport) argument is configured as eapi. If the transport
+ argument is not eapi, this value is ignored
+ required: false
+ default: true
+ choices: BOOLEANS
+ provider:
+ description:
+ - Convenience method that allows all M(eos) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+"""
diff --git a/lib/ansible/utils/module_docs_fragments/ios.py b/lib/ansible/utils/module_docs_fragments/ios.py
new file mode 100644
index 0000000000..66ba28ad02
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/ios.py
@@ -0,0 +1,74 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. The port value will default to the well known SSH port
+ of 22
+ required: false
+ default: 22
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ the SSH session
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. The value of I(password) is used to authenticate
+ the SSH session
+ required: false
+ default: null
+ authorize:
+ description:
+ - Instructs the module to enter privileged mode on the remote device
+ before sending any commands. If not specified, the device will
+ attempt to execute all commands in non-privileged mode.
+ required: false
+ default: false
+ choices: BOOLEANS
+ auth_pass:
+ description:
+ - Specifies the password to use if required to enter privileged mode
+ on the remote device. If I(authorize) is false, then this argument
+ does nothing
+ required: false
+ default: none
+ provider:
+ description:
+ - Convenience method that allows all M(ios) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+"""
diff --git a/lib/ansible/utils/module_docs_fragments/iosxr.py b/lib/ansible/utils/module_docs_fragments/iosxr.py
new file mode 100644
index 0000000000..3b9959db47
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/iosxr.py
@@ -0,0 +1,59 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. The port value will default to the well known SSH port
+ of 22
+ required: false
+ default: 22
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ the SSH session
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. The value of I(password) is used to authenticate
+ the SSH session
+ required: false
+ default: null
+ provider:
+ description:
+ - Convenience method that allows all M(iosxr) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+ """
diff --git a/lib/ansible/utils/module_docs_fragments/junos.py b/lib/ansible/utils/module_docs_fragments/junos.py
new file mode 100644
index 0000000000..96627288ca
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/junos.py
@@ -0,0 +1,59 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. The port value will default to the well known SSH port
+ of 22
+ required: false
+ default: 22
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ the SSH session
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. The value of I(password) is used to authenticate
+ the SSH session
+ required: false
+ default: null
+ provider:
+ description:
+ - Convenience method that allows all M(junos) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+"""
diff --git a/lib/ansible/utils/module_docs_fragments/mysql.py b/lib/ansible/utils/module_docs_fragments/mysql.py
new file mode 100644
index 0000000000..5dd1e04f93
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/mysql.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2015 Jonathan Mainguy <jon@soh.re>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard mysql documentation fragment
+ DOCUMENTATION = '''
+options:
+ login_user:
+ description:
+ - The username used to authenticate with
+ required: false
+ default: null
+ login_password:
+ description:
+ - The password used to authenticate with
+ required: false
+ default: null
+ login_host:
+ description:
+ - Host running the database
+ required: false
+ default: localhost
+ login_port:
+ description:
+ - Port of the MySQL server. Requires login_host be defined as other than localhost if login_port is used
+ required: false
+ default: 3306
+ login_unix_socket:
+ description:
+ - The path to a Unix domain socket for local connections
+ required: false
+ default: null
+ config_file:
+ description:
+ - Specify a config file from which user and password are to be read
+ required: false
+ default: '~/.my.cnf'
+ version_added: "2.0"
+ ssl_ca:
+ required: false
+ default: null
+ version_added: "2.0"
+ description:
+ - The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server.
+ ssl_cert:
+ required: false
+ default: null
+ version_added: "2.0"
+ description:
+ - The path to a client public key certificate.
+ ssl_key:
+ required: false
+ default: null
+ version_added: "2.0"
+ description:
+ - The path to the client private key.
+requirements:
+ - MySQLdb
+notes:
+ - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
+ is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
+ is as easy as yum install MySQL-python. (See M(yum).)
+ - Both C(login_password) and C(login_user) are required when you are
+ passing credentials. If none are present, the module will attempt to read
+ the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
+ default login of 'root' with no password.
+'''
diff --git a/lib/ansible/utils/module_docs_fragments/nxos.py b/lib/ansible/utils/module_docs_fragments/nxos.py
new file mode 100644
index 0000000000..26312155c4
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/nxos.py
@@ -0,0 +1,76 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(nxapi). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task. (cli=22, http=80, https=443).
+ required: false
+ default: 0 (use common port)
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ either the CLI login or the nxapi authentication depending on which
+ transport is used.
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(nxapi) transports.
+ required: false
+ default: null
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over cli (ssh) or nxapi.
+ required: true
+ default: cli
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL if set to true only when the
+ I(transport) argument is configured as nxapi. If the transport
+ argument is not nxapi, this value is ignored
+ required: false
+ default: false
+ choices: BOOLEANS
+ provider:
+ description:
+ - Convenience method that allows all M(nxos) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+"""
diff --git a/lib/ansible/utils/module_docs_fragments/openswitch.py b/lib/ansible/utils/module_docs_fragments/openswitch.py
new file mode 100644
index 0000000000..3b3dbcaecc
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/openswitch.py
@@ -0,0 +1,82 @@
+#
+# (c) 2015, Peter Sprygada <psprygada@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+
+ # Standard files documentation fragment
+ DOCUMENTATION = """
+options:
+ host:
+ description:
+ - Specifies the DNS host name or address for connecting to the remote
+ device over the specified transport. The value of host is used as
+ the destination address for the transport. Note this argument
+ does not affect the SSH argument.
+ required: true
+ port:
+ description:
+ - Specifies the port to use when building the connection to the remote
+ device. This value applies to either I(cli) or I(rest). The port
+ value will default to the appropriate transport common port if
+ none is provided in the task. (cli=22, http=80, https=443). Note
+ this argument does not affect the SSH transport.
+ required: false
+ default: 0 (use common port)
+ username:
+ description:
+ - Configures the username to use to authenticate the connection to
+ the remote device. The value of I(username) is used to authenticate
+ either the CLI login or the eAPI authentication depending on which
+ transport is used. Note this argument does not affect the SSH
+ transport.
+ required: true
+ password:
+ description:
+ - Specifies the password to use when authenticating the connection to
+ the remote device. This is a common argument used for either I(cli)
+ or I(rest) transports. Note this argument does not affect the SSH
+ transport
+ required: false
+ default: null
+ transport:
+ description:
+ - Configures the transport connection to use when connecting to the
+ remote device. The transport argument supports connectivity to the
+ device over ssh, cli or REST.
+ required: true
+ default: ssh
+ choices: ['ssh', 'cli', 'rest']
+ use_ssl:
+ description:
+ - Configures the I(transport) to use SSL if set to true only when the
+ I(transport) argument is configured as rest. If the transport
+ argument is not rest, this value is ignored
+ required: false
+ default: true
+ choices: BOOLEANS
+ provider:
+ description:
+ - Convenience method that allows all M(openswitch) arguments to be passed as
+ a dict object. All constraints (required, choices, etc) must be
+ met either by individual arguments or values in this dict.
+ required: false
+ default: null
+
+
+"""
diff --git a/lib/ansible/utils/module_docs_fragments/vca.py b/lib/ansible/utils/module_docs_fragments/vca.py
new file mode 100644
index 0000000000..88cb1b4184
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/vca.py
@@ -0,0 +1,83 @@
+# (c) 2016, Charles Paul <cpaul@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+ # Parameters for VCA modules
+ DOCUMENTATION = """
+options:
+ username:
+ description:
+ - The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
+ required: false
+ default: None
+ aliases: ['user']
+ password:
+ description:
+ - The vca password, if not set the environment variable VCA_PASS is checked for the password
+ required: false
+ default: None
+ aliases: ['pass', 'pwd']
+ org:
+ description:
+ - The org to login to for creating vapp, mostly set when the service_type is vdc.
+ required: false
+ default: None
+ instance_id:
+ description:
+ - The instance id in a vchs environment to be used for creating the vapp
+ required: false
+ default: None
+ host:
+ description:
+ - The authentication host to be used when service type is vcd.
+ required: false
+ default: None
+ api_version:
+ description:
+ - The api version to be used with the vca
+ required: false
+ default: "5.7"
+ service_type:
+ description:
+ - The type of service we are authenticating against
+ required: false
+ default: vca
+ choices: [ "vca", "vchs", "vcd" ]
+ state:
+ description:
+ - if the object should be added or removed
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ verify_certs:
+ description:
+ - If the certificates of the authentication are to be verified
+ required: false
+ default: True
+ vdc_name:
+ description:
+ - The name of the vdc where the gateway is located.
+ required: false
+ default: None
+ gateway_name:
+ description:
+ - The name of the gateway of the vdc where the rule should be added
+ required: false
+ default: gateway
+"""
+
diff --git a/lib/ansible/utils/module_docs_fragments/vmware.py b/lib/ansible/utils/module_docs_fragments/vmware.py
new file mode 100644
index 0000000000..0b69886594
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments/vmware.py
@@ -0,0 +1,37 @@
+# (c) 2016, Charles Paul <cpaul@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ModuleDocFragment(object):
+ # Parameters for VMware modules
+ DOCUMENTATION = '''
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter
+ required: True
+ username:
+ description:
+ - The username of the vSphere vCenter
+ required: True
+ aliases: ['user', 'admin']
+ password:
+ description:
+ - The password of the vSphere vCenter
+ required: True
+ aliases: ['pass', 'pwd']
+'''
diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py
index ffac578243..d8dc423426 100644
--- a/lib/ansible/utils/path.py
+++ b/lib/ansible/utils/path.py
@@ -18,8 +18,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
-import stat
-from time import sleep
from errno import EEXIST
__all__ = ['unfrackpath']
diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py
index a6e42cefa1..4d44a068c2 100644
--- a/lib/ansible/utils/vars.py
+++ b/lib/ansible/utils/vars.py
@@ -20,7 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
-from json import JSONEncoder
+from json import dumps
from collections import MutableMapping
from ansible.compat.six import iteritems, string_types
@@ -28,7 +28,8 @@ from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
-from ansible.utils.unicode import to_unicode
+from ansible.utils.unicode import to_unicode, to_str
+
def _validate_mutable_mappings(a, b):
"""
@@ -43,9 +44,15 @@ def _validate_mutable_mappings(a, b):
# a variable number of arguments instead.
if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
- raise AnsibleError("failed to combine variables, expected dicts but"
- " got a '{0}' and a '{1}'".format(
- a.__class__.__name__, b.__class__.__name__))
+ myvars = []
+ for x in [a, b]:
+ try:
+ myvars.append(dumps(x))
+ except:
+ myvars.append(to_str(x))
+ raise AnsibleError("failed to combine variables, expected dicts but got a '{0}' and a '{1}': \n{2}\n{3}".format(
+ a.__class__.__name__, b.__class__.__name__, myvars[0], myvars[1])
+ )
def combine_vars(a, b):
"""
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
index 26f52adfb0..4135ff1768 100644
--- a/lib/ansible/vars/__init__.py
+++ b/lib/ansible/vars/__init__.py
@@ -34,7 +34,7 @@ except ImportError:
from ansible import constants as C
from ansible.cli import CLI
-from ansible.compat.six import string_types
+from ansible.compat.six import string_types, text_type
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable, AnsibleFileNotFound
from ansible.inventory.host import Host
from ansible.plugins import lookup_loader
@@ -43,7 +43,6 @@ from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.vars import combine_vars
-from ansible.vars.hostvars import HostVars
from ansible.vars.unsafe_proxy import wrap_var
try:
@@ -101,13 +100,14 @@ class VariableManager:
def __getstate__(self):
data = dict(
- fact_cache = self._fact_cache.copy(),
- np_fact_cache = self._nonpersistent_fact_cache.copy(),
- vars_cache = self._vars_cache.copy(),
- extra_vars = self._extra_vars.copy(),
- host_vars_files = self._host_vars_files.copy(),
- group_vars_files = self._group_vars_files.copy(),
+ fact_cache = self._fact_cache,
+ np_fact_cache = self._nonpersistent_fact_cache,
+ vars_cache = self._vars_cache,
+ extra_vars = self._extra_vars,
+ host_vars_files = self._host_vars_files,
+ group_vars_files = self._group_vars_files,
omit_token = self._omit_token,
+ #inventory = self._inventory,
)
return data
@@ -119,7 +119,7 @@ class VariableManager:
self._host_vars_files = data.get('host_vars_files', defaultdict(dict))
self._group_vars_files = data.get('group_vars_files', defaultdict(dict))
self._omit_token = data.get('omit_token', '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest())
- self._inventory = None
+ self._inventory = data.get('inventory', None)
def _get_cache_entry(self, play=None, host=None, task=None):
play_id = "NONE"
@@ -170,7 +170,8 @@ class VariableManager:
return data
-
+ # FIXME: include_hostvars is no longer used, and should be removed, but
+ # all other areas of code calling get_vars need to be fixed too
def get_vars(self, loader, play=None, host=None, task=None, include_hostvars=True, include_delegate_to=True, use_cache=True):
'''
Returns the variables, with optional "context" given via the parameters
@@ -233,7 +234,7 @@ class VariableManager:
for item in data:
all_vars = combine_vars(all_vars, item)
- for group in host.get_groups():
+ for group in sorted(host.get_groups(), key=lambda g: g.depth):
if group.name in self._group_vars_files and group.name != 'all':
for data in self._group_vars_files[group.name]:
data = preprocess_vars(data)
@@ -305,6 +306,7 @@ class VariableManager:
if not C.DEFAULT_PRIVATE_ROLE_VARS:
for role in play.get_roles():
+ all_vars = combine_vars(all_vars, role.get_role_params())
all_vars = combine_vars(all_vars, role.get_vars(include_params=False))
if task:
@@ -316,6 +318,12 @@ class VariableManager:
all_vars = combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
all_vars = combine_vars(all_vars, self._nonpersistent_fact_cache.get(host.name, dict()))
+ # special case for include tasks, where the include params
+ # may be specified in the vars field for the task, which should
+ # have higher precedence than the vars/np facts above
+ if task:
+ all_vars = combine_vars(all_vars, task.get_include_params())
+
all_vars = combine_vars(all_vars, self._extra_vars)
all_vars = combine_vars(all_vars, magic_variables)
@@ -333,6 +341,8 @@ class VariableManager:
all_vars['ansible_delegated_vars'] = self._get_delegated_vars(loader, play, task, all_vars)
#VARIABLE_CACHE[cache_entry] = all_vars
+ if task or play:
+ all_vars['vars'] = all_vars.copy()
debug("done with get_vars()")
return all_vars
@@ -358,23 +368,14 @@ class VariableManager:
variables['groups'] = dict()
for (group_name, group) in iteritems(self._inventory.groups):
variables['groups'][group_name] = [h.name for h in group.get_hosts()]
-
- if include_hostvars:
- hostvars_cache_entry = self._get_cache_entry(play=play)
- if hostvars_cache_entry in HOSTVARS_CACHE:
- hostvars = HOSTVARS_CACHE[hostvars_cache_entry]
- else:
- hostvars = HostVars(play=play, inventory=self._inventory, loader=loader, variable_manager=self)
- HOSTVARS_CACHE[hostvars_cache_entry] = hostvars
- variables['hostvars'] = hostvars
- variables['vars'] = hostvars[host.get_name()]
-
if play:
variables['role_names'] = [r._role_name for r in play.roles]
if task:
if task._role:
+ variables['role_name'] = task._role.get_name()
variables['role_path'] = task._role._role_path
+ variables['role_uuid'] = text_type(task._role._uuid)
if self._inventory is not None:
variables['inventory_dir'] = self._inventory.basedir()
@@ -404,7 +405,7 @@ class VariableManager:
items = []
if task.loop is not None:
if task.loop in lookup_loader:
- #TODO: remove convert_bare true and deprecate this in with_
+ #TODO: remove convert_bare true and deprecate this in with_
try:
loop_terms = listify_lookup_plugin_terms(terms=task.loop_args, templar=templar, loader=loader, fail_on_undefined=True, convert_bare=True)
except AnsibleUndefinedVariable as e:
@@ -433,8 +434,15 @@ class VariableManager:
continue
# a dictionary of variables to use if we have to create a new host below
+ # we set the default port based on the default transport here, to make sure
+ # we use the proper default for windows
+ new_port = C.DEFAULT_REMOTE_PORT
+ if C.DEFAULT_TRANSPORT == 'winrm':
+ new_port = 5986
+
new_delegated_host_vars = dict(
ansible_host=delegated_host_name,
+ ansible_port=new_port,
ansible_user=C.DEFAULT_REMOTE_USER,
ansible_connection=C.DEFAULT_TRANSPORT,
)
@@ -597,4 +605,3 @@ class VariableManager:
if host_name not in self._vars_cache:
self._vars_cache[host_name] = dict()
self._vars_cache[host_name][varname] = value
-
diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
index f56f542574..afa00ec8a4 100644
--- a/lib/ansible/vars/hostvars.py
+++ b/lib/ansible/vars/hostvars.py
@@ -46,43 +46,29 @@ __all__ = ['HostVars']
class HostVars(collections.Mapping):
''' A special view of vars_cache that adds values from the inventory when needed. '''
- def __init__(self, play, inventory, variable_manager, loader):
+ def __init__(self, inventory, variable_manager, loader):
self._lookup = dict()
+ self._inventory = inventory
self._loader = loader
- self._play = play
self._variable_manager = variable_manager
self._cached_result = dict()
- hosts = inventory.get_hosts(ignore_limits_and_restrictions=True)
-
- # check to see if localhost is in the hosts list, as we
- # may have it referenced via hostvars but if created implicitly
- # it doesn't sow up in the hosts list
- has_localhost = False
- for host in hosts:
- if host.name in C.LOCALHOST:
- has_localhost = True
- break
+ def set_variable_manager(self, variable_manager):
+ self._variable_manager = variable_manager
- if not has_localhost:
- new_host = Host(name='localhost')
- new_host.set_variable("ansible_python_interpreter", sys.executable)
- new_host.set_variable("ansible_connection", "local")
- new_host.address = '127.0.0.1'
- hosts.append(new_host)
+ def set_inventory(self, inventory):
+ self._inventory = inventory
- for host in hosts:
- self._lookup[host.name] = host
+ def _find_host(self, host_name):
+ return self._inventory.get_host(host_name)
def __getitem__(self, host_name):
+ host = self._find_host(host_name)
+ if host is None:
+ raise j2undefined
- if host_name not in self._lookup:
- return j2undefined
+ data = self._variable_manager.get_vars(loader=self._loader, host=host, include_hostvars=False)
- host = self._lookup.get(host_name)
- data = self._variable_manager.get_vars(loader=self._loader, host=host, play=self._play, include_hostvars=False)
-
- # Using cache in order to avoid template call
sha1_hash = sha1(str(data).encode('utf-8')).hexdigest()
if sha1_hash in self._cached_result:
result = self._cached_result[sha1_hash]
@@ -92,30 +78,22 @@ class HostVars(collections.Mapping):
self._cached_result[sha1_hash] = result
return result
+ def set_host_variable(self, host, varname, value):
+ self._variable_manager.set_host_variable(host, varname, value)
+
+ def set_nonpersistent_facts(self, host, facts):
+ self._variable_manager.set_nonpersistent_facts(host, facts)
+
+ def set_host_facts(self, host, facts):
+ self._variable_manager.set_host_facts(host, facts)
+
def __contains__(self, host_name):
- item = self.get(host_name)
- if item and item is not j2undefined:
- return True
- return False
+ return self._find_host(host_name) is not None
def __iter__(self):
- for host in self._lookup:
+ for host in self._inventory.get_hosts(ignore_limits_and_restrictions=True):
yield host
def __len__(self):
- return len(self._lookup)
-
- def __getstate__(self):
- return dict(
- loader=self._loader,
- lookup=self._lookup,
- play=self._play,
- var_manager=self._variable_manager,
- )
-
- def __setstate__(self, data):
- self._play = data.get('play')
- self._loader = data.get('loader')
- self._lookup = data.get('lookup')
- self._variable_manager = data.get('var_manager')
- self._cached_result = dict()
+ return len(self._inventory.get_hosts(ignore_limits_and_restrictions=True))
+
diff --git a/packaging/debian/control b/packaging/debian/control
index 462fd5caf1..13f0c5b42d 100644
--- a/packaging/debian/control
+++ b/packaging/debian/control
@@ -8,7 +8,7 @@ Homepage: http://ansible.github.com/
Package: ansible
Architecture: all
-Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), sshpass, ${misc:Depends}
+Depends: python, python-support (>= 0.90), python-jinja2, python-yaml, python-paramiko, python-httplib2, python-six, python-crypto (>= 2.6), python-setuptools, sshpass, ${misc:Depends}
Description: A radically simple IT automation platform
A radically simple IT automation platform that makes your applications and
systems easier to deploy. Avoid writing scripts or custom code to deploy and
diff --git a/packaging/port/sysutils/ansible/Makefile b/packaging/port/sysutils/ansible/Makefile
index 10016f9908..ef71c95c6c 100644
--- a/packaging/port/sysutils/ansible/Makefile
+++ b/packaging/port/sysutils/ansible/Makefile
@@ -1,7 +1,7 @@
# $FreeBSD$
PORTNAME= ansible
-PORTVERSION= 2.0
+PORTVERSION= 2.1
PORTREVISION= 1
CATEGORIES= python net-mgmt sysutils
MASTER_SITES= http://releases.ansible.com/ansible/
diff --git a/test/code-smell/required-and-default-attributes.sh b/test/code-smell/required-and-default-attributes.sh
new file mode 100755
index 0000000000..9822a15597
--- /dev/null
+++ b/test/code-smell/required-and-default-attributes.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+BASEDIR=${1-"lib/ansible"}
+cd "$BASEDIR"
+grep -r FieldAttribute . |grep 'default' | grep 'required'
+if test $? -eq 0 ; then
+ exit 1
+fi
+exit 0
+
diff --git a/test/integration/Makefile b/test/integration/Makefile
index a2d91f96f1..dcd30f0b83 100644
--- a/test/integration/Makefile
+++ b/test/integration/Makefile
@@ -193,5 +193,5 @@ test_lookup_paths:
no_log:
# This test expects 7 loggable vars and 0 non loggable ones, if either mismatches it fails, run the ansible-playbook command to debug
- [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk -e 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ]
+ [ "$$(ansible-playbook no_log_local.yml -i $(INVENTORY) -vvvvv | awk --source 'BEGIN { logme = 0; nolog = 0; } /LOG_ME/ { logme += 1;} /DO_NOT_LOG/ { nolog += 1;} END { printf "%d/%d", logme, nolog; }')" = "6/0" ]
diff --git a/test/integration/destructive.yml b/test/integration/destructive.yml
index b8f56d113b..3e8cca385e 100644
--- a/test/integration/destructive.yml
+++ b/test/integration/destructive.yml
@@ -17,4 +17,5 @@
- { role: test_mysql_db, tags: test_mysql_db}
- { role: test_mysql_user, tags: test_mysql_user}
- { role: test_mysql_variables, tags: test_mysql_variables}
- - { role: test_docker, tags: test_docker}
+ - { role: test_docker, tags: test_docker, when: ansible_distribution != "Fedora" }
+ - { role: test_zypper, tags: test_zypper}
diff --git a/test/integration/non_destructive.yml b/test/integration/non_destructive.yml
index 668b20de95..ee30fa2315 100644
--- a/test/integration/non_destructive.yml
+++ b/test/integration/non_destructive.yml
@@ -41,6 +41,7 @@
- { role: test_get_url, tags: test_get_url }
- { role: test_embedded_module, tags: test_embedded_module }
- { role: test_uri, tags: test_uri }
+ - { role: test_add_host, tags: test_add_host }
# Turn on test_binary when we start testing v2
#- { role: test_binary, tags: test_binary }
diff --git a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml
index 341392b00c..79584893ed 100644
--- a/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml
+++ b/test/integration/roles/ec2_elb_instance_setup/tasks/main.yml
@@ -5,7 +5,12 @@
# install apache on the ec2 instances
- name: install apache on new ec2 instances
- yum: name=httpd
+ package: name=httpd
+ when: ansible_os_family == 'RedHat'
+
+- name: install apache on new ec2 instances
+ package: name=apache
+ when: ansible_os_family == 'Debian'
- name: start and enable apache
service: name=httpd state=started enabled=yes
diff --git a/test/integration/roles/prepare_tests/tasks/main.yml b/test/integration/roles/prepare_tests/tasks/main.yml
index 3641880baa..7983ea5236 100644
--- a/test/integration/roles/prepare_tests/tasks/main.yml
+++ b/test/integration/roles/prepare_tests/tasks/main.yml
@@ -22,6 +22,7 @@
always_run: True
tags:
- prepare
+ when: clean_working_dir|default("yes")|bool
- name: create the test directory
file: name={{output_dir}} state=directory
diff --git a/test/integration/roles/setup_mysql_db/tasks/main.yml b/test/integration/roles/setup_mysql_db/tasks/main.yml
index a8010e7138..612d94f6d1 100644
--- a/test/integration/roles/setup_mysql_db/tasks/main.yml
+++ b/test/integration/roles/setup_mysql_db/tasks/main.yml
@@ -31,6 +31,11 @@
with_items: mysql_packages
when: ansible_pkg_mgr == 'yum'
+- name: install mysqldb_test rpm dependencies
+ dnf: name={{ item }} state=latest
+ with_items: mysql_packages
+ when: ansible_pkg_mgr == 'dnf'
+
- name: install mysqldb_test debian dependencies
apt: name={{ item }} state=latest
with_items: mysql_packages
diff --git a/test/integration/roles/setup_postgresql_db/tasks/main.yml b/test/integration/roles/setup_postgresql_db/tasks/main.yml
index fbcc9cab72..c25318a2ad 100644
--- a/test/integration/roles/setup_postgresql_db/tasks/main.yml
+++ b/test/integration/roles/setup_postgresql_db/tasks/main.yml
@@ -9,9 +9,9 @@
# Make sure we start fresh
- name: remove rpm dependencies for postgresql test
- yum: name={{ item }} state=absent
+ package: name={{ item }} state=absent
with_items: postgresql_packages
- when: ansible_pkg_mgr == 'yum'
+ when: ansible_os_family == "RedHat"
- name: remove dpkg dependencies for postgresql test
apt: name={{ item }} state=absent
@@ -35,9 +35,9 @@
when: ansible_os_family == "Debian"
- name: install rpm dependencies for postgresql test
- yum: name={{ item }} state=latest
+ package: name={{ item }} state=latest
with_items: postgresql_packages
- when: ansible_pkg_mgr == 'yum'
+ when: ansible_os_family == "RedHat"
- name: install dpkg dependencies for postgresql test
apt: name={{ item }} state=latest
diff --git a/test/integration/roles/test_add_host/tasks/main.yml b/test/integration/roles/test_add_host/tasks/main.yml
new file mode 100644
index 0000000000..cafd6bd4eb
--- /dev/null
+++ b/test/integration/roles/test_add_host/tasks/main.yml
@@ -0,0 +1,39 @@
+# test code for the add_host action
+# (c) 2015, Matt Davis <mdavis@ansible.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- name: add a host to the runtime inventory
+ add_host:
+ name: newdynamichost
+ groups: newdynamicgroup
+ a_var: from add_host
+
+- debug: msg={{hostvars['newdynamichost'].group_names}}
+
+- name: ensure that dynamically-added host is visible via hostvars, groups, etc (there are several caches that could break this)
+ assert:
+ that:
+ - hostvars['bogushost'] is not defined # there was a bug where an undefined host was a "type" instead of an instance- ensure this works before we rely on it
+ - hostvars['newdynamichost'] is defined
+ - hostvars['newdynamichost'].group_names is defined
+ - "'newdynamicgroup' in hostvars['newdynamichost'].group_names"
+ - hostvars['newdynamichost']['bogusvar'] is not defined
+ - hostvars['newdynamichost']['a_var'] is defined
+ - hostvars['newdynamichost']['a_var'] == 'from add_host'
+ - groups['bogusgroup'] is not defined # same check as above to ensure that bogus groups are undefined...
+ - groups['newdynamicgroup'] is defined
+ - "'newdynamichost' in groups['newdynamicgroup']"
diff --git a/test/integration/roles/test_apt/tasks/main.yml b/test/integration/roles/test_apt/tasks/main.yml
index 8976087371..552b543d2d 100644
--- a/test/integration/roles/test_apt/tasks/main.yml
+++ b/test/integration/roles/test_apt/tasks/main.yml
@@ -1,4 +1,3 @@
-# test code for the yum module
# (c) 2014, James Tanner <tanner.jc@gmail.com>
# This file is part of Ansible
diff --git a/test/integration/roles/test_apt_repository/tasks/apt.yml b/test/integration/roles/test_apt_repository/tasks/apt.yml
index 49d13bc52a..9c8e3ab447 100644
--- a/test/integration/roles/test_apt_repository/tasks/apt.yml
+++ b/test/integration/roles/test_apt_repository/tasks/apt.yml
@@ -2,6 +2,7 @@
- set_fact:
test_ppa_name: 'ppa:menulibre-dev/devel'
+ test_ppa_filename: 'menulibre-dev'
test_ppa_spec: 'deb http://ppa.launchpad.net/menulibre-dev/devel/ubuntu {{ansible_distribution_release}} main'
test_ppa_key: 'A7AD98A1' # http://keyserver.ubuntu.com:11371/pks/lookup?search=0xD06AAF4C11DAB86DF421421EFE6B20ECA7AD98A1&op=index
@@ -145,6 +146,47 @@
apt_key: id='{{test_ppa_key}}' state=absent
#
+# TEST: apt_repository: repo=<spec> filename=<filename>
+#
+- include: 'cleanup.yml'
+
+- name: 'record apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_before
+
+- name: 'name=<spec> filename=<filename> (expect: pass)'
+ apt_repository: repo='{{test_ppa_spec}}' filename='{{test_ppa_filename}}' state=present
+ register: result
+
+- assert:
+ that:
+ - 'result.changed'
+ - 'result.state == "present"'
+ - 'result.repo == "{{test_ppa_spec}}"'
+
+- name: 'examine source file'
+ stat: path='/etc/apt/sources.list.d/{{test_ppa_filename}}.list'
+ register: source_file
+
+- name: 'assert source file exists'
+ assert:
+ that:
+ - 'source_file.stat.exists == True'
+
+- name: 'examine apt cache mtime'
+ stat: path='/var/cache/apt/pkgcache.bin'
+ register: cache_after
+
+- name: 'assert the apt cache did change'
+ assert:
+ that:
+ - 'cache_before.stat.mtime != cache_after.stat.mtime'
+
+# When installing a repo with the spec, the key is *NOT* added
+- name: 'ensure ppa key is absent (expect: pass)'
+ apt_key: id='{{test_ppa_key}}' state=absent
+
+#
# TEARDOWN
#
- include: 'cleanup.yml'
diff --git a/test/integration/roles/test_cs_instance/tasks/absent.yml b/test/integration/roles/test_cs_instance/tasks/absent.yml
index bafb3ec9e7..eeab47a61d 100644
--- a/test/integration/roles/test_cs_instance/tasks/absent.yml
+++ b/test/integration/roles/test_cs_instance/tasks/absent.yml
@@ -21,3 +21,23 @@
that:
- instance|success
- not instance|changed
+
+- name: test recover to stopped state and update a deleted instance
+ cs_instance:
+ name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ state: stopped
+ register: instance
+- name: verify test recover to stopped state and update a deleted instance
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.state == "Stopped"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+
+# force expunge, only works with admin permissions
+- cs_instance:
+ name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: expunged
+ failed_when: false
diff --git a/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml
new file mode 100644
index 0000000000..35fa6dff34
--- /dev/null
+++ b/test/integration/roles/test_cs_instance/tasks/absent_display_name.yml
@@ -0,0 +1,43 @@
+---
+- name: test destroy instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: absent
+ register: instance
+- name: verify destroy instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.state == "Destroyed"
+
+- name: test destroy instance with display_name idempotence
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: absent
+ register: instance
+- name: verify destroy instance with display_name idempotence
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+
+- name: test recover to stopped state and update a deleted instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ state: stopped
+ register: instance
+- name: verify test recover to stopped state and update a deleted instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.state == "Stopped"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+
+# force expunge, only works with admin permissions
+- cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: expunged
+ failed_when: false
diff --git a/test/integration/roles/test_cs_instance/tasks/cleanup.yml b/test/integration/roles/test_cs_instance/tasks/cleanup.yml
index 63192dbd60..e6b6550dfa 100644
--- a/test/integration/roles/test_cs_instance/tasks/cleanup.yml
+++ b/test/integration/roles/test_cs_instance/tasks/cleanup.yml
@@ -28,9 +28,3 @@
assert:
that:
- sg|success
-
-# force expunge, only works with admin permissions
-- cs_instance:
- name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
- state: expunged
- failed_when: false
diff --git a/test/integration/roles/test_cs_instance/tasks/main.yml b/test/integration/roles/test_cs_instance/tasks/main.yml
index d1a67e1781..d6475a4766 100644
--- a/test/integration/roles/test_cs_instance/tasks/main.yml
+++ b/test/integration/roles/test_cs_instance/tasks/main.yml
@@ -4,3 +4,8 @@
- include: tags.yml
- include: absent.yml
- include: cleanup.yml
+
+- include: setup.yml
+- include: present_display_name.yml
+- include: absent_display_name.yml
+- include: cleanup.yml
diff --git a/test/integration/roles/test_cs_instance/tasks/present.yml b/test/integration/roles/test_cs_instance/tasks/present.yml
index 10242a57fd..ad3d391ef9 100644
--- a/test/integration/roles/test_cs_instance/tasks/present.yml
+++ b/test/integration/roles/test_cs_instance/tasks/present.yml
@@ -1,4 +1,12 @@
---
+- name: setup instance to be absent
+ cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent
+ register: instance
+- name: verify instance to be absent
+ assert:
+ that:
+ - instance|success
+
- name: test create instance
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -21,7 +29,6 @@
- instance.ssh_key == "{{ cs_resource_prefix }}-sshkey"
- not instance.tags
-
- name: test create instance idempotence
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -44,7 +51,6 @@
- instance.ssh_key == "{{ cs_resource_prefix }}-sshkey"
- not instance.tags
-
- name: test running instance not updated
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -60,7 +66,6 @@
- instance.service_offering == "{{ test_cs_instance_offering_1 }}"
- instance.state == "Running"
-
- name: test stopping instance
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -76,7 +81,6 @@
- instance.service_offering == "{{ test_cs_instance_offering_1 }}"
- instance.state == "Stopped"
-
- name: test stopping instance idempotence
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -89,7 +93,6 @@
- not instance|changed
- instance.state == "Stopped"
-
- name: test updating stopped instance
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -106,7 +109,6 @@
- instance.service_offering == "{{ test_cs_instance_offering_2 }}"
- instance.state == "Stopped"
-
- name: test starting instance
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -122,7 +124,6 @@
- instance.service_offering == "{{ test_cs_instance_offering_2 }}"
- instance.state == "Running"
-
- name: test starting instance idempotence
cs_instance:
name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
@@ -133,6 +134,9 @@
that:
- instance|success
- not instance|changed
+ - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_2 }}"
- instance.state == "Running"
- name: test force update running instance
@@ -147,7 +151,7 @@
- instance|success
- instance|changed
- instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
- - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}"
- instance.service_offering == "{{ test_cs_instance_offering_1 }}"
- instance.state == "Running"
@@ -163,6 +167,21 @@
- instance|success
- not instance|changed
- instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
- - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}"
- instance.service_offering == "{{ test_cs_instance_offering_1 }}"
- instance.state == "Running"
+
+- name: test restore instance
+ cs_instance:
+ name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ template: "{{ test_cs_instance_template }}"
+ state: restored
+ register: instance
+- name: verify restore instance
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.display_name == "{{ cs_resource_prefix }}-display-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
diff --git a/test/integration/roles/test_cs_instance/tasks/present_display_name.yml b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml
new file mode 100644
index 0000000000..c1882149d9
--- /dev/null
+++ b/test/integration/roles/test_cs_instance/tasks/present_display_name.yml
@@ -0,0 +1,176 @@
+---
+- name: setup instance with display_name to be absent
+ cs_instance: display_name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent
+ register: instance
+- name: verify instance with display_name to be absent
+ assert:
+ that:
+ - instance|success
+
+- name: test create instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ template: "{{ test_cs_instance_template }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ affinity_group: "{{ cs_resource_prefix }}-ag"
+ security_group: "{{ cs_resource_prefix }}-sg"
+ ssh_key: "{{ cs_resource_prefix }}-sshkey"
+ tags: []
+ register: instance
+- name: verify create instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Running"
+ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey"
+ - not instance.tags
+
+- name: test create instance with display_name idempotence
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ template: "{{ test_cs_instance_template }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ affinity_group: "{{ cs_resource_prefix }}-ag"
+ security_group: "{{ cs_resource_prefix }}-sg"
+ ssh_key: "{{ cs_resource_prefix }}-sshkey"
+ tags: []
+ register: instance
+- name: verify create instance with display_name idempotence
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Running"
+ - instance.ssh_key == "{{ cs_resource_prefix }}-sshkey"
+ - not instance.tags
+
+- name: test running instance with display_name not updated
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_2 }}"
+ register: instance
+- name: verify running instance with display_name not updated
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Running"
+
+- name: test stopping instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: stopped
+ register: instance
+- name: verify stopping instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Stopped"
+
+- name: test stopping instance with display_name idempotence
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: stopped
+ register: instance
+- name: verify stopping instance idempotence
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+ - instance.state == "Stopped"
+
+- name: test updating stopped instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_2 }}"
+ register: instance
+- name: verify updating stopped instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_2 }}"
+ - instance.state == "Stopped"
+
+- name: test starting instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: started
+ register: instance
+- name: verify starting instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_2 }}"
+ - instance.state == "Running"
+
+- name: test starting instance with display_name idempotence
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ state: started
+ register: instance
+- name: verify starting instance with display_name idempotence
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_2 }}"
+ - instance.state == "Running"
+
+- name: test force update running instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ force: true
+ register: instance
+- name: verify force update running instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Running"
+
+- name: test force update running instance with display_name idempotence
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ service_offering: "{{ test_cs_instance_offering_1 }}"
+ force: true
+ register: instance
+- name: verify force update running instance with display_name idempotence
+ assert:
+ that:
+ - instance|success
+ - not instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
+ - instance.state == "Running"
+
+- name: test restore instance with display_name
+ cs_instance:
+ display_name: "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ template: "{{ test_cs_instance_template }}"
+ state: restored
+ register: instance
+- name: verify restore instance with display_name
+ assert:
+ that:
+ - instance|success
+ - instance|changed
+ - instance.display_name == "{{ cs_resource_prefix }}-vm-{{ instance_number }}"
+ - instance.service_offering == "{{ test_cs_instance_offering_1 }}"
diff --git a/test/integration/roles/test_cs_instance/tasks/setup.yml b/test/integration/roles/test_cs_instance/tasks/setup.yml
index 32f3ff13e2..0039ce8f1b 100644
--- a/test/integration/roles/test_cs_instance/tasks/setup.yml
+++ b/test/integration/roles/test_cs_instance/tasks/setup.yml
@@ -22,11 +22,3 @@
assert:
that:
- sg|success
-
-- name: setup instance to be absent
- cs_instance: name={{ cs_resource_prefix }}-vm-{{ instance_number }} state=absent
- register: instance
-- name: verify instance to be absent
- assert:
- that:
- - instance|success
diff --git a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
index 3ba234ecff..c25821c3be 100644
--- a/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
+++ b/test/integration/roles/test_docker/tasks/docker-setup-rht.yml
@@ -1,18 +1,17 @@
-- name: Install docker packages (yum)
- yum:
+- name: Install docker packages (rht family)
+ package:
state: present
name: docker-io,docker-registry,python-docker-py,nginx
-- name: Install netcat
- yum:
+- name: Install netcat (Fedora)
+ package:
state: present
name: nmap-ncat
- # RHEL7 as well...
- when: ansible_distribution == 'Fedora'
+ when: ansible_distribution == 'Fedora' or (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('>=', 7))
-- name: Install netcat
- yum:
+- name: Install netcat (RHEL)
+ package:
state: present
name: nc
- when: ansible_distribution != 'Fedora'
+ when: ansible_distribution != 'Fedora' and (ansible_os_family == 'RedHat' and ansible_distribution_version|version_compare('<', 7))
diff --git a/test/integration/roles/test_docker/tasks/docker-tests.yml b/test/integration/roles/test_docker/tasks/docker-tests.yml
index 33ffe6c70c..14e23f72dd 100644
--- a/test/integration/roles/test_docker/tasks/docker-tests.yml
+++ b/test/integration/roles/test_docker/tasks/docker-tests.yml
@@ -8,7 +8,6 @@
image: busybox
state: present
pull: missing
- docker_api_version: "1.14"
- name: Run a small script in busybox
docker:
@@ -17,22 +16,12 @@
pull: always
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
- docker_api_version: "1.14"
-
-- name: Get the docker container id
- shell: "docker ps | grep busybox | awk '{ print $1 }'"
- register: container_id
- name: Get the docker container ip
- shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
- register: container_ip
-
-- name: Pause a few moments because docker is not reliable
- pause:
- seconds: 40
+ set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}"
- name: Try to access the server
- shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ shell: "echo 'world' | nc {{ container_ip }} 2000"
register: docker_output
- name: check that the script ran
@@ -49,22 +38,12 @@
TEST: hello
command: '/bin/sh -c "nc -l -p 2000 -e xargs -n1 echo $TEST"'
detach: True
- docker_api_version: "1.14"
-
-- name: Get the docker container id
- shell: "docker ps | grep busybox | awk '{ print $1 }'"
- register: container_id
- name: Get the docker container ip
- shell: "docker inspect {{ container_id.stdout_lines[0] }} | grep IPAddress | awk -F '\"' '{ print $4 }'"
- register: container_ip
-
-- name: Pause a few moments because docker is not reliable
- pause:
- seconds: 40
+ set_fact: container_ip="{{docker_containers[0].NetworkSettings.IPAddress}}"
- name: Try to access the server
- shell: "echo 'world' | nc {{ container_ip.stdout_lines[0] }} 2000"
+ shell: "echo 'world' | nc {{ container_ip }} 2000"
register: docker_output
- name: check that the script ran
@@ -73,7 +52,7 @@
- "'hello world' in docker_output.stdout_lines"
- name: Remove containers
- shell: "docker rm $(docker ps -aq)"
+ shell: "docker rm -f $(docker ps -aq)"
- name: Remove all images from the local docker
shell: "docker rmi -f $(docker images -q)"
diff --git a/test/integration/roles/test_docker/tasks/main.yml b/test/integration/roles/test_docker/tasks/main.yml
index 2ea15644d5..76b3fa7070 100644
--- a/test/integration/roles/test_docker/tasks/main.yml
+++ b/test/integration/roles/test_docker/tasks/main.yml
@@ -3,7 +3,7 @@
#- include: docker-setup-rht.yml
# Packages on RHEL and CentOS 7 are broken, broken, broken. Revisit when
# they've got that sorted out
- # CentOS 6 currently broken by conflicting files in pyhton-backports and python-backports-ssl_match_hostname
+ # CentOS 6 currently broken by conflicting files in python-backports and python-backports-ssl_match_hostname
#when: ansible_distribution in ['RedHat', 'CentOS'] and ansible_lsb.major_release|int == 6
# python-docker isn't available until 14.10. Revist at the next Ubuntu LTS
diff --git a/test/integration/roles/test_docker/tasks/registry-tests.yml b/test/integration/roles/test_docker/tasks/registry-tests.yml
index 57b4d25277..1ef330da5f 100644
--- a/test/integration/roles/test_docker/tasks/registry-tests.yml
+++ b/test/integration/roles/test_docker/tasks/registry-tests.yml
@@ -19,11 +19,8 @@
- name: Push docker image into the private registry
command: "docker push localhost:5000/mine"
-- name: Remove containers
- shell: "docker rm $(docker ps -aq)"
-
- name: Remove all images from the local docker
- shell: "docker rmi -f $(docker images -q)"
+ shell: "docker rmi -f {{image_id.stdout_lines[0]}}"
- name: Get number of images in docker
command: "docker images"
@@ -41,7 +38,6 @@
state: present
pull: missing
insecure_registry: True
- docker_api_version: "1.14"
- name: Run a small script in the new image
docker:
@@ -51,7 +47,6 @@
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
insecure_registry: True
- docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep mine | awk '{ print $1 }'"
@@ -76,8 +71,9 @@
- name: Remove containers
- shell: "docker rm $(docker ps -aq)"
+ shell: "docker rm -f $(docker ps -aq)"
+- shell: docker images -q
- name: Remove all images from the local docker
shell: "docker rmi -f $(docker images -q)"
@@ -157,7 +153,6 @@
state: running
command: "nc -l -p 2000 -e xargs -n1 echo hello"
detach: True
- docker_api_version: "1.14"
- name: Get the docker container id
shell: "docker ps | grep mine | awk '{ print $1 }'"
diff --git a/test/integration/roles/test_filters/tasks/main.yml b/test/integration/roles/test_filters/tasks/main.yml
index a94bb8c655..6d75c0d81c 100644
--- a/test/integration/roles/test_filters/tasks/main.yml
+++ b/test/integration/roles/test_filters/tasks/main.yml
@@ -68,3 +68,12 @@
- '"0.10 GB" == 102400000|human_readable(unit="G")'
- '"0.10 Gb" == 102400000|human_readable(isbits=True, unit="G")'
+- name: Container lookups with extract
+ assert:
+ that:
+ - "'x' == [0]|map('extract',['x','y'])|list|first"
+ - "'y' == [1]|map('extract',['x','y'])|list|first"
+ - "42 == ['x']|map('extract',{'x':42,'y':31})|list|first"
+ - "31 == ['x','y']|map('extract',{'x':42,'y':31})|list|last"
+ - "'local' == ['localhost']|map('extract',hostvars,'ansible_connection')|list|first"
+ - "'local' == ['localhost']|map('extract',hostvars,['ansible_connection'])|list|first"
diff --git a/test/integration/roles/test_get_url/tasks/main.yml b/test/integration/roles/test_get_url/tasks/main.yml
index 6e3842f6ab..9ed0549ec4 100644
--- a/test/integration/roles/test_get_url/tasks/main.yml
+++ b/test/integration/roles/test_get_url/tasks/main.yml
@@ -16,6 +16,21 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+- name: Determine if python looks like it will support modern ssl features like SNI
+ command: python -c 'from ssl import SSLContext'
+ ignore_errors: True
+ register: python_test
+
+- name: Set python_has_sslcontext if we have it
+ set_fact:
+ python_has_ssl_context: True
+ when: python_test.rc == 0
+
+- name: Set python_has_sslcontext False if we don't have it
+ set_fact:
+ python_has_ssl_context: False
+ when: python_test.rc != 0
+
- name: test https fetch
get_url: url="https://raw.githubusercontent.com/ansible/ansible/devel/README.md" dest={{output_dir}}/get_url.txt force=yes
register: result
@@ -28,10 +43,16 @@
- name: test https fetch to a site with mismatched hostname and certificate
get_url:
- url: "https://kennethreitz.org/"
+ url: "https://www.kennethreitz.org/"
dest: "{{ output_dir }}/shouldnotexist.html"
ignore_errors: True
register: result
+ # kennethreitz having trouble staying up. Eventually need to install our own
+ # certs & web server to test this... also need to install and test it with
+ # a proxy so the complications are inevitable
+ until: "'read operation timed out' not in result.msg"
+ retries: 30
+ delay: 10
- stat:
path: "{{ output_dir }}/shouldnotexist.html"
@@ -46,10 +67,13 @@
- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
get_url:
- url: "https://kennethreitz.org/"
+ url: "https://www.kennethreitz.org/"
dest: "{{ output_dir }}/kreitz.html"
validate_certs: no
register: result
+ until: "'read operation timed out' not in result.msg"
+ retries: 30
+ delay: 10
- stat:
path: "{{ output_dir }}/kreitz.html"
@@ -61,34 +85,63 @@
- "result.changed == true"
- "stat_result.stat.exists == true"
-# SNI Tests
-# SNI is only built into the stdlib from python-2.7.9 onwards
+# At the moment, AWS can't make an https request to velox.ch... connection
+# timed out. So we'll use a different test until/unless the problem is resolved
+## SNI Tests
+## SNI is only built into the stdlib from python-2.7.9 onwards
+#- name: Test that SNI works
+# get_url:
+# # A test site that returns a page with information on what SNI information
+# # the client sent. A failure would have the string: did not send a TLS server name indication extension
+# url: 'https://foo.sni.velox.ch/'
+# dest: "{{ output_dir }}/sni.html"
+# register: get_url_result
+# ignore_errors: True
+#
+#- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html"
+# register: data_result
+# when: "{{ python_has_ssl_context }}"
+#
+#- debug: var=get_url_result
+#- name: Assert that SNI works with this python version
+# assert:
+# that:
+# - 'data_result.rc == 0'
+# - '"failed" not in get_url_result'
+# when: "{{ python_has_ssl_context }}"
+#
+## If the client doesn't support SNI then get_url should have failed with a certificate mismatch
+#- name: Assert that hostname verification failed because SNI is not supported on this version of python
+# assert:
+# that:
+# - 'get_url_result["failed"]'
+# when: "{{ not python_has_ssl_context }}"
+
+# These tests are just side effects of how the site is hosted. It's not
+# specifically a test site. So the tests may break due to the hosting changing
- name: Test that SNI works
get_url:
- # A test site that returns a page with information on what SNI information
- # the client sent. A failure would have the string: did not send a TLS server name indication extension
- url: 'https://foo.sni.velox.ch/'
+ url: 'https://www.mnot.net/blog/2014/05/09/if_you_can_read_this_youre_sniing'
dest: "{{ output_dir }}/sni.html"
register: get_url_result
ignore_errors: True
-- command: "grep 'sent the following TLS server name indication extension' {{ output_dir}}/sni.html"
+- command: "grep '<h2>If You Can Read This, You.re SNIing</h2>' {{ output_dir}}/sni.html"
register: data_result
- when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}"
+ when: "{{ python_has_ssl_context }}"
-# If distros start backporting SNI, can make a new conditional based on whether this works:
-# python -c 'from ssl import SSLContext'
-- debug: msg=get_url_result
+- debug: var=get_url_result
- name: Assert that SNI works with this python version
assert:
that:
- 'data_result.rc == 0'
- '"failed" not in get_url_result'
- when: "{{ ansible_python_version | version_compare('2.7.9', '>=') }}"
+ when: "{{ python_has_ssl_context }}"
# If the client doesn't support SNI then get_url should have failed with a certificate mismatch
- name: Assert that hostname verification failed because SNI is not supported on this version of python
assert:
that:
- 'get_url_result["failed"]'
- when: "{{ ansible_python_version | version_compare('2.7.9', '<') }}"
+ when: "{{ not python_has_ssl_context }}"
+# End hacky SNI test section
diff --git a/test/integration/roles/test_git/tasks/main.yml b/test/integration/roles/test_git/tasks/main.yml
index 831db8ea69..49f5f53bfb 100644
--- a/test/integration/roles/test_git/tasks/main.yml
+++ b/test/integration/roles/test_git/tasks/main.yml
@@ -27,6 +27,8 @@
repo_submodule1: 'https://github.com/abadger/test_submodules_subm1.git'
repo_submodule1_newer: 'https://github.com/abadger/test_submodules_subm1_newer.git'
repo_submodule2: 'https://github.com/abadger/test_submodules_subm2.git'
+ repo_update_url_1: 'https://github.com/ansible-test-robinro/git-test-old'
+ repo_update_url_2: 'https://github.com/ansible-test-robinro/git-test-new'
known_host_files:
- "{{ lookup('env','HOME') }}/.ssh/known_hosts"
- '/etc/ssh/ssh_known_hosts'
@@ -346,3 +348,67 @@
- assert:
that: '{{ submodule2.stdout_lines|length }} == 4'
+# test change of repo url
+# see https://github.com/ansible/ansible-modules-core/pull/721
+
+- name: clear checkout_dir
+ file: state=absent path={{ checkout_dir }}
+
+- name: Clone example git repo
+ git:
+ repo: '{{ repo_update_url_1 }}'
+ dest: '{{ checkout_dir }}'
+
+- name: Clone repo with changed url to the same place
+ git:
+ repo: '{{ repo_update_url_2 }}'
+ dest: '{{ checkout_dir }}'
+ register: clone2
+
+- assert:
+ that: "clone2|success"
+
+- name: check url updated
+ shell: git remote show origin | grep Fetch
+ register: remote_url
+ args:
+ chdir: '{{ checkout_dir }}'
+
+- assert:
+ that:
+ - "'git-test-new' in remote_url.stdout"
+ - "'git-test-old' not in remote_url.stdout"
+
+- name: check for new content in git-test-new
+ stat: path={{ checkout_dir }}/newfilename
+ register: repo_content
+
+- name: assert presence of new file in repo (i.e. working copy updated)
+ assert:
+ that: "repo_content.stat.exists"
+
+# Test that checkout by branch works when the branch is not in our current repo but the sha is
+
+- name: clear checkout_dir
+ file: state=absent path={{ checkout_dir }}
+
+- name: Clone example git repo that we're going to modify
+ git:
+ repo: '{{ repo_update_url_1 }}'
+ dest: '{{ checkout_dir }}/repo'
+
+- name: Clone the repo again - this is what we test
+ git:
+ repo: '{{ checkout_dir }}/repo'
+ dest: '{{ checkout_dir }}/checkout'
+
+- name: Add a branch to the repo
+ command: git branch new-branch
+ args:
+ chdir: '{{ checkout_dir }}/repo'
+
+- name: Checkout the new branch in the checkout
+ git:
+ repo: '{{ checkout_dir}}/repo'
+ version: 'new-branch'
+ dest: '{{ checkout_dir }}/checkout'
diff --git a/test/integration/roles/test_hg/tasks/main.yml b/test/integration/roles/test_hg/tasks/main.yml
index c6d6f70b06..4eee22e4c7 100644
--- a/test/integration/roles/test_hg/tasks/main.yml
+++ b/test/integration/roles/test_hg/tasks/main.yml
@@ -66,7 +66,6 @@
assert:
that:
- "tags.stat.isreg"
- - "head.stat.isreg"
- "branches.stat.isreg"
- name: verify on a reclone things are marked unchanged
diff --git a/test/integration/roles/test_lookups/tasks/main.yml b/test/integration/roles/test_lookups/tasks/main.yml
index 5ca29e27c1..3c5e066ee3 100644
--- a/test/integration/roles/test_lookups/tasks/main.yml
+++ b/test/integration/roles/test_lookups/tasks/main.yml
@@ -177,7 +177,7 @@
- name: Test that retrieving a url with invalid cert fails
set_fact:
- web_data: "{{ lookup('url', 'https://kennethreitz.org/') }}"
+ web_data: "{{ lookup('url', 'https://www.kennethreitz.org/') }}"
ignore_errors: True
register: url_invalid_cert
@@ -188,9 +188,9 @@
- name: Test that retrieving a url with invalid cert with validate_certs=False works
set_fact:
- web_data: "{{ lookup('url', 'https://kennethreitz.org/', validate_certs=False) }}"
+ web_data: "{{ lookup('url', 'https://www.kennethreitz.org/', validate_certs=False) }}"
register: url_no_validate_cert
- assert:
that:
- - "'kennethreitz.org' in web_data"
+ - "'www.kennethreitz.org' in web_data"
diff --git a/test/integration/roles/test_mysql_db/tasks/main.yml b/test/integration/roles/test_mysql_db/tasks/main.yml
index a059cd212a..1c2adcce8e 100644
--- a/test/integration/roles/test_mysql_db/tasks/main.yml
+++ b/test/integration/roles/test_mysql_db/tasks/main.yml
@@ -19,8 +19,9 @@
# ============================================================
- name: make sure the test database is not there
- command: mysql "-e drop database '{{db_name}}';"
- ignore_errors: True
+ command: mysql {{db_name}}
+ register: mysql_db_check
+ failed_when: "'1049' not in mysql_db_check.stderr"
- name: test state=present for a database name (expect changed=true)
mysql_db: name={{ db_name }} state=present
diff --git a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
index 50307cef95..6b417ecc24 100644
--- a/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
+++ b/test/integration/roles/test_mysql_user/tasks/user_password_update_test.yml
@@ -63,7 +63,6 @@
assert:
that:
- "result.failed == true"
- - "'check login credentials (login_user, and login_password' in result.msg"
- name: create database using user2 and new password
mysql_db: name={{ db_name }} state=present login_user={{ user_name_2 }} login_password={{ user_password_1 }}
@@ -79,8 +78,23 @@
- include: remove_user.yml user_name={{user_name_2}} user_password={{ user_password_1 }}
+- name: Create user with password1234 using hash. (expect changed=true)
+ mysql_user: name=jmainguy password='*D65798AAC0E5C6DF3F320F8A30E026E7EBD73A95' encrypted=yes
+ register: encrypt_result
+- name: Check that the module made a change
+ assert:
+ that:
+ - "encrypt_result.changed == True"
+- name: See if the password needs to be updated. (expect changed=false)
+ mysql_user: name=jmainguy password='password1234'
+ register: plain_result
+- name: Check that the module did not change the password
+ assert:
+ that:
+ - "plain_result.changed == False"
-
+- name: Remove user (cleanup)
+ mysql_user: name=jmainguy state=absent
diff --git a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml
index 70aa26856e..ba51b9d67c 100644
--- a/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml
+++ b/test/integration/roles/test_mysql_variables/tasks/assert_fail_msg.yml
@@ -23,5 +23,3 @@
assert:
that:
- "output.failed == true"
- - "'{{msg}}' in output.msg"
-
diff --git a/test/integration/roles/test_service/tasks/main.yml b/test/integration/roles/test_service/tasks/main.yml
index c0e590643c..8b61d62143 100644
--- a/test/integration/roles/test_service/tasks/main.yml
+++ b/test/integration/roles/test_service/tasks/main.yml
@@ -98,7 +98,7 @@
- name: assert that the broken test failed
assert:
that:
- - "broken_enable_result.failed == True"
+ - "broken_enable_result|failed"
- name: remove the test daemon script
file: path=/usr/sbin/ansible_test_service state=absent
diff --git a/test/integration/roles/test_subversion/tasks/main.yml b/test/integration/roles/test_subversion/tasks/main.yml
index 8b28688a9e..d15d63ab02 100644
--- a/test/integration/roles/test_subversion/tasks/main.yml
+++ b/test/integration/roles/test_subversion/tasks/main.yml
@@ -36,7 +36,7 @@
- debug: var=subverted
-- shell: ls ~/ansible_testing/svn
+- shell: ls {{ checkout_dir }}
# FIXME: the before/after logic here should be fixed to make them hashes, see GitHub 6078
# looks like this: {
diff --git a/test/integration/roles/test_template/files/foo-py26.txt b/test/integration/roles/test_template/files/foo-py26.txt
index 84279bc7b3..76b0bb56f7 100644
--- a/test/integration/roles/test_template/files/foo-py26.txt
+++ b/test/integration/roles/test_template/files/foo-py26.txt
@@ -3,6 +3,7 @@ templated_var_loaded
{
"bool": true,
"multi_part": "1Foo",
+ "null_type": null,
"number": 5,
"string_num": "5"
}
diff --git a/test/integration/roles/test_template/tasks/main.yml b/test/integration/roles/test_template/tasks/main.yml
index a35b93d9d9..9fd1d860e0 100644
--- a/test/integration/roles/test_template/tasks/main.yml
+++ b/test/integration/roles/test_template/tasks/main.yml
@@ -48,11 +48,6 @@
- name: copy known good into place
copy: src=foo.txt dest={{output_dir}}/foo.txt
- when: pyver.stdout != '2.6'
-
-- name: copy known good into place
- copy: src=foo-py26.txt dest={{output_dir}}/foo.txt
- when: pyver.stdout == '2.6'
- name: compare templated file to known good
shell: diff {{output_dir}}/foo.templated {{output_dir}}/foo.txt
diff --git a/test/integration/roles/test_unarchive/tasks/main.yml b/test/integration/roles/test_unarchive/tasks/main.yml
index c26d3aeb10..e4f438e525 100644
--- a/test/integration/roles/test_unarchive/tasks/main.yml
+++ b/test/integration/roles/test_unarchive/tasks/main.yml
@@ -21,6 +21,10 @@
yum: name=zip state=latest
when: ansible_pkg_mgr == 'yum'
+- name: Ensure zip is present to create test archive (dnf)
+ dnf: name=zip state=latest
+ when: ansible_pkg_mgr == 'dnf'
+
- name: Ensure zip is present to create test archive (apt)
apt: name=zip state=latest
when: ansible_pkg_mgr == 'apt'
diff --git a/test/integration/roles/test_uri/tasks/main.yml b/test/integration/roles/test_uri/tasks/main.yml
index 7300578982..9ce05938b6 100644
--- a/test/integration/roles/test_uri/tasks/main.yml
+++ b/test/integration/roles/test_uri/tasks/main.yml
@@ -94,10 +94,16 @@
- name: test https fetch to a site with mismatched hostname and certificate
uri:
- url: "https://kennethreitz.org/"
+ url: "https://www.kennethreitz.org/"
dest: "{{ output_dir }}/shouldnotexist.html"
ignore_errors: True
register: result
+ # kennethreitz having trouble staying up. Eventually need to install our own
+ # certs & web server to test this... also need to install and test it with
+ # a proxy so the complications are inevitable
+ until: "'read operation timed out' not in result.msg"
+ retries: 30
+ delay: 10
- stat:
path: "{{ output_dir }}/shouldnotexist.html"
@@ -117,10 +123,13 @@
- name: test https fetch to a site with mismatched hostname and certificate and validate_certs=no
get_url:
- url: "https://kennethreitz.org/"
+ url: "https://www.kennethreitz.org/"
dest: "{{ output_dir }}/kreitz.html"
validate_certs: no
register: result
+ until: "'read operation timed out' not in result.msg"
+ retries: 30
+ delay: 10
- stat:
path: "{{ output_dir }}/kreitz.html"
diff --git a/test/integration/roles/test_var_precedence_dep/tasks/main.yml b/test/integration/roles/test_var_precedence_dep/tasks/main.yml
index b50f9dfc27..2f8e17096b 100644
--- a/test/integration/roles/test_var_precedence_dep/tasks/main.yml
+++ b/test/integration/roles/test_var_precedence_dep/tasks/main.yml
@@ -7,7 +7,7 @@
- assert:
that:
- 'extra_var == "extra_var"'
- - 'param_var == "param_var"'
+ - 'param_var == "param_var_role1"'
- 'vars_var == "vars_var"'
- 'vars_files_var == "vars_files_var"'
- 'vars_files_var_role == "vars_files_var_dep"'
diff --git a/test/integration/roles/test_win_raw/tasks/main.yml b/test/integration/roles/test_win_raw/tasks/main.yml
index 6351c516be..30c1a75e6b 100644
--- a/test/integration/roles/test_win_raw/tasks/main.yml
+++ b/test/integration/roles/test_win_raw/tasks/main.yml
@@ -101,3 +101,16 @@
assert:
that:
- "raw_result2.stdout_lines[0] == '--% icacls D:\\\\somedir\\\\ /grant \"! ЗАО. Руководство\":F'"
+
+# Assumes MaxShellsPerUser == 30 (the default)
+
+- name: test raw + with_items to verify that winrm connection is reused for each item
+ raw: echo "{{item}}"
+ with_items: "{{range(32)|list}}"
+ register: raw_with_items_result
+
+- name: check raw + with_items result
+ assert:
+ that:
+ - "not raw_with_items_result|failed"
+ - "raw_with_items_result.results|length == 32"
diff --git a/test/integration/roles/test_win_setup/tasks/main.yml b/test/integration/roles/test_win_setup/tasks/main.yml
index fb13da1542..3debb8f68d 100644
--- a/test/integration/roles/test_win_setup/tasks/main.yml
+++ b/test/integration/roles/test_win_setup/tasks/main.yml
@@ -27,6 +27,14 @@
- "not setup_result|changed"
- "setup_result.ansible_facts"
- "setup_result.ansible_facts.ansible_os_family == 'Windows'"
+ - "setup_result.ansible_facts.ansible_date_time"
+ - "setup_result.ansible_facts.ansible_date_time.date"
+ - "setup_result.ansible_facts.ansible_date_time.year"
+ - "setup_result.ansible_facts.ansible_date_time.month"
+ - "setup_result.ansible_facts.ansible_date_time.day"
+ - "setup_result.ansible_facts.ansible_date_time.hour is defined"
+ - "setup_result.ansible_facts.ansible_date_time.minute is defined"
+ - "setup_result.ansible_facts.ansible_date_time.iso8601"
- "setup_result.ansible_facts.ansible_distribution"
- "setup_result.ansible_facts.ansible_distribution_version"
- "setup_result.ansible_facts.ansible_fqdn"
diff --git a/test/integration/roles/test_yum/tasks/main.yml b/test/integration/roles/test_yum/tasks/main.yml
index 5df887ae9f..b17af6b465 100644
--- a/test/integration/roles/test_yum/tasks/main.yml
+++ b/test/integration/roles/test_yum/tasks/main.yml
@@ -16,6 +16,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Note: We install the yum package onto Fedora so that this will work on dnf systems
+# We want to test that for people who don't want to upgrade their systems.
- include: 'yum.yml'
when: ansible_distribution in ['RedHat', 'CentOS', 'ScientificLinux', 'Fedora']
diff --git a/test/integration/roles/test_zypper/files/empty.spec b/test/integration/roles/test_zypper/files/empty.spec
new file mode 100644
index 0000000000..044ea3a548
--- /dev/null
+++ b/test/integration/roles/test_zypper/files/empty.spec
@@ -0,0 +1,12 @@
+Summary: Empty RPM
+Name: empty
+Version: 1
+Release: 0
+License: GPLv3
+Group: Applications/System
+BuildArch: noarch
+
+%description
+Empty RPM
+
+%files
diff --git a/test/integration/roles/test_zypper/meta/main.yml b/test/integration/roles/test_zypper/meta/main.yml
new file mode 100644
index 0000000000..07faa21776
--- /dev/null
+++ b/test/integration/roles/test_zypper/meta/main.yml
@@ -0,0 +1,2 @@
+dependencies:
+ - prepare_tests
diff --git a/test/integration/roles/test_zypper/tasks/main.yml b/test/integration/roles/test_zypper/tasks/main.yml
new file mode 100644
index 0000000000..5ecdb8684b
--- /dev/null
+++ b/test/integration/roles/test_zypper/tasks/main.yml
@@ -0,0 +1,26 @@
+# test code for the zypper module
+#
+# (c) 2015, Guido Günther <agx@sigxcpu.org>
+#
+# heavily based on the yum tests which are
+#
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+- include: 'zypper.yml'
+ when: ansible_distribution in ['SLES', 'openSUSE']
+
diff --git a/test/integration/roles/test_zypper/tasks/zypper.yml b/test/integration/roles/test_zypper/tasks/zypper.yml
new file mode 100644
index 0000000000..8ae04576c8
--- /dev/null
+++ b/test/integration/roles/test_zypper/tasks/zypper.yml
@@ -0,0 +1,194 @@
+# UNINSTALL
+- name: uninstall hello
+ zypper: name=hello state=removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify uninstallation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "rpm_result.rc == 1"
+
+# UNINSTALL AGAIN
+- name: uninstall hello again
+ zypper: name=hello state=removed
+ register: zypper_result
+
+- name: verify no change on re-uninstall
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# INSTALL
+- name: install hello
+ zypper: name=hello state=present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_result
+
+- debug: var=zypper_result
+- debug: var=rpm_result
+
+- name: verify installation of hello
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+# INSTALL AGAIN
+- name: install hello again
+ zypper: name=hello state=present
+ register: zypper_result
+
+- name: verify no change on second install
+ assert:
+ that:
+ - "not zypper_result.changed"
+
+# Multiple packages
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: False
+ register: rpm_metamail_result
+
+- name: verify packages uninstalled
+ assert:
+ that:
+ - "rpm_hello_result.rc != 0"
+ - "rpm_metamail_result.rc != 0"
+
+- name: install hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: present
+ register: zypper_result
+
+- name: check hello with rpm
+ shell: rpm -q hello
+ failed_when: False
+ register: rpm_hello_result
+
+- name: check metamail with rpm
+ shell: rpm -q metamail
+ failed_when: False
+ register: rpm_metamail_result
+
+- name: verify packages installed
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_hello_result.rc == 0"
+ - "rpm_metamail_result.rc == 0"
+
+- name: uninstall hello and metamail
+ zypper:
+ name:
+ - hello
+ - metamail
+ state: removed
+
+# INSTALL nonexistent package
+- name: install nonexistent package
+ zypper: name=doesnotexist state=present
+ register: zypper_result
+ ignore_errors: yes
+
+- name: verify package installation failed
+ assert:
+ that:
+ - "zypper_result.rc == 104"
+ - "zypper_result.msg.startswith('No provider of')"
+
+# INSTALL broken local package
+- name: create directory
+ file:
+ path: "{{output_dir | expanduser}}/zypper"
+ state: directory
+
+- name: fake rpm package
+ file:
+ path: "{{output_dir | expanduser}}/zypper/broken.rpm"
+ state: touch
+
+- name: install broken rpm
+ zypper:
+ name="{{output_dir | expanduser}}/zypper/broken.rpm"
+ state=present
+ register: zypper_result
+ ignore_errors: yes
+
+- debug: var=zypper_result
+
+- name: verify we failed installation of broken rpm
+ assert:
+ that:
+ - "zypper_result.rc == 1"
+ - "'broken.rpm: not an rpm package' in zypper_result.msg"
+
+# Build and install an empty rpm
+- name: copy spec file
+ copy:
+ src: empty.spec
+ dest: "{{ output_dir | expanduser }}/zypper/empty.spec"
+
+- name: build rpm
+ command: |
+ rpmbuild -bb \
+    --define "_topdir {{output_dir | expanduser }}/zypper/rpm-build" \
+ --define "_builddir %{_topdir}" \
+ --define "_rpmdir %{_topdir}" \
+ --define "_srcrpmdir %{_topdir}" \
+ --define "_specdir {{output_dir | expanduser}}/zypper" \
+ --define "_sourcedir %{_topdir}" \
+ {{ output_dir }}/zypper/empty.spec
+ register: rpm_build_result
+
+- name: install empty rpm
+ zypper:
+ name: "{{ output_dir | expanduser }}/zypper/rpm-build/noarch/empty-1-0.noarch.rpm"
+ register: zypper_result
+
+- name: check empty with rpm
+ shell: rpm -q empty
+ failed_when: False
+ register: rpm_result
+
+- name: verify installation of empty
+ assert:
+ that:
+ - "zypper_result.rc == 0"
+ - "zypper_result.changed"
+ - "rpm_result.rc == 0"
+
+- name: uninstall empty
+ zypper:
+ name: empty
+ state: removed
diff --git a/test/integration/test_var_precedence.yml b/test/integration/test_var_precedence.yml
index ae4b4cfea1..df09914158 100644
--- a/test/integration/test_var_precedence.yml
+++ b/test/integration/test_var_precedence.yml
@@ -1,43 +1,45 @@
---
- hosts: testhost
vars:
- - vars_var: "vars_var"
- - param_var: "BAD!"
- - vars_files_var: "BAD!"
- - extra_var_override_once_removed: "{{ extra_var_override }}"
- - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}"
+ - ansible_hostname: "BAD!"
+ - vars_var: "vars_var"
+ - param_var: "BAD!"
+ - vars_files_var: "BAD!"
+ - extra_var_override_once_removed: "{{ extra_var_override }}"
+ - from_inventory_once_removed: "{{ inven_var | default('BAD!') }}"
vars_files:
- - vars/test_var_precedence.yml
+ - vars/test_var_precedence.yml
roles:
- - { role: test_var_precedence, param_var: "param_var" }
+ - { role: test_var_precedence, param_var: "param_var" }
tasks:
- - name: register a result
- command: echo 'BAD!'
- register: registered_var
- - name: use set_fact to override the registered_var
- set_fact: registered_var="this is from set_fact"
- - debug: var=extra_var
- - debug: var=extra_var_override_once_removed
- - debug: var=vars_var
- - debug: var=vars_files_var
- - debug: var=vars_files_var_role
- - debug: var=registered_var
- - debug: var=from_inventory_once_removed
- - assert:
- that:
- - 'extra_var == "extra_var"'
- - 'extra_var_override == "extra_var_override"'
- - 'extra_var_override_once_removed == "extra_var_override"'
- - 'vars_var == "vars_var"'
- - 'vars_files_var == "vars_files_var"'
- - 'vars_files_var_role == "vars_files_var_role3"'
- - 'registered_var == "this is from set_fact"'
- - 'from_inventory_once_removed == "inventory_var"'
+ - name: register a result
+ command: echo 'BAD!'
+ register: registered_var
+ - name: use set_fact to override the registered_var
+ set_fact: registered_var="this is from set_fact"
+ - debug: var=extra_var
+ - debug: var=extra_var_override_once_removed
+ - debug: var=vars_var
+ - debug: var=vars_files_var
+ - debug: var=vars_files_var_role
+ - debug: var=registered_var
+ - debug: var=from_inventory_once_removed
+ - assert:
+ that: item
+ with_items:
+ - 'extra_var == "extra_var"'
+ - 'extra_var_override == "extra_var_override"'
+ - 'extra_var_override_once_removed == "extra_var_override"'
+ - 'vars_var == "vars_var"'
+ - 'vars_files_var == "vars_files_var"'
+ - 'vars_files_var_role == "vars_files_var_role3"'
+ - 'registered_var == "this is from set_fact"'
+ - 'from_inventory_once_removed == "inventory_var"'
- hosts: inven_overridehosts
vars_files:
- - "test_var_precedence.yml"
+ - "test_var_precedence.yml"
roles:
- - role: test_var_precedence_inven_override
- foo: bar
+ - role: test_var_precedence_inven_override
+ foo: bar
diff --git a/test/integration/unicode-test-script b/test/integration/unicode-test-script
new file mode 100755
index 0000000000..340f2a9f5b
--- /dev/null
+++ b/test/integration/unicode-test-script
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+echo "Non-ascii arguments:"
+echo $@
+
+echo "Non-ascii Env var:"
+echo $option
diff --git a/test/integration/unicode.yml b/test/integration/unicode.yml
index 6e8e073a79..74d5772264 100644
--- a/test/integration/unicode.yml
+++ b/test/integration/unicode.yml
@@ -49,6 +49,81 @@
that:
- "'¯ ° ± ² ³ ´ µ ¶ · ¸ ¹ º » ¼ ½ ¾ ¿ À Á Â Ã Ä Å Æ Ç È É Ê Ë Ì Í Î Ï Ð Ñ Ò Ó Ô Õ Ö ×' in output.stdout_lines"
+ - name: Run raw with non-ascii options
+ raw: "/bin/echo Zażółć gęślą jaźń"
+ register: results
+
+ - name: Check that raw output the right thing
+ assert:
+ that:
+ - "'Zażółć gęślą jaźń' in results.stdout_lines"
+
+ - name: Run a script with non-ascii options and environment
+ script: unicode-test-script --option "Zażółć gęślą jaźń"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that script output includes the nonascii arguments and environment values
+ assert:
+ that:
+ - "'--option Zażółć gęślą jaźń' in results.stdout_lines"
+ - "'Zażółć' in results.stdout_lines"
+
+ - name: Ping with non-ascii environment variable and option
+ ping:
+ data: "Zażółć gęślą jaźń"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that ping with non-ascii data was correct
+ assert:
+ that:
+ - "'Zażółć gęślą jaźń' == results.ping"
+
+ - name: Command that echos a non-ascii env var
+ command: "echo $option"
+ environment:
+ option: Zażółć
+ register: results
+
+ - name: Check that a non-ascii env var was passed to the command module
+ assert:
+ that:
+ - "'Zażółć' in results.stdout_lines"
+
+ - name: Clean a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: absent
+
+ - name: Create a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: directory
+
+ - name: Create a file with a non-ascii filename
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put/Zażółć
+ state: touch
+ delegate_to: localhost
+
+ - name: Put with unicode filename
+ copy:
+ src: /var/tmp/ansible_test_unicode_get_put/Zażółć
+ dest: /var/tmp/ansible_test_unicode_get_put/Zażółć2
+
+ - name: Fetch with unicode filename
+ fetch:
+ src: /var/tmp/ansible_test_unicode_get_put/Zażółć2
+ dest: /var/tmp/ansible_test_unicode_get_put/
+
+ - name: Clean a temp directory
+ file:
+ path: /var/tmp/ansible_test_unicode_get_put
+ state: absent
+
- name: 'A play for hosts in group: ĪīĬĭ'
hosts: 'ĪīĬĭ'
gather_facts: true
diff --git a/test/units/errors/test_errors.py b/test/units/errors/test_errors.py
index 4c09c0089b..4480bf01df 100644
--- a/test/units/errors/test_errors.py
+++ b/test/units/errors/test_errors.py
@@ -40,13 +40,13 @@ class TestErrors(unittest.TestCase):
def test_basic_error(self):
e = AnsibleError(self.message)
- self.assertEqual(e.message, 'ERROR! ' + self.message)
- self.assertEqual(e.__repr__(), 'ERROR! ' + self.message)
+ self.assertEqual(e.message, self.message)
+ self.assertEqual(e.__repr__(), self.message)
def test_basic_unicode_error(self):
e = AnsibleError(self.unicode_message)
- self.assertEqual(e.message, 'ERROR! ' + self.unicode_message)
- self.assertEqual(e.__repr__(), 'ERROR! ' + self.unicode_message)
+ self.assertEqual(e.message, self.unicode_message)
+ self.assertEqual(e.__repr__(), self.unicode_message)
@patch.object(AnsibleError, '_get_error_lines_from_file')
def test_error_with_object(self, mock_method):
@@ -55,7 +55,7 @@ class TestErrors(unittest.TestCase):
mock_method.return_value = ('this is line 1\n', '')
e = AnsibleError(self.message, self.obj)
- self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
+ self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
def test_get_error_lines_from_file(self):
m = mock_open()
@@ -65,12 +65,12 @@ class TestErrors(unittest.TestCase):
# this line will be found in the file
self.obj.ansible_pos = ('foo.yml', 1, 1)
e = AnsibleError(self.message, self.obj)
- self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
+ self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis is line 1\n^ here\n")
# this line will not be found, as it is out of the index range
self.obj.ansible_pos = ('foo.yml', 2, 1)
e = AnsibleError(self.message, self.obj)
- self.assertEqual(e.message, "ERROR! This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
+ self.assertEqual(e.message, "This is the error message\n\nThe error appears to have been in 'foo.yml': line 2, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\n(specified line no longer in file, maybe it changed?)")
m = mock_open()
m.return_value.readlines.return_value = ['this line has unicode \xf0\x9f\x98\xa8 in it!\n']
@@ -79,5 +79,5 @@ class TestErrors(unittest.TestCase):
# this line will be found in the file
self.obj.ansible_pos = ('foo.yml', 1, 1)
e = AnsibleError(self.unicode_message, self.obj)
- self.assertEqual(e.message, "ERROR! This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ here\n")
+ self.assertEqual(e.message, "This is an error with \xf0\x9f\x98\xa8 in it\n\nThe error appears to have been in 'foo.yml': line 1, column 1, but may\nbe elsewhere in the file depending on the exact syntax problem.\n\nThe offending line appears to be:\n\n\nthis line has unicode \xf0\x9f\x98\xa8 in it!\n^ here\n")
diff --git a/test/units/inventory/test_host.py b/test/units/inventory/test_host.py
index 078d4321b5..5c0945f7b4 100644
--- a/test/units/inventory/test_host.py
+++ b/test/units/inventory/test_host.py
@@ -29,9 +29,7 @@ class TestHost(unittest.TestCase):
def test_equality(self):
self.assertEqual(self.hostA, self.hostA)
self.assertNotEqual(self.hostA, self.hostB)
- self.assertEqual(self.hostA, Host('a'))
- # __ne__ is a separate method
- self.assertFalse(self.hostA != Host('a'))
+ self.assertNotEqual(self.hostA, Host('a'))
def test_hashability(self):
# equality implies the hash values are the same
diff --git a/test/units/module_utils/basic/test_exit_json.py b/test/units/module_utils/basic/test_exit_json.py
index 66610ec3ed..27bbb0f9e5 100644
--- a/test/units/module_utils/basic/test_exit_json.py
+++ b/test/units/module_utils/basic/test_exit_json.py
@@ -31,8 +31,11 @@ from ansible.module_utils import basic
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.module_utils.basic import return_values, remove_values
+empty_invocation = {u'module_args': {}}
+
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
class TestAnsibleModuleExitJson(unittest.TestCase):
+
def setUp(self):
self.COMPLEX_ARGS = basic.MODULE_COMPLEX_ARGS
basic.MODULE_COMPLEX_ARGS = '{}'
@@ -56,7 +59,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
- self.assertEquals(return_val, dict(changed=False))
+ self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation))
def test_exit_json_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
@@ -67,7 +70,7 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
- self.assertEquals(return_val, dict(msg="message", changed=False))
+ self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation))
def test_fail_json_exits(self):
with self.assertRaises(SystemExit) as ctx:
@@ -78,13 +81,13 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
else:
self.assertEquals(ctx.exception.code, 1)
return_val = json.loads(self.fake_stream.getvalue())
- self.assertEquals(return_val, dict(msg="message", failed=True))
+ self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation))
def test_exit_json_proper_changed(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(changed=True, msg='success')
return_val = json.loads(self.fake_stream.getvalue())
- self.assertEquals(return_val, dict(changed=True, msg='success'))
+ self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation))
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
@@ -94,19 +97,22 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
- not_secret='following the leader', changed=False, msg='here')
+ not_secret='following the leader', changed=False, msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='password12345'),
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
- not_secret='following the leader', changed=False, msg='here')
+ not_secret='following the leader', changed=False, msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
- not_secret='following the leader', changed=False, msg='here')
+ not_secret='following the leader', changed=False, msg='here',
+ invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
)
@@ -119,6 +125,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
sys.stdout = self.old_stdout
def test_exit_json_removes_values(self):
+ self.maxDiff = None
for args, return_val, expected in self.dataset:
sys.stdout = StringIO()
basic.MODULE_COMPLEX_ARGS = json.dumps(args)
@@ -134,6 +141,7 @@ class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
def test_fail_json_removes_values(self):
+ self.maxDiff = None
for args, return_val, expected in self.dataset:
expected = copy.deepcopy(expected)
del expected['changed']
diff --git a/test/units/module_utils/basic/test_heuristic_log_sanitize.py b/test/units/module_utils/basic/test_heuristic_log_sanitize.py
index 51a5c11adf..14ffff0d74 100644
--- a/test/units/module_utils/basic/test_heuristic_log_sanitize.py
+++ b/test/units/module_utils/basic/test_heuristic_log_sanitize.py
@@ -85,6 +85,7 @@ class TestHeuristicLogSanitize(unittest.TestCase):
self.assertTrue(ssh_output.endswith("}"))
self.assertIn(":********@foo.com/data'", ssh_output)
+ @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
def test_hides_parameter_secrets(self):
output = heuristic_log_sanitize('token="secret", user="person", token_entry="test=secret"', frozenset(['secret']))
self.assertNotIn('secret', output)
diff --git a/test/units/module_utils/basic/test_known_hosts.py b/test/units/module_utils/basic/test_known_hosts.py
new file mode 100644
index 0000000000..515d67686d
--- /dev/null
+++ b/test/units/module_utils/basic/test_known_hosts.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+# (c) 2015, Michael Scherer <mscherer@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.compat.tests import unittest
+from ansible.module_utils import known_hosts
+
+class TestAnsibleModuleKnownHosts(unittest.TestCase):
+ urls = {
+ 'ssh://one.example.org/example.git':
+ {'is_ssh_url': True, 'get_fqdn': 'one.example.org'},
+ 'ssh+git://two.example.org/example.git':
+ {'is_ssh_url': True, 'get_fqdn': 'two.example.org'},
+ 'rsync://three.example.org/user/example.git':
+ {'is_ssh_url': False, 'get_fqdn': 'three.example.org'},
+ 'git@four.example.org:user/example.git':
+ {'is_ssh_url': True, 'get_fqdn': 'four.example.org'},
+ 'git+ssh://five.example.org/example.git':
+ {'is_ssh_url': True, 'get_fqdn': 'five.example.org'},
+ 'ssh://six.example.org:21/example.org':
+ {'is_ssh_url': True, 'get_fqdn': 'six.example.org'},
+ 'ssh://[2001:DB8::abcd:abcd]/example.git':
+ {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
+ 'ssh://[2001:DB8::abcd:abcd]:22/example.git':
+ {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
+ 'username@[2001:DB8::abcd:abcd]/example.git':
+ {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
+ 'username@[2001:DB8::abcd:abcd]:22/example.git':
+ {'is_ssh_url': True, 'get_fqdn': '[2001:DB8::abcd:abcd]'},
+ }
+
+ def test_is_ssh_url(self):
+ for u in self.urls:
+ self.assertEqual(known_hosts.is_ssh_url(u), self.urls[u]['is_ssh_url'])
+
+ def test_get_fqdn(self):
+ for u in self.urls:
+ self.assertEqual(known_hosts.get_fqdn(u), self.urls[u]['get_fqdn'])
+
+
+
diff --git a/test/units/module_utils/basic/test_no_log.py b/test/units/module_utils/basic/test_no_log.py
index 24d38ddcfa..102b7a3ab2 100644
--- a/test/units/module_utils/basic/test_no_log.py
+++ b/test/units/module_utils/basic/test_no_log.py
@@ -46,8 +46,11 @@ class TestReturnValues(unittest.TestCase):
'three': ['amigos', 'musketeers', None,
{'ping': 'pong', 'base': ('balls', 'raquets')}]},
frozenset(['1', 'dos', 'amigos', 'musketeers', 'pong', 'balls', 'raquets'])),
+ (u'Toshio くらとみ', frozenset(['Toshio くらとみ'])),
+ ('Toshio くらとみ', frozenset(['Toshio くらとみ'])),
)
+ @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
def test_return_values(self):
for data, expected in self.dataset:
self.assertEquals(frozenset(return_values(data)), expected)
@@ -69,6 +72,8 @@ class TestRemoveValues(unittest.TestCase):
'three': ['amigos', 'musketeers', None,
{'ping': 'pong', 'base': ['balls', 'raquets']}]},
frozenset(['nope'])),
+ ('Toshio くら', frozenset(['とみ'])),
+ (u'Toshio くら', frozenset(['とみ'])),
)
dataset_remove = (
('string', frozenset(['string']), OMIT),
@@ -94,12 +99,16 @@ class TestRemoveValues(unittest.TestCase):
('This sentence has an enigma wrapped in a mystery inside of a secret. - mr mystery',
frozenset(['enigma', 'mystery', 'secret']),
'This sentence has an ******** wrapped in a ******** inside of a ********. - mr ********'),
+ ('Toshio くらとみ', frozenset(['くらとみ']), 'Toshio ********'),
+ (u'Toshio くらとみ', frozenset(['くらとみ']), u'Toshio ********'),
)
+ @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
def test_no_removal(self):
for value, no_log_strings in self.dataset_no_remove:
self.assertEquals(remove_values(value, no_log_strings), value)
+ @unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
def test_strings_to_remove(self):
for value, no_log_strings, expected in self.dataset_remove:
self.assertEquals(remove_values(value, no_log_strings), expected)
diff --git a/test/units/module_utils/basic/test_run_command.py b/test/units/module_utils/basic/test_run_command.py
new file mode 100644
index 0000000000..0db6fbe7b9
--- /dev/null
+++ b/test/units/module_utils/basic/test_run_command.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division)
+__metaclass__ = type
+
+import errno
+import sys
+import time
+
+from ansible.compat.tests import unittest
+from ansible.compat.six import StringIO, BytesIO
+from ansible.compat.tests.mock import call, MagicMock, Mock, patch, sentinel
+
+from ansible.module_utils import basic
+from ansible.module_utils.basic import AnsibleModule
+
+class OpenStringIO(StringIO):
+ """StringIO with dummy close() method
+
+ So that you can inspect the content after close() was called.
+ """
+
+ def close(self):
+ pass
+
+
+@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
+class TestAnsibleModuleRunCommand(unittest.TestCase):
+
+ def setUp(self):
+
+ self.cmd_out = {
+ # os.read() is returning 'bytes', not strings
+ sentinel.stdout: BytesIO(),
+ sentinel.stderr: BytesIO(),
+ }
+
+ def mock_os_read(fd, nbytes):
+ return self.cmd_out[fd].read(nbytes)
+
+ def mock_select(rlist, wlist, xlist, timeout=1):
+ return (rlist, [], [])
+
+ def mock_os_chdir(path):
+ if path == '/inaccessible':
+ raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")
+
+ basic.MODULE_COMPLEX_ARGS = '{}'
+ self.module = AnsibleModule(argument_spec=dict())
+ self.module.fail_json = MagicMock(side_effect=SystemExit)
+
+ self.os = patch('ansible.module_utils.basic.os').start()
+ self.os.path.expandvars.side_effect = lambda x: x
+ self.os.path.expanduser.side_effect = lambda x: x
+ self.os.environ = {'PATH': '/bin'}
+ self.os.getcwd.return_value = '/home/foo'
+ self.os.path.isdir.return_value = True
+ self.os.chdir.side_effect = mock_os_chdir
+ self.os.read.side_effect = mock_os_read
+
+ self.subprocess = patch('ansible.module_utils.basic.subprocess').start()
+ self.cmd = Mock()
+ self.cmd.returncode = 0
+ self.cmd.stdin = OpenStringIO()
+ self.cmd.stdout.fileno.return_value = sentinel.stdout
+ self.cmd.stderr.fileno.return_value = sentinel.stderr
+ self.subprocess.Popen.return_value = self.cmd
+
+ self.select = patch('ansible.module_utils.basic.select').start()
+ self.select.select.side_effect = mock_select
+
+ self.addCleanup(patch.stopall)
+
+ def test_list_as_args(self):
+ self.module.run_command(['/bin/ls', 'a', ' b', 'c '])
+ self.assertTrue(self.subprocess.Popen.called)
+ args, kwargs = self.subprocess.Popen.call_args
+ self.assertEqual(args, (['/bin/ls', 'a', ' b', 'c '], ))
+ self.assertEqual(kwargs['shell'], False)
+
+ def test_str_as_args(self):
+ self.module.run_command('/bin/ls a " b" "c "')
+ self.assertTrue(self.subprocess.Popen.called)
+ args, kwargs = self.subprocess.Popen.call_args
+ self.assertEqual(args, (['/bin/ls', 'a', ' b', 'c '], ))
+ self.assertEqual(kwargs['shell'], False)
+
+ def test_tuple_as_args(self):
+ self.assertRaises(SystemExit, self.module.run_command, ('ls', '/'))
+ self.assertTrue(self.module.fail_json.called)
+
+ def test_unsafe_shell(self):
+ self.module.run_command('ls a " b" "c "', use_unsafe_shell=True)
+ self.assertTrue(self.subprocess.Popen.called)
+ args, kwargs = self.subprocess.Popen.call_args
+ self.assertEqual(args, ('ls a " b" "c "', ))
+ self.assertEqual(kwargs['shell'], True)
+
+ def test_cwd(self):
+ self.os.getcwd.return_value = '/old'
+ self.module.run_command('/bin/ls', cwd='/new')
+ self.assertEqual(self.os.chdir.mock_calls,
+ [call('/new'), call('/old'), ])
+
+ def test_cwd_not_a_dir(self):
+ self.os.getcwd.return_value = '/old'
+ self.os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
+ self.module.run_command('/bin/ls', cwd='/not-a-dir')
+ self.assertEqual(self.os.chdir.mock_calls, [call('/old'), ])
+
+ def test_cwd_inaccessible(self):
+ self.assertRaises(SystemExit, self.module.run_command, '/bin/ls', cwd='/inaccessible')
+ self.assertTrue(self.module.fail_json.called)
+ args, kwargs = self.module.fail_json.call_args
+ self.assertEqual(kwargs['rc'], errno.EPERM)
+
+ def test_prompt_bad_regex(self):
+ self.assertRaises(SystemExit, self.module.run_command, 'foo', prompt_regex='[pP)assword:')
+ self.assertTrue(self.module.fail_json.called)
+
+ def test_prompt_no_match(self):
+ self.cmd_out[sentinel.stdout] = BytesIO(b'hello')
+ (rc, _, _) = self.module.run_command('foo', prompt_regex='[pP]assword:')
+ self.assertEqual(rc, 0)
+
+ def test_prompt_match_wo_data(self):
+ self.cmd_out[sentinel.stdout] = BytesIO(b'Authentication required!\nEnter password: ')
+ (rc, _, _) = self.module.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
+ self.assertEqual(rc, 257)
+
+ def test_check_rc_false(self):
+ self.cmd.returncode = 1
+ (rc, _, _) = self.module.run_command('/bin/false', check_rc=False)
+ self.assertEqual(rc, 1)
+
+ def test_check_rc_true(self):
+ self.cmd.returncode = 1
+ self.assertRaises(SystemExit, self.module.run_command, '/bin/false', check_rc=True)
+ self.assertTrue(self.module.fail_json.called)
+ args, kwargs = self.module.fail_json.call_args
+ self.assertEqual(kwargs['rc'], 1)
+
+ def test_text_stdin(self):
+ (rc, stdout, stderr) = self.module.run_command('/bin/foo', data='hello world')
+ self.assertEqual(self.cmd.stdin.getvalue(), 'hello world\n')
+
+ def test_ascii_stdout(self):
+ self.cmd_out[sentinel.stdout] = BytesIO(b'hello')
+ (rc, stdout, stderr) = self.module.run_command('/bin/cat hello.txt')
+ self.assertEqual(rc, 0)
+ self.assertEqual(stdout, 'hello')
+
+ def test_utf8_output(self):
+ self.cmd_out[sentinel.stdout] = BytesIO(u'Žarn§'.encode('utf-8'))
+ self.cmd_out[sentinel.stderr] = BytesIO(u'لرئيسية'.encode('utf-8'))
+ (rc, stdout, stderr) = self.module.run_command('/bin/something_ugly')
+ self.assertEqual(rc, 0)
+ self.assertEqual(stdout.decode('utf-8'), u'Žarn§')
+ self.assertEqual(stderr.decode('utf-8'), u'لرئيسية')
+
diff --git a/test/units/parsing/test_addresses.py b/test/units/parsing/test_addresses.py
index 870cbb0a14..a688d0253b 100644
--- a/test/units/parsing/test_addresses.py
+++ b/test/units/parsing/test_addresses.py
@@ -71,7 +71,12 @@ class TestParseAddress(unittest.TestCase):
for t in self.tests:
test = self.tests[t]
- (host, port) = parse_address(t)
+ try:
+ (host, port) = parse_address(t)
+ except:
+ host = None
+ port = None
+
assert host == test[0]
assert port == test[1]
@@ -79,6 +84,11 @@ class TestParseAddress(unittest.TestCase):
for t in self.range_tests:
test = self.range_tests[t]
- (host, port) = parse_address(t, allow_ranges=True)
+ try:
+ (host, port) = parse_address(t, allow_ranges=True)
+ except:
+ host = None
+ port = None
+
assert host == test[0]
assert port == test[1]
diff --git a/test/units/playbook/test_play_context.py b/test/units/playbook/test_play_context.py
index 0595966f1b..5434ef3000 100644
--- a/test/units/playbook/test_play_context.py
+++ b/test/units/playbook/test_play_context.py
@@ -91,6 +91,7 @@ class TestPlayContext(unittest.TestCase):
mock_task.become_user = 'mocktaskroot'
mock_task.become_pass = 'mocktaskpass'
mock_task._local_action = False
+ mock_task.delegate_to = None
all_vars = dict(
ansible_connection = 'mock_inventory',
diff --git a/test/units/plugins/action/test_action.py b/test/units/plugins/action/test_action.py
index 0e47b6a538..afb5d767e1 100644
--- a/test/units/plugins/action/test_action.py
+++ b/test/units/plugins/action/test_action.py
@@ -49,7 +49,7 @@ class TestActionBase(unittest.TestCase):
play_context.remote_user = 'apo'
action_base._low_level_execute_command('ECHO', sudoable=True)
- play_context.make_become_cmd.assert_called_once_with('ECHO', executable=None)
+ play_context.make_become_cmd.assert_called_once_with("/bin/sh -c ECHO", executable='/bin/sh')
play_context.make_become_cmd.reset_mock()
@@ -58,6 +58,6 @@ class TestActionBase(unittest.TestCase):
try:
play_context.remote_user = 'root'
action_base._low_level_execute_command('ECHO SAME', sudoable=True)
- play_context.make_become_cmd.assert_called_once_with('ECHO SAME', executable=None)
+ play_context.make_become_cmd.assert_called_once_with("/bin/sh -c 'ECHO SAME'", executable='/bin/sh')
finally:
C.BECOME_ALLOW_SAME_USER = become_allow_same_user
diff --git a/test/units/plugins/cache/test_cache.py b/test/units/plugins/cache/test_cache.py
index af1d924910..cd82e1ef2c 100644
--- a/test/units/plugins/cache/test_cache.py
+++ b/test/units/plugins/cache/test_cache.py
@@ -26,7 +26,7 @@ from ansible.plugins.cache.memory import CacheModule as MemoryCache
HAVE_MEMCACHED = True
try:
- import memcached
+ import memcache
except ImportError:
HAVE_MEMCACHED = False
else:
@@ -110,6 +110,6 @@ class TestAbstractClass(unittest.TestCase):
def test_memory_cachemodule(self):
self.assertIsInstance(MemoryCache(), MemoryCache)
- @unittest.skipUnless(HAVE_REDIS, 'Redis pyhton module not installed')
+ @unittest.skipUnless(HAVE_REDIS, 'Redis python module not installed')
def test_redis_cachemodule(self):
self.assertIsInstance(RedisCache(), RedisCache)
diff --git a/test/units/plugins/callback/test_callback.py b/test/units/plugins/callback/test_callback.py
new file mode 100644
index 0000000000..54964ac9df
--- /dev/null
+++ b/test/units/plugins/callback/test_callback.py
@@ -0,0 +1,82 @@
+# (c) 2012-2014, Chris Meyers <chris.meyers.fsu@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import PY3
+from copy import deepcopy
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import patch, mock_open
+
+from ansible.plugins.callback import CallbackBase
+import ansible.plugins.callback as callish
+
+class TestCopyResultExclude(unittest.TestCase):
+ def setUp(self):
+ class DummyClass():
+ def __init__(self):
+ self.bar = [ 1, 2, 3 ]
+ self.a = {
+ "b": 2,
+ "c": 3,
+ }
+ self.b = {
+ "c": 3,
+ "d": 4,
+ }
+ self.foo = DummyClass()
+ self.cb = CallbackBase()
+
+ def tearDown(self):
+ pass
+
+ def test_copy_logic(self):
+ res = self.cb._copy_result_exclude(self.foo, ())
+ self.assertEqual(self.foo.bar, res.bar)
+
+ def test_copy_deep(self):
+ res = self.cb._copy_result_exclude(self.foo, ())
+ self.assertNotEqual(id(self.foo.bar), id(res.bar))
+
+ def test_no_exclude(self):
+ res = self.cb._copy_result_exclude(self.foo, ())
+ self.assertEqual(self.foo.bar, res.bar)
+ self.assertEqual(self.foo.a, res.a)
+ self.assertEqual(self.foo.b, res.b)
+
+ def test_exclude(self):
+ res = self.cb._copy_result_exclude(self.foo, ['bar', 'b'])
+ self.assertIsNone(res.bar)
+ self.assertIsNone(res.b)
+ self.assertEqual(self.foo.a, res.a)
+
+ def test_result_unmodified(self):
+ bar_id = id(self.foo.bar)
+ a_id = id(self.foo.a)
+ res = self.cb._copy_result_exclude(self.foo, ['bar', 'a'])
+
+ self.assertEqual(self.foo.bar, [ 1, 2, 3 ])
+ self.assertEqual(bar_id, id(self.foo.bar))
+
+ self.assertEqual(self.foo.a, dict(b=2, c=3))
+ self.assertEqual(a_id, id(self.foo.a))
+
+ self.assertRaises(AttributeError, self.cb._copy_result_exclude, self.foo, ['a', 'c', 'bar'])
+
diff --git a/test/units/plugins/strategies/test_strategy_base.py b/test/units/plugins/strategies/test_strategy_base.py
index bf01cf6fcc..9ea944a2a1 100644
--- a/test/units/plugins/strategies/test_strategy_base.py
+++ b/test/units/plugins/strategies/test_strategy_base.py
@@ -24,8 +24,11 @@ from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.strategy import StrategyBase
+from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
+from ansible.playbook.handler import Handler
+from ansible.inventory.host import Host
from six.moves import queue as Queue
from units.mock.loader import DictDataLoader
@@ -73,6 +76,7 @@ class TestStrategyBase(unittest.TestCase):
for i in range(0, 5):
mock_host = MagicMock()
mock_host.name = "host%02d" % (i+1)
+ mock_host.has_hostkey = True
mock_hosts.append(mock_host)
mock_inventory = MagicMock()
@@ -98,37 +102,45 @@ class TestStrategyBase(unittest.TestCase):
mock_tqm._unreachable_hosts = ["host02"]
self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
- def test_strategy_base_queue_task(self):
- fake_loader = DictDataLoader()
-
- workers = []
- for i in range(0, 3):
- worker_main_q = MagicMock()
- worker_main_q.put.return_value = None
- worker_result_q = MagicMock()
- workers.append([i, worker_main_q, worker_result_q])
+ @patch.object(WorkerProcess, 'run')
+ def test_strategy_base_queue_task(self, mock_worker):
+ def fake_run(self):
+ return
- mock_tqm = MagicMock()
- mock_tqm._final_q = MagicMock()
- mock_tqm.get_workers.return_value = workers
- mock_tqm.get_loader.return_value = fake_loader
+ mock_worker.run.side_effect = fake_run
- strategy_base = StrategyBase(tqm=mock_tqm)
- strategy_base._cur_worker = 0
- strategy_base._pending_results = 0
- strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
- self.assertEqual(strategy_base._cur_worker, 1)
- self.assertEqual(strategy_base._pending_results, 1)
- strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
- self.assertEqual(strategy_base._cur_worker, 2)
- self.assertEqual(strategy_base._pending_results, 2)
- strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
- self.assertEqual(strategy_base._cur_worker, 0)
- self.assertEqual(strategy_base._pending_results, 3)
- workers[0][1].put.side_effect = EOFError
- strategy_base._queue_task(host=MagicMock(), task=MagicMock(), task_vars=dict(), play_context=MagicMock())
- self.assertEqual(strategy_base._cur_worker, 1)
- self.assertEqual(strategy_base._pending_results, 3)
+ fake_loader = DictDataLoader()
+ mock_var_manager = MagicMock()
+ mock_host = MagicMock()
+ mock_host.has_hostkey = True
+ mock_inventory = MagicMock()
+ mock_options = MagicMock()
+ mock_options.module_path = None
+
+ tqm = TaskQueueManager(
+ inventory=mock_inventory,
+ variable_manager=mock_var_manager,
+ loader=fake_loader,
+ options=mock_options,
+ passwords=None,
+ )
+ tqm._initialize_processes(3)
+ tqm.hostvars = dict()
+
+ try:
+ strategy_base = StrategyBase(tqm=tqm)
+ strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 1)
+ self.assertEqual(strategy_base._pending_results, 1)
+ strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 2)
+ self.assertEqual(strategy_base._pending_results, 2)
+ strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
+ self.assertEqual(strategy_base._cur_worker, 0)
+ self.assertEqual(strategy_base._pending_results, 3)
+ finally:
+ tqm.cleanup()
+
def test_strategy_base_process_pending_results(self):
mock_tqm = MagicMock()
@@ -156,10 +168,12 @@ class TestStrategyBase(unittest.TestCase):
mock_iterator = MagicMock()
mock_iterator.mark_host_failed.return_value = None
+ mock_iterator.get_next_task_for_host.return_value = (None, None)
mock_host = MagicMock()
mock_host.name = 'test01'
mock_host.vars = dict()
+ mock_host.has_hostkey = True
mock_task = MagicMock()
mock_task._role = None
@@ -182,6 +196,7 @@ class TestStrategyBase(unittest.TestCase):
mock_inventory.get_host.side_effect = _get_host
mock_inventory.get_group.side_effect = _get_group
mock_inventory.clear_pattern_cache.return_value = None
+ mock_inventory.get_host_vars.return_value = {}
mock_var_mgr = MagicMock()
mock_var_mgr.set_host_variable.return_value = None
@@ -315,22 +330,15 @@ class TestStrategyBase(unittest.TestCase):
res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
self.assertEqual(res, [])
- def test_strategy_base_run_handlers(self):
- workers = []
- for i in range(0, 3):
- worker_main_q = MagicMock()
- worker_main_q.put.return_value = None
- worker_result_q = MagicMock()
- workers.append([i, worker_main_q, worker_result_q])
-
- mock_tqm = MagicMock()
- mock_tqm._final_q = MagicMock()
- mock_tqm.get_workers.return_value = workers
- mock_tqm.send_callback.return_value = None
-
+ @patch.object(WorkerProcess, 'run')
+ def test_strategy_base_run_handlers(self, mock_worker):
+ def fake_run(*args):
+ return
+ mock_worker.side_effect = fake_run
mock_play_context = MagicMock()
- mock_handler_task = MagicMock()
+ mock_handler_task = MagicMock(Handler)
+ mock_handler_task.action = 'foo'
mock_handler_task.get_name.return_value = "test handler"
mock_handler_task.has_triggered.return_value = False
@@ -341,10 +349,9 @@ class TestStrategyBase(unittest.TestCase):
mock_play = MagicMock()
mock_play.handlers = [mock_handler]
- mock_host = MagicMock()
+ mock_host = MagicMock(Host)
mock_host.name = "test01"
-
- mock_iterator = MagicMock()
+ mock_host.has_hostkey = True
mock_inventory = MagicMock()
mock_inventory.get_hosts.return_value = [mock_host]
@@ -355,8 +362,29 @@ class TestStrategyBase(unittest.TestCase):
mock_iterator = MagicMock
mock_iterator._play = mock_play
- strategy_base = StrategyBase(tqm=mock_tqm)
- strategy_base._inventory = mock_inventory
- strategy_base._notified_handlers = {"test handler": [mock_host]}
-
- result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
+ fake_loader = DictDataLoader()
+ mock_options = MagicMock()
+ mock_options.module_path = None
+
+ tqm = TaskQueueManager(
+ inventory=mock_inventory,
+ variable_manager=mock_var_mgr,
+ loader=fake_loader,
+ options=mock_options,
+ passwords=None,
+ )
+ tqm._initialize_processes(3)
+ tqm.hostvars = dict()
+
+ try:
+ strategy_base = StrategyBase(tqm=tqm)
+
+ strategy_base._inventory = mock_inventory
+ strategy_base._notified_handlers = {"test handler": [mock_host]}
+
+ task_result = TaskResult(Host('host01'), Handler(), dict(changed=False))
+ tqm._final_q.put(('host_task_ok', task_result))
+
+ result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
+ finally:
+ tqm.cleanup()
diff --git a/test/units/vars/test_variable_manager.py b/test/units/vars/test_variable_manager.py
index acd8e5c898..41f08138a5 100644
--- a/test/units/vars/test_variable_manager.py
+++ b/test/units/vars/test_variable_manager.py
@@ -173,6 +173,7 @@ class TestVariableManager(unittest.TestCase):
mock_task._role = None
mock_task.loop = None
mock_task.get_vars.return_value = dict(foo="bar")
+ mock_task.get_include_params.return_value = dict()
v = VariableManager()
self.assertEqual(v.get_vars(loader=fake_loader, task=mock_task, use_cache=False).get("foo"), "bar")
diff --git a/test/utils/ansible-playbook_integration_runner/ansible.cfg b/test/utils/ansible-playbook_integration_runner/ansible.cfg
new file mode 100644
index 0000000000..14c8065152
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/ansible.cfg
@@ -0,0 +1,2 @@
+[defaults]
+host_key_checking = False
diff --git a/test/utils/ansible-playbook_integration_runner/ec2.yml b/test/utils/ansible-playbook_integration_runner/ec2.yml
new file mode 100644
index 0000000000..55619776d9
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/ec2.yml
@@ -0,0 +1,41 @@
+- name: Launch Instance
+ ec2:
+ group_id: 'sg-07bb906d' # jenkins-slave_new
+ count: 1
+ instance_type: 'm3.large'
+ image: '{{ item.image }}'
+ wait: true
+ region: 'us-east-1'
+ keypair: '{{ keypair }}'
+ aws_access_key: "{{ aws_access_key|default(lookup('env', 'AWS_ACCESS_KEY')) }}"
+ aws_secret_key: "{{ aws_secret_key|default(lookup('env', 'AWS_SECRET_KEY')) }}"
+ instance_tags:
+ jenkins: jenkins_ansible_pr_test
+ register: ec2
+ with_items: slaves
+# We could do an async here, that would speed things up
+
+
+- name: Wait for SSH
+ wait_for:
+ host: "{{ item['instances'][0]['public_ip'] }}"
+ port: 22
+ delay: 10
+ timeout: 320
+ state: started
+ with_items: ec2.results
+
+- name: Wait a little longer for centos
+ pause: seconds=20
+
+- name: Add hosts group temporary inventory group with pem path
+ add_host:
+ name: "{{ item.1.platform }}-{{ ec2.results[item.0]['instances'][0]['public_ip'] }}"
+ groups: dynamic_hosts
+ ansible_ssh_host: "{{ ec2.results[item.0]['instances'][0]['public_ip'] }}"
+ ansible_ssh_private_key_file: '{{ pem_path }}'
+ ansible_ssh_user: "{{ item.1.ssh_user }}"
+ ec2_vars: "{{ ec2.results[item.0]['instances'][0] }}"
+ ec2_instance_ids: "{{ ec2.results[item.0]['instance_ids'] }}"
+ with_indexed_items: slaves
+
diff --git a/test/utils/ansible-playbook_integration_runner/inventory b/test/utils/ansible-playbook_integration_runner/inventory
new file mode 100644
index 0000000000..2302edae31
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/inventory
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/test/utils/ansible-playbook_integration_runner/main.yml b/test/utils/ansible-playbook_integration_runner/main.yml
new file mode 100644
index 0000000000..f1bd26b7ea
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/main.yml
@@ -0,0 +1,77 @@
+- hosts: all
+ connection: local
+ vars:
+ slaves:
+ - distribution: "Ubuntu"
+ version: "12.04"
+ image: "ami-309ddf5a"
+ ssh_user: "ubuntu"
+ platform: "ubuntu-12.04-x86_64"
+ - distribution: "Ubuntu"
+ version: "14.04"
+ image: "ami-d06632ba"
+ ssh_user: "ubuntu"
+ platform: "ubuntu-14.04-x86_64"
+ - distribution: "CentOS"
+ version: "6.5"
+ image: "ami-8997afe0"
+ ssh_user: "root"
+ platform: "centos-6.5-x86_64"
+ - distribution: "CentOS"
+ version: "7"
+ image: "ami-61bbf104"
+ ssh_user: "centos"
+ platform: "centos-7-x86_64"
+ - distribution: "Fedora"
+ version: "23"
+ image: "ami-518bfb3b"
+ ssh_user: "fedora"
+ platform: "fedora-23-x86_64"
+
+ tasks:
+ - debug: var=ansible_version
+ - include: ec2.yml
+ when: groups['dynamic_hosts'] is not defined
+
+# Have to hardcode these per-slave. We can't even run setup yet so we can't
+# introspect what they have.
+- hosts: dynamic_hosts
+ sudo: true
+ gather_facts: False
+ tasks:
+ - name: Install packages that let setup and package manager modules run
+ raw: dnf install -y python2 python2-dnf libselinux-python
+ when: "'fedora-23' in '{{ inventory_hostname }}'"
+
+- hosts: dynamic_hosts
+ sudo: true
+ vars:
+ credentials_file: ''
+ test_flags: ""
+ make_target: "non_destructive"
+ #pre_tasks:
+ roles:
+ - { role: ansible_test_deps, tags: ansible_test_deps }
+ - { role: run_integration,
+ tags: run_integration,
+ run_integration_test_flags: "{{ test_flags }}",
+ run_integration_credentials_file: "{{ credentials_file }}",
+ run_integration_make_target: "{{ make_target }}", }
+ tasks:
+
+ - name: Kill ec2 instances
+ sudo: false
+ local_action:
+ module: ec2
+ state: absent
+ region: 'us-east-1'
+ instance_ids: "{{ hostvars[item]['ec2_instance_ids'] }}"
+ when: hostvars[item]['ec2_instance_ids'] is defined and item == inventory_hostname
+ with_items: groups['dynamic_hosts']
+
+ - set_fact:
+ ansible_connection: local
+
+ - name: Fail
+ shell: 'echo "{{ inventory_hostname }}, Failed" && exit 1'
+ when: "'rc' not in test_results or test_results.rc != 0"
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml
new file mode 100644
index 0000000000..2264f0b20a
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/.travis.yml
@@ -0,0 +1,37 @@
+sudo: required
+dist: trusty
+language: python
+python:
+ - "2.7"
+services:
+ - docker
+env:
+ global:
+ - PATH="/usr/bin:$PATH"
+
+before_install:
+ # Ansible doesn't play well with virtualenv
+ - deactivate
+ - sudo apt-get update -qq
+ - sudo apt-get install docker-engine
+
+install:
+ - sudo pip install docker-py
+ # software-properties-common for ubuntu 14.04
+ # python-software-properties for ubuntu 12.04
+ - sudo apt-get install -y sshpass software-properties-common python-software-properties
+ - sudo apt-add-repository -y ppa:ansible/ansible
+ - sudo apt-get update -qq
+ - sudo apt-get install -y ansible
+ - sudo rm /usr/bin/python && sudo ln -s /usr/bin/python2.7 /usr/bin/python
+ - ansible-galaxy install -r test/requirements.yml -p test/roles/
+
+script:
+ # Ensure any invocation of ansible-playbook (i.e. sudo) results in host_key_checking disabled
+ - sudo ansible all -i "127.0.0.1," -m lineinfile -a "regexp=^#host_key_checking dest=/etc/ansible/ansible.cfg line='host_key_checking = False'" -c local
+ - ansible-playbook -i test/inventory test/main.yml --syntax-check
+ - sudo ansible-playbook -i test/inventory test/main.yml
+
+notifications:
+ # notify ansible galaxy of results
+ webhooks: http://goo.gl/nSuq9h
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md
new file mode 100644
index 0000000000..09ffacacaf
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/README.md
@@ -0,0 +1,6 @@
+[![Build Status](https://travis-ci.org/chrismeyersfsu/ansible_test_deps.svg)](https://travis-ci.org/chrismeyersfsu/ansible_test_deps)
+
+ansible_test_deps
+=========
+
+Install needed packages to run ansible integration tests.
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml
new file mode 100644
index 0000000000..c7837fc56b
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for .
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml
new file mode 100644
index 0000000000..050cdd1234
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for .
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info
new file mode 100644
index 0000000000..ffc298fff6
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/.galaxy_install_info
@@ -0,0 +1 @@
+{install_date: 'Tue Dec 8 15:06:28 2015', version: master}
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml
new file mode 100644
index 0000000000..07c15d619e
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/meta/main.yml
@@ -0,0 +1,23 @@
+---
+galaxy_info:
+ author: Chris Meyers
+ description: install ansible integration test dependencies
+ company: Ansible
+ license: license (GPLv2, CC-BY, etc)
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 6
+ - 7
+ - name: Ubuntu
+ versions:
+ - precise
+ - trusty
+ galaxy_tags:
+ - testing
+ - integration
+ - ansible
+ - dependencies
+dependencies: []
+
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml
new file mode 100644
index 0000000000..832138527f
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/tasks/main.yml
@@ -0,0 +1,102 @@
+---
+
+- apt: update_cache=yes
+ when: ansible_os_family == 'Debian'
+
+- name: Install sudo
+ package: name=sudo state=installed
+ ignore_errors: true
+
+- name: Install RH epel
+ yum: name="epel-release" state=installed
+ sudo: true
+ when: ansible_distribution in ('CentOS', 'RedHat')
+
+- name: Install RH ansible dependencies
+ package: name="{{ item }}" state=installed
+ sudo: true
+ with_items:
+ - python-pip
+ - python-httplib2
+ - rsync
+ - subversion
+ - mercurial
+ - git
+ - rubygems
+ - unzip
+ - openssl
+ - make
+ - gcc
+ - gawk
+ - python-devel
+ - libselinux-python
+ - python-virtualenv
+ - yum
+ - yum-metadata-parser
+ - redhat-rpm-config
+ when: ansible_os_family == 'RedHat'
+
+- name: Install Debian ansible dependencies
+ apt: name="{{ item }}" state=installed update_cache=yes
+ sudo: true
+ with_items:
+ - python-pip
+ - python-httplib2
+ - rsync
+ - subversion
+ - mercurial
+ - git
+ - unzip
+ - gawk
+ - python-dev
+ - python-virtualenv
+ when: ansible_os_family == 'Debian'
+
+- name: update ca certificates
+ sudo: true
+ yum: name=ca-certificates state=latest
+ when: ansible_os_family == 'RedHat'
+
+- name: Install ubuntu 12.04 ansible dependencies
+ apt: name="{{ item }}" state=installed update_cache=yes
+ sudo: true
+ with_items:
+ - rubygems
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04"
+
+- name: Install ubuntu 14.04 ansible dependencies
+ apt: name="{{ item }}" state=installed update_cache=yes
+ sudo: true
+ with_items:
+ - rubygems-integration
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "14.04"
+
+# Not sure why CentOS 6 is working without this....
+#- name: Install Red Hat 6 ansible dependencies
+# yum: name="{{ item }}" state=installed
+# sudo: true
+# with_items:
+# - python-crypto2.6
+# when: ansible_distribution in ('CentOS', 'RedHat') and ansible_distribution_major_version == "6"
+
+- name: Install ansible pip deps
+ sudo: true
+ pip: name="{{ item }}"
+ with_items:
+ - PyYAML
+ - Jinja2
+ - paramiko
+
+- name: Install ubuntu 12.04 ansible pip deps
+ sudo: true
+ pip: name="{{ item }}" state=latest
+ with_items:
+ - pycrypto
+ when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == "12.04"
+
+- name: Remove tty sudo requirement
+ sudo: true
+ lineinfile: "dest=/etc/sudoers regexp='^Defaults[ , ]*requiretty' line='#Defaults requiretty'"
+ when: ansible_os_family == 'RedHat'
+
+
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory
new file mode 100644
index 0000000000..2302edae31
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/inventory
@@ -0,0 +1 @@
+localhost ansible_connection=local
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml
new file mode 100644
index 0000000000..b66d699d5d
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/main.yml
@@ -0,0 +1,29 @@
+---
+- name: Bring up docker containers
+ hosts: localhost
+ gather_facts: false
+ vars:
+ inventory:
+ - name: ansible_test_deps_host_1
+ image: "chrismeyers/centos6"
+ - name: ansible_test_deps_host_2
+ image: "chrismeyers/ubuntu12.04"
+ - name: ansible_test_deps_host_3
+ image: "ubuntu-upstart:14.04"
+ roles:
+ - { role: provision_docker, provision_docker_company: 'ansible', provision_docker_inventory: "{{ inventory }}" }
+
+- name: Run ansible_test_deps Tests
+ hosts: docker_containers
+ vars:
+ git_dir: "/tmp/ansible"
+ roles:
+ - { role: ansible_test_deps }
+ tasks:
+ - name: Clone ansible
+ git:
+ repo: "https://github.com/ansible/ansible.git"
+ dest: "{{ git_dir }}"
+ - name: Invoke ansible in hacking mode
+ shell: "cd {{ git_dir }} && . hacking/env-setup && ansible --version && ansible-playbook --version"
+
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml
new file mode 100644
index 0000000000..fa10641a72
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/requirements.yml
@@ -0,0 +1,2 @@
+- src: chrismeyersfsu.provision_docker
+ name: provision_docker
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps
new file mode 120000
index 0000000000..eb6d9edda4
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/test/roles/ansible_test_deps
@@ -0,0 +1 @@
+../../../ansible_test_deps \ No newline at end of file
diff --git a/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml
new file mode 100644
index 0000000000..a38c5fb042
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/ansible_test_deps/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for .
diff --git a/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml
new file mode 100644
index 0000000000..a833c96558
--- /dev/null
+++ b/test/utils/ansible-playbook_integration_runner/roles/run_integration/tasks/main.yml
@@ -0,0 +1,43 @@
+---
+- name: Sync ansible repo to ec2 instance
+ synchronize:
+ src: "{{ sync_dir }}/"
+ dest: "~/ansible"
+ no_log: true
+
+- name: Get ansible source dir
+ sudo: false
+ shell: "cd ~/ansible && pwd"
+ register: results
+
+- shell: "ls -la && . hacking/env-setup && cd test/integration && TEST_FLAGS='-e clean_working_dir=no' make {{ run_integration_make_target }}"
+ args:
+ chdir: "{{ results.stdout }}"
+ async: 3600
+ poll: 0
+ register: async_test_results
+ sudo: true
+ environment:
+ TEST_FLAGS: "{{ run_integration_test_flags|default(lookup('env', 'TEST_FLAGS')) }}"
+ CREDENTIALS_FILE: "{{ run_integration_credentials_file|default(lookup('env', 'CREDENTIALS_FILE')) }}"
+
+- name: poll for test results
+ async_status: jid="{{async_test_results.ansible_job_id}}"
+ register: test_results
+ until: test_results.finished
+ retries: 120
+ delay: 30
+ ignore_errors: true
+ no_log: true
+
+- name: save stdout test results for each host
+ local_action: copy
+ args:
+ dest: "{{sync_dir}}/{{inventory_hostname}}.stdout_results.txt"
+ content: "{{test_results.stdout}}"
+
+- name: save stderr test results for each host
+ local_action: copy
+ args:
+ dest: "{{sync_dir}}/{{inventory_hostname}}.stderr_results.txt"
+ content: "{{test_results.stderr}}"