summaryrefslogtreecommitdiff
path: root/lib/ansible
diff options
context:
space:
mode:
authorJames Cammarata <jimi@sngx.net>2015-05-03 21:47:26 -0500
committerJames Cammarata <jimi@sngx.net>2015-05-03 21:47:26 -0500
commitce3ef7f4c16e47d5a0b5600e1c56c177b7c93f0d (patch)
treeebb90eaba034dfb4ea8a3afe746a87f9b7dee66f /lib/ansible
parent8cf4452d48e583cfd59f96e67cfd34a1c35226e7 (diff)
downloadansible-ce3ef7f4c16e47d5a0b5600e1c56c177b7c93f0d.tar.gz
Making the switch to v2
Diffstat (limited to 'lib/ansible')
-rw-r--r--lib/ansible/__init__.py8
-rw-r--r--lib/ansible/cache/jsonfile.py143
-rw-r--r--lib/ansible/callbacks.py725
-rw-r--r--lib/ansible/cli/__init__.py447
-rw-r--r--lib/ansible/cli/adhoc.py156
-rw-r--r--lib/ansible/cli/doc.py283
-rw-r--r--lib/ansible/cli/galaxy.py491
-rw-r--r--lib/ansible/cli/playbook.py180
-rw-r--r--lib/ansible/cli/pull.py219
-rw-r--r--lib/ansible/cli/vault.py123
-rw-r--r--lib/ansible/compat/__init__.py27
-rw-r--r--lib/ansible/compat/tests/__init__.py40
-rw-r--r--lib/ansible/compat/tests/mock.py38
-rw-r--r--lib/ansible/compat/tests/unittest.py36
-rw-r--r--lib/ansible/config/__init__.py20
-rw-r--r--lib/ansible/constants.py47
-rw-r--r--lib/ansible/errors/__init__.py185
-rw-r--r--lib/ansible/errors/yaml_strings.py118
-rw-r--r--lib/ansible/executor/__init__.py21
-rw-r--r--lib/ansible/executor/connection_info.py270
-rw-r--r--lib/ansible/executor/module_common.py199
-rw-r--r--lib/ansible/executor/play_iterator.py302
-rw-r--r--lib/ansible/executor/playbook_executor.py211
-rw-r--r--lib/ansible/executor/process/__init__.py21
-rw-r--r--lib/ansible/executor/process/result.py176
-rw-r--r--lib/ansible/executor/process/worker.py155
-rw-r--r--lib/ansible/executor/stats.py51
-rw-r--r--lib/ansible/executor/task_executor.py454
-rw-r--r--lib/ansible/executor/task_queue_manager.py233
-rw-r--r--lib/ansible/executor/task_queue_manager.py: (renamed from lib/ansible/callback_plugins/__init__.py)0
-rw-r--r--lib/ansible/executor/task_result.py61
-rw-r--r--lib/ansible/galaxy/__init__.py70
-rwxr-xr-xlib/ansible/galaxy/api.py141
-rw-r--r--lib/ansible/galaxy/data/metadata_template.j245
-rw-r--r--lib/ansible/galaxy/data/readme38
-rw-r--r--lib/ansible/galaxy/role.py295
-rw-r--r--lib/ansible/inventory/__init__.py96
-rw-r--r--lib/ansible/inventory/dir.py31
-rw-r--r--lib/ansible/inventory/expand_hosts.py3
-rw-r--r--lib/ansible/inventory/group.py54
-rw-r--r--lib/ansible/inventory/host.py85
-rw-r--r--lib/ansible/inventory/ini.py58
-rw-r--r--lib/ansible/inventory/script.py36
-rw-r--r--lib/ansible/inventory/vars_plugins/noop.py2
-rw-r--r--lib/ansible/module_common.py193
-rw-r--r--lib/ansible/module_utils/basic.py68
-rw-r--r--lib/ansible/module_utils/powershell.ps16
-rw-r--r--lib/ansible/module_utils/vmware.py137
-rw-r--r--lib/ansible/modules/__init__.py20
m---------lib/ansible/modules/core7
m---------lib/ansible/modules/extras9
-rw-r--r--lib/ansible/new_inventory/__init__.py341
-rw-r--r--lib/ansible/new_inventory/group.py21
-rw-r--r--lib/ansible/new_inventory/host.py51
-rw-r--r--lib/ansible/parsing/__init__.py222
-rw-r--r--lib/ansible/parsing/mod_args.py278
-rw-r--r--lib/ansible/parsing/splitter.py273
-rw-r--r--lib/ansible/parsing/utils/__init__.py21
-rw-r--r--lib/ansible/parsing/utils/jsonify.py45
-rw-r--r--lib/ansible/parsing/vault/__init__.py603
-rw-r--r--lib/ansible/parsing/yaml/__init__.py21
-rw-r--r--lib/ansible/parsing/yaml/constructor.py91
-rw-r--r--lib/ansible/parsing/yaml/loader.py51
-rw-r--r--lib/ansible/parsing/yaml/objects.py65
-rw-r--r--lib/ansible/playbook/__init__.py887
-rw-r--r--lib/ansible/playbook/attribute.py (renamed from lib/ansible/errors.py)23
-rw-r--r--lib/ansible/playbook/base.py345
-rw-r--r--lib/ansible/playbook/become.py141
-rw-r--r--lib/ansible/playbook/block.py319
-rw-r--r--lib/ansible/playbook/conditional.py102
-rw-r--r--lib/ansible/playbook/handler.py53
-rw-r--r--lib/ansible/playbook/helpers.py116
-rw-r--r--lib/ansible/playbook/play.py1124
-rw-r--r--lib/ansible/playbook/playbook_include.py125
-rw-r--r--lib/ansible/playbook/role/__init__.py396
-rw-r--r--lib/ansible/playbook/role/definition.py175
-rw-r--r--lib/ansible/playbook/role/include.py49
-rw-r--r--lib/ansible/playbook/role/metadata.py91
-rw-r--r--lib/ansible/playbook/role/requirement.py166
-rw-r--r--lib/ansible/playbook/taggable.py95
-rw-r--r--lib/ansible/playbook/task.py616
-rw-r--r--lib/ansible/playbook/vars.py21
-rw-r--r--lib/ansible/playbook/vars_file.py21
-rw-r--r--lib/ansible/plugins/__init__.py (renamed from lib/ansible/utils/plugins.py)60
-rw-r--r--lib/ansible/plugins/action/__init__.py471
-rw-r--r--lib/ansible/plugins/action/add_host.py62
-rw-r--r--lib/ansible/plugins/action/assemble.py156
-rw-r--r--lib/ansible/plugins/action/assert.py65
-rw-r--r--lib/ansible/plugins/action/async.py70
-rw-r--r--lib/ansible/plugins/action/copy.py349
-rw-r--r--lib/ansible/plugins/action/debug.py48
-rw-r--r--lib/ansible/plugins/action/fail.py35
-rw-r--r--lib/ansible/plugins/action/fetch.py (renamed from lib/ansible/runner/action_plugins/fetch.py)119
-rw-r--r--lib/ansible/plugins/action/group_by.py39
-rw-r--r--lib/ansible/plugins/action/include_vars.py50
-rw-r--r--lib/ansible/plugins/action/normal.py29
-rw-r--r--lib/ansible/plugins/action/patch.py66
-rw-r--r--lib/ansible/plugins/action/pause.py136
-rw-r--r--lib/ansible/plugins/action/raw.py41
-rw-r--r--lib/ansible/plugins/action/script.py98
-rw-r--r--lib/ansible/plugins/action/set_fact.py38
-rw-r--r--lib/ansible/plugins/action/synchronize.py169
-rw-r--r--lib/ansible/plugins/action/template.py186
-rw-r--r--lib/ansible/plugins/action/unarchive.py114
-rw-r--r--lib/ansible/plugins/cache/__init__.py (renamed from lib/ansible/cache/__init__.py)9
-rw-r--r--lib/ansible/plugins/cache/base.py (renamed from lib/ansible/cache/base.py)32
-rw-r--r--lib/ansible/plugins/cache/memcached.py (renamed from lib/ansible/cache/memcached.py)6
-rw-r--r--lib/ansible/plugins/cache/memory.py (renamed from lib/ansible/cache/memory.py)4
-rw-r--r--lib/ansible/plugins/cache/redis.py (renamed from lib/ansible/cache/redis.py)17
-rw-r--r--lib/ansible/plugins/callback/__init__.py (renamed from lib/ansible/callback_plugins/noop.py)38
-rw-r--r--lib/ansible/plugins/callback/default.py136
-rw-r--r--lib/ansible/plugins/callback/minimal.py104
-rw-r--r--lib/ansible/plugins/connections/__init__.py95
-rw-r--r--lib/ansible/plugins/connections/accelerate.py (renamed from lib/ansible/runner/connection_plugins/accelerate.py)17
-rw-r--r--lib/ansible/plugins/connections/chroot.py (renamed from lib/ansible/runner/connection_plugins/chroot.py)2
-rw-r--r--lib/ansible/plugins/connections/funcd.py (renamed from lib/ansible/runner/connection_plugins/funcd.py)2
-rw-r--r--lib/ansible/plugins/connections/jail.py (renamed from lib/ansible/runner/connection_plugins/jail.py)2
-rw-r--r--lib/ansible/plugins/connections/libvirt_lxc.py (renamed from lib/ansible/runner/connection_plugins/libvirt_lxc.py)2
-rw-r--r--lib/ansible/plugins/connections/local.py132
-rw-r--r--lib/ansible/plugins/connections/paramiko_ssh.py (renamed from lib/ansible/runner/connection_plugins/paramiko_ssh.py)233
-rw-r--r--lib/ansible/plugins/connections/ssh.py462
-rw-r--r--lib/ansible/plugins/connections/winrm.py (renamed from lib/ansible/runner/connection_plugins/winrm.py)147
-rw-r--r--lib/ansible/plugins/connections/zone.py (renamed from lib/ansible/runner/connection_plugins/zone.py)2
l---------lib/ansible/plugins/filter1
-rw-r--r--lib/ansible/plugins/inventory/__init__.py82
-rw-r--r--lib/ansible/plugins/inventory/aggregate.py61
-rw-r--r--lib/ansible/plugins/inventory/directory.py52
-rw-r--r--lib/ansible/plugins/inventory/ini.py60
-rw-r--r--lib/ansible/plugins/lookup/__init__.py49
-rw-r--r--lib/ansible/plugins/lookup/cartesian.py (renamed from lib/ansible/runner/lookup_plugins/cartesian.py)37
-rw-r--r--lib/ansible/plugins/lookup/csvfile.py (renamed from lib/ansible/runner/lookup_plugins/csvfile.py)23
-rw-r--r--lib/ansible/plugins/lookup/dict.py (renamed from lib/ansible/runner/lookup_plugins/dict.py)25
-rw-r--r--lib/ansible/plugins/lookup/dnstxt.py (renamed from lib/ansible/runner/lookup_plugins/dnstxt.py)24
-rw-r--r--lib/ansible/plugins/lookup/env.py (renamed from lib/ansible/runner/lookup_plugins/env.py)17
-rw-r--r--lib/ansible/plugins/lookup/etcd.py (renamed from lib/ansible/runner/lookup_plugins/etcd.py)19
-rw-r--r--lib/ansible/plugins/lookup/file.py (renamed from lib/ansible/runner/lookup_plugins/file.py)33
-rw-r--r--lib/ansible/plugins/lookup/fileglob.py (renamed from lib/ansible/runner/lookup_plugins/fileglob.py)17
-rw-r--r--lib/ansible/plugins/lookup/first_found.py (renamed from lib/ansible/runner/lookup_plugins/first_found.py)54
-rw-r--r--lib/ansible/plugins/lookup/flattened.py (renamed from lib/ansible/runner/lookup_plugins/flattened.py)50
-rw-r--r--lib/ansible/plugins/lookup/indexed_items.py (renamed from lib/ansible/runner/lookup_plugins/template.py)20
-rw-r--r--lib/ansible/plugins/lookup/inventory_hostnames.py (renamed from lib/ansible/runner/lookup_plugins/indexed_items.py)35
-rw-r--r--lib/ansible/plugins/lookup/items.py30
-rw-r--r--lib/ansible/plugins/lookup/lines.py (renamed from lib/ansible/runner/lookup_plugins/lines.py)19
-rw-r--r--lib/ansible/plugins/lookup/nested.py51
-rw-r--r--lib/ansible/plugins/lookup/password.py (renamed from lib/ansible/runner/lookup_plugins/password.py)61
-rw-r--r--lib/ansible/plugins/lookup/pipe.py (renamed from lib/ansible/runner/lookup_plugins/pipe.py)17
-rw-r--r--lib/ansible/plugins/lookup/random_choice.py (renamed from lib/ansible/runner/lookup_plugins/random_choice.py)12
-rw-r--r--lib/ansible/plugins/lookup/redis_kv.py (renamed from lib/ansible/runner/lookup_plugins/redis_kv.py)25
-rw-r--r--lib/ansible/plugins/lookup/sequence.py (renamed from lib/ansible/runner/lookup_plugins/sequence.py)31
-rw-r--r--lib/ansible/plugins/lookup/subelements.py (renamed from lib/ansible/runner/lookup_plugins/subelements.py)34
-rw-r--r--lib/ansible/plugins/lookup/template.py45
-rw-r--r--lib/ansible/plugins/lookup/together.py (renamed from lib/ansible/runner/lookup_plugins/together.py)36
-rw-r--r--lib/ansible/plugins/lookup/url.py (renamed from lib/ansible/runner/lookup_plugins/url.py)14
-rw-r--r--lib/ansible/plugins/shell/__init__.py21
-rw-r--r--lib/ansible/plugins/shell/csh.py (renamed from lib/ansible/runner/shell_plugins/csh.py)4
-rw-r--r--lib/ansible/plugins/shell/fish.py (renamed from lib/ansible/runner/shell_plugins/fish.py)4
-rw-r--r--lib/ansible/plugins/shell/powershell.py (renamed from lib/ansible/runner/shell_plugins/powershell.py)95
-rw-r--r--lib/ansible/plugins/shell/sh.py (renamed from lib/ansible/runner/shell_plugins/sh.py)4
-rw-r--r--lib/ansible/plugins/strategies/__init__.py432
-rw-r--r--lib/ansible/plugins/strategies/free.py151
-rw-r--r--lib/ansible/plugins/strategies/linear.py307
-rw-r--r--lib/ansible/plugins/vars/__init__.py21
-rw-r--r--lib/ansible/runner/__init__.py1517
-rw-r--r--lib/ansible/runner/action_plugins/__init__.py0
-rw-r--r--lib/ansible/runner/action_plugins/add_host.py111
-rw-r--r--lib/ansible/runner/action_plugins/assemble.py158
-rw-r--r--lib/ansible/runner/action_plugins/assert.py64
-rw-r--r--lib/ansible/runner/action_plugins/async.py48
-rw-r--r--lib/ansible/runner/action_plugins/copy.py381
-rw-r--r--lib/ansible/runner/action_plugins/debug.py60
-rw-r--r--lib/ansible/runner/action_plugins/fail.py44
-rw-r--r--lib/ansible/runner/action_plugins/group_by.py108
-rw-r--r--lib/ansible/runner/action_plugins/include_vars.py56
-rw-r--r--lib/ansible/runner/action_plugins/normal.py59
-rw-r--r--lib/ansible/runner/action_plugins/patch.py69
-rw-r--r--lib/ansible/runner/action_plugins/pause.py139
-rw-r--r--lib/ansible/runner/action_plugins/raw.py54
-rw-r--r--lib/ansible/runner/action_plugins/script.py136
-rw-r--r--lib/ansible/runner/action_plugins/set_fact.py47
-rw-r--r--lib/ansible/runner/action_plugins/synchronize.py218
-rw-r--r--lib/ansible/runner/action_plugins/template.py179
-rw-r--r--lib/ansible/runner/action_plugins/unarchive.py121
-rw-r--r--lib/ansible/runner/action_plugins/win_copy.py377
-rw-r--r--lib/ansible/runner/action_plugins/win_template.py146
-rw-r--r--lib/ansible/runner/connection.py53
-rw-r--r--lib/ansible/runner/connection_plugins/__init__.py0
-rw-r--r--lib/ansible/runner/connection_plugins/fireball.py153
-rw-r--r--lib/ansible/runner/connection_plugins/local.py129
-rw-r--r--lib/ansible/runner/connection_plugins/ssh.py460
-rw-r--r--lib/ansible/runner/filter_plugins/__init__.py0
-rw-r--r--lib/ansible/runner/filter_plugins/core.py351
-rw-r--r--lib/ansible/runner/filter_plugins/ipaddr.py659
-rw-r--r--lib/ansible/runner/filter_plugins/mathstuff.py126
-rw-r--r--lib/ansible/runner/lookup_plugins/__init__.py0
-rwxr-xr-xlib/ansible/runner/lookup_plugins/consul_kv.py128
-rw-r--r--lib/ansible/runner/lookup_plugins/dig.py212
-rw-r--r--lib/ansible/runner/lookup_plugins/inventory_hostnames.py48
-rw-r--r--lib/ansible/runner/lookup_plugins/nested.py73
-rw-r--r--lib/ansible/runner/poller.py115
-rw-r--r--lib/ansible/runner/return_data.py58
-rw-r--r--lib/ansible/runner/shell_plugins/__init__.py0
-rw-r--r--lib/ansible/template/__init__.py295
-rw-r--r--lib/ansible/template/safe_eval.py122
-rw-r--r--lib/ansible/template/template.py (renamed from lib/ansible/runner/lookup_plugins/items.py)35
-rw-r--r--lib/ansible/template/vars.py88
-rw-r--r--lib/ansible/test-requirements.txt16
-rw-r--r--lib/ansible/utils/__init__.py1646
-rw-r--r--lib/ansible/utils/boolean.py29
-rw-r--r--lib/ansible/utils/cmd_functions.py59
-rw-r--r--lib/ansible/utils/color.py (renamed from lib/ansible/color.py)26
-rw-r--r--lib/ansible/utils/debug.py18
-rw-r--r--lib/ansible/utils/display.py142
-rw-r--r--lib/ansible/utils/display_functions.py63
-rw-r--r--lib/ansible/utils/encrypt.py49
-rw-r--r--lib/ansible/utils/hashing.py7
-rw-r--r--lib/ansible/utils/listify.py66
-rw-r--r--lib/ansible/utils/module_docs.py4
l---------lib/ansible/utils/module_docs_fragments1
-rw-r--r--lib/ansible/utils/module_docs_fragments/__init__.py0
-rw-r--r--lib/ansible/utils/module_docs_fragments/aws.py78
-rw-r--r--lib/ansible/utils/module_docs_fragments/cloudstack.py62
-rw-r--r--lib/ansible/utils/module_docs_fragments/files.py78
-rw-r--r--lib/ansible/utils/module_docs_fragments/openstack.py103
-rw-r--r--lib/ansible/utils/module_docs_fragments/rackspace.py122
-rw-r--r--lib/ansible/utils/path.py37
-rw-r--r--lib/ansible/utils/string_functions.py18
-rw-r--r--lib/ansible/utils/su_prompts.py60
-rw-r--r--lib/ansible/utils/template.py404
-rw-r--r--lib/ansible/utils/unicode.py37
-rw-r--r--lib/ansible/utils/vars.py51
-rw-r--r--lib/ansible/utils/vault.py593
-rw-r--r--lib/ansible/vars/__init__.py317
-rw-r--r--lib/ansible/vars/hostvars.py47
233 files changed, 16914 insertions, 14114 deletions
diff --git a/lib/ansible/__init__.py b/lib/ansible/__init__.py
index ba5ca83b72..8637adb54d 100644
--- a/lib/ansible/__init__.py
+++ b/lib/ansible/__init__.py
@@ -14,5 +14,9 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-__version__ = '2.0.0'
-__author__ = 'Michael DeHaan'
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__version__ = '2.0'
diff --git a/lib/ansible/cache/jsonfile.py b/lib/ansible/cache/jsonfile.py
deleted file mode 100644
index 0bade893a8..0000000000
--- a/lib/ansible/cache/jsonfile.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-import errno
-import codecs
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-from ansible import constants as C
-from ansible import utils
-from ansible.cache.base import BaseCacheModule
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by json files.
- """
- def __init__(self, *args, **kwargs):
-
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._cache = {}
- self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
- if not self._cache_dir:
- utils.exit("error, fact_caching_connection is not set, cannot use fact cache")
-
- if not os.path.exists(self._cache_dir):
- try:
- os.makedirs(self._cache_dir)
- except (OSError,IOError), e:
- utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
- return None
-
- def get(self, key):
-
- if key in self._cache:
- return self._cache.get(key)
-
- if self.has_expired(key):
- raise KeyError
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'r', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
- else:
- value = json.load(f)
- self._cache[key] = value
- return value
- finally:
- f.close()
-
- def set(self, key, value):
-
- self._cache[key] = value
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'w', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
- else:
- f.write(utils.jsonify(value))
- finally:
- f.close()
-
- def has_expired(self, key):
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- st = os.stat(cachefile)
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- if time.time() - st.st_mtime <= self._timeout:
- return False
-
- if key in self._cache:
- del self._cache[key]
- return True
-
- def keys(self):
- keys = []
- for k in os.listdir(self._cache_dir):
- if not (k.startswith('.') or self.has_expired(k)):
- keys.append(k)
- return keys
-
- def contains(self, key):
- cachefile = "%s/%s" % (self._cache_dir, key)
-
- if key in self._cache:
- return True
-
- if self.has_expired(key):
- return False
- try:
- st = os.stat(cachefile)
- return True
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- def delete(self, key):
- del self._cache[key]
- try:
- os.remove("%s/%s" % (self._cache_dir, key))
- except (OSError,IOError), e:
- pass #TODO: only pass on non existing?
-
- def flush(self):
- self._cache = {}
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
diff --git a/lib/ansible/callbacks.py b/lib/ansible/callbacks.py
deleted file mode 100644
index 39d3a8d442..0000000000
--- a/lib/ansible/callbacks.py
+++ /dev/null
@@ -1,725 +0,0 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import utils
-import sys
-import getpass
-import os
-import subprocess
-import random
-import fnmatch
-import tempfile
-import fcntl
-import constants
-import locale
-from ansible.color import stringc
-from ansible.module_utils import basic
-from ansible.utils.unicode import to_unicode, to_bytes
-
-import logging
-if constants.DEFAULT_LOG_PATH != '':
- path = constants.DEFAULT_LOG_PATH
-
- if (os.path.exists(path) and not os.access(path, os.W_OK)) and not os.access(os.path.dirname(path), os.W_OK):
- sys.stderr.write("log file at %s is not writeable, aborting\n" % path)
- sys.exit(1)
-
-
- logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
- mypid = str(os.getpid())
- user = getpass.getuser()
- logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
-
-callback_plugins = []
-
-def load_callback_plugins():
- global callback_plugins
- callback_plugins = [x for x in utils.plugins.callback_loader.all()]
-
-def get_cowsay_info():
- if constants.ANSIBLE_NOCOWS:
- return (None, None)
- cowsay = None
- if os.path.exists("/usr/bin/cowsay"):
- cowsay = "/usr/bin/cowsay"
- elif os.path.exists("/usr/games/cowsay"):
- cowsay = "/usr/games/cowsay"
- elif os.path.exists("/usr/local/bin/cowsay"):
- # BSD path for cowsay
- cowsay = "/usr/local/bin/cowsay"
- elif os.path.exists("/opt/local/bin/cowsay"):
- # MacPorts path for cowsay
- cowsay = "/opt/local/bin/cowsay"
-
- noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
- if cowsay and noncow == 'random':
- cmd = subprocess.Popen([cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- cows = out.split()
- cows.append(False)
- noncow = random.choice(cows)
- return (cowsay, noncow)
-
-cowsay, noncow = get_cowsay_info()
-
-def log_lockfile():
- # create the path for the lockfile and open it
- tempdir = tempfile.gettempdir()
- uid = os.getuid()
- path = os.path.join(tempdir, ".ansible-lock.%s" % uid)
- lockfile = open(path, 'w')
- # use fcntl to set FD_CLOEXEC on the file descriptor,
- # so that we don't leak the file descriptor later
- lockfile_fd = lockfile.fileno()
- old_flags = fcntl.fcntl(lockfile_fd, fcntl.F_GETFD)
- fcntl.fcntl(lockfile_fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
- return lockfile
-
-LOG_LOCK = log_lockfile()
-
-def log_flock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_EX)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_EX)
- except OSError:
- pass
-
-
-def log_unflock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_UN)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_UN)
- except OSError:
- pass
-
-def set_playbook(callback, playbook):
- ''' used to notify callback plugins of playbook context '''
- callback.playbook = playbook
- for callback_plugin in callback_plugins:
- callback_plugin.playbook = playbook
-
-def set_play(callback, play):
- ''' used to notify callback plugins of context '''
- callback.play = play
- for callback_plugin in callback_plugins:
- callback_plugin.play = play
-
-def set_task(callback, task):
- ''' used to notify callback plugins of context '''
- callback.task = task
- for callback_plugin in callback_plugins:
- callback_plugin.task = task
-
-def display(msg, color=None, stderr=False, screen_only=False, log_only=False, runner=None):
- # prevent a very rare case of interlaced multiprocess I/O
- log_flock(runner)
- msg2 = msg
- if color:
- msg2 = stringc(msg, color)
- if not log_only:
- if not stderr:
- try:
- print msg2
- except UnicodeEncodeError:
- print msg2.encode('utf-8')
- else:
- try:
- print >>sys.stderr, msg2
- except UnicodeEncodeError:
- print >>sys.stderr, msg2.encode('utf-8')
- if constants.DEFAULT_LOG_PATH != '':
- while msg.startswith("\n"):
- msg = msg.replace("\n","")
- if not screen_only:
- if color == 'red':
- logger.error(msg)
- else:
- logger.info(msg)
- log_unflock(runner)
-
-def call_callback_module(method_name, *args, **kwargs):
-
- for callback_plugin in callback_plugins:
- # a plugin that set self.disabled to True will not be called
- # see osx_say.py example for such a plugin
- if getattr(callback_plugin, 'disabled', False):
- continue
- methods = [
- getattr(callback_plugin, method_name, None),
- getattr(callback_plugin, 'on_any', None)
- ]
- for method in methods:
- if method is not None:
- method(*args, **kwargs)
-
-def vv(msg, host=None):
- return verbose(msg, host=host, caplevel=1)
-
-def vvv(msg, host=None):
- return verbose(msg, host=host, caplevel=2)
-
-def vvvv(msg, host=None):
- return verbose(msg, host=host, caplevel=3)
-
-def verbose(msg, host=None, caplevel=2):
- msg = utils.sanitize_output(msg)
- if utils.VERBOSITY > caplevel:
- if host is None:
- display(msg, color='blue')
- else:
- display("<%s> %s" % (host, msg), color='blue')
-
-class AggregateStats(object):
- ''' holds stats about per-host activity during playbook runs '''
-
- def __init__(self):
-
- self.processed = {}
- self.failures = {}
- self.ok = {}
- self.dark = {}
- self.changed = {}
- self.skipped = {}
-
- def _increment(self, what, host):
- ''' helper function to bump a statistic '''
-
- self.processed[host] = 1
- prev = (getattr(self, what)).get(host, 0)
- getattr(self, what)[host] = prev+1
-
- def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
- ''' walk through all results and increment stats '''
-
- for (host, value) in runner_results.get('contacted', {}).iteritems():
- if not ignore_errors and (('failed' in value and bool(value['failed'])) or
- ('failed_when_result' in value and [value['failed_when_result']] or ['rc' in value and value['rc'] != 0])[0]):
- self._increment('failures', host)
- elif 'skipped' in value and bool(value['skipped']):
- self._increment('skipped', host)
- elif 'changed' in value and bool(value['changed']):
- if not setup and not poll:
- self._increment('changed', host)
- self._increment('ok', host)
- else:
- if not poll or ('finished' in value and bool(value['finished'])):
- self._increment('ok', host)
-
- for (host, value) in runner_results.get('dark', {}).iteritems():
- self._increment('dark', host)
-
-
- def summarize(self, host):
- ''' return information about a particular host '''
-
- return dict(
- ok = self.ok.get(host, 0),
- failures = self.failures.get(host, 0),
- unreachable = self.dark.get(host,0),
- changed = self.changed.get(host, 0),
- skipped = self.skipped.get(host, 0)
- )
-
-########################################################################
-
-def regular_generic_msg(hostname, result, oneline, caption):
- ''' output on the result of a module run that is not command '''
-
- if not oneline:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result,format=True))
- else:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result))
-
-
-def banner_cowsay(msg):
-
- if ": [" in msg:
- msg = msg.replace("[","")
- if msg.endswith("]"):
- msg = msg[:-1]
- runcmd = [cowsay,"-W", "60"]
- if noncow:
- runcmd.append('-f')
- runcmd.append(noncow)
- runcmd.append(msg)
- cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- return "%s\n" % out
-
-def banner_normal(msg):
-
- width = 78 - len(msg)
- if width < 3:
- width = 3
- filler = "*" * width
- return "\n%s %s " % (msg, filler)
-
-def banner(msg):
- if cowsay:
- try:
- return banner_cowsay(msg)
- except OSError:
- # somebody cleverly deleted cowsay or something during the PB run. heh.
- return banner_normal(msg)
- return banner_normal(msg)
-
-def command_generic_msg(hostname, result, oneline, caption):
- ''' output the result of a command run '''
-
- rc = result.get('rc', '0')
- stdout = result.get('stdout','')
- stderr = result.get('stderr', '')
- msg = result.get('msg', '')
-
- hostname = hostname.encode('utf-8')
- caption = caption.encode('utf-8')
-
- if not oneline:
- buf = "%s | %s | rc=%s >>\n" % (hostname, caption, result.get('rc',0))
- if stdout:
- buf += stdout
- if stderr:
- buf += stderr
- if msg:
- buf += msg
- return buf + "\n"
- else:
- if stderr:
- return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
- else:
- return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout)
-
-def host_report_msg(hostname, module_name, result, oneline):
- ''' summarize the JSON results for a particular host '''
-
- failed = utils.is_failed(result)
- msg = ('', None)
- if module_name in [ 'command', 'shell', 'raw' ] and 'ansible_job_id' not in result and result.get('parsed',True) != False:
- if not failed:
- msg = (command_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (command_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- else:
- if not failed:
- msg = (regular_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (regular_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- return msg
-
-###############################################
-
-class DefaultRunnerCallbacks(object):
- ''' no-op callbacks for API usage of Runner() if no callbacks are specified '''
-
- def __init__(self):
- pass
-
- def on_failed(self, host, res, ignore_errors=False):
- call_callback_module('runner_on_failed', host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- call_callback_module('runner_on_ok', host, res)
-
- def on_skipped(self, host, item=None):
- call_callback_module('runner_on_skipped', host, item=item)
-
- def on_unreachable(self, host, res):
- call_callback_module('runner_on_unreachable', host, res)
-
- def on_no_hosts(self):
- call_callback_module('runner_on_no_hosts')
-
- def on_async_poll(self, host, res, jid, clock):
- call_callback_module('runner_on_async_poll', host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- call_callback_module('runner_on_async_ok', host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- call_callback_module('runner_on_async_failed', host, res, jid)
-
- def on_file_diff(self, host, diff):
- call_callback_module('runner_on_file_diff', host, diff)
-
-########################################################################
-
-class CliRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks for use by /usr/bin/ansible '''
-
- def __init__(self):
- # set by /usr/bin/ansible later
- self.options = None
- self._async_notified = {}
-
- def on_failed(self, host, res, ignore_errors=False):
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_failed(host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- # hide magic variables used for ansible-playbook
- res.pop('verbose_override', None)
- res.pop('verbose_always', None)
-
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_ok(host, res)
-
- def on_unreachable(self, host, res):
- if type(res) == dict:
- res = res.get('msg','')
- display("%s | FAILED => %s" % (host, res), stderr=True, color='red', runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(
- self.options.tree, host,
- utils.jsonify(dict(failed=True, msg=res),format=True)
- )
- super(CliRunnerCallbacks, self).on_unreachable(host, res)
-
- def on_skipped(self, host, item=None):
- display("%s | skipped" % (host), runner=self.runner)
- super(CliRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("no hosts matched\n", stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- display("<job %s> polling on %s, %ss remaining" % (jid, host, clock), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_poll(host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- display("<job %s> finished on %s => %s"%(jid, host, utils.jsonify(res,format=True)), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- display("<job %s> FAILED on %s => %s"%(jid, host, utils.jsonify(res,format=True)), color='red', stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def _on_any(self, host, result):
- result2 = result.copy()
- result2.pop('invocation', None)
- (msg, color) = host_report_msg(host, self.options.module_name, result2, self.options.one_line)
- display(msg, color=color, runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(self.options.tree, host, utils.jsonify(result2,format=True))
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(CliRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks used for Runner() from /usr/bin/ansible-playbook '''
-
- def __init__(self, stats, verbose=None):
-
- if verbose is None:
- verbose = utils.VERBOSITY
-
- self.verbose = verbose
- self.stats = stats
- self._async_notified = {}
-
- def on_unreachable(self, host, results):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = None
- if type(results) == dict:
- item = results.get('item', None)
- if isinstance(item, unicode):
- item = utils.unicode.to_bytes(item)
- results = basic.json_dict_unicode_to_bytes(results)
- else:
- results = utils.unicode.to_bytes(results)
- host = utils.unicode.to_bytes(host)
- if item:
- msg = "fatal: [%s] => (item=%s) => %s" % (host, item, results)
- else:
- msg = "fatal: [%s] => %s" % (host, results)
- display(msg, color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_unreachable(host, results)
-
- def on_failed(self, host, results, ignore_errors=False):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- results2 = results.copy()
- results2.pop('invocation', None)
-
- item = results2.get('item', None)
- parsed = results2.get('parsed', True)
- module_msg = ''
- if not parsed:
- module_msg = results2.pop('msg', None)
- stderr = results2.pop('stderr', None)
- stdout = results2.pop('stdout', None)
- returned_msg = results2.pop('msg', None)
-
- if item:
- msg = "failed: [%s] => (item=%s) => %s" % (host, item, utils.jsonify(results2))
- else:
- msg = "failed: [%s] => %s" % (host, utils.jsonify(results2))
- display(msg, color='red', runner=self.runner)
-
- if stderr:
- display("stderr: %s" % stderr, color='red', runner=self.runner)
- if stdout:
- display("stdout: %s" % stdout, color='red', runner=self.runner)
- if returned_msg:
- display("msg: %s" % returned_msg, color='red', runner=self.runner)
- if not parsed and module_msg:
- display(module_msg, color='red', runner=self.runner)
- if ignore_errors:
- display("...ignoring", color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors=ignore_errors)
-
- def on_ok(self, host, host_result):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = host_result.get('item', None)
-
- host_result2 = host_result.copy()
- host_result2.pop('invocation', None)
- verbose_always = host_result2.pop('verbose_always', False)
- changed = host_result.get('changed', False)
- ok_or_changed = 'ok'
- if changed:
- ok_or_changed = 'changed'
-
- # show verbose output for non-setup module results if --verbose is used
- msg = ''
- if (not self.verbose or host_result2.get("verbose_override",None) is not
- None) and not verbose_always:
- if item:
- msg = "%s: [%s] => (item=%s)" % (ok_or_changed, host, item)
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result:
- msg = "%s: [%s]" % (ok_or_changed, host)
- else:
- # verbose ...
- if item:
- msg = "%s: [%s] => (item=%s) => %s" % (ok_or_changed, host, item, utils.jsonify(host_result2, format=verbose_always))
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result2:
- msg = "%s: [%s] => %s" % (ok_or_changed, host, utils.jsonify(host_result2, format=verbose_always))
-
- if msg != '':
- if not changed:
- display(msg, color='green', runner=self.runner)
- else:
- display(msg, color='yellow', runner=self.runner)
- if constants.COMMAND_WARNINGS and 'warnings' in host_result2 and host_result2['warnings']:
- for warning in host_result2['warnings']:
- display("warning: %s" % warning, color='purple', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_ok(host, host_result)
-
- def on_skipped(self, host, item=None):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- if constants.DISPLAY_SKIPPED_HOSTS:
- msg = ''
- if item:
- msg = "skipping: [%s] => (item=%s)" % (host, item)
- else:
- msg = "skipping: [%s]" % host
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("FATAL: no hosts matched or all hosts have already failed -- aborting\n", color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- msg = "<job %s> polling, %ss remaining"%(jid, clock)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_poll(host,res,jid,clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- msg = "<job %s> finished on %s"%(jid, host)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- msg = "<job %s> FAILED on %s" % (jid, host)
- display(msg, color='red', stderr=True, runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookCallbacks(object):
- ''' playbook.py callbacks used by /usr/bin/ansible-playbook '''
-
- def __init__(self, verbose=False):
-
- self.verbose = verbose
-
- def on_start(self):
- call_callback_module('playbook_on_start')
-
- def on_notify(self, host, handler):
- call_callback_module('playbook_on_notify', host, handler)
-
- def on_no_hosts_matched(self):
- display("skipping: no hosts matched", color='cyan')
- call_callback_module('playbook_on_no_hosts_matched')
-
- def on_no_hosts_remaining(self):
- display("\nFATAL: all hosts have already failed -- aborting", color='red')
- call_callback_module('playbook_on_no_hosts_remaining')
-
- def on_task_start(self, name, is_conditional):
- name = utils.unicode.to_bytes(name)
- msg = "TASK: [%s]" % name
- if is_conditional:
- msg = "NOTIFIED: [%s]" % name
-
- if hasattr(self, 'start_at'):
- self.start_at = utils.unicode.to_bytes(self.start_at)
- if name == self.start_at or fnmatch.fnmatch(name, self.start_at):
- # we found out match, we can get rid of this now
- del self.start_at
- elif self.task.role_name:
- # handle tasks prefixed with rolenames
- actual_name = name.split('|', 1)[1].lstrip()
- if actual_name == self.start_at or fnmatch.fnmatch(actual_name, self.start_at):
- del self.start_at
-
- if hasattr(self, 'start_at'): # we still have start_at so skip the task
- self.skip_task = True
- elif hasattr(self, 'step') and self.step:
- if isinstance(name, str):
- name = utils.unicode.to_unicode(name)
- msg = u'Perform task: %s (y/n/c): ' % name
- if sys.stdout.encoding:
- msg = to_bytes(msg, sys.stdout.encoding)
- else:
- msg = to_bytes(msg)
- resp = raw_input(msg)
- if resp.lower() in ['y','yes']:
- self.skip_task = False
- display(banner(msg))
- elif resp.lower() in ['c', 'continue']:
- self.skip_task = False
- self.step = False
- display(banner(msg))
- else:
- self.skip_task = True
- else:
- self.skip_task = False
- display(banner(msg))
-
- call_callback_module('playbook_on_task_start', name, is_conditional)
-
- def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
-
- if prompt and default is not None:
- msg = "%s [%s]: " % (prompt, default)
- elif prompt:
- msg = "%s: " % prompt
- else:
- msg = 'input for %s: ' % varname
-
- def do_prompt(prompt, private):
- if sys.stdout.encoding:
- msg = prompt.encode(sys.stdout.encoding)
- else:
- # when piping the output, or at other times when stdout
- # may not be the standard file descriptor, the stdout
- # encoding may not be set, so default to something sane
- msg = prompt.encode(locale.getpreferredencoding())
- if private:
- return getpass.getpass(msg)
- return raw_input(msg)
-
-
- if confirm:
- while True:
- result = do_prompt(msg, private)
- second = do_prompt("confirm " + msg, private)
- if result == second:
- break
- display("***** VALUES ENTERED DO NOT MATCH ****")
- else:
- result = do_prompt(msg, private)
-
- # if result is false and default is not None
- if not result and default is not None:
- result = default
-
-
- if encrypt:
- result = utils.do_encrypt(result, encrypt, salt_size, salt)
-
- # handle utf-8 chars
- result = to_unicode(result, errors='strict')
- call_callback_module( 'playbook_on_vars_prompt', varname, private=private, prompt=prompt,
- encrypt=encrypt, confirm=confirm, salt_size=salt_size, salt=None, default=default
- )
-
- return result
-
- def on_setup(self):
- display(banner("GATHERING FACTS"))
- call_callback_module('playbook_on_setup')
-
- def on_import_for_host(self, host, imported_file):
- msg = "%s: importing %s" % (host, imported_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_import_for_host', host, imported_file)
-
- def on_not_import_for_host(self, host, missing_file):
- msg = "%s: not importing file: %s" % (host, missing_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_not_import_for_host', host, missing_file)
-
- def on_play_start(self, name):
- display(banner("PLAY [%s]" % name))
- call_callback_module('playbook_on_play_start', name)
-
- def on_stats(self, stats):
- call_callback_module('playbook_on_stats', stats)
-
-
diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
new file mode 100644
index 0000000000..0b0494e032
--- /dev/null
+++ b/lib/ansible/cli/__init__.py
@@ -0,0 +1,447 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import operator
+import optparse
+import os
+import sys
+import time
+import yaml
+import re
+import getpass
+import subprocess
+
+from ansible import __version__
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.unicode import to_bytes
+
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    #FIXME: epilog parsing: OptionParser.format_epilog = lambda self, formatter: self.epilog

    def format_help(self, formatter=None, epilog=None):
        """Return the --help text with the option list sorted alphabetically
        by option string.

        :param formatter: optional optparse.HelpFormatter, forwarded to the
            base class (the original discarded it and always passed None)
        :param epilog: accepted for interface compatibility; currently unused
        """
        # sort in place so repeated calls stay sorted and cheap
        self.option_list.sort(key=operator.methodcaller('get_opt_string'))
        return optparse.OptionParser.format_help(self, formatter=formatter)
+
class CLI(object):
    ''' Shared code behind the bin/ansible* command line programs. '''

    # Sub-commands accepted by CLIs that dispatch on an action word;
    # subclasses override this list.
    VALID_ACTIONS = ['No Actions']

    # Inline markup used in module DOCUMENTATION strings; see tty_ify().
    _ITALIC = re.compile(r"I\(([^)]+)\)")
    _BOLD = re.compile(r"B\(([^)]+)\)")
    _MODULE = re.compile(r"M\(([^)]+)\)")
    _URL = re.compile(r"U\(([^)]+)\)")
    _CONST = re.compile(r"C\(([^)]+)\)")

    PAGER = 'less'
    LESS_OPTS = 'FRSX'  # -F (quit-if-one-screen) -R (allow raw ansi control chars)
                        # -S (chop long lines) -X (disable termcap init and de-init)

    def __init__(self, args, display=None):
        """
        Base init method for all command line programs.

        :param args: list of command line arguments (typically sys.argv)
        :param display: optional Display instance; a fresh one is created
            when None is given
        """
        self.args = args
        self.options = None
        self.parser = None
        self.action = None

        if display is None:
            self.display = Display()
        else:
            self.display = display

    def set_action(self):
        """
        Pull the action the user wants to execute out of self.args, leaving
        the remaining arguments in place.

        :raises AnsibleOptionsError: when no entry of self.args is a valid action
        """
        for i, arg in enumerate(self.args):
            if arg in self.VALID_ACTIONS:
                self.action = arg
                # remove the action word so option parsing only sees real args
                del self.args[i]
                break

        if not self.action:
            raise AnsibleOptionsError("Missing required action")

    def execute(self):
        """
        Actually runs a child defined method using the execute_<action> pattern.
        """
        fn = getattr(self, "execute_%s" % self.action)
        fn()

    def parse(self):
        """Build self.parser/self.options; must be supplied by subclasses."""
        raise NotImplementedError("Need to implement!")

    def run(self):
        """Perform the command's work; must be supplied by subclasses."""
        raise NotImplementedError("Need to implement!")

    @staticmethod
    def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
        ''' prompt for vault password and/or password change '''

        vault_pass = None
        new_vault_pass = None

        if ask_vault_pass:
            vault_pass = getpass.getpass(prompt="Vault password: ")

            if confirm_vault:
                vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
                if vault_pass != vault_pass2:
                    # FIX: original raised errors.AnsibleError, but only
                    # AnsibleError is in scope (NameError at runtime)
                    raise AnsibleError("Passwords do not match")

        if ask_new_vault_pass:
            new_vault_pass = getpass.getpass(prompt="New Vault password: ")

            if confirm_new:
                new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
                if new_vault_pass != new_vault_pass2:
                    raise AnsibleError("Passwords do not match")

        # enforce no newline chars at the end of passwords
        if vault_pass:
            vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
        if new_vault_pass:
            new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()

        return vault_pass, new_vault_pass

    def ask_passwords(self):
        ''' prompt for connection and become passwords if needed '''

        op = self.options
        sshpass = None
        becomepass = None
        become_prompt = ''

        if op.ask_pass:
            sshpass = getpass.getpass(prompt="SSH password: ")
            become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
            if sshpass:
                sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
        else:
            become_prompt = "%s password: " % op.become_method.upper()

        if op.become_ask_pass:
            becomepass = getpass.getpass(prompt=become_prompt)
            # an empty become password falls back to the SSH password
            if op.ask_pass and becomepass == '':
                becomepass = sshpass
            if becomepass:
                becomepass = to_bytes(becomepass)

        return (sshpass, becomepass)

    def normalize_become_options(self):
        ''' this keeps backwards compatibility with sudo/su self.options '''
        self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
        self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER

        if self.options.become:
            pass
        elif self.options.sudo:
            self.options.become = True
            self.options.become_method = 'sudo'
        elif self.options.su:
            self.options.become = True
            # FIX: original assigned to a bare `options.become_method`,
            # which raised NameError instead of setting the option
            self.options.become_method = 'su'

    def validate_conflicts(self):
        ''' check for conflicting options '''

        op = self.options

        # Check for vault related conflicts
        if op.ask_vault_pass and op.vault_password_file:
            self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")

        # Check for privilege escalation conflicts: at most one of the su,
        # sudo and become option families may be used at a time.
        su_used = op.su or op.su_user or op.ask_su_pass
        sudo_used = op.sudo or op.sudo_user or op.ask_sudo_pass
        become_used = op.become or op.become_user or op.become_ask_pass

        if (su_used and sudo_used) or (su_used and become_used) or (sudo_used and become_used):
            self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                              "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
                              "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
                              " are exclusive of each other")

    @staticmethod
    def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False,
                    async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False, epilog=None):
        ''' create an options parser for most ansible scripts '''

        #FIXME: implement epilog parsing
        #OptionParser.format_epilog = lambda self, formatter: self.epilog

        # base opts
        parser = SortedOptParser(usage, version=CLI.version("%prog"))
        parser.add_option('-v', '--verbose', dest='verbosity', default=0, action="count",
            help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")

        if runtask_opts:
            parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
                help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
            parser.add_option('-i', '--inventory-file', dest='inventory',
                help="specify inventory host file (default=%s)" % C.DEFAULT_HOST_LIST,
                default=C.DEFAULT_HOST_LIST)
            parser.add_option('--list-hosts', dest='listhosts', action='store_true',
                help='outputs a list of matching hosts; does not execute anything else')
            parser.add_option('-M', '--module-path', dest='module_path',
                help="specify path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH, default=None)
            parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
                help="set additional variables as key=value or YAML/JSON", default=[])

        if vault_opts:
            parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
                help='ask for vault password')
            parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE,
                dest='vault_password_file', help="vault password file")

        # NOTE(review): '-t' is claimed both here (--tags) and under
        # output_opts (--tree); a caller enabling both will get an
        # optparse.OptionConflictError -- confirm no CLI does that.
        if subset_opts:
            parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
                help='further limit selected hosts to an additional pattern')
            parser.add_option('-t', '--tags', dest='tags', default='all',
                help="only run plays and tasks tagged with these values")
            parser.add_option('--skip-tags', dest='skip_tags',
                help="only run plays and tasks whose tags do not match these values")

        if output_opts:
            parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
                help='condense output')
            parser.add_option('-t', '--tree', dest='tree', default=None,
                help='log output to this directory')

        if runas_opts:
            # priv user defaults to root later on to enable detecting when this option was given here
            parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
                help='ask for sudo password (deprecated, use become)')
            parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
                help='ask for su password (deprecated, use become)')
            parser.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
                help="run operations with sudo (nopasswd) (deprecated, use become)")
            parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
                help='desired sudo user (default=root) (deprecated, use become)')
            parser.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
                help='run operations with su (deprecated, use become)')
            parser.add_option('-R', '--su-user', default=None,
                help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)

            # consolidated privilege escalation (become)
            parser.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
                help="run operations with become (nopasswd implied)")
            parser.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='string',
                help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
            parser.add_option('--become-user', default=None, dest='become_user', type='string',
                help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
            parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
                help='ask for privilege escalation password')

        if connect_opts:
            parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
                help='ask for connection password')
            parser.add_option('--private-key', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
                help='use this file to authenticate the connection')
            parser.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
                help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
            parser.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
                help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
            parser.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
                help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)

        if async_opts:
            parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int',
                dest='poll_interval',
                help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
            parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
                help='run asynchronously, failing after X seconds (default=N/A)')

        if check_opts:
            parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
                help="don't make any changes; instead, try to predict some of the changes that may occur")
            parser.add_option('--syntax-check', dest='syntax', action='store_true',
                help="perform a syntax check on the playbook, but do not execute it")

        if diff_opts:
            parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
                help="when changing (small) files and templates, show the differences in those files; works great with --check"
            )

        if meta_opts:
            parser.add_option('--force-handlers', dest='force_handlers', action='store_true',
                help="run handlers even if a task fails")
            parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
                help="clear the fact cache")

        return parser

    @staticmethod
    def version(prog):
        ''' return ansible version '''
        result = "{0} {1}".format(prog, __version__)
        gitinfo = CLI._gitinfo()
        if gitinfo:
            result = result + " {0}".format(gitinfo)
        result = result + "\n  configured module search path = %s" % C.DEFAULT_MODULE_PATH
        return result

    @staticmethod
    def version_info(gitinfo=False):
        ''' return full ansible version info as a dict '''
        if gitinfo:
            # expensive call, use with care
            # FIX: original called a bare version(''), which is not in scope
            ansible_version_string = CLI.version('')
        else:
            ansible_version_string = __version__
        ansible_version = ansible_version_string.split()[0]
        ansible_versions = ansible_version.split('.')
        for counter in range(len(ansible_versions)):
            if ansible_versions[counter] == "":
                ansible_versions[counter] = 0
            try:
                ansible_versions[counter] = int(ansible_versions[counter])
            except ValueError:
                # non-numeric component (e.g. '0dev'); keep the string as-is
                pass
        # pad out to major/minor/revision
        if len(ansible_versions) < 3:
            for counter in range(len(ansible_versions), 3):
                ansible_versions.append(0)
        return {'string':   ansible_version_string.strip(),
                'full':     ansible_version,
                'major':    ansible_versions[0],
                'minor':    ansible_versions[1],
                'revision': ansible_versions[2]}

    @staticmethod
    def _git_repo_info(repo_path):
        ''' returns a string containing git branch, commit id and commit date '''
        result = None
        if os.path.exists(repo_path):
            # A .git *file* (instead of a directory) means we are in a
            # submodule; it points at the real gitdir.
            if os.path.isfile(repo_path):
                try:
                    gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                    # There is a possibility the .git file holds an absolute path.
                    if os.path.isabs(gitdir):
                        repo_path = gitdir
                    else:
                        # strip the trailing '.git' before joining
                        repo_path = os.path.join(repo_path[:-4], gitdir)
                except (IOError, AttributeError):
                    return ''
            with open(os.path.join(repo_path, "HEAD")) as f:
                branch = f.readline().split('/')[-1].rstrip("\n")
            branch_path = os.path.join(repo_path, "refs", "heads", branch)
            if os.path.exists(branch_path):
                with open(branch_path) as f:
                    commit = f.readline()[:10]
            else:
                # detached HEAD: HEAD itself holds the commit id
                commit = branch[:10]
                branch = 'detached HEAD'
                branch_path = os.path.join(repo_path, "HEAD")

            date = time.localtime(os.stat(branch_path).st_mtime)
            if time.daylight == 0:
                offset = time.timezone
            else:
                offset = time.altzone
            result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
                time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
        else:
            result = ''
        return result

    @staticmethod
    def _gitinfo():
        ''' describe the git state of this checkout, including submodules '''
        basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
        repo_path = os.path.join(basedir, '.git')
        result = CLI._git_repo_info(repo_path)
        submodules = os.path.join(basedir, '.gitmodules')
        if not os.path.exists(submodules):
            return result
        with open(submodules) as f:
            for line in f:
                tokens = line.strip().split(' ')
                if tokens[0] == 'path':
                    submodule_path = tokens[2]
                    submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
                    if not submodule_info:
                        submodule_info = ' not found - use git submodule update --init ' + submodule_path
                    result += "\n  {0}: {1}".format(submodule_path, submodule_info)
        return result

    @staticmethod
    def pager(text):
        ''' find reasonable way to display text '''
        # this is a much simpler form of what is in pydoc.py
        if not sys.stdout.isatty():
            # FIX: original called an undefined pager_print(); print directly
            print(text)
        elif 'PAGER' in os.environ:
            if sys.platform == 'win32':
                print(text)
            else:
                CLI.pager_pipe(text, os.environ['PAGER'])
        elif subprocess.call('(less --version) 2> /dev/null', shell=True) == 0:
            CLI.pager_pipe(text, 'less')
        else:
            print(text)

    @staticmethod
    def pager_pipe(text, cmd):
        ''' pipe text through a pager '''
        if 'LESS' not in os.environ:
            # FIX: LESS_OPTS is a class attribute, not a module global
            os.environ['LESS'] = CLI.LESS_OPTS
        try:
            cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
            cmd.communicate(input=text)
        except IOError:
            pass
        except KeyboardInterrupt:
            pass

    @classmethod
    def tty_ify(cls, text):
        ''' convert module-documentation markup into plain terminal text '''

        t = cls._ITALIC.sub("`" + r"\1" + "'", text)    # I(word) => `word'
        t = cls._BOLD.sub("*" + r"\1" + "*", t)         # B(word) => *word*
        t = cls._MODULE.sub("[" + r"\1" + "]", t)       # M(word) => [word]
        t = cls._URL.sub(r"\1", t)                      # U(word) => word
        t = cls._CONST.sub("`" + r"\1" + "'", t)        # C(word) => `word'

        return t
diff --git a/lib/ansible/cli/adhoc.py b/lib/ansible/cli/adhoc.py
new file mode 100644
index 0000000000..16c2dc9e42
--- /dev/null
+++ b/lib/ansible/cli/adhoc.py
@@ -0,0 +1,156 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+########################################################
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.inventory import Inventory
+from ansible.parsing import DataLoader
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook.play import Play
+from ansible.cli import CLI
+from ansible.utils.display import Display
+from ansible.utils.vault import read_vault_file
+from ansible.vars import VariableManager
+
+########################################################
+
class AdHocCLI(CLI):
    ''' code behind ansible ad-hoc cli'''
    # Runs a single module invocation against a host pattern by building a
    # one-task pseudo-play and feeding it to the v2 TaskQueueManager.

    def parse(self):
        ''' create an options parser for bin/ansible '''

        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            runas_opts=True,
            async_opts=True,
            output_opts=True,
            connect_opts=True,
            check_opts=True,
            runtask_opts=True,
            vault_opts=True,
        )

        # options unique to ansible ad-hoc
        self.parser.add_option('-a', '--args', dest='module_args',
            help="module arguments", default=C.DEFAULT_MODULE_ARGS)
        self.parser.add_option('-m', '--module-name', dest='module_name',
            help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
            default=C.DEFAULT_MODULE_NAME)

        # NOTE(review): parse_args() is called with no arguments, so it reads
        # sys.argv[1:] rather than the self.args given to __init__, and then
        # rebinds self.args to the leftover positionals -- confirm intended.
        self.options, self.args = self.parser.parse_args()

        # exactly one positional argument remains: the host pattern
        if len(self.args) != 1:
            raise AnsibleOptionsError("Missing target hosts")

        self.display.verbosity = self.options.verbosity
        self.validate_conflicts()

        return True


    def run(self):
        ''' use Runner lib to do SSH things '''

        # only thing left should be host pattern
        pattern = self.args[0]

        # ignore connection password cause we are local
        if self.options.connection == "local":
            self.options.ask_pass = False

        sshpass = None
        becomepass = None
        vault_pass = None

        # map legacy sudo/su options onto become, then prompt as configured
        self.normalize_become_options()
        (sshpass, becomepass) = self.ask_passwords()
        passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }

        if self.options.vault_password_file:
            # read vault_pass from a file
            vault_pass = read_vault_file(self.options.vault_password_file)
        elif self.options.ask_vault_pass:
            # ask_vault_passwords returns (vault_pass, new_vault_pass); only
            # the first element is needed here
            vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]

        loader = DataLoader(vault_password=vault_pass)
        variable_manager = VariableManager()

        inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)

        hosts = inventory.list_hosts(pattern)
        if len(hosts) == 0:
            # warn but continue; implicit localhost may still be usable
            self.display.warning("provided hosts list is empty, only localhost is available")

        if self.options.listhosts:
            # --list-hosts: print the matching hosts and stop without running
            for host in hosts:
                self.display.display('    %s' % host)
            return 0

        if self.options.module_name in C.MODULE_REQUIRE_ARGS and not self.options.module_args:
            raise AnsibleError("No argument passed to %s module" % self.options.module_name)

        #TODO: implement async support
        #if self.options.seconds:
        #    callbacks.display("background launch...\n\n", color='cyan')
        #    results, poller = runner.run_async(self.options.seconds)
        #    results = self.poll_while_needed(poller)
        #else:
        #    results = runner.run()

        # create a pseudo-play to execute the specified module via a single task
        play_ds = dict(
            name = "Ansible Ad-Hoc",
            hosts = pattern,
            gather_facts = 'no',
            tasks = [ dict(action=dict(module=self.options.module_name, args=parse_kv(self.options.module_args))), ]
        )

        play = Play().load(play_ds, variable_manager=variable_manager, loader=loader)

        # now create a task queue manager to execute the play
        tqm = None
        try:
            tqm = TaskQueueManager(
                inventory=inventory,
                variable_manager=variable_manager,
                loader=loader,
                display=self.display,
                options=self.options,
                passwords=passwords,
                stdout_callback='minimal',
            )
            result = tqm.run(play)
        finally:
            # always release the TQM's resources, even when tqm.run() raises
            if tqm:
                tqm.cleanup()

        return result

    # ----------------------------------------------

    def poll_while_needed(self, poller):
        ''' summarize results from Runner '''
        # NOTE(review): currently unreachable -- only referenced from the
        # commented-out async block in run() above.

        # BACKGROUND POLL LOGIC when -B and -P are specified
        if self.options.seconds and self.options.poll_interval > 0:
            poller.wait(self.options.seconds, self.options.poll_interval)

        return poller.results
+
diff --git a/lib/ansible/cli/doc.py b/lib/ansible/cli/doc.py
new file mode 100644
index 0000000000..797a59f038
--- /dev/null
+++ b/lib/ansible/cli/doc.py
@@ -0,0 +1,283 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# http://docs.ansible.com/playbooks_vault.html for more details.
+
+import fcntl
+import datetime
+import os
+import struct
+import termios
+import traceback
+import textwrap
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.plugins import module_loader
+from ansible.cli import CLI
+from ansible.utils import module_docs
+
+class DocCLI(CLI):
+    """ Command line class for 'ansible-doc': displays documentation for
+    modules found on the configured module paths. """
+
+    # file extensions that can never carry module documentation
+    BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
+    # non-module files commonly found alongside modules in a checkout
+    IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
+
+    def __init__(self, args, display=None):
+
+        super(DocCLI, self).__init__(args, display)
+        # module names accumulated by find_modules()
+        self.module_list = []
+
+    def parse(self):
+        ''' Build the ansible-doc option parser and parse the command line. '''
+
+        self.parser = CLI.base_parser(
+            usage='usage: %prog [options] [module...]',
+            epilog='Show Ansible module documentation',
+        )
+
+        self.parser.add_option("-M", "--module-path", action="store", dest="module_path", default=C.DEFAULT_MODULE_PATH,
+                help="Ansible modules/ directory")
+        self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
+                help='List available modules')
+        self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
+                help='Show playbook snippet for specified module(s)')
+
+        # NOTE(review): parse_args() reads sys.argv directly; the 'args'
+        # given to __init__ are not forwarded here -- confirm intended.
+        self.options, self.args = self.parser.parse_args()
+        self.display.verbosity = self.options.verbosity
+
+
+    def run(self):
+        ''' Show docs (or the module list with -l) for the modules in self.args.
+
+        Returns 0 on success; raises AnsibleOptionsError when no module was
+        named and --list was not given.
+        '''
+
+        # extend the loader's search path with any -M/--module-path entries
+        if self.options.module_path is not None:
+            for i in self.options.module_path.split(os.pathsep):
+                module_loader.add_directory(i)
+
+        # list modules
+        if self.options.list_dir:
+            paths = module_loader._get_paths()
+            for path in paths:
+                self.find_modules(path)
+
+            CLI.pager(self.get_module_list_text())
+            return 0
+
+        if len(self.args) == 0:
+            raise AnsibleOptionsError("Incorrect options passed")
+
+        # process command line module list
+        text = ''
+        for module in self.args:
+
+            filename = module_loader.find_plugin(module)
+            if filename is None:
+                self.display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
+                continue
+
+            if any(filename.endswith(x) for x in self.BLACKLIST_EXTS):
+                continue
+
+            try:
+                doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+            except:
+                # NOTE(review): traceback.print_exc() writes to stderr and
+                # returns None, so vvv() logs nothing useful here --
+                # traceback.format_exc() was probably intended.
+                self.display.vvv(traceback.print_exc())
+                self.display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
+                continue
+
+            if doc is not None:
+
+                # Python 2 iteritems(); raises KeyError if the module's
+                # DOCUMENTATION block has no 'options' section.
+                all_keys = []
+                for (k,v) in doc['options'].iteritems():
+                    all_keys.append(k)
+                all_keys = sorted(all_keys)
+                doc['option_keys'] = all_keys
+
+                doc['filename'] = filename
+                doc['docuri'] = doc['module'].replace('_', '-')
+                doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
+                doc['plainexamples'] = plainexamples
+                doc['returndocs'] = returndocs
+
+                if self.options.show_snippet:
+                    text += DocCLI.get_snippet_text(doc)
+                else:
+                    text += DocCLI.get_man_text(doc)
+            else:
+                # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
+                # probably a quoting issue.
+                self.display.warning("module %s missing documentation (or could not parse documentation)\n" % module)
+
+        CLI.pager(text)
+        return 0
+
+ def find_modules(self, path):
+
+ if os.path.isdir(path):
+ for module in os.listdir(path):
+ if module.startswith('.'):
+ continue
+ elif os.path.isdir(module):
+ self.find_modules(module)
+ elif any(module.endswith(x) for x in self.BLACKLIST_EXTS):
+ continue
+ elif module.startswith('__'):
+ continue
+ elif module in self.IGNORE_FILES:
+ continue
+ elif module.startswith('_'):
+ fullpath = '/'.join([path,module])
+ if os.path.islink(fullpath): # avoids aliases
+ continue
+
+ module = os.path.splitext(module)[0] # removes the extension
+ self.module_list.append(module)
+
+
+    def get_module_list_text(self):
+        ''' Render self.module_list as name/description columns sized to the
+        terminal width, with deprecated (underscore-prefixed) modules in a
+        separate trailing section. '''
+        tty_size = 0
+        if os.isatty(0):
+            # ask the terminal driver for the window size; [1] is columns
+            tty_size = struct.unpack('HHHH',
+                fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
+        columns = max(60, tty_size)
+        # widest module name determines the description column offset
+        displace = max(len(x) for x in self.module_list)
+        linelimit = columns - displace - 5
+        text = []
+        deprecated = []
+        for module in sorted(set(self.module_list)):
+
+            if module in module_docs.BLACKLIST_MODULES:
+                continue
+
+            filename = module_loader.find_plugin(module)
+
+            if filename is None:
+                continue
+            if filename.endswith(".ps1"):
+                # skip powershell module files
+                continue
+            if os.path.isdir(filename):
+                continue
+
+            try:
+                doc, plainexamples, returndocs = module_docs.get_docstring(filename)
+                desc = self.tty_ify(doc.get('short_description', '?')).strip()
+                if len(desc) > linelimit:
+                    desc = desc[:linelimit] + '...'
+
+                if module.startswith('_'): # Handle deprecated
+                    deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
+                else:
+                    text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
+            except:
+                # NOTE(review): bare except converts any failure (including
+                # KeyboardInterrupt) into a documentation error -- confirm.
+                raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)
+
+        if len(deprecated) > 0:
+            text.append("\nDEPRECATED:")
+            text.extend(deprecated)
+        return "\n".join(text)
+
+
+ @staticmethod
+ def print_paths(finder):
+ ''' Returns a string suitable for printing of the search path '''
+
+ # Uses a list to get the order right
+ ret = []
+ for i in finder._get_paths():
+ if i not in ret:
+ ret.append(i)
+ return os.pathsep.join(ret)
+
+    @staticmethod
+    def get_snippet_text(doc):
+        ''' Build a pasteable task snippet ("- name:/action:" plus one
+        commented line per option) from a parsed DOCUMENTATION dict. '''
+
+        text = []
+        # NOTE(review): if short_description is a plain string (the common
+        # case), " ".join() inserts a space between every character; the
+        # join only makes sense for a list of strings -- confirm.
+        desc = CLI.tty_ify(" ".join(doc['short_description']))
+        text.append("- name: %s" % (desc))
+        text.append(" action: %s" % (doc['module']))
+
+        for o in sorted(doc['options'].keys()):
+            opt = doc['options'][o]
+            desc = CLI.tty_ify(" ".join(opt['description']))
+
+            # required options are marked with a trailing '='
+            if opt.get('required', False):
+                s = o + "="
+            else:
+                s = o
+
+            text.append(" %-20s # %s" % (s, desc))
+        text.append('')
+
+        return "\n".join(text)
+
+    @staticmethod
+    def get_man_text(doc):
+        ''' Render a man-page style text view of a parsed DOCUMENTATION
+        dict: description, options ('=' marks mandatory ones), notes,
+        requirements, examples and return values. '''
+
+        opt_indent=" "
+        text = []
+        text.append("> %s\n" % doc['module'].upper())
+
+        desc = " ".join(doc['description'])
+
+        text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
+
+        if 'option_keys' in doc and len(doc['option_keys']) > 0:
+            text.append("Options (= is mandatory):\n")
+
+            for o in sorted(doc['option_keys']):
+                opt = doc['options'][o]
+
+                # '=' prefix flags required options, '-' optional ones
+                if opt.get('required', False):
+                    opt_leadin = "="
+                else:
+                    opt_leadin = "-"
+
+                text.append("%s %s" % (opt_leadin, o))
+
+                desc = " ".join(opt['description'])
+
+                # append choices and default inline with the description
+                if 'choices' in opt:
+                    choices = ", ".join(str(i) for i in opt['choices'])
+                    desc = desc + " (Choices: " + choices + ")"
+                if 'default' in opt:
+                    default = str(opt['default'])
+                    desc = desc + " [Default: " + default + "]"
+                text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), initial_indent=opt_indent,
+                    subsequent_indent=opt_indent))
+
+        if 'notes' in doc and len(doc['notes']) > 0:
+            notes = " ".join(doc['notes'])
+            text.append("Notes:%s\n" % textwrap.fill(CLI.tty_ify(notes), initial_indent=" ",
+                subsequent_indent=opt_indent))
+
+
+        if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
+            req = ", ".join(doc['requirements'])
+            text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), initial_indent=" ",
+                subsequent_indent=opt_indent))
+
+        if 'examples' in doc and len(doc['examples']) > 0:
+            text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
+            for ex in doc['examples']:
+                text.append("%s\n" % (ex['code']))
+
+        # raw EXAMPLES/RETURN blocks are passed through untouched
+        if 'plainexamples' in doc and doc['plainexamples'] is not None:
+            text.append("EXAMPLES:")
+            text.append(doc['plainexamples'])
+        if 'returndocs' in doc and doc['returndocs'] is not None:
+            text.append("RETURN VALUES:")
+            text.append(doc['returndocs'])
+        text.append('')
+
+        return "\n".join(text)
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
new file mode 100644
index 0000000000..abe85e0af8
--- /dev/null
+++ b/lib/ansible/cli/galaxy.py
@@ -0,0 +1,491 @@
+########################################################################
+#
+# (C) 2013, James Cammarata <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+import datetime
+import json
+import os
+import os.path
+import shutil
+import subprocess
+import sys
+import tarfile
+import tempfile
+import urllib
+import urllib2
+import yaml
+
+from collections import defaultdict
+from distutils.version import LooseVersion
+from jinja2 import Environment
+from optparse import OptionParser
+
+import ansible.constants as C
+import ansible.utils
+import ansible.galaxy
+from ansible.cli import CLI
+from ansible.errors import AnsibleError, AnsibleOptionsError
+from ansible.galaxy import Galaxy
+from ansible.galaxy.api import GalaxyAPI
+from ansible.galaxy.role import GalaxyRole
+from ansible.playbook.role.requirement import RoleRequirement
+from ansible.utils.display import Display
+
+class GalaxyCLI(CLI):
+    ''' Command line class for 'ansible-galaxy': init/info/install/list/remove
+    of roles from the Ansible Galaxy service or local files. '''
+
+    VALID_ACTIONS = ("init", "info", "install", "list", "remove")
+    # keys from the galaxy API payload that are not worth displaying to users
+    SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
+
+    def __init__(self, args, display=None):
+
+        # populated later: self.galaxy in parse(), self.api in run()
+        self.api = None
+        self.galaxy = None
+        super(GalaxyCLI, self).__init__(args, display)
+
+    def parse(self):
+        ''' create an options parser for ansible-galaxy, with per-action
+        options added after the action is determined '''
+
+        self.parser = CLI.base_parser(
+            usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
+            epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
+        )
+
+
+        self.set_action()
+
+        # options specific to actions
+        if self.action == "info":
+            self.parser.set_usage("usage: %prog info [options] role_name[,version]")
+        elif self.action == "init":
+            self.parser.set_usage("usage: %prog init [options] role_name")
+            self.parser.add_option(
+                '-p', '--init-path', dest='init_path', default="./",
+                help='The path in which the skeleton role will be created. '
+                'The default is the current working directory.')
+            self.parser.add_option(
+                '--offline', dest='offline', default=False, action='store_true',
+                help="Don't query the galaxy API when creating roles")
+        elif self.action == "install":
+            self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
+            self.parser.add_option(
+                '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+                help='Ignore errors and continue with the next specified role.')
+            self.parser.add_option(
+                '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
+                help='Don\'t download roles listed as dependencies')
+            self.parser.add_option(
+                '-r', '--role-file', dest='role_file',
+                help='A file containing a list of roles to be imported')
+        elif self.action == "remove":
+            self.parser.set_usage("usage: %prog remove role1 role2 ...")
+        elif self.action == "list":
+            self.parser.set_usage("usage: %prog list [role_name]")
+
+        # options that apply to more than one action
+        if self.action != "init":
+            self.parser.add_option(
+                '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
+                help='The path to the directory containing your roles. '
+                'The default is the roles_path configured in your '
+                'ansible.cfg file (/etc/ansible/roles if not configured)')
+
+        if self.action in ("info","init","install"):
+            self.parser.add_option( '-s', '--server', dest='api_server', default="https://galaxy.ansible.com",
+                help='The API server destination')
+
+        if self.action in ("init","install"):
+            self.parser.add_option(
+                '-f', '--force', dest='force', action='store_true', default=False,
+                help='Force overwriting an existing role')
+
+        # get options, args and galaxy object
+        self.options, self.args =self.parser.parse_args()
+        self.display.verbosity = self.options.verbosity
+        self.galaxy = Galaxy(self.options, self.display)
+
+        return True
+
+    def run(self):
+        ''' Create the API client when the action needs it, then dispatch
+        to the execute_<action> method chosen in parse(). '''
+
+        # if not offline, connect to the galaxy api
+        if self.action in ("info","install") or (self.action == 'init' and not self.options.offline):
+            api_server = self.options.api_server
+            self.api = GalaxyAPI(self.galaxy, api_server)
+            # NOTE(review): the constructor just returned an object, so this
+            # check can never fire; an actual connectivity probe was
+            # presumably intended -- confirm.
+            if not self.api:
+                raise AnsibleError("The API server (%s) is not responding, please try again later." % api_server)
+
+        self.execute()
+
+ def get_opt(self, k, defval=""):
+ """
+ Returns an option from an Optparse values instance.
+ """
+ try:
+ data = getattr(self.options, k)
+ except:
+ return defval
+ if k == "roles_path":
+ if os.pathsep in data:
+ data = data.split(os.pathsep)[0]
+ return data
+
+    def exit_without_ignore(self, rc=1):
+        """
+        Exits with the specified return code unless the
+        option --ignore-errors was specified
+
+        NOTE(review): despite the name/docstring this method only returns
+        'rc' (and prints an error); it never calls sys.exit -- callers must
+        act on the return value. Confirm intended.
+        """
+        if not self.get_opt("ignore_errors", False):
+            self.display.error('- you can use --ignore-errors to skip failed roles and finish processing the list.')
+            return rc
+
+ def execute_init(self):
+ """
+ Executes the init action, which creates the skeleton framework
+ of a role that complies with the galaxy metadata format.
+ """
+
+ init_path = self.get_opt('init_path', './')
+ force = self.get_opt('force', False)
+ offline = self.get_opt('offline', False)
+
+ role_name = self.args.pop(0).strip()
+ if role_name == "":
+ raise AnsibleOptionsError("- no role name specified for init")
+ role_path = os.path.join(init_path, role_name)
+ if os.path.exists(role_path):
+ if os.path.isfile(role_path):
+ raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
+ elif not force:
+ raise AnsibleError("- the directory %s already exists." % role_path + \
+ "you can use --force to re-initialize this directory,\n" + \
+ "however it will reset any main.yml files that may have\n" + \
+ "been modified there already.")
+
+ # create the default README.md
+ if not os.path.exists(role_path):
+ os.makedirs(role_path)
+ readme_path = os.path.join(role_path, "README.md")
+ f = open(readme_path, "wb")
+ f.write(self.galaxy.default_readme)
+ f.close
+
+ for dir in GalaxyRole.ROLE_DIRS:
+ dir_path = os.path.join(init_path, role_name, dir)
+ main_yml_path = os.path.join(dir_path, 'main.yml')
+ # create the directory if it doesn't exist already
+ if not os.path.exists(dir_path):
+ os.makedirs(dir_path)
+
+ # now create the main.yml file for that directory
+ if dir == "meta":
+ # create a skeleton meta/main.yml with a valid galaxy_info
+ # datastructure in place, plus with all of the available
+ # tags/platforms included (but commented out) and the
+ # dependencies section
+ platforms = []
+ if not offline and self.api:
+ platforms = self.api.get_list("platforms") or []
+ categories = []
+ if not offline and self.api:
+ categories = self.api.get_list("categories") or []
+
+ # group the list of platforms from the api based
+ # on their names, with the release field being
+ # appended to a list of versions
+ platform_groups = defaultdict(list)
+ for platform in platforms:
+ platform_groups[platform['name']].append(platform['release'])
+ platform_groups[platform['name']].sort()
+
+ inject = dict(
+ author = 'your name',
+ company = 'your company (optional)',
+ license = 'license (GPLv2, CC-BY, etc)',
+ issue_tracker_url = 'http://example.com/issue/tracker',
+ min_ansible_version = '1.2',
+ platforms = platform_groups,
+ categories = categories,
+ )
+ rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
+ f = open(main_yml_path, 'w')
+ f.write(rendered_meta)
+ f.close()
+ pass
+ elif dir not in ('files','templates'):
+ # just write a (mostly) empty YAML file for main.yml
+ f = open(main_yml_path, 'w')
+ f.write('---\n# %s file for %s\n' % (dir,role_name))
+ f.close()
+ self.display.display("- %s was created successfully" % role_name)
+
+    def execute_info(self):
+        """
+        Executes the info action. This action prints out detailed
+        information about an installed role as well as info available
+        from the galaxy API.
+        """
+
+        if len(self.args) == 0:
+            # the user needs to specify a role
+            raise AnsibleOptionsError("- you must specify a user/role name")
+
+        # NOTE(review): fetched but never used in this method -- confirm.
+        roles_path = self.get_opt("roles_path")
+
+        for role in self.args:
+
+            role_info = {}
+            gr = GalaxyRole(self.galaxy, role)
+            #self.galaxy.add_role(gr)
+
+            install_info = gr.install_info
+            if install_info:
+                if 'version' in install_info:
+                    # NOTE(review): 'intalled_version' is a typo
+                    # ('installed_version') that leaks into the displayed
+                    # output key -- fixing it changes user-visible output.
+                    install_info['intalled_version'] = install_info['version']
+                    del install_info['version']
+                role_info.update(install_info)
+
+            remote_data = False
+            if self.api:
+                remote_data = self.api.lookup_role_by_name(role, False)
+
+            if remote_data:
+                role_info.update(remote_data)
+
+            if gr.metadata:
+                role_info.update(gr.metadata)
+
+            req = RoleRequirement()
+            __, __, role_spec= req.parse({'role': role})
+            if role_spec:
+                role_info.update(role_spec)
+
+            if role_info:
+                self.display.display("- %s:" % (role))
+                for k in sorted(role_info.keys()):
+
+                    if k in self.SKIP_INFO_KEYS:
+                        continue
+
+                    # dict values get a nested, indented listing
+                    if isinstance(role_info[k], dict):
+                        self.display.display("\t%s: " % (k))
+                        for key in sorted(role_info[k].keys()):
+                            if key in self.SKIP_INFO_KEYS:
+                                continue
+                            self.display.display("\t\t%s: %s" % (key, role_info[k][key]))
+                    else:
+                        self.display.display("\t%s: %s" % (k, role_info[k]))
+            else:
+                self.display.display("- the role %s was not found" % role)
+
+    def execute_install(self):
+        """
+        Executes the installation action. The args list contains the
+        roles to be installed, unless -r was specified. The list of roles
+        can be a name (which will be downloaded via the galaxy API and github),
+        or it can be a local .tar.gz file.
+        """
+
+        role_file = self.get_opt("role_file", None)
+
+        if len(self.args) == 0 and role_file is None:
+            # the user needs to specify one of either --role-file
+            # or specify a single user/role name
+            raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
+        elif len(self.args) == 1 and not role_file is None:
+            # using a role file is mutually exclusive of specifying
+            # the role name on the command line
+            raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
+
+        no_deps = self.get_opt("no_deps", False)
+        roles_path = self.get_opt("roles_path")
+
+        roles_done = []
+        roles_left = []
+        # NOTE(review): the first positional arg is consumed here and only
+        # wrapped in 'gr'; the loop below adds the *remaining* args to
+        # roles_left, so the first named role is never queued -- confirm.
+        role_name = self.args.pop(0).strip()
+
+        gr = GalaxyRole(self.galaxy, role_name)
+        if role_file:
+            f = open(role_file, 'r')
+            if role_file.endswith('.yaml') or role_file.endswith('.yml'):
+                roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
+            else:
+                # roles listed in a file, one per line
+                for rname in f.readlines():
+                    roles_left.append(GalaxyRole(self.galaxy, rname))
+            f.close()
+        else:
+            # roles were specified directly, so we'll just go out grab them
+            # (and their dependencies, unless the user doesn't want us to).
+            for rname in self.args:
+                roles_left.append(GalaxyRole(self.galaxy, rname))
+
+        while len(roles_left) > 0:
+            # query the galaxy API for the role data
+            role_data = None
+            role = roles_left.pop(0)
+            role_src = role.src
+            role_scm = role.scm
+            role_path = role.path
+
+            if role_path:
+                self.options.roles_path = role_path
+            else:
+                self.options.roles_path = roles_path
+
+            tmp_file = None
+            if role_src and os.path.isfile(role_src):
+                # installing a local tar.gz
+                tmp_file = role_src
+            else:
+                if role_scm:
+                    # create tar file from scm url
+                    # NOTE(review): scm_archive_role is not defined or
+                    # imported in this module -- NameError if reached.
+                    tmp_file = scm_archive_role(role_scm, role_src, role.version, role.name)
+                if role_src:
+                    if '://' in role_src:
+                        # just download a URL - version will probably be in the URL
+                        tmp_file = gr.fetch()
+                    else:
+                        role_data = self.api.lookup_role_by_name(role_src)
+                        if not role_data:
+                            self.display.warning("- sorry, %s was not found on %s." % (role_src, self.options.api_server))
+                            self.exit_without_ignore()
+                            continue
+
+                        role_versions = self.api.fetch_role_related('versions', role_data['id'])
+                        if not role.version:
+                            # convert the version names to LooseVersion objects
+                            # and sort them to get the latest version. If there
+                            # are no versions in the list, we'll grab the head
+                            # of the master branch
+                            if len(role_versions) > 0:
+                                loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
+                                loose_versions.sort()
+                                # NOTE(review): item assignment on a
+                                # GalaxyRole object; role.version was
+                                # presumably intended -- confirm.
+                                role["version"] = str(loose_versions[-1])
+                            else:
+                                role["version"] = 'master'
+                        elif role['version'] != 'master':
+                            if role_versions and role.version not in [a.get('name', None) for a in role_versions]:
+                                self.display.warning('role is %s' % role)
+                                self.display.warning("- the specified version (%s) was not found in the list of available versions (%s)." % (role.version, role_versions))
+                                self.exit_without_ignore()
+                                continue
+
+                        # download the role. if --no-deps was specified, we stop here,
+                        # otherwise we recursively grab roles and all of their deps.
+                        tmp_file = gr.fetch(role_data)
+            installed = False
+            if tmp_file:
+                # NOTE(review): neither 'install_role' nor 'options' is
+                # defined in this module -- NameError if reached.
+                installed = install_role(role.name, role.version, tmp_file, options)
+                # we're done with the temp file, clean it up
+                if tmp_file != role_src:
+                    os.unlink(tmp_file)
+            # install dependencies, if we want them
+
+            # this should use new roledepenencies code
+            #if not no_deps and installed:
+            #    if not role_data:
+            #        role_data = gr.get_metadata(role.get("name"), options)
+            #        role_dependencies = role_data['dependencies']
+            #    else:
+            #        role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
+            #    for dep in role_dependencies:
+            #        if isinstance(dep, basestring):
+            #            dep = ansible.utils.role_spec_parse(dep)
+            #        else:
+            #            dep = ansible.utils.role_yaml_parse(dep)
+            #        if not get_role_metadata(dep["name"], options):
+            #            if dep not in roles_left:
+            #                print '- adding dependency: %s' % dep["name"]
+            #                roles_left.append(dep)
+            #            else:
+            #                print '- dependency %s already pending installation.' % dep["name"]
+            #        else:
+            #            print '- dependency %s is already installed, skipping.' % dep["name"]
+
+            if not tmp_file or not installed:
+                self.display.warning("- %s was NOT installed successfully." % role.name)
+                self.exit_without_ignore()
+        return 0
+
+ def execute_remove(self):
+ """
+ Executes the remove action. The args list contains the list
+ of roles to be removed. This list can contain more than one role.
+ """
+
+ if len(self.args) == 0:
+ raise AnsibleOptionsError('- you must specify at least one role to remove.')
+
+ for role_name in self.args:
+ role = GalaxyRole(self.galaxy, role_name)
+ try:
+ if role.remove():
+ self.display.display('- successfully removed %s' % role_name)
+ else:
+ self.display.display('- %s is not installed, skipping.' % role_name)
+ except Exception as e:
+ raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
+
+ return 0
+
+ def execute_list(self):
+ """
+ Executes the list action. The args list can contain zero
+ or one role. If one is specified, only that role will be
+ shown, otherwise all roles in the specified directory will
+ be shown.
+ """
+
+ if len(self.args) > 1:
+ raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
+
+ if len(self.args) == 1:
+ # show only the request role, if it exists
+ gr = GalaxyRole(self.galaxy, self.name)
+ if gr.metadata:
+ install_info = gr.install_info
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ # show some more info about single roles here
+ self.display.display("- %s, %s" % (self.name, version))
+ else:
+ self.display.display("- the role %s was not found" % self.name)
+ else:
+ # show all valid roles in the roles_path directory
+ roles_path = self.get_opt('roles_path')
+ roles_path = os.path.expanduser(roles_path)
+ if not os.path.exists(roles_path):
+ raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path)
+ elif not os.path.isdir(roles_path):
+ raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path)
+ path_files = os.listdir(roles_path)
+ for path_file in path_files:
+ if gr.metadata:
+ install_info = gr.metadata
+ version = None
+ if install_info:
+ version = install_info.get("version", None)
+ if not version:
+ version = "(unknown version)"
+ self.display.display("- %s, %s" % (path_file, version))
+ return 0
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
new file mode 100644
index 0000000000..eb60bacbd2
--- /dev/null
+++ b/lib/ansible/cli/playbook.py
@@ -0,0 +1,180 @@
+#!/usr/bin/env python
+
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+########################################################
+import os
+import stat
+import sys
+
+from ansible import constants as C
+from ansible.cli import CLI
+from ansible.errors import AnsibleError
+from ansible.executor.playbook_executor import PlaybookExecutor
+from ansible.inventory import Inventory
+from ansible.parsing import DataLoader
+from ansible.parsing.splitter import parse_kv
+from ansible.playbook import Playbook
+from ansible.playbook.task import Task
+from ansible.utils.display import Display
+from ansible.utils.unicode import to_unicode
+from ansible.utils.vars import combine_vars
+from ansible.utils.vault import read_vault_file
+from ansible.vars import VariableManager
+
+#---------------------------------------------------------------------------------------------------
+
+class PlaybookCLI(CLI):
+    ''' code behind the ansible-playbook cli'''
+
+    def parse(self):
+        ''' Build the ansible-playbook option parser, parse argv, and
+        validate conflicting option combinations. '''
+
+        # create parser for CLI options
+        parser = CLI.base_parser(
+            usage = "%prog playbook.yml",
+            connect_opts=True,
+            meta_opts=True,
+            runas_opts=True,
+            subset_opts=True,
+            check_opts=True,
+            diff_opts=True,
+            runtask_opts=True,
+            vault_opts=True,
+        )
+
+        # ansible playbook specific opts
+        parser.add_option('--list-tasks', dest='listtasks', action='store_true',
+            help="list all tasks that would be executed")
+        parser.add_option('--step', dest='step', action='store_true',
+            help="one-step-at-a-time: confirm each task before running")
+        parser.add_option('--start-at-task', dest='start_at',
+            help="start the playbook at the task matching this name")
+        parser.add_option('--list-tags', dest='listtags', action='store_true',
+            help="list all available tags")
+
+        self.options, self.args = parser.parse_args()
+
+        if len(self.args) == 0:
+            # NOTE(review): AnsibleOptionsError is not imported by this
+            # module (only AnsibleError is), so this raise would itself
+            # fail with NameError -- fix the import.
+            raise AnsibleOptionsError("You must specify a playbook file to run")
+
+        self.parser = parser
+
+        self.display.verbosity = self.options.verbosity
+        self.validate_conflicts()
+
+    def run(self):
+        ''' Run the playbooks named on the command line, or just list
+        hosts/tasks/tags when a --list-* option was given. Returns 0 for
+        listings, otherwise the playbook executor's result. '''
+
+        # Manage passwords
+        sshpass = None
+        becomepass = None
+        vault_pass = None
+        passwords = {}
+
+        # don't deal with privilege escalation or passwords when we don't need to
+        if not self.options.listhosts and not self.options.listtasks and not self.options.listtags:
+            self.normalize_become_options()
+            (sshpass, becomepass) = self.ask_passwords()
+            passwords = { 'conn_pass': sshpass, 'become_pass': becomepass }
+
+        if self.options.vault_password_file:
+            # read vault_pass from a file
+            vault_pass = read_vault_file(self.options.vault_password_file)
+        elif self.options.ask_vault_pass:
+            vault_pass = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)[0]
+
+        loader = DataLoader(vault_password=vault_pass)
+
+        # merge -e/--extra-vars: @file, inline YAML/JSON, or key=value pairs
+        extra_vars = {}
+        for extra_vars_opt in self.options.extra_vars:
+            extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
+            if extra_vars_opt.startswith(u"@"):
+                # Argument is a YAML file (JSON is a subset of YAML)
+                data = loader.load_from_file(extra_vars_opt[1:])
+            elif extra_vars_opt and extra_vars_opt[0] in u'[{':
+                # Arguments as YAML
+                data = loader.load(extra_vars_opt)
+            else:
+                # Arguments as Key-value
+                data = parse_kv(extra_vars_opt)
+            extra_vars = combine_vars(extra_vars, data)
+
+        # FIXME: this should be moved inside the playbook executor code
+        # NOTE(review): only_tags/skip_tags are computed here but never
+        # passed to the executor below -- confirm intent.
+        only_tags = self.options.tags.split(",")
+        skip_tags = self.options.skip_tags
+        if self.options.skip_tags is not None:
+            skip_tags = self.options.skip_tags.split(",")
+
+        # initial error check, to make sure all specified playbooks are accessible
+        # before we start running anything through the playbook executor
+        for playbook in self.args:
+            if not os.path.exists(playbook):
+                raise AnsibleError("the playbook: %s could not be found" % playbook)
+            if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
+                raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
+
+        # create the variable manager, which will be shared throughout
+        # the code, ensuring a consistent view of global variables
+        variable_manager = VariableManager()
+        variable_manager.set_extra_vars(extra_vars)
+
+        # create the inventory, and filter it based on the subset specified (if any)
+        inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=self.options.inventory)
+        variable_manager.set_inventory(inventory)
+
+        # Note: slightly wrong, this is written so that implicit localhost
+        # (which is not returned in list_hosts()) is taken into account for
+        # warning if inventory is empty.  But it can't be taken into account for
+        # checking if limit doesn't match any hosts.  Instead we don't worry about
+        # limit if only implicit localhost was in inventory to start with.
+        #
+        # Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
+        no_hosts = False
+        if len(inventory.list_hosts()) == 0:
+            # Empty inventory
+            self.display.warning("provided hosts list is empty, only localhost is available")
+            no_hosts = True
+        inventory.subset(self.options.subset)
+        if len(inventory.list_hosts()) == 0 and no_hosts is False:
+            # Invalid limit
+            raise AnsibleError("Specified --limit does not match any hosts")
+
+        # create the playbook executor, which manages running the plays via a task queue manager
+        pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, display=self.display, options=self.options, passwords=passwords)
+
+        results = pbex.run()
+
+        # a list result means a --list-* option was set; render it instead
+        # of reporting an exit code
+        if isinstance(results, list):
+            for p in results:
+
+                self.display.display('\nplaybook: %s\n' % p['playbook'])
+                for play in p['plays']:
+                    if self.options.listhosts:
+                        self.display.display("\n %s (%s): host count=%d" % (play['name'], play['pattern'], len(play['hosts'])))
+                        for host in play['hosts']:
+                            self.display.display(" %s" % host)
+                    if self.options.listtasks: #TODO: do we want to display block info?
+                        self.display.display("\n %s" % (play['name']))
+                        for task in play['tasks']:
+                            self.display.display(" %s" % task)
+                    if self.options.listtags: #TODO: fix once we figure out block handling above
+                        self.display.display("\n %s: tags count=%d" % (play['name'], len(play['tags'])))
+                        for tag in play['tags']:
+                            self.display.display(" %s" % tag)
+            return 0
+        else:
+            return results
diff --git a/lib/ansible/cli/pull.py b/lib/ansible/cli/pull.py
new file mode 100644
index 0000000000..6b087d4ec0
--- /dev/null
+++ b/lib/ansible/cli/pull.py
@@ -0,0 +1,219 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+########################################################
import datetime
import os
import random
import shutil
import socket
import sys
import time

from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.utils.display import Display
from ansible.utils.vault import read_vault_file
+
+########################################################
+
class PullCLI(CLI):
    ''' code behind ansible-pull: check a playbook repository out locally and run a playbook from it '''

    DEFAULT_REPO_TYPE = 'git'
    DEFAULT_PLAYBOOK = 'local.yml'
    # return codes of try_playbook() mapped to human-readable reasons
    PLAYBOOK_ERRORS = {
        1: 'File does not exist',
        2: 'File is not readable'
    }
    SUPPORTED_REPO_MODULES = ['git']

    def parse(self):
        ''' create an options parser for the ansible-pull command line '''

        self.parser = CLI.base_parser(
            usage='%prog <host-pattern> [options]',
            connect_opts=True,
            vault_opts=True,
        )

        # options unique to pull
        self.parser.add_option('--purge', default=False, action='store_true', help='purge checkout after playbook run')
        self.parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
            help='only run the playbook if the repository has been updated')
        self.parser.add_option('-s', '--sleep', dest='sleep', default=None,
            help='sleep for random interval (between 0 and n number of seconds) before starting. This is a useful way to disperse git requests')
        self.parser.add_option('-f', '--force', dest='force', default=False, action='store_true',
            help='run the playbook even if the repository could not be updated')
        self.parser.add_option('-d', '--directory', dest='dest', default=None, help='directory to checkout repository to')
        self.parser.add_option('-U', '--url', dest='url', default=None, help='URL of the playbook repository')
        self.parser.add_option('-C', '--checkout', dest='checkout',
            help='branch/tag/commit to checkout. ' 'Defaults to behavior of repository module.')
        self.parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
            help='adds the hostkey for the repo url if not already added')
        self.parser.add_option('-m', '--module-name', dest='module_name', default=self.DEFAULT_REPO_TYPE,
            help='Repository module name, which ansible will use to check out the repo. Default is %s.' % self.DEFAULT_REPO_TYPE)

        self.options, self.args = self.parser.parse_args()

        if self.options.sleep:
            # normalize --sleep into a concrete number of seconds, chosen at random
            try:
                self.options.sleep = random.randint(0, int(self.options.sleep))
            except ValueError:
                raise AnsibleOptionsError("%s is not a number." % self.options.sleep)

        if not self.options.url:
            raise AnsibleOptionsError("URL for repository not specified, use -h for help")

        if len(self.args) != 1:
            raise AnsibleOptionsError("Missing target hosts")

        if self.options.module_name not in self.SUPPORTED_REPO_MODULES:
            raise AnsibleOptionsError("Unsupported repo module %s, choices are %s" % (self.options.module_name, ','.join(self.SUPPORTED_REPO_MODULES)))

        self.display.verbosity = self.options.verbosity
        self.validate_conflicts()

    def run(self):
        ''' check the repository out (via the repo module) and run the selected playbook from it '''

        # log command line
        now = datetime.datetime.now()
        self.display.display(now.strftime("Starting Ansible Pull at %F %T"))
        self.display.display(' '.join(sys.argv))

        # Build Checkout command
        # Now construct the ansible command
        limit_opts = 'localhost:%s:127.0.0.1' % socket.getfqdn()
        base_opts = '-c local --limit "%s"' % limit_opts
        if self.options.verbosity > 0:
            # one 'v' per verbosity level, i.e. -v, -vv, -vvv ...
            base_opts += ' -%s' % ('v' * self.options.verbosity)

        # Attempt to use the inventory passed in as an argument
        # It might not yet have been downloaded so use localhost if not
        if not self.options.inventory or not os.path.exists(self.options.inventory):
            inv_opts = 'localhost,'
        else:
            inv_opts = self.options.inventory

        #TODO: enable more repo modules hg/svn?
        if self.options.module_name == 'git':
            repo_opts = "name=%s dest=%s" % (self.options.url, self.options.dest)
            if self.options.checkout:
                repo_opts += ' version=%s' % self.options.checkout

            if self.options.accept_host_key:
                repo_opts += ' accept_hostkey=yes'

            if self.options.key_file:
                repo_opts += ' key_file=%s' % self.options.key_file

        # NOTE(review): `utils` is not imported in this module — confirm the v2
        # plugin-loader import path before shipping.
        path = utils.plugins.module_finder.find_plugin(self.options.module_name)
        if path is None:
            raise AnsibleOptionsError(("module '%s' not found.\n" % self.options.module_name))

        bin_path = os.path.dirname(os.path.abspath(__file__))
        cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
            bin_path, inv_opts, base_opts, self.options.module_name, repo_opts
        )

        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev

        # Nap?
        if self.options.sleep:
            self.display.display("Sleeping for %d seconds..." % self.options.sleep)
            time.sleep(self.options.sleep)

        # RUN the Checkout command
        # NOTE(review): `cmd_functions` is not imported in this module — needs an import.
        rc, out, err = cmd_functions.run_cmd(cmd, live=True)

        if rc != 0:
            if self.options.force:
                self.display.warning("Unable to update repository. Continuing with (forced) run of playbook.")
            else:
                return rc
        elif self.options.ifchanged and '"changed": true' not in out:
            self.display.display("Repository has not changed, quitting.")
            return 0

        playbook = self.select_playbook(path)

        if playbook is None:
            raise AnsibleOptionsError("Could not find a playbook to run.")

        # Build playbook command
        cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
        if self.options.vault_password_file:
            cmd += " --vault-password-file=%s" % self.options.vault_password_file
        if self.options.inventory:
            cmd += ' -i "%s"' % self.options.inventory
        for ev in self.options.extra_vars:
            cmd += ' -e "%s"' % ev
        if self.options.ask_sudo_pass:
            cmd += ' -K'
        if self.options.tags:
            cmd += ' -t "%s"' % self.options.tags

        os.chdir(self.options.dest)

        # RUN THE PLAYBOOK COMMAND
        rc, out, err = cmd_functions.run_cmd(cmd, live=True)

        if self.options.purge:
            # leave the checkout dir before deleting it
            os.chdir('/')
            try:
                shutil.rmtree(self.options.dest)
            except Exception as e:
                # best-effort cleanup; report but do not mask the playbook rc
                self.display.warning("Failed to remove %s: %s" % (self.options.dest, str(e)))

        return rc


    def try_playbook(self, path):
        '''
        Validate that a playbook at `path` exists and is readable.
        Returns 0 on success, otherwise a key of PLAYBOOK_ERRORS.
        '''
        if not os.path.exists(path):
            return 1
        if not os.access(path, os.R_OK):
            return 2
        return 0

    def select_playbook(self, path):
        '''
        Pick the playbook to run from the checkout at `path`: the one named on the
        command line if given, else <fqdn>.yml, <shorthost>.yml, or local.yml.
        Returns the playbook path or None (after warning) if none is usable.
        '''
        playbook = None
        if len(self.args) > 0 and self.args[0] is not None:
            playbook = os.path.join(path, self.args[0])
            rc = self.try_playbook(playbook)
            if rc != 0:
                self.display.warning("%s: %s" % (playbook, self.PLAYBOOK_ERRORS[rc]))
                return None
            return playbook
        else:
            fqdn = socket.getfqdn()
            hostpb = os.path.join(path, fqdn + '.yml')
            shorthostpb = os.path.join(path, fqdn.split('.')[0] + '.yml')
            localpb = os.path.join(path, self.DEFAULT_PLAYBOOK)
            errors = []
            for pb in [hostpb, shorthostpb, localpb]:
                rc = self.try_playbook(pb)
                if rc == 0:
                    playbook = pb
                    break
                else:
                    errors.append("%s: %s" % (pb, self.PLAYBOOK_ERRORS[rc]))
            if playbook is None:
                self.display.warning("\n".join(errors))
            return playbook
diff --git a/lib/ansible/cli/vault.py b/lib/ansible/cli/vault.py
new file mode 100644
index 0000000000..6231f74332
--- /dev/null
+++ b/lib/ansible/cli/vault.py
@@ -0,0 +1,123 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-vault is a script that encrypts/decrypts YAML files. See
+# http://docs.ansible.com/playbooks_vault.html for more details.
+
import os
import sys
import traceback

from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.parsing.vault import VaultEditor
from ansible.utils.display import Display
from ansible.utils.vault import read_vault_file
+
class VaultCLI(CLI):
    """ Vault command line class: encrypt/decrypt/edit/view/rekey vault files """

    VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
    CIPHER = 'AES256'

    def __init__(self, args, display=None):

        # resolved in run(): from --vault-password-file or an interactive prompt
        self.vault_pass = None
        super(VaultCLI, self).__init__(args, display)

    def parse(self):
        ''' build the option parser, resolve the action, and validate arguments '''

        self.parser = CLI.base_parser(
            vault_opts=True,
            usage = "usage: %%prog [%s] [--help] [options] vaultfile.yml" % "|".join(self.VALID_ACTIONS),
            epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
        )

        self.set_action()

        # options specific to self.action (every action shares the same shape;
        # %%prog escapes optparse's %prog placeholder)
        if self.action in self.VALID_ACTIONS:
            self.parser.set_usage("usage: %%prog %s [options] file_name" % self.action)

        self.options, self.args = self.parser.parse_args()
        self.display.verbosity = self.options.verbosity

        if len(self.args) != 1:
            raise AnsibleOptionsError("Vault requires a single filename as a parameter")

    def run(self):
        ''' resolve the vault password, then dispatch to execute_<action> '''

        if self.options.vault_password_file:
            # read vault_pass from a file
            # NOTE(review): read_vault_file must be imported from ansible.utils.vault
            self.vault_pass = read_vault_file(self.options.vault_password_file)
        elif self.options.ask_vault_pass:
            self.vault_pass, _ = self.ask_vault_passwords(ask_vault_pass=True, ask_new_vault_pass=False, confirm_new=False)

        self.execute()

    def execute_create(self):
        ''' create and open a new, empty encrypted file in an editor '''

        cipher = getattr(self.options, 'cipher', self.CIPHER)
        this_editor = VaultEditor(cipher, self.vault_pass, self.args[0])
        this_editor.create_file()

    def execute_decrypt(self):
        ''' decrypt the given vault file(s) in place '''

        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for f in self.args:
            this_editor = VaultEditor(cipher, self.vault_pass, f)
            this_editor.decrypt_file()

        self.display.display("Decryption successful")

    def execute_edit(self):
        ''' open the vault file(s) decrypted in an editor, re-encrypt on save '''

        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.edit_file()

    def execute_view(self):
        ''' display the decrypted contents of the vault file(s) '''

        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.view_file()

    def execute_encrypt(self):
        ''' encrypt the given plaintext file(s) in place '''

        cipher = getattr(self.options, 'cipher', self.CIPHER)
        for f in self.args:
            this_editor = VaultEditor(cipher, self.vault_pass, f)
            this_editor.encrypt_file()

        self.display.display("Encryption successful")

    def execute_rekey(self):
        ''' re-encrypt the vault file(s) under a newly prompted password '''

        __, new_password = self.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)

        for f in self.args:
            this_editor = VaultEditor(None, self.vault_pass, f)
            this_editor.rekey_file(new_password)

        self.display.display("Rekey successful")
diff --git a/lib/ansible/compat/__init__.py b/lib/ansible/compat/__init__.py
new file mode 100644
index 0000000000..e77b77d2a6
--- /dev/null
+++ b/lib/ansible/compat/__init__.py
@@ -0,0 +1,27 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat library for ansible. This contains compatibility definitions for older python
+When we need to import a module differently depending on python version, do it
+here. Then in the code we can simply import from compat in order to get what we want.
+'''
+
diff --git a/lib/ansible/compat/tests/__init__.py b/lib/ansible/compat/tests/__init__.py
new file mode 100644
index 0000000000..fc05b2549b
--- /dev/null
+++ b/lib/ansible/compat/tests/__init__.py
@@ -0,0 +1,40 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+This module contains things that are only needed for compat in the testsuites,
+not in ansible itself. If you are not installing the test suite, you can
+safely remove this subdirectory.
+'''
+
#
# Compat for python2.7
#

# One unittest needs to import builtins via __import__(), so figure out the
# module name string for this interpreter up front.
try:
    import __builtin__          # only exists on Python 2
    BUILTINS = '__builtin__'
except ImportError:
    BUILTINS = 'builtins'       # Python 3 name
+
diff --git a/lib/ansible/compat/tests/mock.py b/lib/ansible/compat/tests/mock.py
new file mode 100644
index 0000000000..0614391c4b
--- /dev/null
+++ b/lib/ansible/compat/tests/mock.py
@@ -0,0 +1,38 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+
# Python 2.7

# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library

try:
    # Python 3.3+ ships mock in the stdlib as unittest.mock
    from unittest.mock import *
except ImportError:
    # Python 2
    try:
        # fall back to the standalone `mock` package from pypi
        from mock import *
    except ImportError:
        print('You need the mock library installed on python2.x to run tests')
diff --git a/lib/ansible/compat/tests/unittest.py b/lib/ansible/compat/tests/unittest.py
new file mode 100644
index 0000000000..a629849b31
--- /dev/null
+++ b/lib/ansible/compat/tests/unittest.py
@@ -0,0 +1,36 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Python 2.6
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/lib/ansible/config/__init__.py b/lib/ansible/config/__init__.py
new file mode 100644
index 0000000000..ae8ccff595
--- /dev/null
+++ b/lib/ansible/config/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 089de5b7c5..456beb8bbc 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -15,10 +15,15 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import os
import pwd
import sys
-import ConfigParser
+
+from six.moves import configparser
from string import ascii_letters, digits
# copied from utils, avoid circular reference fun :)
@@ -35,13 +40,15 @@ def get_config(p, section, key, env_var, default, boolean=False, integer=False,
''' return a configuration variable with casting '''
value = _get_config(p, section, key, env_var, default)
if boolean:
- return mk_boolean(value)
- if value and integer:
- return int(value)
- if value and floating:
- return float(value)
- if value and islist:
- return [x.strip() for x in value.split(',')]
+ value = mk_boolean(value)
+ if value:
+ if integer:
+ value = int(value)
+ elif floating:
+ value = float(value)
+ elif islist:
+ if isinstance(value, basestring):
+ value = [x.strip() for x in value.split(',')]
return value
def _get_config(p, section, key, env_var, default):
@@ -60,7 +67,7 @@ def _get_config(p, section, key, env_var, default):
def load_config_file():
''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
- p = ConfigParser.ConfigParser()
+ p = configparser.ConfigParser()
path0 = os.getenv("ANSIBLE_CONFIG", None)
if path0 is not None:
@@ -73,8 +80,8 @@ def load_config_file():
if path is not None and os.path.exists(path):
try:
p.read(path)
- except ConfigParser.Error as e:
- print "Error reading config file: \n%s" % e
+ except configparser.Error as e:
+ print("Error reading config file: \n{0}".format(e))
sys.exit(1)
return p
return None
@@ -98,7 +105,8 @@ YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
DEFAULTS='defaults'
# configurable things
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
+DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, boolean=True)
+DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'hostfile', 'ANSIBLE_HOSTS', get_config(p, DEFAULTS,'inventory','ANSIBLE_INVENTORY', '/etc/ansible/hosts')))
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
@@ -112,6 +120,7 @@ DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE
DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
+DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
@@ -122,7 +131,6 @@ DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None,
DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
@@ -141,7 +149,7 @@ BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
+DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
# need to rethink impementing these 2
DEFAULT_BECOME_EXE = None
@@ -156,6 +164,7 @@ DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', '
DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
+DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
@@ -173,8 +182,8 @@ DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings',
DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
-
+RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
+RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
@@ -196,10 +205,16 @@ ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_fi
ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
+# galaxy related
+DEFAULT_GALAXY_URI = get_config(p, 'galaxy', 'server_uri', 'ANSIBLE_GALAXY_SERVER_URI', 'https://galaxy.ansible.com')
+# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
+GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', ['git','hg'], islist=True)
+
# characters included in auto-generated passwords
DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
# non-configurable things
+MODULE_REQUIRE_ARGS = ['command', 'shell', 'raw', 'script']
DEFAULT_BECOME_PASS = None
DEFAULT_SUDO_PASS = None
DEFAULT_REMOTE_PASS = None
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
new file mode 100644
index 0000000000..63fb8ef023
--- /dev/null
+++ b/lib/ansible/errors/__init__.py
@@ -0,0 +1,185 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors.yaml_strings import *
+
class AnsibleError(Exception):
    '''
    This is the base class for all errors raised from Ansible code,
    and can be instantiated with two optional parameters beyond the
    error message to control whether detailed information is displayed
    when the error occurred while parsing a data file of some kind.

    Usage:

        raise AnsibleError('some message here', obj=obj, show_content=True)

    Where "obj" is some subclass of ansible.parsing.yaml.objects.AnsibleBaseYAMLObject,
    which should be returned by the DataLoader() class.
    '''

    def __init__(self, message, obj=None, show_content=True):
        self._obj = obj
        self._show_content = show_content

        # Always set a message first, so __str__/__repr__ work even when the
        # object carries no usable position info (the original code left
        # self.message unset when _get_extended_error() returned nothing).
        self.message = 'ERROR! %s' % message

        if obj is not None:
            # deferred import to prevent an import loop, since the objects
            # code also imports ansible.errors; only needed when an obj is given
            from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject
            if isinstance(obj, AnsibleBaseYAMLObject):
                extended_error = self._get_extended_error()
                if extended_error:
                    self.message = 'ERROR! %s\n\n%s' % (message, extended_error)

        super(AnsibleError, self).__init__(self.message)

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message

    def _get_error_lines_from_file(self, file_name, line_number):
        '''
        Returns the line in the file which coresponds to the reported error
        location, as well as the line preceding it (if the error did not
        occur on the first line), to provide context to the error.
        '''

        target_line = ''
        prev_line = ''

        with open(file_name, 'r') as f:
            lines = f.readlines()

            target_line = lines[line_number]
            if line_number > 0:
                prev_line = lines[line_number - 1]

        return (target_line, prev_line)

    def _get_extended_error(self):
        '''
        Given an object reporting the location of the exception in a file, return
        detailed information regarding it including:

          * the line which caused the error as well as the one preceding it
          * causes and suggested remedies for common syntax errors

        If this error was created with show_content=False, the reporting of content
        is suppressed, as the file contents may be sensitive (ie. vault data).
        '''

        error_message = ''

        try:
            (src_file, line_number, col_number) = self._obj.ansible_pos
            error_message += YAML_POSITION_DETAILS % (src_file, line_number, col_number)
            if src_file not in ('<string>', '<unicode>') and self._show_content:
                (target_line, prev_line) = self._get_error_lines_from_file(src_file, line_number - 1)
                if target_line:
                    stripped_line = target_line.replace(" ","")
                    arrow_line = (" " * (col_number-1)) + "^ here"
                    #header_line = ("=" * 73)
                    error_message += "\nThe offending line appears to be:\n\n%s\n%s\n%s\n" % (prev_line.rstrip(), target_line.rstrip(), arrow_line)

                    # common error/remediation checking here:
                    # check for unquoted vars starting lines
                    # NOTE(review): the `or` below makes the quote check nearly always
                    # true when both quote styles aren't present — confirm intent.
                    if ('{{' in target_line and '}}' in target_line) and ('"{{' not in target_line or "'{{" not in target_line):
                        error_message += YAML_COMMON_UNQUOTED_VARIABLE_ERROR
                    # check for common dictionary mistakes
                    elif ":{{" in stripped_line and "}}" in stripped_line:
                        error_message += YAML_COMMON_DICT_ERROR
                    # check for common unquoted colon mistakes
                    elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
                        error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
                    # otherwise, check for some common quoting mistakes
                    else:
                        parts = target_line.split(":")
                        if len(parts) > 1:
                            middle = parts[1].strip()
                            match = False
                            unbalanced = False

                            if middle.startswith("'") and not middle.endswith("'"):
                                match = True
                            elif middle.startswith('"') and not middle.endswith('"'):
                                match = True

                            if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
                                unbalanced = True

                            if match:
                                error_message += YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR
                            if unbalanced:
                                error_message += YAML_COMMON_UNBALANCED_QUOTES_ERROR

        except (IOError, TypeError):
            error_message += '\n(could not open file to display line)'
        except IndexError:
            error_message += '\n(specified line no longer in file, maybe it changed?)'

        return error_message
+
# Concrete error hierarchy rooted at AnsibleError; callers catch the most
# specific type that applies.

class AnsibleOptionsError(AnsibleError):
    ''' bad or incomplete options passed '''
    pass

class AnsibleParserError(AnsibleError):
    ''' something was detected early that is wrong about a playbook or data file '''
    pass

class AnsibleInternalError(AnsibleError):
    ''' internal safeguards tripped, something happened in the code that should never happen '''
    pass

class AnsibleRuntimeError(AnsibleError):
    ''' ansible had a problem while running a playbook '''
    pass

class AnsibleModuleError(AnsibleRuntimeError):
    ''' a module failed somehow '''
    pass

class AnsibleConnectionFailure(AnsibleRuntimeError):
    ''' the transport / connection_plugin had a fatal error '''
    pass

class AnsibleFilterError(AnsibleRuntimeError):
    ''' a templating failure '''
    pass

class AnsibleLookupError(AnsibleRuntimeError):
    ''' a lookup failure '''
    pass

class AnsibleCallbackError(AnsibleRuntimeError):
    ''' a callback failure '''
    pass

class AnsibleUndefinedVariable(AnsibleRuntimeError):
    ''' an undefined variable was encountered while templating '''
    pass

class AnsibleFileNotFound(AnsibleRuntimeError):
    ''' a file missing failure '''
    pass
diff --git a/lib/ansible/errors/yaml_strings.py b/lib/ansible/errors/yaml_strings.py
new file mode 100644
index 0000000000..dcd6ffd79f
--- /dev/null
+++ b/lib/ansible/errors/yaml_strings.py
@@ -0,0 +1,118 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__all__ = [
+ 'YAML_SYNTAX_ERROR',
+ 'YAML_POSITION_DETAILS',
+ 'YAML_COMMON_DICT_ERROR',
+ 'YAML_COMMON_UNQUOTED_VARIABLE_ERROR',
+ 'YAML_COMMON_UNQUOTED_COLON_ERROR',
+ 'YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR',
+ 'YAML_COMMON_UNBALANCED_QUOTES_ERROR',
+]
+
+YAML_SYNTAX_ERROR = """\
+Syntax Error while loading YAML.
+"""
+
+YAML_POSITION_DETAILS = """\
+The error appears to have been in '%s': line %s, column %s, but may
+be elsewhere in the file depending on the exact syntax problem.
+"""
+
+YAML_COMMON_DICT_ERROR = """\
+This one looks easy to fix. YAML thought it was looking for the start of a
+hash/dictionary and was confused to see a second "{". Most likely this was
+meant to be an ansible template evaluation instead, so we have to give the
+parser a small hint that we wanted a string instead. The solution here is to
+just quote the entire value.
+
+For instance, if the original line was:
+
+ app_path: {{ base_path }}/foo
+
+It should be written as:
+
+ app_path: "{{ base_path }}/foo"
+"""
+
+YAML_COMMON_UNQUOTED_VARIABLE_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+missing quotes. Always quote template expression brackets when they
+start a value. For instance:
+
+ with_items:
+ - {{ foo }}
+
+Should be written as:
+
+ with_items:
+ - "{{ foo }}"
+"""
+
+YAML_COMMON_UNQUOTED_COLON_ERROR = """\
+This one looks easy to fix. There seems to be an extra unquoted colon in the line
+and this is confusing the parser. It was only expecting to find one free
+colon. The solution is just add some quotes around the colon, or quote the
+entire line after the first colon.
+
+For instance, if the original line was:
+
+ copy: src=file.txt dest=/path/filename:with_colon.txt
+
+It can be written as:
+
+ copy: src=file.txt dest='/path/filename:with_colon.txt'
+
+Or:
+
+ copy: 'src=file.txt dest=/path/filename:with_colon.txt'
+"""
+
+YAML_COMMON_PARTIALLY_QUOTED_LINE_ERROR = """\
+This one looks easy to fix. It seems that there is a value started
+with a quote, and the YAML parser is expecting to see the line ended
+with the same kind of quote. For instance:
+
+ when: "ok" in result.stdout
+
+Could be written as:
+
+ when: '"ok" in result.stdout'
+
+Or equivalently:
+
+ when: "'ok' in result.stdout"
+"""
+
+YAML_COMMON_UNBALANCED_QUOTES_ERROR = """\
+We could be wrong, but this one looks like it might be an issue with
+unbalanced quotes. If starting a value with a quote, make sure the
+line ends with the same set of quotes. For instance this arbitrary
+example:
+
+ foo: "bad" "wolf"
+
+Could be written as:
+
+ foo: '"bad" "wolf"'
+"""
+
diff --git a/lib/ansible/executor/__init__.py b/lib/ansible/executor/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/executor/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/executor/connection_info.py b/lib/ansible/executor/connection_info.py
new file mode 100644
index 0000000000..1c168a8e26
--- /dev/null
+++ b/lib/ansible/executor/connection_info.py
@@ -0,0 +1,270 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pipes
+import random
+
+from ansible import constants as C
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
+from ansible.errors import AnsibleError
+
+__all__ = ['ConnectionInformation']
+
+
+class ConnectionInformation:
+
+ '''
+ This class is used to consolidate the connection information for
+ hosts in a play and child tasks, where the task may override some
+ connection/authentication information.
+ '''
+
+    def __init__(self, play=None, options=None, passwords=None):
+
+        if passwords is None:
+            passwords = {}
+
+        # connection/transport settings (how, and as whom, we reach the host)
+        self.connection = None
+        self.remote_addr = None
+        self.remote_user = None
+        self.password = passwords.get('conn_pass','')
+        self.port = None
+        self.private_key_file = C.DEFAULT_PRIVATE_KEY_FILE
+        self.timeout = C.DEFAULT_TIMEOUT
+
+        # privilege escalation (become) settings
+        self.become = None
+        self.become_method = None
+        self.become_user = None
+        self.become_pass = passwords.get('become_pass','')
+
+        # general run flags (TODO: these may belong outside connection info)
+        self.verbosity = 0
+        self.only_tags = set()
+        self.skip_tags = set()
+        self.no_log = False
+        self.check_mode = False
+
+        #TODO: just pull options setup to above?
+        # options are applied before play data so the play can override them
+        if options:
+            self.set_options(options)
+
+        if play:
+            self.set_play(play)
+
+ def __repr__(self):
+ value = "CONNECTION INFO:\n"
+ fields = self._get_fields()
+ fields.sort()
+ for field in fields:
+ value += "%20s : %s\n" % (field, getattr(self, field))
+ return value
+
+ def set_play(self, play):
+ '''
+ Configures this connection information instance with data from
+ the play class.
+ '''
+
+ if play.connection:
+ self.connection = play.connection
+
+ if play.remote_user:
+ self.remote_user = play.remote_user
+
+ if play.port:
+ self.port = int(play.port)
+
+ if play.become is not None:
+ self.become = play.become
+ if play.become_method:
+ self.become_method = play.become_method
+ if play.become_user:
+ self.become_user = play.become_user
+ self.become_pass = play.become_pass
+
+ # non connection related
+ self.no_log = play.no_log
+ self.environment = play.environment
+
+ def set_options(self, options):
+ '''
+ Configures this connection information instance with data from
+ options specified by the user on the command line. These have a
+ higher precedence than those set on the play or host.
+ '''
+
+ if options.connection:
+ self.connection = options.connection
+
+ self.remote_user = options.remote_user
+ self.private_key_file = options.private_key_file
+
+ # privilege escalation
+ self.become = options.become
+ self.become_method = options.become_method
+ self.become_user = options.become_user
+ self.become_pass = ''
+
+ # general flags (should we move out?)
+ if options.verbosity:
+ self.verbosity = options.verbosity
+ #if options.no_log:
+ # self.no_log = boolean(options.no_log)
+ if options.check:
+ self.check_mode = boolean(options.check)
+
+ # get the tag info from options, converting a comma-separated list
+ # of values into a proper list if need be. We check to see if the
+ # options have the attribute, as it is not always added via the CLI
+ if hasattr(options, 'tags'):
+ if isinstance(options.tags, list):
+ self.only_tags.update(options.tags)
+ elif isinstance(options.tags, basestring):
+ self.only_tags.update(options.tags.split(','))
+
+ if len(self.only_tags) == 0:
+ self.only_tags = set(['all'])
+
+ if hasattr(options, 'skip_tags'):
+ if isinstance(options.skip_tags, list):
+ self.skip_tags.update(options.skip_tags)
+ elif isinstance(options.skip_tags, basestring):
+ self.skip_tags.update(options.skip_tags.split(','))
+
+ def copy(self, ci):
+ '''
+ Copies the connection info from another connection info object, used
+ when merging in data from task overrides.
+ '''
+
+ for field in self._get_fields():
+ value = getattr(ci, field, None)
+ if isinstance(value, dict):
+ setattr(self, field, value.copy())
+ elif isinstance(value, set):
+ setattr(self, field, value.copy())
+ elif isinstance(value, list):
+ setattr(self, field, value[:])
+ else:
+ setattr(self, field, value)
+
+ def set_task_override(self, task):
+ '''
+ Sets attributes from the task if they are set, which will override
+ those from the play.
+ '''
+
+ new_info = ConnectionInformation()
+ new_info.copy(self)
+
+ for attr in ('connection', 'remote_user', 'become', 'become_user', 'become_pass', 'become_method', 'environment', 'no_log'):
+ if hasattr(task, attr):
+ attr_val = getattr(task, attr)
+ if attr_val:
+ setattr(new_info, attr, attr_val)
+
+ return new_info
+
+    def make_become_cmd(self, cmd, executable, become_settings=None):
+
+        """
+        helper to wrap 'cmd' in a privilege escalation command (sudo/su/pbrun/pfexec); returns (command, prompt, success_key)
+        """
+
+        # FIXME: become settings should probably be stored in the connection info itself
+        if become_settings is None:
+            become_settings = {}
+
+        randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
+        success_key = 'BECOME-SUCCESS-%s' % randbits
+        prompt = None
+        becomecmd = None
+
+        executable = executable or '$SHELL'
+
+        success_cmd = pipes.quote('echo %s; %s' % (success_key, cmd))
+        if self.become:
+            if self.become_method == 'sudo':
+                # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
+                # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
+                # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
+                # string to the user's shell. We loop reading output until we see the randomly-generated
+                # sudo prompt set with the -p option.
+                prompt = '[sudo via ansible, key=%s] password: ' % randbits
+                exe = become_settings.get('sudo_exe', C.DEFAULT_SUDO_EXE)
+                flags = become_settings.get('sudo_flags', C.DEFAULT_SUDO_FLAGS)
+                becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
+                    (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, self.become_user, executable, success_cmd)
+
+            elif self.become_method == 'su':
+                exe = become_settings.get('su_exe', C.DEFAULT_SU_EXE)
+                flags = become_settings.get('su_flags', C.DEFAULT_SU_FLAGS)
+                becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, self.become_user, executable, success_cmd)
+
+            elif self.become_method == 'pbrun':
+                exe = become_settings.get('pbrun_exe', 'pbrun')
+                flags = become_settings.get('pbrun_flags', '')
+                becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, self.become_user, success_cmd)
+
+            elif self.become_method == 'pfexec':
+                exe = become_settings.get('pfexec_exe', 'pfexec')
+                flags = become_settings.get('pfexec_flags', '')
+                # no user flag needed; pfexec determines the user from its own exec_attr profiles
+                becomecmd = '%s %s "%s"' % (exe, flags, success_cmd)
+
+            else:
+                raise AnsibleError("Privilege escalation method not found: %s" % self.become_method)
+
+            return (('%s -c ' % executable) + pipes.quote(becomecmd), prompt, success_key)
+
+        return (cmd, "", "")
+
+ def check_become_success(self, output, become_settings):
+ #TODO: implement
+ pass
+
+ def _get_fields(self):
+ return [i for i in self.__dict__.keys() if i[:1] != '_']
+
+ def post_validate(self, templar):
+ '''
+ Finalizes templated values which may be set on this objects fields.
+ '''
+
+ for field in self._get_fields():
+ value = templar.template(getattr(self, field))
+ setattr(self, field, value)
+
+ def update_vars(self, variables):
+ '''
+ Adds 'magic' variables relating to connections to the variable dictionary provided.
+ '''
+
+ variables['ansible_connection'] = self.connection
+ variables['ansible_ssh_host'] = self.remote_addr
+ variables['ansible_ssh_pass'] = self.password
+ variables['ansible_ssh_port'] = self.port
+ variables['ansible_ssh_user'] = self.remote_user
+ variables['ansible_ssh_private_key_file'] = self.private_key_file
diff --git a/lib/ansible/executor/module_common.py b/lib/ansible/executor/module_common.py
new file mode 100644
index 0000000000..535fbd45e3
--- /dev/null
+++ b/lib/ansible/executor/module_common.py
@@ -0,0 +1,199 @@
+# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# from python and deps
+from six.moves import StringIO
+import json
+import os
+import shlex
+
+# from Ansible
+from ansible import __version__
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.parsing.utils.jsonify import jsonify
+
+REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
+REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
+REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
+REPLACER_WINDOWS = "# POWERSHELL_COMMON"
+REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
+
+# We could end up writing out parameters with unicode characters so we need to
+# specify an encoding for the python source file
+ENCODING_STRING = '# -*- coding: utf-8 -*-'
+
+# we've moved the module_common relative to the snippets, so fix the path
+_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
+
+# ******************************************************************************
+
+def _slurp(path):  # read and return the whole file at 'path'; raises AnsibleError if it does not exist
+    if not os.path.exists(path):
+        raise AnsibleError("imported module support code does not exist at %s" % path)
+    fd = open(path)
+    data = fd.read()
+    fd.close()
+    return data
+
+def _find_snippet_imports(module_data, module_path, strip_comments):
+    """
+    Given the source of the module, inline any module_utils snippets it
+    references and return (modified_source, module_style).
+    """
+
+    module_style = 'old'
+    if REPLACER in module_data:
+        module_style = 'new'
+    elif 'from ansible.module_utils.' in module_data:
+        module_style = 'new'
+    elif 'WANT_JSON' in module_data:
+        module_style = 'non_native_want_json'
+
+    output = StringIO()
+    lines = module_data.split('\n')
+    snippet_names = []
+
+    for line in lines:
+
+        if REPLACER in line:
+            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
+            snippet_names.append('basic')
+        if REPLACER_WINDOWS in line:
+            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
+            output.write(ps_data)
+            snippet_names.append('powershell')
+        elif line.startswith('from ansible.module_utils.'):
+            tokens=line.split(".")
+            import_error = False
+            if len(tokens) != 3:
+                import_error = True
+            if " import *" not in line:
+                import_error = True
+            if import_error:
+                raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
+            snippet_name = tokens[2].split()[0]
+            snippet_names.append(snippet_name)
+            output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
+        else:
+            if strip_comments and (line.startswith("#") or line == ''):
+                continue
+            output.write(line)
+            output.write("\n")
+
+    if not module_path.endswith(".ps1"):
+        # Unixy modules
+        if len(snippet_names) > 0 and not 'basic' in snippet_names:
+            raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
+    else:
+        # Windows modules
+        if len(snippet_names) > 0 and not 'powershell' in snippet_names:
+            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
+
+    return (output.getvalue(), module_style)
+
+# ******************************************************************************
+
+def modify_module(module_path, module_args, strip_comments=False):
+ """
+ Used to insert chunks of code into modules before transfer rather than
+ doing regular python imports. This allows for more efficient transfer in
+ a non-bootstrapping scenario by not moving extra files over the wire and
+ also takes care of embedding arguments in the transferred modules.
+
+ This version is done in such a way that local imports can still be
+ used in the module code, so IDEs don't have to be aware of what is going on.
+
+ Example:
+
+ from ansible.module_utils.basic import *
+
+ ... will result in the insertion of basic.py into the module
+ from the module_utils/ directory in the source tree.
+
+ All modules are required to import at least basic, though there will also
+ be other snippets.
+
+ For powershell, there's equivalent conventions like this:
+
+ # POWERSHELL_COMMON
+
+ which results in the inclusion of the common code from powershell.ps1
+
+ """
+ ### TODO: Optimization ideas if this code is actually a source of slowness:
+ # * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
+ # * Use pyminifier if installed
+ # * comment stripping/pyminifier needs to have config setting to turn it
+ # off for debugging purposes (goes along with keep remote but should be
+ # separate otherwise users wouldn't be able to get info on what the
+ # minifier output)
+ # * Only split into lines and recombine into strings once
+ # * Cache the modified module? If only the args are different and we do
+    #       that as the last step we could cache all the work up to that point.
+
+ with open(module_path) as f:
+
+ # read in the module source
+ module_data = f.read()
+
+ (module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
+
+ #module_args_json = jsonify(module_args)
+ module_args_json = json.dumps(module_args)
+ encoded_args = repr(module_args_json.encode('utf-8'))
+
+ # these strings should be part of the 'basic' snippet which is required to be included
+ module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
+ module_data = module_data.replace(REPLACER_COMPLEX, encoded_args)
+
+ # FIXME: we're not passing around an inject dictionary anymore, so
+ # this needs to be fixed with whatever method we use for vars
+ # like this moving forward
+ #if module_style == 'new':
+ # facility = C.DEFAULT_SYSLOG_FACILITY
+ # if 'ansible_syslog_facility' in inject:
+ # facility = inject['ansible_syslog_facility']
+ # module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
+
+ lines = module_data.split(b"\n", 1)
+ shebang = None
+ if lines[0].startswith(b"#!"):
+ shebang = lines[0].strip()
+ args = shlex.split(str(shebang[2:]))
+ interpreter = args[0]
+ interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
+
+ # FIXME: more inject stuff here...
+ #from ansible.utils.unicode import to_bytes
+ #if interpreter_config in inject:
+ # interpreter = to_bytes(inject[interpreter_config], errors='strict')
+ # lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
+
+ lines.insert(1, ENCODING_STRING)
+ else:
+ lines.insert(0, ENCODING_STRING)
+
+ module_data = b"\n".join(lines)
+
+ return (module_data, module_style, shebang)
+
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
new file mode 100644
index 0000000000..dc4d4c7d5d
--- /dev/null
+++ b/lib/ansible/executor/play_iterator.py
@@ -0,0 +1,302 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import *
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+
+from ansible.utils.boolean import boolean
+
+__all__ = ['PlayIterator']
+
+class HostState:
+ def __init__(self, blocks):
+ self._blocks = blocks[:]
+
+ self.cur_block = 0
+ self.cur_regular_task = 0
+ self.cur_rescue_task = 0
+ self.cur_always_task = 0
+ self.cur_role = None
+ self.run_state = PlayIterator.ITERATING_SETUP
+ self.fail_state = PlayIterator.FAILED_NONE
+ self.pending_setup = False
+ self.child_state = None
+
+ def __repr__(self):
+ return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, role=%s, run_state=%d, fail_state=%d, pending_setup=%s, child state? %s" % (
+ self.cur_block,
+ self.cur_regular_task,
+ self.cur_rescue_task,
+ self.cur_always_task,
+ self.cur_role,
+ self.run_state,
+ self.fail_state,
+ self.pending_setup,
+ self.child_state,
+ )
+
+ def get_current_block(self):
+ return self._blocks[self.cur_block]
+
+ def copy(self):
+ new_state = HostState(self._blocks)
+ new_state.cur_block = self.cur_block
+ new_state.cur_regular_task = self.cur_regular_task
+ new_state.cur_rescue_task = self.cur_rescue_task
+ new_state.cur_always_task = self.cur_always_task
+ new_state.cur_role = self.cur_role
+ new_state.run_state = self.run_state
+ new_state.fail_state = self.fail_state
+ new_state.pending_setup = self.pending_setup
+ new_state.child_state = self.child_state
+ return new_state
+
+class PlayIterator:
+
+ # the primary running states for the play iteration
+ ITERATING_SETUP = 0
+ ITERATING_TASKS = 1
+ ITERATING_RESCUE = 2
+ ITERATING_ALWAYS = 3
+ ITERATING_COMPLETE = 4
+
+ # the failure states for the play iteration, which are powers
+ # of 2 as they may be or'ed together in certain circumstances
+ FAILED_NONE = 0
+ FAILED_SETUP = 1
+ FAILED_TASKS = 2
+ FAILED_RESCUE = 4
+ FAILED_ALWAYS = 8
+
+ def __init__(self, inventory, play, connection_info, all_vars):
+ self._play = play
+
+ self._blocks = []
+ for block in self._play.compile():
+ new_block = block.filter_tagged_tasks(connection_info, all_vars)
+ if new_block.has_tasks():
+ self._blocks.append(new_block)
+
+ self._host_states = {}
+ for host in inventory.get_hosts(self._play.hosts):
+ self._host_states[host.name] = HostState(blocks=self._blocks)
+
+ def get_host_state(self, host):
+ try:
+ return self._host_states[host.name].copy()
+ except KeyError:
+ raise AnsibleError("invalid host (%s) specified for playbook iteration" % host)
+
+    def get_next_task_for_host(self, host, peek=False):
+
+        s = self.get_host_state(host)
+
+        task = None
+        if s.run_state == self.ITERATING_COMPLETE:
+            return None
+        elif s.run_state == self.ITERATING_SETUP:
+            s.run_state = self.ITERATING_TASKS
+            s.pending_setup = True
+            if (self._play.gather_facts == 'smart' and not host._gathered_facts) or boolean(self._play.gather_facts):
+                if not peek:
+                    # mark the host as having gathered facts
+                    host.set_gathered_facts(True)
+
+                task = Task()
+                task.action = 'setup'
+                task.args = {}
+                task.set_loader(self._play._loader)
+            else:
+                s.pending_setup = False
+
+        if not task:
+            (s, task) = self._get_next_task_from_state(s, peek=peek)
+
+        if task and task._role:
+            # if we had a current role, mark that role as completed
+            if s.cur_role and task._role != s.cur_role and s.cur_role._had_task_run and not peek:
+                s.cur_role._completed = True
+            s.cur_role = task._role
+
+        if not peek:
+            self._host_states[host.name] = s
+
+        return (s, task)
+
+
+ def _get_next_task_from_state(self, state, peek):
+
+ task = None
+
+ # if we previously encountered a child block and we have a
+ # saved child state, try and get the next task from there
+ if state.child_state:
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
+ if task:
+ return (state.child_state, task)
+ else:
+ state.child_state = None
+
+ # try and find the next task, given the current state.
+ while True:
+ # try to get the current block from the list of blocks, and
+ # if we run past the end of the list we know we're done with
+ # this block
+ try:
+ block = state._blocks[state.cur_block]
+ except IndexError:
+ state.run_state = self.ITERATING_COMPLETE
+ return (state, None)
+
+ if state.run_state == self.ITERATING_TASKS:
+ # clear the pending setup flag, since we're past that and it didn't fail
+ if state.pending_setup:
+ state.pending_setup = False
+
+ if state.fail_state & self.FAILED_TASKS == self.FAILED_TASKS:
+ state.run_state = self.ITERATING_RESCUE
+ elif state.cur_regular_task >= len(block.block):
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ task = block.block[state.cur_regular_task]
+ state.cur_regular_task += 1
+
+ elif state.run_state == self.ITERATING_RESCUE:
+ if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
+ state.run_state = self.ITERATING_ALWAYS
+ elif state.cur_rescue_task >= len(block.rescue):
+ if len(block.rescue) > 0:
+ state.fail_state = self.FAILED_NONE
+ state.run_state = self.ITERATING_ALWAYS
+ else:
+ task = block.rescue[state.cur_rescue_task]
+ state.cur_rescue_task += 1
+
+ elif state.run_state == self.ITERATING_ALWAYS:
+ if state.cur_always_task >= len(block.always):
+ if state.fail_state != self.FAILED_NONE:
+ state.run_state = self.ITERATING_COMPLETE
+ else:
+ state.cur_block += 1
+ state.cur_regular_task = 0
+ state.cur_rescue_task = 0
+ state.cur_always_task = 0
+ state.run_state = self.ITERATING_TASKS
+ state.child_state = None
+ else:
+ task = block.always[state.cur_always_task]
+ state.cur_always_task += 1
+
+ elif state.run_state == self.ITERATING_COMPLETE:
+ return (state, None)
+
+ # if the current task is actually a child block, we dive into it
+ if isinstance(task, Block):
+ state.child_state = HostState(blocks=[task])
+ state.child_state.run_state = self.ITERATING_TASKS
+ state.child_state.cur_role = state.cur_role
+ (state.child_state, task) = self._get_next_task_from_state(state.child_state, peek=peek)
+
+ # if something above set the task, break out of the loop now
+ if task:
+ break
+
+ return (state, task)
+
+ def mark_host_failed(self, host):
+ s = self.get_host_state(host)
+ if s.pending_setup:
+ s.fail_state |= self.FAILED_SETUP
+ s.run_state = self.ITERATING_COMPLETE
+ elif s.run_state == self.ITERATING_TASKS:
+ s.fail_state |= self.FAILED_TASKS
+ s.run_state = self.ITERATING_RESCUE
+ elif s.run_state == self.ITERATING_RESCUE:
+ s.fail_state |= self.FAILED_RESCUE
+ s.run_state = self.ITERATING_ALWAYS
+ elif s.run_state == self.ITERATING_ALWAYS:
+ s.fail_state |= self.FAILED_ALWAYS
+ s.run_state = self.ITERATING_COMPLETE
+ self._host_states[host.name] = s
+
+ def get_failed_hosts(self):
+ return dict((host, True) for (host, state) in self._host_states.iteritems() if state.run_state == self.ITERATING_COMPLETE and state.fail_state != self.FAILED_NONE)
+
+ def get_original_task(self, host, task):
+ '''
+ Finds the task in the task list which matches the UUID of the given task.
+ The executor engine serializes/deserializes objects as they are passed through
+ the different processes, and not all data structures are preserved. This method
+ allows us to find the original task passed into the executor engine.
+ '''
+ def _search_block(block, task):
+ for t in block.block:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ for t in block.rescue:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ for t in block.always:
+ if isinstance(t, Block):
+ res = _search_block(t, task)
+ if res:
+ return res
+ elif t._uuid == task._uuid:
+ return t
+ return None
+
+ s = self.get_host_state(host)
+ for block in s._blocks:
+ res = _search_block(block, task)
+ if res:
+ return res
+
+ return None
+
+ def add_tasks(self, host, task_list):
+ s = self.get_host_state(host)
+ target_block = s._blocks[s.cur_block].copy(exclude_parent=True)
+
+ if s.run_state == self.ITERATING_TASKS:
+ before = target_block.block[:s.cur_regular_task]
+ after = target_block.block[s.cur_regular_task:]
+ target_block.block = before + task_list + after
+ elif s.run_state == self.ITERATING_RESCUE:
+ before = target_block.rescue[:s.cur_rescue_task]
+ after = target_block.rescue[s.cur_rescue_task:]
+ target_block.rescue = before + task_list + after
+ elif s.run_state == self.ITERATING_ALWAYS:
+ before = target_block.always[:s.cur_always_task]
+ after = target_block.always[s.cur_always_task:]
+ target_block.always = before + task_list + after
+
+ s._blocks[s.cur_block] = target_block
+ self._host_states[host.name] = s
+
diff --git a/lib/ansible/executor/playbook_executor.py b/lib/ansible/executor/playbook_executor.py
new file mode 100644
index 0000000000..2d5958697b
--- /dev/null
+++ b/lib/ansible/executor/playbook_executor.py
@@ -0,0 +1,211 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import signal
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.executor.task_queue_manager import TaskQueueManager
+from ansible.playbook import Playbook
+from ansible.template import Templar
+
+from ansible.utils.color import colorize, hostcolor
+from ansible.utils.debug import debug
+
+class PlaybookExecutor:
+
+    '''
+    This is the primary class for executing playbooks, and thus the
+    basis for bin/ansible-playbook operation.
+    '''
+
+    def __init__(self, playbooks, inventory, variable_manager, loader, display, options, passwords):
+        # playbooks: list of playbook file paths; the remaining parameters are
+        # the shared runtime services (inventory, variable manager, loader,
+        # display, parsed CLI options, connection/become passwords).
+        self._playbooks = playbooks
+        self._inventory = inventory
+        self._variable_manager = variable_manager
+        self._loader = loader
+        self._display = display
+        self._options = options
+        self.passwords = passwords
+
+        # list-only modes (--list-hosts/--list-tasks/--list-tags) never run
+        # tasks, so no task queue manager (and no worker processes) is created
+        if options.listhosts or options.listtasks or options.listtags:
+            self._tqm = None
+        else:
+            self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, display=display, options=options, passwords=self.passwords)
+
+    def run(self):
+
+        '''
+        Run the given playbook, based on the settings in the play which
+        may limit the runs to serialized groups, etc.
+
+        In listing mode, returns the collected per-playbook entries instead
+        of a numeric result code.
+        '''
+
+        # NOTE(review): _cleanup() dereferences self._tqm unconditionally, so a
+        # SIGINT delivered while in a listing mode (self._tqm is None) would
+        # raise AttributeError inside the handler -- confirm and guard if so.
+        signal.signal(signal.SIGINT, self._cleanup)
+
+        result = 0
+        entrylist = []
+        entry = {}
+        try:
+            for playbook_path in self._playbooks:
+                pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
+
+                if self._tqm is None: # we are doing a listing
+                    entry = {'playbook': playbook_path}
+                    entry['plays'] = []
+
+                i = 1
+                plays = pb.get_plays()
+                self._display.vv('%d plays in %s' % (len(plays), playbook_path))
+
+                for play in plays:
+                    # clear any per-batch host restriction left by a prior play
+                    self._inventory.remove_restriction()
+
+                    # Create a temporary copy of the play here, so we can run post_validate
+                    # on it without the templating changes affecting the original object.
+                    all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
+                    templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
+                    new_play = play.copy()
+                    new_play.post_validate(templar)
+
+                    if self._tqm is None:
+                        # we are just doing a listing
+
+                        pname = new_play.get_name().strip()
+                        if pname == 'PLAY: <no name specified>':
+                            pname = 'PLAY: #%d' % i
+                        p = { 'name': pname }
+
+                        if self._options.listhosts:
+                            p['pattern']=play.hosts
+                            p['hosts']=set(self._inventory.get_hosts(new_play.hosts))
+
+                        #TODO: play tasks are really blocks, need to figure out how to get task objects from them
+                        elif self._options.listtasks:
+                            p['tasks'] = []
+                            for task in play.get_tasks():
+                                p['tasks'].append(task)
+                                #p['tasks'].append({'name': task.get_name().strip(), 'tags': task.tags})
+
+                        elif self._options.listtags:
+                            p['tags'] = set(new_play.tags)
+                            for task in play.get_tasks():
+                                # NOTE(review): updating the set with the task object
+                                # itself looks unintended; the commented line below
+                                # suggests task.tags was meant -- confirm.
+                                p['tags'].update(task)
+                                #p['tags'].update(task.tags)
+                        entry['plays'].append(p)
+
+                    else:
+                        # we are actually running plays
+                        for batch in self._get_serialized_batches(new_play):
+                            if len(batch) == 0:
+                                # no hosts matched: still fire the play-start and
+                                # no-hosts-matched callbacks so output is consistent
+                                self._tqm.send_callback('v2_playbook_on_play_start', new_play)
+                                self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
+                                result = 0
+                                break
+                            # restrict the inventory to the hosts in the serialized batch
+                            self._inventory.restrict_to_hosts(batch)
+                            # and run it...
+                            result = self._tqm.run(play=play)
+                            if result != 0:
+                                break
+
+                        # a failed batch aborts the remaining plays and playbooks
+                        if result != 0:
+                            break
+
+                    i = i + 1 # per play
+
+                if entry:
+                    entrylist.append(entry) # per playbook
+
+            # listing mode: hand back the entries rather than an exit code
+            if entrylist:
+                return entrylist
+
+        finally:
+            if self._tqm is not None:
+                self._cleanup()
+
+        # FIXME: this stat summary stuff should be cleaned up and moved
+        #        to a new method, if it even belongs here...
+        self._display.banner("PLAY RECAP")
+
+        hosts = sorted(self._tqm._stats.processed.keys())
+        for h in hosts:
+            t = self._tqm._stats.summarize(h)
+
+            # colorized recap line for the screen...
+            self._display.display("%s : %s %s %s %s" % (
+                hostcolor(h, t),
+                colorize('ok', t['ok'], 'green'),
+                colorize('changed', t['changed'], 'yellow'),
+                colorize('unreachable', t['unreachable'], 'red'),
+                colorize('failed', t['failures'], 'red')),
+                screen_only=True
+            )
+
+            # ...and an uncolored duplicate for the log file
+            self._display.display("%s : %s %s %s %s" % (
+                hostcolor(h, t, False),
+                colorize('ok', t['ok'], None),
+                colorize('changed', t['changed'], None),
+                colorize('unreachable', t['unreachable'], None),
+                colorize('failed', t['failures'], None)),
+                log_only=True
+            )
+
+        self._display.display("", screen_only=True)
+        # END STATS STUFF
+
+        return result
+
+    def _cleanup(self, signum=None, framenum=None):
+        # doubles as the SIGINT handler, hence the signal-style signature
+        return self._tqm.cleanup()
+
+    def _get_serialized_batches(self, play):
+        '''
+        Returns a list of hosts, subdivided into batches based on
+        the serial size specified in the play.
+        '''
+
+        # make sure we have a unique list of hosts
+        all_hosts = self._inventory.get_hosts(play.hosts)
+
+        # check to see if the serial number was specified as a percentage,
+        # and convert it to an integer value based on the number of hosts
+        # (basestring is Python 2 only)
+        if isinstance(play.serial, basestring) and play.serial.endswith('%'):
+            serial_pct = int(play.serial.replace("%",""))
+            serial = int((serial_pct/100.0) * len(all_hosts))
+        else:
+            serial = int(play.serial)
+
+        # if the serial count was not specified or is invalid, default to
+        # a list of all hosts, otherwise split the list of hosts into chunks
+        # which are based on the serial size
+        if serial <= 0:
+            return [all_hosts]
+        else:
+            serialized_batches = []
+
+            # pop hosts off the front in groups of 'serial'; the final batch
+            # may be smaller than the serial size
+            while len(all_hosts) > 0:
+                play_hosts = []
+                for x in range(serial):
+                    if len(all_hosts) > 0:
+                        play_hosts.append(all_hosts.pop(0))
+
+                serialized_batches.append(play_hosts)
+
+            return serialized_batches
diff --git a/lib/ansible/executor/process/__init__.py b/lib/ansible/executor/process/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/executor/process/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/executor/process/result.py b/lib/ansible/executor/process/result.py
new file mode 100644
index 0000000000..f0416db852
--- /dev/null
+++ b/lib/ansible/executor/process/result.py
@@ -0,0 +1,176 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six.moves import queue
+import multiprocessing
+import os
+import signal
+import sys
+import time
+import traceback
+
+HAS_ATFORK=True
+try:
+ from Crypto.Random import atfork
+except ImportError:
+ HAS_ATFORK=False
+
+from ansible.playbook.handler import Handler
+from ansible.playbook.task import Task
+
+from ansible.utils.debug import debug
+
+__all__ = ['ResultProcess']
+
+
+class ResultProcess(multiprocessing.Process):
+    '''
+    The result worker thread, which reads results from the results
+    queue and fires off callbacks/etc. as necessary.
+    '''
+
+    def __init__(self, final_q, workers):
+
+        # takes a task queue manager as the sole param:
+        # final_q: queue into which processed (tag, ...) tuples are pushed
+        # workers: list of (worker_process, main_q, rslt_q) triples to poll
+        self._final_q = final_q
+        self._workers = workers
+        self._cur_worker = 0      # round-robin cursor into self._workers
+        self._terminated = False
+
+        super(ResultProcess, self).__init__()
+
+    def _send_result(self, result):
+        # push a (tag, data...) tuple onto the final queue without blocking
+        debug("sending result: %s" % (result,))
+        self._final_q.put(result, block=False)
+        debug("done sending result")
+
+    def _read_worker_result(self):
+        '''
+        Round-robins over the worker result queues, returning the first
+        available result, or None if every worker's queue is empty.
+        '''
+        result = None
+        starting_point = self._cur_worker
+        while True:
+            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
+            # advance (and wrap) the cursor before reading, so the next call
+            # starts with the following worker
+            self._cur_worker += 1
+            if self._cur_worker >= len(self._workers):
+                self._cur_worker = 0
+
+            try:
+                if not rslt_q.empty():
+                    # NOTE(review): _cur_worker was already advanced above, so
+                    # these debug messages name the NEXT worker, not the one
+                    # actually read from -- harmless, but misleading in logs.
+                    debug("worker %d has data to read" % self._cur_worker)
+                    result = rslt_q.get(block=False)
+                    debug("got a result from worker %d: %s" % (self._cur_worker, result))
+                    break
+            except queue.Empty:
+                pass
+
+            # stop once we have polled every worker exactly once
+            if self._cur_worker == starting_point:
+                break
+
+        return result
+
+    def terminate(self):
+        # remember we were asked to stop, then terminate the process
+        self._terminated = True
+        super(ResultProcess, self).terminate()
+
+    def run(self):
+        '''
+        The main thread execution, which reads from the results queue
+        indefinitely and sends callbacks/etc. when results are received.
+        '''
+
+        # re-seed PyCrypto's RNG after fork, when available
+        if HAS_ATFORK:
+            atfork()
+
+        while True:
+            try:
+                result = self._read_worker_result()
+                if result is None:
+                    # nothing ready; back off briefly to avoid busy-waiting
+                    time.sleep(0.1)
+                    continue
+
+                host_name = result._host.get_name()
+
+                # send callbacks, execute other options based on the result status
+                # FIXME: this should all be cleaned up and probably moved to a sub-function.
+                #        the fact that this sometimes sends a TaskResult and other times
+                #        sends a raw dictionary back may be confusing, but the result vs.
+                #        results implementation for tasks with loops should be cleaned up
+                #        better than this
+                if result.is_unreachable():
+                    self._send_result(('host_unreachable', result))
+                elif result.is_failed():
+                    self._send_result(('host_task_failed', result))
+                elif result.is_skipped():
+                    self._send_result(('host_task_skipped', result))
+                else:
+                    # if this task is notifying a handler, do it now
+                    if result._task.notify:
+                        # The shared dictionary for notified handlers is a proxy, which
+                        # does not detect when sub-objects within the proxy are modified.
+                        # So, per the docs, we reassign the list so the proxy picks up and
+                        # notifies all other threads
+                        for notify in result._task.notify:
+                            self._send_result(('notify_handler', result._host, notify))
+
+                    if result._task.loop:
+                        # this task had a loop, and has more than one result, so
+                        # loop over all of them instead of a single result
+                        result_items = result._result['results']
+                    else:
+                        result_items = [ result._result ]
+
+                    for result_item in result_items:
+                        #if 'include' in result_item:
+                        #    include_variables = result_item.get('include_variables', dict())
+                        #    if 'item' in result_item:
+                        #        include_variables['item'] = result_item['item']
+                        #    self._send_result(('include', result._host, result._task, result_item['include'], include_variables))
+                        #elif 'add_host' in result_item:
+                        if 'add_host' in result_item:
+                            # this task added a new host (add_host module)
+                            self._send_result(('add_host', result_item))
+                        elif 'add_group' in result_item:
+                            # this task added a new group (group_by module)
+                            self._send_result(('add_group', result._host, result_item))
+                        elif 'ansible_facts' in result_item:
+                            # if this task is registering facts, do that now
+                            if result._task.action in ('set_fact', 'include_vars'):
+                                # set_fact/include_vars register each fact as an
+                                # individual host variable (Python 2 iteritems)
+                                for (key, value) in result_item['ansible_facts'].iteritems():
+                                    self._send_result(('set_host_var', result._host, key, value))
+                            else:
+                                # other modules set the whole facts dict at once
+                                self._send_result(('set_host_facts', result._host, result_item['ansible_facts']))
+
+                    # finally, send the ok for this task
+                    self._send_result(('host_task_ok', result))
+
+                    # if this task is registering a result, do it now
+                    if result._task.register:
+                        self._send_result(('set_host_var', result._host, result._task.register, result._result))
+
+            except queue.Empty:
+                pass
+            except (KeyboardInterrupt, IOError, EOFError):
+                # queue pipe went away or we were interrupted: exit the loop
+                break
+            except:
+                # FIXME: we should probably send a proper callback here instead of
+                #        simply dumping a stack trace on the screen
+                traceback.print_exc()
+                break
+
diff --git a/lib/ansible/executor/process/worker.py b/lib/ansible/executor/process/worker.py
new file mode 100644
index 0000000000..d8e8960fe4
--- /dev/null
+++ b/lib/ansible/executor/process/worker.py
@@ -0,0 +1,155 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six.moves import queue
+import multiprocessing
+import os
+import signal
+import sys
+import time
+import traceback
+
+HAS_ATFORK=True
+try:
+ from Crypto.Random import atfork
+except ImportError:
+ HAS_ATFORK=False
+
+from ansible.errors import AnsibleError, AnsibleConnectionFailure
+from ansible.executor.task_executor import TaskExecutor
+from ansible.executor.task_result import TaskResult
+from ansible.playbook.handler import Handler
+from ansible.playbook.task import Task
+
+from ansible.utils.debug import debug
+
+__all__ = ['WorkerProcess']
+
+
+class WorkerProcess(multiprocessing.Process):
+    '''
+    The worker thread class, which uses TaskExecutor to run tasks
+    read from a job queue and pushes results into a results queue
+    for reading later.
+    '''
+
+    def __init__(self, tqm, main_q, rslt_q, loader):
+
+        # takes a task queue manager as the sole param:
+        # main_q: jobs to execute; rslt_q: where TaskResults are pushed
+        # (the tqm argument itself is not stored here)
+        self._main_q = main_q
+        self._rslt_q = rslt_q
+        self._loader = loader
+
+        # dupe stdin, if we have one, so the forked worker gets its own
+        # file handle (needed e.g. for connection plugins prompting)
+        self._new_stdin = sys.stdin
+        try:
+            fileno = sys.stdin.fileno()
+            if fileno is not None:
+                try:
+                    self._new_stdin = os.fdopen(os.dup(fileno))
+                except OSError, e:
+                    # couldn't dupe stdin, most likely because it's
+                    # not a valid file descriptor, so we just rely on
+                    # using the one that was passed in
+                    pass
+        except ValueError:
+            # couldn't get stdin's fileno, so we just carry on
+            pass
+
+        super(WorkerProcess, self).__init__()
+
+    def run(self):
+        '''
+        Called when the process is started, and loops indefinitely
+        until an error is encountered (typically an IOerror from the
+        queue pipe being disconnected). During the loop, we attempt
+        to pull tasks off the job queue and run them, pushing the result
+        onto the results queue. We also remove the host from the blocked
+        hosts list, to signify that they are ready for their next task.
+        '''
+
+        # re-seed PyCrypto's RNG after fork, when available
+        if HAS_ATFORK:
+            atfork()
+
+        while True:
+            task = None
+            try:
+                if not self._main_q.empty():
+                    debug("there's work to be done!")
+                    (host, task, basedir, job_vars, connection_info, shared_loader_obj) = self._main_q.get(block=False)
+                    debug("got a task/handler to work on: %s" % task)
+
+                    # because the task queue manager starts workers (forks) before the
+                    # playbook is loaded, set the basedir of the loader inherted by
+                    # this fork now so that we can find files correctly
+                    self._loader.set_basedir(basedir)
+
+                    # Serializing/deserializing tasks does not preserve the loader attribute,
+                    # since it is passed to the worker during the forking of the process and
+                    # would be wasteful to serialize. So we set it here on the task now, and
+                    # the task handles updating parent/child objects as needed.
+                    task.set_loader(self._loader)
+
+                    # apply the given task's information to the connection info,
+                    # which may override some fields already set by the play or
+                    # the options specified on the command line
+                    new_connection_info = connection_info.set_task_override(task)
+
+                    # execute the task and build a TaskResult from the result
+                    debug("running TaskExecutor() for %s/%s" % (host, task))
+                    executor_result = TaskExecutor(host, task, job_vars, new_connection_info, self._new_stdin, self._loader, shared_loader_obj).run()
+                    debug("done running TaskExecutor() for %s/%s" % (host, task))
+                    task_result = TaskResult(host, task, executor_result)
+
+                    # put the result on the result queue
+                    debug("sending task result")
+                    self._rslt_q.put(task_result, block=False)
+                    debug("done sending task result")
+
+                else:
+                    # no work available; back off briefly to avoid busy-waiting
+                    time.sleep(0.1)
+
+            except queue.Empty:
+                pass
+            except (IOError, EOFError, KeyboardInterrupt):
+                # queue pipe went away or we were interrupted: exit the loop
+                break
+            except AnsibleConnectionFailure:
+                # report the host as unreachable for the task that failed
+                try:
+                    if task:
+                        task_result = TaskResult(host, task, dict(unreachable=True))
+                        self._rslt_q.put(task_result, block=False)
+                except:
+                    # FIXME: most likely an abort, catch those kinds of errors specifically
+                    break
+            except Exception, e:
+                # any other error is reported as a task failure with the traceback
+                debug("WORKER EXCEPTION: %s" % e)
+                debug("WORKER EXCEPTION: %s" % traceback.format_exc())
+                try:
+                    if task:
+                        task_result = TaskResult(host, task, dict(failed=True, exception=traceback.format_exc(), stdout=''))
+                        self._rslt_q.put(task_result, block=False)
+                except:
+                    # FIXME: most likely an abort, catch those kinds of errors specifically
+                    break
+
+        debug("WORKER PROCESS EXITING")
+
+
diff --git a/lib/ansible/executor/stats.py b/lib/ansible/executor/stats.py
new file mode 100644
index 0000000000..626b2959a4
--- /dev/null
+++ b/lib/ansible/executor/stats.py
@@ -0,0 +1,51 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+class AggregateStats:
+    ''' holds stats about per-host activity during playbook runs '''
+
+    def __init__(self):
+
+        # each dict maps host name -> count for that category;
+        # 'processed' only records that a host was seen (value is always 1)
+        self.processed = {}
+        self.failures  = {}
+        self.ok        = {}
+        self.dark      = {}
+        self.changed   = {}
+        self.skipped   = {}
+
+    def increment(self, what, host):
+        ''' helper function to bump a statistic '''
+
+        # mark the host as processed, then bump the named counter
+        # ('what' must be one of the attribute names set in __init__)
+        self.processed[host] = 1
+        prev = (getattr(self, what)).get(host, 0)
+        getattr(self, what)[host] = prev+1
+
+    def summarize(self, host):
+        ''' return information about a particular host '''
+
+        # 'dark' is reported as 'unreachable', 'failures' stays 'failures'
+        return dict(
+            ok          = self.ok.get(host, 0),
+            failures    = self.failures.get(host, 0),
+            unreachable = self.dark.get(host,0),
+            changed     = self.changed.get(host, 0),
+            skipped     = self.skipped.get(host, 0)
+        )
+
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
new file mode 100644
index 0000000000..2f90b3d87e
--- /dev/null
+++ b/lib/ansible/executor/task_executor.py
@@ -0,0 +1,454 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import pipes
+import subprocess
+import sys
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.executor.connection_info import ConnectionInformation
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.task import Task
+from ansible.plugins import lookup_loader, connection_loader, action_loader
+from ansible.template import Templar
+from ansible.utils.listify import listify_lookup_plugin_terms
+from ansible.utils.unicode import to_unicode
+
+from ansible.utils.debug import debug
+
+__all__ = ['TaskExecutor']
+
+class TaskExecutor:
+
+ '''
+ This is the main worker class for the executor pipeline, which
+ handles loading an action plugin to actually dispatch the task to
+ a given host. This class roughly corresponds to the old Runner()
+ class.
+ '''
+
+    def __init__(self, host, task, job_vars, connection_info, new_stdin, loader, shared_loader_obj):
+        '''
+        Stores the per-task execution context: the target host, the task to
+        run, its variables, the finalized connection information, the worker's
+        duplicated stdin, and the loader objects.
+        '''
+        self._host              = host
+        self._task              = task
+        self._job_vars          = job_vars
+        self._connection_info   = connection_info
+        self._new_stdin         = new_stdin
+        self._loader            = loader
+        self._shared_loader_obj = shared_loader_obj
+
+    def run(self):
+        '''
+        The main executor entrypoint, where we determine if the specified
+        task requires looping and either runs the task with the loop items
+        or executes it directly, returning the (JSON-serialized) result.
+        '''
+
+        debug("in run()")
+
+        try:
+            # lookup plugins need to know if this task is executing from
+            # a role, so that it can properly find files/templates/etc.
+            roledir = None
+            if self._task._role:
+                roledir = self._task._role._role_path
+            self._job_vars['roledir'] = roledir
+
+            items = self._get_loop_items()
+            if items is not None:
+                if len(items) > 0:
+                    item_results = self._run_loop(items)
+
+                    # loop through the item results, and remember the changed/failed
+                    # result flags based on any item there.
+                    # NOTE(review): this tests key *presence*, not truthiness, so an
+                    # item with changed=False still marks the loop changed -- confirm.
+                    changed = False
+                    failed  = False
+                    for item in item_results:
+                        if 'changed' in item:
+                            changed = True
+                        if 'failed' in item:
+                            failed = True
+
+                    # create the overall result item, and set the changed/failed
+                    # flags there to reflect the overall result of the loop
+                    res = dict(results=item_results)
+
+                    if changed:
+                        res['changed'] = True
+
+                    if failed:
+                        res['failed'] = True
+                        res['msg'] = 'One or more items failed'
+                    else:
+                        res['msg'] = 'All items completed'
+                else:
+                    # an empty loop list skips the task entirely
+                    res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
+            else:
+                debug("calling self._execute()")
+                res = self._execute()
+                debug("_execute() done")
+
+            # make sure changed is set in the result, if it's not present
+            if 'changed' not in res:
+                res['changed'] = False
+
+            # NOTE(review): the success path returns a JSON *string* while the
+            # error path below returns a plain dict -- callers must handle both.
+            debug("dumping result to json")
+            result = json.dumps(res)
+            debug("done dumping result, returning")
+            return result
+        except AnsibleError, e:
+            return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
+
+    def _get_loop_items(self):
+        '''
+        Loads a lookup plugin to handle the with_* portion of a task (if specified),
+        and returns the items result.
+
+        Returns None when the task has no loop, or when the named lookup
+        plugin is not known to the lookup_loader.
+        '''
+
+        items = None
+        if self._task.loop and self._task.loop in lookup_loader:
+            # listify the raw loop terms, then run the lookup plugin on them
+            loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
+            items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
+
+        return items
+
+    def _run_loop(self, items):
+        '''
+        Runs the task with the loop items specified and collates the result
+        into an array named 'results' which is inserted into the final result
+        along with the item for which the loop ran.
+
+        Each per-item result dict gets an 'item' key holding the loop item.
+        '''
+
+        results = []
+
+        # make copies of the job vars and task so we can add the item to
+        # the variables and re-validate the task with the item variable
+        task_vars = self._job_vars.copy()
+
+        items = self._squash_items(items, task_vars)
+        for item in items:
+            task_vars['item'] = item
+
+            try:
+                tmp_task = self._task.copy()
+            except AnsibleParserError, e:
+                # a bad task copy counts as a failed item; keep looping
+                results.append(dict(failed=True, msg=str(e)))
+                continue
+
+            # now we swap the internal task with the copy, execute,
+            # and swap them back so we can do the next iteration cleanly
+            (self._task, tmp_task) = (tmp_task, self._task)
+            res = self._execute(variables=task_vars)
+            (self._task, tmp_task) = (tmp_task, self._task)
+
+            # now update the result with the item info, and append the result
+            # to the list of results
+            res['item'] = item
+            results.append(res)
+
+            # FIXME: we should be sending back a callback result for each item in the loop here
+            print(res)
+
+        return results
+
+    def _squash_items(self, items, variables):
+        '''
+        Squash items down to a comma-separated list for certain modules which support it
+        (typically package management modules).
+
+        Items whose conditional evaluates false are dropped from the squashed
+        list; for non-squashable modules the items are returned unchanged.
+        '''
+
+        if len(items) > 0 and self._task.action in ('apt', 'yum', 'pkgng', 'zypper'):
+            final_items = []
+            for item in items:
+                # evaluate the task conditional per item so skipped items
+                # don't end up in the combined package list
+                variables['item'] = item
+                if self._task.evaluate_conditional(variables):
+                    final_items.append(item)
+            return [",".join(final_items)]
+        else:
+            return items
+
+    def _execute(self, variables=None):
+        '''
+        The primary workhorse of the executor system, this runs the task
+        on the specified host (which may be the delegated_to host) and handles
+        the retry/until and block rescue/always execution
+
+        Returns the (possibly retried) result dict from the action handler.
+        '''
+
+        if variables is None:
+            variables = self._job_vars
+
+        templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
+
+        # fields set from the play/task may be based on variables, so we have to
+        # do the same kind of post validation step on it here before we use it.
+        self._connection_info.post_validate(templar=templar)
+
+        # now that the connection information is finalized, we can add 'magic'
+        # variables to the variable dictionary
+        self._connection_info.update_vars(variables)
+
+        # get the connection and the handler for this execution
+        self._connection = self._get_connection(variables)
+        self._handler = self._get_action_handler(connection=self._connection)
+
+        # Evaluate the conditional (if any) for this task, which we do before running
+        # the final task post-validation. We do this before the post validation due to
+        # the fact that the conditional may specify that the task be skipped due to a
+        # variable not being present which would otherwise cause validation to fail
+        if not self._task.evaluate_conditional(variables):
+            debug("when evaulation failed, skipping this task")
+            return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
+
+        # Now we do final validation on the task, which sets all fields to their final values
+        self._task.post_validate(templar=templar)
+
+        # if this task is a TaskInclude, we just return now with a success code so the
+        # main thread can expand the task list for the given host
+        if self._task.action == 'include':
+            include_variables = self._task.args.copy()
+            include_file = include_variables.get('_raw_params')
+            del include_variables['_raw_params']
+            return dict(changed=True, include=include_file, include_variables=include_variables)
+
+        # And filter out any fields which were set to default(omit), and got the omit token value
+        omit_token = variables.get('omit')
+        if omit_token is not None:
+            self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
+
+        # Read some values from the task, so that we can modify them if need be
+        # (retries <= 0 means a single attempt; negative delay becomes 1s)
+        retries = self._task.retries
+        if retries <= 0:
+            retries = 1
+
+        delay = self._task.delay
+        if delay < 0:
+            delay = 1
+
+        # make a copy of the job vars here, in case we need to update them
+        # with the registered variable value later on when testing conditions
+        vars_copy = variables.copy()
+
+        debug("starting attempt loop")
+        result = None
+        for attempt in range(retries):
+            if attempt > 0:
+                # FIXME: this should use the callback/message passing mechanism
+                print("FAILED - RETRYING: %s (%d retries left)" % (self._task, retries-attempt))
+                # annotate the *previous* attempt's result with the count
+                result['attempts'] = attempt + 1
+
+            debug("running the handler")
+            result = self._handler.run(task_vars=variables)
+            debug("handler run complete")
+
+            # ('async' became a keyword in Python 3.7; this is Python 2 code)
+            if self._task.async > 0:
+                # the async_wrapper module returns dumped JSON via its stdout
+                # response, so we parse it here and replace the result
+                try:
+                    result = json.loads(result.get('stdout'))
+                except ValueError, e:
+                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))
+
+                if self._task.poll > 0:
+                    result = self._poll_async_result(result=result)
+
+            # update the local copy of vars with the registered value, if specified,
+            # or any facts which may have been generated by the module execution
+            if self._task.register:
+                vars_copy[self._task.register] = result
+
+            if 'ansible_facts' in result:
+                vars_copy.update(result['ansible_facts'])
+
+            # create a conditional object to evaluate task conditions
+            cond = Conditional(loader=self._loader)
+
+            # FIXME: make sure until is mutually exclusive with changed_when/failed_when
+            if self._task.until:
+                # 'until' wins: retry until the condition is true
+                cond.when = self._task.until
+                if cond.evaluate_conditional(vars_copy):
+                    break
+            elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
+                # changed_when/failed_when override the module's own flags
+                if self._task.changed_when:
+                    cond.when = [ self._task.changed_when ]
+                    result['changed'] = cond.evaluate_conditional(vars_copy)
+                if self._task.failed_when:
+                    cond.when = [ self._task.failed_when ]
+                    failed_when_result = cond.evaluate_conditional(vars_copy)
+                    result['failed_when_result'] = result['failed'] = failed_when_result
+                    if failed_when_result:
+                        break
+            elif 'failed' not in result and result.get('rc', 0) == 0:
+                # if the result is not failed, stop trying
+                break
+
+            # sleep between attempts, but not after the final one
+            if attempt < retries - 1:
+                time.sleep(delay)
+
+        # do the final update of the local variables here, for both registered
+        # values and any facts which may have been created
+        if self._task.register:
+            variables[self._task.register] = result
+
+        if 'ansible_facts' in result:
+            variables.update(result['ansible_facts'])
+
+        # and return
+        debug("attempt loop complete, returning result")
+        return result
+
+    def _poll_async_result(self, result):
+        '''
+        Polls for the specified JID to be complete
+
+        Sleeps task.poll seconds between async_status checks until the task
+        finishes, fails, is skipped, or the task's async time limit runs out.
+        '''
+
+        async_jid = result.get('ansible_job_id')
+        if async_jid is None:
+            return dict(failed=True, msg="No job id was returned by the async task")
+
+        # Create a new psuedo-task to run the async_status module, and run
+        # that (with a sleep for "poll" seconds between each retry) until the
+        # async time limit is exceeded.
+
+        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))
+
+        # Because this is an async task, the action handler is async. However,
+        # we need the 'normal' action handler for the status check, so get it
+        # now via the action_loader
+        normal_handler = action_loader.get(
+            'normal',
+            task=async_task,
+            connection=self._connection,
+            connection_info=self._connection_info,
+            loader=self._loader,
+            shared_loader_obj=self._shared_loader_obj,
+        )
+
+        # NOTE(review): if self._task.async were <= 0 the loop body never runs
+        # and async_result below would be unbound; callers appear to only call
+        # this when async > 0 -- confirm.
+        time_left = self._task.async
+        while time_left > 0:
+            time.sleep(self._task.poll)
+
+            async_result = normal_handler.run()
+            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
+                break
+
+            time_left -= self._task.poll
+
+        if int(async_result.get('finished', 0)) != 1:
+            return dict(failed=True, msg="async task did not complete within the requested time")
+        else:
+            return async_result
+
+ def _get_connection(self, variables):
+ '''
+ Reads the connection property for the host, and returns the
+ correct connection object from the list of connection plugins
+ '''
+
+ # FIXME: delegate_to calculation should be done here
+ # FIXME: calculation of connection params/auth stuff should be done here
+
+ self._connection_info.remote_addr = self._host.ipv4_address
+ if self._task.delegate_to is not None:
+ self._compute_delegate(variables)
+
+ conn_type = self._connection_info.connection
+ if conn_type == 'smart':
+ conn_type = 'ssh'
+ if sys.platform.startswith('darwin') and self._connection_info.remote_pass:
+ # due to a current bug in sshpass on OSX, which can trigger
+ # a kernel panic even for non-privileged users, we revert to
+ # paramiko on that OS when a SSH password is specified
+ conn_type = "paramiko"
+ else:
+ # see if SSH can support ControlPersist if not use paramiko
+ cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ (out, err) = cmd.communicate()
+ if "Bad configuration option" in err:
+ conn_type = "paramiko"
+
+ connection = connection_loader.get(conn_type, self._connection_info, self._new_stdin)
+ if not connection:
+ raise AnsibleError("the connection plugin '%s' was not found" % conn_type)
+
+ return connection
+
+ def _get_action_handler(self, connection):
+ '''
+ Returns the correct action plugin to handle the requested task action
+ '''
+
+ if self._task.action in action_loader:
+ if self._task.async != 0:
+ raise AnsibleError("async mode is not supported with the %s module" % module_name)
+ handler_name = self._task.action
+ elif self._task.async == 0:
+ handler_name = 'normal'
+ else:
+ handler_name = 'async'
+
+ handler = action_loader.get(
+ handler_name,
+ task=self._task,
+ connection=connection,
+ connection_info=self._connection_info,
+ loader=self._loader,
+ shared_loader_obj=self._shared_loader_obj,
+ )
+
+ if not handler:
+ raise AnsibleError("the handler '%s' was not found" % handler_name)
+
+ return handler
+
+ def _compute_delegate(self, variables):
+
+ # get the vars for the delegate by its name
+ try:
+ this_info = variables['hostvars'][self._task.delegate_to]
+ except:
+ # make sure the inject is empty for non-inventory hosts
+ this_info = {}
+
+ # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
+ #self._connection_info.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
+ self._connection_info.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
+ self._connection_info.port = this_info.get('ansible_ssh_port', self._connection_info.port)
+ self._connection_info.password = this_info.get('ansible_ssh_pass', self._connection_info.password)
+ self._connection_info.private_key_file = this_info.get('ansible_ssh_private_key_file', self._connection_info.private_key_file)
+ self._connection_info.connection = this_info.get('ansible_connection', self._connection_info.connection)
+ self._connection_info.become_pass = this_info.get('ansible_sudo_pass', self._connection_info.become_pass)
+
+ if self._connection_info.remote_addr in ('127.0.0.1', 'localhost'):
+ self._connection_info.connection = 'local'
+
+ # Last chance to get private_key_file from global variables.
+ # this is useful if delegated host is not defined in the inventory
+ #if delegate['private_key_file'] is None:
+ # delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
+
+ #if delegate['private_key_file'] is not None:
+ # delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
+
+ for i in this_info:
+ if i.startswith("ansible_") and i.endswith("_interpreter"):
+ variables[i] = this_info[i]
+
diff --git a/lib/ansible/executor/task_queue_manager.py b/lib/ansible/executor/task_queue_manager.py
new file mode 100644
index 0000000000..a875c310d5
--- /dev/null
+++ b/lib/ansible/executor/task_queue_manager.py
@@ -0,0 +1,233 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import multiprocessing
+import os
+import socket
+import sys
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.executor.connection_info import ConnectionInformation
+from ansible.executor.play_iterator import PlayIterator
+from ansible.executor.process.worker import WorkerProcess
+from ansible.executor.process.result import ResultProcess
+from ansible.executor.stats import AggregateStats
+from ansible.plugins import callback_loader, strategy_loader
+from ansible.template import Templar
+
+from ansible.utils.debug import debug
+
+__all__ = ['TaskQueueManager']
+
+class TaskQueueManager:
+
+ '''
+ This class handles the multiprocessing requirements of Ansible by
+ creating a pool of worker forks, a result handler fork, and a
+ manager object with shared datastructures/queues for coordinating
+ work between all processes.
+
+ The queue manager is responsible for loading the play strategy plugin,
+ which dispatches the Play's tasks to hosts.
+ '''
+
+ def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):
+
+ self._inventory = inventory
+ self._variable_manager = variable_manager
+ self._loader = loader
+ self._display = display
+ self._options = options
+ self._stats = AggregateStats()
+ self.passwords = passwords
+
+ # a special flag to help us exit cleanly
+ self._terminated = False
+
+ # this dictionary is used to keep track of notified handlers
+ self._notified_handlers = dict()
+
+ # dictionaries to keep track of failed/unreachable hosts
+ self._failed_hosts = dict()
+ self._unreachable_hosts = dict()
+
+ self._final_q = multiprocessing.Queue()
+
+ # load callback plugins
+ self._callback_plugins = self._load_callbacks(stdout_callback)
+
+ # create the pool of worker threads, based on the number of forks specified
+ try:
+ fileno = sys.stdin.fileno()
+ except ValueError:
+ fileno = None
+
+ self._workers = []
+ for i in range(self._options.forks):
+ main_q = multiprocessing.Queue()
+ rslt_q = multiprocessing.Queue()
+
+ prc = WorkerProcess(self, main_q, rslt_q, loader)
+ prc.start()
+
+ self._workers.append((prc, main_q, rslt_q))
+
+ self._result_prc = ResultProcess(self._final_q, self._workers)
+ self._result_prc.start()
+
+ def _initialize_notified_handlers(self, handlers):
+ '''
+ Clears and initializes the shared notified handlers dict with entries
+ for each handler in the play, which is an empty array that will contain
+ inventory hostnames for those hosts triggering the handler.
+ '''
+
+ # Zero the dictionary first by removing any entries there.
+ # Proxied dicts don't support iteritems, so we have to use keys()
+ for key in self._notified_handlers.keys():
+ del self._notified_handlers[key]
+
+ # FIXME: there is a block compile helper for this...
+ handler_list = []
+ for handler_block in handlers:
+ for handler in handler_block.block:
+ handler_list.append(handler)
+
+ # then initialize it with the handler names from the handler list
+ for handler in handler_list:
+ self._notified_handlers[handler.get_name()] = []
+
+ def _load_callbacks(self, stdout_callback):
+ '''
+ Loads all available callbacks, with the exception of those which
+ utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
+ only one such callback plugin will be loaded.
+ '''
+
+ loaded_plugins = []
+
+ stdout_callback_loaded = False
+ if stdout_callback is None:
+ stdout_callback = C.DEFAULT_STDOUT_CALLBACK
+
+ if stdout_callback not in callback_loader:
+ raise AnsibleError("Invalid callback for stdout specified: %s" % stdout_callback)
+
+ for callback_plugin in callback_loader.all(class_only=True):
+ if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
+ # we only allow one callback of type 'stdout' to be loaded, so check
+ # the name of the current plugin and type to see if we need to skip
+ # loading this callback plugin
+ callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
+ (callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
+ if callback_type == 'stdout':
+ if callback_name != stdout_callback or stdout_callback_loaded:
+ continue
+ stdout_callback_loaded = True
+
+ loaded_plugins.append(callback_plugin(self._display))
+ else:
+ loaded_plugins.append(callback_plugin())
+
+ return loaded_plugins
+
+ def run(self, play):
+ '''
+ Iterates over the roles/tasks in a play, using the given (or default)
+ strategy for queueing tasks. The default is the linear strategy, which
+ operates like classic Ansible by keeping all hosts in lock-step with
+ a given task (meaning no hosts move on to the next task until all hosts
+ are done with the current task).
+ '''
+
+ all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
+ templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
+
+ new_play = play.copy()
+ new_play.post_validate(templar)
+
+ connection_info = ConnectionInformation(new_play, self._options, self.passwords)
+ for callback_plugin in self._callback_plugins:
+ if hasattr(callback_plugin, 'set_connection_info'):
+ callback_plugin.set_connection_info(connection_info)
+
+ self.send_callback('v2_playbook_on_play_start', new_play)
+
+ # initialize the shared dictionary containing the notified handlers
+ self._initialize_notified_handlers(new_play.handlers)
+
+ # load the specified strategy (or the default linear one)
+ strategy = strategy_loader.get(new_play.strategy, self)
+ if strategy is None:
+ raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
+
+ # build the iterator
+ iterator = PlayIterator(inventory=self._inventory, play=new_play, connection_info=connection_info, all_vars=all_vars)
+
+ # and run the play using the strategy
+ return strategy.run(iterator, connection_info)
+
+ def cleanup(self):
+ debug("RUNNING CLEANUP")
+
+ self.terminate()
+
+ self._final_q.close()
+ self._result_prc.terminate()
+
+ for (worker_prc, main_q, rslt_q) in self._workers:
+ rslt_q.close()
+ main_q.close()
+ worker_prc.terminate()
+
+ def get_inventory(self):
+ return self._inventory
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def get_loader(self):
+ return self._loader
+
+ def get_notified_handlers(self):
+ return self._notified_handlers
+
+ def get_workers(self):
+ return self._workers[:]
+
+ def terminate(self):
+ self._terminated = True
+
+ def send_callback(self, method_name, *args, **kwargs):
+ for callback_plugin in self._callback_plugins:
+ # a plugin that set self.disabled to True will not be called
+ # see osx_say.py example for such a plugin
+ if getattr(callback_plugin, 'disabled', False):
+ continue
+ methods = [
+ getattr(callback_plugin, method_name, None),
+ getattr(callback_plugin, 'on_any', None)
+ ]
+ for method in methods:
+ if method is not None:
+ method(*args, **kwargs)
+
diff --git a/lib/ansible/callback_plugins/__init__.py b/lib/ansible/executor/task_queue_manager.py:
index e69de29bb2..e69de29bb2 100644
--- a/lib/ansible/callback_plugins/__init__.py
+++ b/lib/ansible/executor/task_queue_manager.py:
diff --git a/lib/ansible/executor/task_result.py b/lib/ansible/executor/task_result.py
new file mode 100644
index 0000000000..2b760bac00
--- /dev/null
+++ b/lib/ansible/executor/task_result.py
@@ -0,0 +1,61 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.parsing import DataLoader
+
+class TaskResult:
+ '''
+ This class is responsible for interpreting the resulting data
+ from an executed task, and provides helper methods for determining
+ the result of a given task.
+ '''
+
+ def __init__(self, host, task, return_data):
+ self._host = host
+ self._task = task
+ if isinstance(return_data, dict):
+ self._result = return_data.copy()
+ else:
+ self._result = DataLoader().load(return_data)
+
+ def is_changed(self):
+ return self._check_key('changed')
+
+ def is_skipped(self):
+ return self._check_key('skipped')
+
+ def is_failed(self):
+ if 'failed_when_result' in self._result:
+ return self._check_key('failed_when_result')
+ else:
+ return self._check_key('failed') or self._result.get('rc', 0) != 0
+
+ def is_unreachable(self):
+ return self._check_key('unreachable')
+
+ def _check_key(self, key):
+ if 'results' in self._result:
+ flag = False
+ for res in self._result.get('results', []):
+ if isinstance(res, dict):
+ flag |= res.get(key, False)
+ else:
+ return self._result.get(key, False)
diff --git a/lib/ansible/galaxy/__init__.py b/lib/ansible/galaxy/__init__.py
new file mode 100644
index 0000000000..3b89dac847
--- /dev/null
+++ b/lib/ansible/galaxy/__init__.py
@@ -0,0 +1,70 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+''' This manages remote shared Ansible objects, mainly roles'''
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.utils.display import Display
+
+# default_readme_template
+# default_meta_template
+
+
+class Galaxy(object):
+ ''' Keeps global galaxy info '''
+
+ def __init__(self, options, display=None):
+
+ if display is None:
+ self.display = Display()
+ else:
+ self.display = display
+
+ self.options = options
+ self.roles_path = getattr(self.options, 'roles_path', None)
+ if self.roles_path:
+ self.roles_path = os.path.expanduser(self.roles_path)
+
+ self.roles = {}
+
+ # load data path for resource usage
+ this_dir, this_filename = os.path.split(__file__)
+ self.DATA_PATH = os.path.join(this_dir, "data")
+
+ #TODO: move to getter for lazy loading
+ self.default_readme = self._str_from_data_file('readme')
+ self.default_meta = self._str_from_data_file('metadata_template.j2')
+
+ def add_role(self, role):
+ self.roles[role.name] = role
+
+ def remove_role(self, role_name):
+ del self.roles[role_name]
+
+
+ def _str_from_data_file(self, filename):
+ myfile = os.path.join(self.DATA_PATH, filename)
+ try:
+ return open(myfile).read()
+ except Exception as e:
+ raise AnsibleError("Could not open %s: %s" % (filename, str(e)))
+
diff --git a/lib/ansible/galaxy/api.py b/lib/ansible/galaxy/api.py
new file mode 100755
index 0000000000..f14afc52d3
--- /dev/null
+++ b/lib/ansible/galaxy/api.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+
+########################################################################
+#
+# (C) 2013, James Cammarata <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+import json
+from urllib2 import urlopen, quote as urlquote
+from urlparse import urlparse
+
+from ansible.errors import AnsibleError
+
+class GalaxyAPI(object):
+ ''' This class is meant to be used as an API client for an Ansible Galaxy server '''
+
+ SUPPORTED_VERSIONS = ['v1']
+
+ def __init__(self, galaxy, api_server):
+
+ self.galaxy = galaxy
+
+ try:
+ urlparse(api_server, scheme='https')
+ except:
+ raise AnsibleError("Invalid server API url passed: %s" % api_server)
+
+ server_version = self.get_server_api_version('%s/api/' % (api_server))
+ if not server_version:
+ raise AnsibleError("Could not retrieve server API version: %s" % api_server)
+
+ if server_version in self.SUPPORTED_VERSIONS:
+ self.baseurl = '%s/api/%s' % (api_server, server_version)
+ self.version = server_version # for future use
+ self.galaxy.display.vvvvv("Base API: %s" % self.baseurl)
+ else:
+ raise AnsibleError("Unsupported Galaxy server API version: %s" % server_version)
+
+ def get_server_api_version(self, api_server):
+ """
+ Fetches the Galaxy API current version to ensure
+ the API server is up and reachable.
+ """
+ #TODO: fix galaxy server which returns current_version path (/api/v1) vs actual version (v1)
+ # also should set baseurl using supported_versions which has path
+ return 'v1'
+
+ try:
+ data = json.load(urlopen(api_server))
+ return data.get("current_version", 'v1')
+ except Exception as e:
+ # TODO: report error
+ return None
+
+ def lookup_role_by_name(self, role_name, notify=True):
+ """
+ Find a role by name
+ """
+ role_name = urlquote(role_name)
+
+ try:
+ parts = role_name.split(".")
+ user_name = ".".join(parts[0:-1])
+ role_name = parts[-1]
+ if notify:
+ self.galaxy.display.display("- downloading role '%s', owned by %s" % (role_name, user_name))
+ except:
+ raise AnsibleError("- invalid role name (%s). Specify role as format: username.rolename" % role_name)
+
+ url = '%s/roles/?owner__username=%s&name=%s' % (self.baseurl, user_name, role_name)
+ self.galaxy.display.vvvv("- %s" % (url))
+ try:
+ data = json.load(urlopen(url))
+ if len(data["results"]) != 0:
+ return data["results"][0]
+ except:
+ # TODO: report on connection/availability errors
+ pass
+
+ return None
+
+ def fetch_role_related(self, related, role_id):
+ """
+ Fetch the list of related items for the given role.
+ The url comes from the 'related' field of the role.
+ """
+
+ try:
+ url = '%s/roles/%d/%s/?page_size=50' % (self.baseurl, int(role_id), related)
+ data = json.load(urlopen(url))
+ results = data['results']
+ done = (data.get('next', None) == None)
+ while not done:
+ url = '%s%s' % (self.baseurl, data['next'])
+ self.galaxy.display.display(url)
+ data = json.load(urlopen(url))
+ results += data['results']
+ done = (data.get('next', None) == None)
+ return results
+ except:
+ return None
+
+ def get_list(self, what):
+ """
+ Fetch the list of items specified.
+ """
+
+ try:
+ url = '%s/%s/?page_size' % (self.baseurl, what)
+ data = json.load(urlopen(url))
+ if "results" in data:
+ results = data['results']
+ else:
+ results = data
+ done = True
+ if "next" in data:
+ done = (data.get('next', None) == None)
+ while not done:
+ url = '%s%s' % (self.baseurl, data['next'])
+ self.galaxy.display.display(url)
+ data = json.load(urlopen(url))
+ results += data['results']
+ done = (data.get('next', None) == None)
+ return results
+ except Exception as error:
+ raise AnsibleError("Failed to download the %s list: %s" % (what, str(error)))
diff --git a/lib/ansible/galaxy/data/metadata_template.j2 b/lib/ansible/galaxy/data/metadata_template.j2
new file mode 100644
index 0000000000..328e13a814
--- /dev/null
+++ b/lib/ansible/galaxy/data/metadata_template.j2
@@ -0,0 +1,45 @@
+galaxy_info:
+ author: {{ author }}
+ description: {{description}}
+ company: {{ company }}
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: {{ issue_tracker_url }}
+ # Some suggested licenses:
+ # - BSD (default)
+ # - MIT
+ # - GPLv2
+ # - GPLv3
+ # - Apache
+ # - CC-BY
+ license: {{ license }}
+ min_ansible_version: {{ min_ansible_version }}
+ #
+ # Below are all platforms currently available. Just uncomment
+ # the ones that apply to your role. If you don't see your
+ # platform on this list, let us know and we'll get it added!
+ #
+ #platforms:
+ {%- for platform,versions in platforms.iteritems() %}
+ #- name: {{ platform }}
+ # versions:
+ # - all
+ {%- for version in versions %}
+ # - {{ version }}
+ {%- endfor %}
+ {%- endfor %}
+ #
+ # Below are all categories currently available. Just as with
+ # the platforms above, uncomment those that apply to your role.
+ #
+ #categories:
+ {%- for category in categories %}
+ #- {{ category.name }}
+ {%- endfor %}
+dependencies: []
+ # List your role dependencies here, one per line.
+ # Be sure to remove the '[]' above if you add dependencies
+ # to this list.
+ {% for dependency in dependencies %}
+ #- {{ dependency }}
+ {% endfor %}
diff --git a/lib/ansible/galaxy/data/readme b/lib/ansible/galaxy/data/readme
new file mode 100644
index 0000000000..225dd44b9f
--- /dev/null
+++ b/lib/ansible/galaxy/data/readme
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+A brief description of the role goes here.
+
+Requirements
+------------
+
+Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+
+Role Variables
+--------------
+
+A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+
+Dependencies
+------------
+
+A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+
+Example Playbook
+----------------
+
+Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
+
+ - hosts: servers
+ roles:
+ - { role: username.rolename, x: 42 }
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+An optional section for the role authors to include contact information, or a website (HTML is not allowed).
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
new file mode 100644
index 0000000000..b5a628726f
--- /dev/null
+++ b/lib/ansible/galaxy/role.py
@@ -0,0 +1,295 @@
+########################################################################
+#
+# (C) 2015, Brian Coca <bcoca@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+########################################################################
+
+import datetime
+import os
+import subprocess
+import tarfile
+import tempfile
+import yaml
+from shutil import rmtree
+from urllib2 import urlopen
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+
+class GalaxyRole(object):
+
+ SUPPORTED_SCMS = set(['git', 'hg'])
+ META_MAIN = os.path.join('meta', 'main.yml')
+ META_INSTALL = os.path.join('meta', '.galaxy_install_info')
+ ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
+
+
+ def __init__(self, galaxy, name, src=None, version=None, scm=None):
+
+ self._metadata = None
+ self._install_info = None
+
+ self.options = galaxy.options
+ self.display = galaxy.display
+
+ self.name = name
+ self.version = version
+ self.src = src
+ self.scm = scm
+
+ self.path = (os.path.join(galaxy.roles_path, self.name))
+
+ def fetch_from_scm_archive(self):
+
+ # this can be configured to prevent unwanted SCMS but cannot add new ones unless the code is also updated
+ if scm not in self.scms:
+ self.display.display("The %s scm is not currently supported" % scm)
+ return False
+
+ tempdir = tempfile.mkdtemp()
+ clone_cmd = [scm, 'clone', role_url, self.name]
+ with open('/dev/null', 'w') as devnull:
+ try:
+ self.display.display("- executing: %s" % " ".join(clone_cmd))
+ popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
+ except:
+ raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
+ rc = popen.wait()
+ if rc != 0:
+ self.display.display("- command %s failed" % ' '.join(clone_cmd))
+ self.display.display(" in directory %s" % tempdir)
+ return False
+
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
+ if scm == 'hg':
+ archive_cmd = ['hg', 'archive', '--prefix', "%s/" % self.name]
+ if role_version:
+ archive_cmd.extend(['-r', role_version])
+ archive_cmd.append(temp_file.name)
+ if scm == 'git':
+ archive_cmd = ['git', 'archive', '--prefix=%s/' % self.name, '--output=%s' % temp_file.name]
+ if role_version:
+ archive_cmd.append(role_version)
+ else:
+ archive_cmd.append('HEAD')
+
+ with open('/dev/null', 'w') as devnull:
+ self.display.display("- executing: %s" % " ".join(archive_cmd))
+ popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, self.name),
+ stderr=devnull, stdout=devnull)
+ rc = popen.wait()
+ if rc != 0:
+ self.display.display("- command %s failed" % ' '.join(archive_cmd))
+ self.display.display(" in directory %s" % tempdir)
+ return False
+
+ rmtree(tempdir, ignore_errors=True)
+
+ return temp_file.name
+
+ @property
+ def metadata(self):
+ """
+ Returns role metadata
+ """
+ if self._metadata is None:
+ meta_path = os.path.join(self.path, self.META_MAIN)
+ if os.path.isfile(meta_path):
+ try:
+ f = open(meta_path, 'r')
+ self._metadata = yaml.safe_load(f)
+ except:
+ self.display.vvvvv("Unable to load metadata for %s" % self.name)
+ return False
+ finally:
+ f.close()
+
+ return self._metadata
+
+
+ @property
+ def install_info(self):
+ """
+ Returns role install info
+ """
+ if self._install_info is None:
+
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ if os.path.isfile(info_path):
+ try:
+ f = open(info_path, 'r')
+ self._install_info = yaml.safe_load(f)
+ except:
+ self.display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
+ return False
+ finally:
+ f.close()
+ return self._install_info
+
+ def _write_galaxy_install_info(self):
+ """
+ Writes a YAML-formatted file to the role's meta/ directory
+ (named .galaxy_install_info) which contains some information
+ we can use later for commands like 'list' and 'info'.
+ """
+
+ info = dict(
+ version=self.version,
+ install_date=datetime.datetime.utcnow().strftime("%c"),
+ )
+ info_path = os.path.join(self.path, self.META_INSTALL)
+ try:
+ f = open(info_path, 'w+')
+ self._install_info = yaml.safe_dump(info, f)
+ except:
+ return False
+ finally:
+ f.close()
+
+ return True
+
+ def remove(self):
+ """
+ Removes the specified role from the roles path. There is a
+ sanity check to make sure there's a meta/main.yml file at this
+ path so the user doesn't blow away random directories
+ """
+ if self.metadata:
+ try:
+ rmtree(self.path)
+ return True
+ except:
+ pass
+
+ return False
+
+ def fetch(self, target, role_data):
+ """
+ Downloads the archived role from github to a temp location, extracts
+ it, and then copies the extracted role to the role library path.
+ """
+
+ # first grab the file and save it to a temp location
+ if self.src:
+ archive_url = self.src
+ else:
+ archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
+ self.display.display("- downloading role from %s" % archive_url)
+
+ try:
+ url_file = urlopen(archive_url)
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
+ data = url_file.read()
+ while data:
+ temp_file.write(data)
+ data = url_file.read()
+ temp_file.close()
+ return temp_file.name
+ except:
+ # TODO: better urllib2 error handling for error
+ # messages that are more exact
+ self.display.error("failed to download the file.")
+ return False
+
+ def install(self, role_filename):
+ # the file is a tar, so open it that way and extract it
+ # to the specified (or default) roles directory
+
+ if not tarfile.is_tarfile(role_filename):
+ self.display.error("the file downloaded was not a tar.gz")
+ return False
+ else:
+ if role_filename.endswith('.gz'):
+ role_tar_file = tarfile.open(role_filename, "r:gz")
+ else:
+ role_tar_file = tarfile.open(role_filename, "r")
+ # verify the role's meta file
+ meta_file = None
+ members = role_tar_file.getmembers()
+ # next find the metadata file
+ for member in members:
+ if self.META_MAIN in member.name:
+ meta_file = member
+ break
+ if not meta_file:
+ self.display.error("this role does not appear to have a meta/main.yml file.")
+ return False
+ else:
+ try:
+ self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
+ except:
+ self.display.error("this role does not appear to have a valid meta/main.yml file.")
+ return False
+
+ # we strip off the top-level directory for all of the files contained within
+ # the tar file here, since the default is 'github_repo-target', and change it
+ # to the specified role's name
+ self.display.display("- extracting %s to %s" % (self.name, self.path))
+ try:
+ if os.path.exists(self.path):
+ if not os.path.isdir(self.path):
+ self.display.error("the specified roles path exists and is not a directory.")
+ return False
+ elif not getattr(self.options, "force", False):
+ self.display.error("the specified role %s appears to already exist. Use --force to replace it." % self.name)
+ return False
+ else:
+ # using --force, remove the old path
+ if not self.remove():
+ self.display.error("%s doesn't appear to contain a role." % self.path)
+ self.display.error(" please remove this directory manually if you really want to put the role here.")
+ return False
+ else:
+ os.makedirs(self.path)
+
+ # now we do the actual extraction to the path
+ for member in members:
+ # we only extract files, and remove any relative path
+ # bits that might be in the file for security purposes
+ # and drop the leading directory, as mentioned above
+ if member.isreg() or member.issym():
+ parts = member.name.split(os.sep)[1:]
+ final_parts = []
+ for part in parts:
+ if part != '..' and '~' not in part and '$' not in part:
+ final_parts.append(part)
+ member.name = os.path.join(*final_parts)
+ role_tar_file.extract(member, self.path)
+
+ # write out the install info file for later use
+ self._write_galaxy_install_info()
+ except OSError as e:
+ self.display.error("Could not update files in %s: %s" % (self.path, str(e)))
+ return False
+
+ # return the parsed yaml metadata
+ self.display.display("- %s was installed successfully" % self.name)
+ return True
+
+ @property
+ def spec(self):
+ """
+ Returns role spec info
+ {
+ 'scm': 'git',
+ 'src': 'http://git.example.com/repos/repo.git',
+ 'version': 'v1.0',
+ 'name': 'repo'
+ }
+ """
+ return dict(scm=self.scm, src=self.src, version=self.version, name=self.name)
diff --git a/lib/ansible/inventory/__init__.py b/lib/ansible/inventory/__init__.py
index 2048046d3c..063398f17f 100644
--- a/lib/ansible/inventory/__init__.py
+++ b/lib/ansible/inventory/__init__.py
@@ -16,36 +16,44 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import fnmatch
import os
import sys
import re
+import stat
import subprocess
-import ansible.constants as C
+from ansible import constants as C
+from ansible.errors import *
+
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
from ansible.inventory.dir import InventoryDirectory
from ansible.inventory.group import Group
from ansible.inventory.host import Host
-from ansible import errors
-from ansible import utils
+from ansible.plugins import vars_loader
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class Inventory(object):
"""
Host inventory for ansible.
"""
- __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
+ #__slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
+ # 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
+ # '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
- def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
+ def __init__(self, loader, variable_manager, host_list=C.DEFAULT_HOST_LIST):
# the host file file, or script path, or list of hosts
# if a list, inventory data will NOT be loaded
self.host_list = host_list
- self._vault_password=vault_password
+ self._loader = loader
+ self._variable_manager = variable_manager
# caching to avoid repeated calculations, particularly with
# external inventory scripts.
@@ -97,7 +105,7 @@ class Inventory(object):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(filename=host_list)
+ self.parser = InventoryDirectory(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
else:
# check to see if the specified file starts with a
@@ -113,9 +121,9 @@ class Inventory(object):
except:
pass
- if utils.is_executable(host_list):
+ if is_executable(host_list):
try:
- self.parser = InventoryScript(filename=host_list)
+ self.parser = InventoryScript(loader=self._loader, filename=host_list)
self.groups = self.parser.groups.values()
except:
if not shebang_present:
@@ -134,19 +142,23 @@ class Inventory(object):
else:
raise
- utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
+ vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
- self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
+ self._vars_plugins = [ x for x in vars_loader.all(self) ]
+ # FIXME: shouldn't be required, since the group/host vars file
+ # management will be done in VariableManager
# get group vars from group_vars/ files and vars plugins
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
+ # FIXME: combine_vars
+ group.vars = combine_vars(group.vars, self.get_group_variables(group.name))
# get host vars from host_vars/ files and vars plugins
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
+ # FIXME: combine_vars
+ host.vars = combine_vars(host.vars, self.get_host_variables(host.name))
def _match(self, str, pattern_str):
@@ -192,9 +204,9 @@ class Inventory(object):
# exclude hosts mentioned in any restriction (ex: failed hosts)
if self._restriction is not None:
- hosts = [ h for h in hosts if h.name in self._restriction ]
+ hosts = [ h for h in hosts if h in self._restriction ]
if self._also_restriction is not None:
- hosts = [ h for h in hosts if h.name in self._also_restriction ]
+ hosts = [ h for h in hosts if h in self._also_restriction ]
return hosts
@@ -320,6 +332,8 @@ class Inventory(object):
new_host = Host(pattern)
new_host.set_variable("ansible_python_interpreter", sys.executable)
new_host.set_variable("ansible_connection", "local")
+ new_host.ipv4_address = '127.0.0.1'
+
ungrouped = self.get_group("ungrouped")
if ungrouped is None:
self.add_group(Group('ungrouped'))
@@ -420,7 +434,7 @@ class Inventory(object):
group = self.get_group(groupname)
if group is None:
- raise errors.AnsibleError("group not found: %s" % groupname)
+ raise Exception("group not found: %s" % groupname)
vars = {}
@@ -428,19 +442,21 @@ class Inventory(object):
vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# Read group_vars/ files
- vars = utils.combine_vars(vars, self.get_group_vars(group))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.get_group_vars(group))
return vars
- def get_variables(self, hostname, update_cached=False, vault_password=None):
+ def get_vars(self, hostname, update_cached=False, vault_password=None):
host = self.get_host(hostname)
if not host:
- raise errors.AnsibleError("host not found: %s" % hostname)
- return host.get_variables()
+ raise Exception("host not found: %s" % hostname)
+ return host.get_vars()
def get_host_variables(self, hostname, update_cached=False, vault_password=None):
@@ -460,22 +476,26 @@ class Inventory(object):
vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# plugin.get_host_vars retrieves just vars for specific host
vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
for updated in vars_results:
if updated is not None:
- vars = utils.combine_vars(vars, updated)
+ # FIXME: combine_vars
+ vars = combine_vars(vars, updated)
# still need to check InventoryParser per host vars
# which actually means InventoryScript per host,
# which is not performant
if self.parser is not None:
- vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.parser.get_host_variables(host))
# Read host_vars/ files
- vars = utils.combine_vars(vars, self.get_host_vars(host))
+ # FIXME: combine_vars
+ vars = combine_vars(vars, self.get_host_vars(host))
return vars
@@ -490,7 +510,7 @@ class Inventory(object):
""" return a list of hostnames for a pattern """
- result = [ h.name for h in self.get_hosts(pattern) ]
+ result = [ h for h in self.get_hosts(pattern) ]
if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
result = [pattern]
return result
@@ -498,11 +518,7 @@ class Inventory(object):
def list_groups(self):
return sorted([ g.name for g in self.groups ], key=lambda x: x)
- # TODO: remove this function
- def get_restriction(self):
- return self._restriction
-
- def restrict_to(self, restriction):
+ def restrict_to_hosts(self, restriction):
"""
Restrict list operations to the hosts given in restriction. This is used
to exclude failed hosts in main playbook code, don't use this for other
@@ -544,7 +560,7 @@ class Inventory(object):
results.append(x)
self._subset = results
- def lift_restriction(self):
+ def remove_restriction(self):
""" Do not restrict list operations """
self._restriction = None
@@ -588,10 +604,12 @@ class Inventory(object):
self._playbook_basedir = dir
# get group vars from group_vars/ files
for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
+ # FIXME: combine_vars
+ group.vars = combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
# get host vars from host_vars/ files
for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
+ # FIXME: combine_vars
+ host.vars = combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
# invalidate cache
self._vars_per_host = {}
self._vars_per_group = {}
@@ -639,15 +657,15 @@ class Inventory(object):
if _basedir == self._playbook_basedir and scan_pass != 1:
continue
+ # FIXME: these should go to VariableManager
if group and host is None:
# load vars in dir/group_vars/name_of_group
base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
+ self._variable_manager.add_group_vars_file(base_path, self._loader)
elif host and group is None:
# same for hostvars in dir/host_vars/name_of_host
base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
+ self._variable_manager.add_host_vars_file(base_path, self._loader)
# all done, results is a dictionary of variables for this particular host.
return results
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index 9ac23fff89..735f32d62c 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -17,20 +17,25 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
-import ansible.constants as C
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.ini import InventoryParser
from ansible.inventory.script import InventoryScript
-from ansible import utils
-from ansible import errors
+from ansible.utils.path import is_executable
+from ansible.utils.vars import combine_vars
class InventoryDirectory(object):
''' Host inventory parser for ansible using a directory of inventories. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
self.names = os.listdir(filename)
self.names.sort()
self.directory = filename
@@ -38,10 +43,12 @@ class InventoryDirectory(object):
self.hosts = {}
self.groups = {}
+ self._loader = loader
+
for i in self.names:
# Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
+ if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo")):
continue
# Skip hidden files
if i.startswith('.') and not i.startswith('./'):
@@ -51,9 +58,9 @@ class InventoryDirectory(object):
continue
fullpath = os.path.join(self.directory, i)
if os.path.isdir(fullpath):
- parser = InventoryDirectory(filename=fullpath)
- elif utils.is_executable(fullpath):
- parser = InventoryScript(filename=fullpath)
+ parser = InventoryDirectory(loader=loader, filename=fullpath)
+ elif is_executable(fullpath):
+ parser = InventoryScript(loader=loader, filename=fullpath)
else:
parser = InventoryParser(filename=fullpath)
self.parsers.append(parser)
@@ -153,7 +160,7 @@ class InventoryDirectory(object):
# name
if group.name != newgroup.name:
- raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
+ raise AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
# depth
group.depth = max([group.depth, newgroup.depth])
@@ -196,14 +203,14 @@ class InventoryDirectory(object):
self.groups[newparent.name].add_child_group(group)
# variables
- group.vars = utils.combine_vars(group.vars, newgroup.vars)
+ group.vars = combine_vars(group.vars, newgroup.vars)
def _merge_hosts(self,host, newhost):
""" Merge all of instance newhost into host """
# name
if host.name != newhost.name:
- raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
+ raise AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
# group membership relation
for newgroup in newhost.groups:
@@ -218,7 +225,7 @@ class InventoryDirectory(object):
self.groups[newgroup.name].add_host(host)
# variables
- host.vars = utils.combine_vars(host.vars, newhost.vars)
+ host.vars = combine_vars(host.vars, newhost.vars)
def get_host_variables(self, host):
""" Gets additional host variables from all inventories """
diff --git a/lib/ansible/inventory/expand_hosts.py b/lib/ansible/inventory/expand_hosts.py
index f129740935..b5a957c53f 100644
--- a/lib/ansible/inventory/expand_hosts.py
+++ b/lib/ansible/inventory/expand_hosts.py
@@ -30,6 +30,9 @@ expanded into 001, 002 ...009, 010.
Note that when beg is specified with left zero padding, then the length of
end must be the same as that of beg, else an exception is raised.
'''
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import string
from ansible import errors
diff --git a/lib/ansible/inventory/group.py b/lib/ansible/inventory/group.py
index 262558e69c..6525e69b46 100644
--- a/lib/ansible/inventory/group.py
+++ b/lib/ansible/inventory/group.py
@@ -14,11 +14,15 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Group(object):
+from ansible.utils.debug import debug
+
+class Group:
''' a group of ansible hosts '''
- __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
+ #__slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
def __init__(self, name=None):
@@ -29,9 +33,49 @@ class Group(object):
self.child_groups = []
self.parent_groups = []
self._hosts_cache = None
+
#self.clear_hosts_cache()
- if self.name is None:
- raise Exception("group name is required")
+ #if self.name is None:
+ # raise Exception("group name is required")
+
+ def __repr__(self):
+ return self.get_name()
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def serialize(self):
+ parent_groups = []
+ for parent in self.parent_groups:
+ parent_groups.append(parent.serialize())
+
+ result = dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ parent_groups=parent_groups,
+ depth=self.depth,
+ )
+
+ debug("serializing group, result is: %s" % result)
+ return result
+
+ def deserialize(self, data):
+ debug("deserializing group, data is: %s" % data)
+ self.__init__()
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+
+ parent_groups = data.get('parent_groups', [])
+ for parent_data in parent_groups:
+ g = Group()
+ g.deserialize(parent_data)
+ self.parent_groups.append(g)
+
+ def get_name(self):
+ return self.name
def add_child_group(self, group):
@@ -100,7 +144,7 @@ class Group(object):
hosts.append(mine)
return hosts
- def get_variables(self):
+ def get_vars(self):
return self.vars.copy()
def _get_ancestors(self):
diff --git a/lib/ansible/inventory/host.py b/lib/ansible/inventory/host.py
index d4dc20fa46..29d6afd991 100644
--- a/lib/ansible/inventory/host.py
+++ b/lib/ansible/inventory/host.py
@@ -15,24 +15,88 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import ansible.constants as C
-from ansible import utils
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class Host(object):
+from ansible import constants as C
+from ansible.inventory.group import Group
+from ansible.utils.vars import combine_vars
+
+__all__ = ['Host']
+
+class Host:
''' a single ansible host '''
- __slots__ = [ 'name', 'vars', 'groups' ]
+ #__slots__ = [ 'name', 'vars', 'groups' ]
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ return self.deserialize(data)
+
+ def __eq__(self, other):
+ return self.name == other.name
+
+ def serialize(self):
+ groups = []
+ for group in self.groups:
+ groups.append(group.serialize())
+
+ return dict(
+ name=self.name,
+ vars=self.vars.copy(),
+ ipv4_address=self.ipv4_address,
+ ipv6_address=self.ipv6_address,
+ port=self.port,
+ gathered_facts=self._gathered_facts,
+ groups=groups,
+ )
+
+ def deserialize(self, data):
+ self.__init__()
+
+ self.name = data.get('name')
+ self.vars = data.get('vars', dict())
+ self.ipv4_address = data.get('ipv4_address', '')
+ self.ipv6_address = data.get('ipv6_address', '')
+ self.port = data.get('port')
+
+ groups = data.get('groups', [])
+ for group_data in groups:
+ g = Group()
+ g.deserialize(group_data)
+ self.groups.append(g)
def __init__(self, name=None, port=None):
self.name = name
self.vars = {}
self.groups = []
+
+ self.ipv4_address = name
+ self.ipv6_address = name
+
if port and port != C.DEFAULT_REMOTE_PORT:
- self.set_variable('ansible_ssh_port', int(port))
+ self.port = int(port)
+ else:
+ self.port = C.DEFAULT_REMOTE_PORT
+
+ self._gathered_facts = False
- if self.name is None:
- raise Exception("host name is required")
+ def __repr__(self):
+ return self.get_name()
+
+ def get_name(self):
+ return self.name
+
+ @property
+ def gathered_facts(self):
+ return self._gathered_facts
+
+ def set_gathered_facts(self, gathered):
+ self._gathered_facts = gathered
def add_group(self, group):
@@ -52,16 +116,15 @@ class Host(object):
groups[a.name] = a
return groups.values()
- def get_variables(self):
+ def get_vars(self):
results = {}
groups = self.get_groups()
for group in sorted(groups, key=lambda g: g.depth):
- results = utils.combine_vars(results, group.get_variables())
- results = utils.combine_vars(results, self.vars)
+ results = combine_vars(results, group.get_vars())
+ results = combine_vars(results, self.vars)
results['inventory_hostname'] = self.name
results['inventory_hostname_short'] = self.name.split('.')[0]
results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
return results
-
diff --git a/lib/ansible/inventory/ini.py b/lib/ansible/inventory/ini.py
index bd9a98e7f8..e004ee8bb7 100644
--- a/lib/ansible/inventory/ini.py
+++ b/lib/ansible/inventory/ini.py
@@ -16,17 +16,20 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.constants as C
+import ast
+import shlex
+import re
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible import errors
-from ansible import utils
-import shlex
-import re
-import ast
+from ansible.utils.unicode import to_unicode
class InventoryParser(object):
"""
@@ -34,9 +37,8 @@ class InventoryParser(object):
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
+ self.filename = filename
with open(filename) as fh:
- self.filename = filename
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
@@ -54,10 +56,7 @@ class InventoryParser(object):
def _parse_value(v):
if "#" not in v:
try:
- ret = ast.literal_eval(v)
- if not isinstance(ret, float):
- # Do not trim floats. Eg: "1.20" to 1.2
- return ret
+ v = ast.literal_eval(v)
# Using explicit exceptions.
# Likely a string that literal_eval does not like. We wil then just set it.
except ValueError:
@@ -66,7 +65,7 @@ class InventoryParser(object):
except SyntaxError:
# Is this a hash with an equals at the end?
pass
- return v
+ return to_unicode(v, nonstring='passthru', errors='strict')
# [webservers]
# alpha
@@ -91,8 +90,8 @@ class InventoryParser(object):
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
- for lineno in range(len(self.lines)):
- line = utils.before_comment(self.lines[lineno]).strip()
+ for line in self.lines:
+ line = self._before_comment(line).strip()
if line.startswith("[") and line.endswith("]"):
active_group_name = line.replace("[","").replace("]","")
if ":vars" in line or ":children" in line:
@@ -146,8 +145,11 @@ class InventoryParser(object):
try:
(k,v) = t.split("=", 1)
except ValueError, e:
- raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
- host.set_variable(k, self._parse_value(v))
+ raise AnsibleError("Invalid ini entry in %s: %s - %s" % (self.filename, t, str(e)))
+ if k == 'ansible_ssh_host':
+ host.ipv4_address = self._parse_value(v)
+ else:
+ host.set_variable(k, self._parse_value(v))
self.groups[active_group_name].add_host(host)
# [southeast:children]
@@ -157,8 +159,8 @@ class InventoryParser(object):
def _parse_group_children(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and ":children]" in line:
@@ -173,7 +175,7 @@ class InventoryParser(object):
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
- raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
+ raise AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
@@ -184,13 +186,13 @@ class InventoryParser(object):
def _parse_group_variables(self):
group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
+ for line in self.lines:
+ line = line.strip()
if line.startswith("[") and ":vars]" in line:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
- raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
+ raise AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
@@ -199,10 +201,18 @@ class InventoryParser(object):
pass
elif group:
if "=" not in line:
- raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
+ raise AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
group.set_variable(k, self._parse_value(v))
def get_host_variables(self, host):
return {}
+
+ def _before_comment(self, msg):
+ ''' what's the part of a string before a comment? '''
+ msg = msg.replace("\#","**NOT_A_COMMENT**")
+ msg = msg.split("#")[0]
+ msg = msg.replace("**NOT_A_COMMENT**","#")
+ return msg
+
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index b83cb9bcc7..9675d70f69 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -16,22 +16,26 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import subprocess
-import ansible.constants as C
+import sys
+
+from ansible import constants as C
+from ansible.errors import *
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible import utils
-from ansible import errors
-import sys
-class InventoryScript(object):
+class InventoryScript:
''' Host inventory parser for ansible using external inventory scripts. '''
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
+ def __init__(self, loader, filename=C.DEFAULT_HOST_LIST):
+
+ self._loader = loader
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
@@ -41,11 +45,11 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(stdout, stderr) = sp.communicate()
if sp.returncode != 0:
- raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
+ raise AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
self.data = stdout
# see comment about _meta below
@@ -58,7 +62,7 @@ class InventoryScript(object):
all_hosts = {}
# not passing from_remote because data from CMDB is trusted
- self.raw = utils.parse_json(self.data)
+ self.raw = self._loader.load(self.data)
self.raw = json_dict_bytes_to_unicode(self.raw)
all = Group('all')
@@ -68,7 +72,7 @@ class InventoryScript(object):
if 'failed' in self.raw:
sys.stderr.write(err + "\n")
- raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
+ raise AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
for (group_name, data) in self.raw.items():
@@ -92,12 +96,12 @@ class InventoryScript(object):
if not isinstance(data, dict):
data = {'hosts': data}
# is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars','children')):
+ elif not any(k in data for k in ('hosts','vars')):
data = {'hosts': [group_name], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for the host list:\n %s" % (group_name, data))
for hostname in data['hosts']:
@@ -108,7 +112,7 @@ class InventoryScript(object):
if 'vars' in data:
if not isinstance(data['vars'], dict):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
+ raise AnsibleError("You defined a group \"%s\" with bad "
"data for variables:\n %s" % (group_name, data))
for k, v in data['vars'].iteritems():
@@ -143,12 +147,12 @@ class InventoryScript(object):
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
+ raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return dict()
try:
- return json_dict_bytes_to_unicode(utils.parse_json(out))
+ return json_dict_bytes_to_unicode(self._loader.load(out))
except ValueError:
- raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
+ raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
diff --git a/lib/ansible/inventory/vars_plugins/noop.py b/lib/ansible/inventory/vars_plugins/noop.py
index 5d4b4b6658..8f0c98cad5 100644
--- a/lib/ansible/inventory/vars_plugins/noop.py
+++ b/lib/ansible/inventory/vars_plugins/noop.py
@@ -15,6 +15,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
class VarsModule(object):
diff --git a/lib/ansible/module_common.py b/lib/ansible/module_common.py
deleted file mode 100644
index 118c757f8d..0000000000
--- a/lib/ansible/module_common.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# from python and deps
-from cStringIO import StringIO
-import inspect
-import os
-import shlex
-
-# from Ansible
-from ansible import errors
-from ansible import utils
-from ansible import constants as C
-from ansible import __version__
-from ansible.utils.unicode import to_bytes
-
-REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
-REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
-REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
-REPLACER_WINDOWS = "# POWERSHELL_COMMON"
-REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
-
-class ModuleReplacer(object):
-
- """
- The Replacer is used to insert chunks of code into modules before
- transfer. Rather than doing classical python imports, this allows for more
- efficient transfer in a no-bootstrapping scenario by not moving extra files
- over the wire, and also takes care of embedding arguments in the transferred
- modules.
-
- This version is done in such a way that local imports can still be
- used in the module code, so IDEs don't have to be aware of what is going on.
-
- Example:
-
- from ansible.module_utils.basic import *
-
- ... will result in the insertion basic.py into the module
-
- from the module_utils/ directory in the source tree.
-
- All modules are required to import at least basic, though there will also
- be other snippets.
-
- # POWERSHELL_COMMON
-
- Also results in the inclusion of the common code in powershell.ps1
-
- """
-
- # ******************************************************************************
-
- def __init__(self, strip_comments=False):
- this_file = inspect.getfile(inspect.currentframe())
- self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
- self.strip_comments = strip_comments # TODO: implement
-
- # ******************************************************************************
-
-
- def slurp(self, path):
- if not os.path.exists(path):
- raise errors.AnsibleError("imported module support code does not exist at %s" % path)
- fd = open(path)
- data = fd.read()
- fd.close()
- return data
-
- def _find_snippet_imports(self, module_data, module_path):
- """
- Given the source of the module, convert it to a Jinja2 template to insert
- module code and return whether it's a new or old style module.
- """
-
- module_style = 'old'
- if REPLACER in module_data:
- module_style = 'new'
- elif 'from ansible.module_utils.' in module_data:
- module_style = 'new'
- elif 'WANT_JSON' in module_data:
- module_style = 'non_native_want_json'
-
- output = StringIO()
- lines = module_data.split('\n')
- snippet_names = []
-
- for line in lines:
-
- if REPLACER in line:
- output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
- snippet_names.append('basic')
- if REPLACER_WINDOWS in line:
- ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
- output.write(ps_data)
- snippet_names.append('powershell')
- elif line.startswith('from ansible.module_utils.'):
- tokens=line.split(".")
- import_error = False
- if len(tokens) != 3:
- import_error = True
- if " import *" not in line:
- import_error = True
- if import_error:
- raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
- snippet_name = tokens[2].split()[0]
- snippet_names.append(snippet_name)
- output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
-
- else:
- if self.strip_comments and line.startswith("#") or line == '':
- pass
- output.write(line)
- output.write("\n")
-
- if not module_path.endswith(".ps1"):
- # Unixy modules
- if len(snippet_names) > 0 and not 'basic' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
- else:
- # Windows modules
- if len(snippet_names) > 0 and not 'powershell' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
-
- return (output.getvalue(), module_style)
-
- # ******************************************************************************
-
- def modify_module(self, module_path, complex_args, module_args, inject):
-
- with open(module_path) as f:
-
- # read in the module source
- module_data = f.read()
-
- (module_data, module_style) = self._find_snippet_imports(module_data, module_path)
-
- complex_args_json = utils.jsonify(complex_args)
- # We force conversion of module_args to str because module_common calls shlex.split,
- # a standard library function that incorrectly handles Unicode input before Python 2.7.3.
- # Note: it would be better to do all this conversion at the border
- # (when the data is originally parsed into data structures) but
- # it's currently coming from too many sources to make that
- # effective.
- try:
- encoded_args = repr(module_args.encode('utf-8'))
- except UnicodeDecodeError:
- encoded_args = repr(module_args)
- try:
- encoded_complex = repr(complex_args_json.encode('utf-8'))
- except UnicodeDecodeError:
- encoded_complex = repr(complex_args_json.encode('utf-8'))
-
- # these strings should be part of the 'basic' snippet which is required to be included
- module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
- module_data = module_data.replace(REPLACER_ARGS, encoded_args)
- module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
-
- if module_style == 'new':
- facility = C.DEFAULT_SYSLOG_FACILITY
- if 'ansible_syslog_facility' in inject:
- facility = inject['ansible_syslog_facility']
- module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
-
- lines = module_data.split("\n")
- shebang = None
- if lines[0].startswith("#!"):
- shebang = lines[0].strip()
- args = shlex.split(str(shebang[2:]))
- interpreter = args[0]
- interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
-
- if interpreter_config in inject:
- interpreter = to_bytes(inject[interpreter_config], errors='strict')
- lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:]))
- module_data = "\n".join(lines)
-
- return (module_data, module_style, shebang)
-
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 54a1a9cfff..8f9b03f882 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -43,7 +43,7 @@ BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
# can be inserted in any module source automatically by including
# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
# of an ansible module. The source of this common code lives
-# in lib/ansible/module_common.py
+# in ansible/executor/module_common.py
import locale
import os
@@ -65,6 +65,7 @@ import pwd
import platform
import errno
import tempfile
+from itertools import imap, repeat
try:
import json
@@ -234,7 +235,7 @@ def load_platform_subclass(cls, *args, **kwargs):
return super(cls, subclass).__new__(subclass)
-def json_dict_unicode_to_bytes(d):
+def json_dict_unicode_to_bytes(d, encoding='utf-8'):
''' Recursively convert dict keys and values to byte str
Specialized for json return because this only handles, lists, tuples,
@@ -242,17 +243,17 @@ def json_dict_unicode_to_bytes(d):
'''
if isinstance(d, unicode):
- return d.encode('utf-8')
+ return d.encode(encoding)
elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
+ return dict(imap(json_dict_unicode_to_bytes, d.iteritems(), repeat(encoding)))
elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
+ return list(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
+ return tuple(imap(json_dict_unicode_to_bytes, d, repeat(encoding)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8'):
    '''Recursively convert dict keys and values to unicode str.

    Specialized for json return values because this only handles dict,
    list, and tuple container types (the containers the json module
    produces); any other value is returned unchanged.

    :arg d: the data structure (or scalar) to convert
    :kwarg encoding: the encoding used to decode byte strings
    :returns: a structure of the same shape with all byte strings decoded
    '''
    # NOTE: docstring previously said "to byte str" -- a copy/paste from
    # json_dict_unicode_to_bytes; this function decodes bytes to unicode.
    if isinstance(d, str):
        return unicode(d, encoding)
    elif isinstance(d, dict):
        # imap + repeat feeds the same encoding to every recursive call
        return dict(imap(json_dict_bytes_to_unicode, d.iteritems(), repeat(encoding)))
    elif isinstance(d, list):
        return list(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
    elif isinstance(d, tuple):
        return tuple(imap(json_dict_bytes_to_unicode, d, repeat(encoding)))
    else:
        # scalars (int, float, bool, None) pass through untouched
        return d
@@ -359,9 +360,9 @@ class AnsibleModule(object):
# reset to LANG=C if it's an invalid/unavailable locale
self._check_locale()
- (self.params, self.args) = self._load_params()
+ self.params = self._load_params()
- self._legal_inputs = ['CHECKMODE', 'NO_LOG']
+ self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log']
self.aliases = self._handle_aliases()
@@ -888,7 +889,7 @@ class AnsibleModule(object):
def _check_for_check_mode(self):
for (k,v) in self.params.iteritems():
- if k == 'CHECKMODE':
+ if k == '_ansible_check_mode':
if not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module does not support check mode")
if self.supports_check_mode:
@@ -896,13 +897,13 @@ class AnsibleModule(object):
def _check_for_no_log(self):
for (k,v) in self.params.iteritems():
- if k == 'NO_LOG':
+ if k == '_ansible_no_log':
self.no_log = self.boolean(v)
def _check_invalid_arguments(self):
for (k,v) in self.params.iteritems():
# these should be in legal inputs already
- #if k in ('CHECKMODE', 'NO_LOG'):
+ #if k in ('_ansible_check_mode', '_ansible_no_log'):
# continue
if k not in self._legal_inputs:
self.fail_json(msg="unsupported parameter for module: %s" % k)
@@ -1075,20 +1076,11 @@ class AnsibleModule(object):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
- args = MODULE_ARGS
- items = shlex.split(args)
- params = {}
- for x in items:
- try:
- (k, v) = x.split("=",1)
- except Exception, e:
- self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
- if k in params:
- self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
- params[k] = v
- params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- params2.update(params)
- return (params2, args)
+ params = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
+ if params is None:
+ params = dict()
+ return params
+
def _log_invocation(self):
''' log that ansible ran the module '''
@@ -1209,13 +1201,17 @@ class AnsibleModule(object):
self.fail_json(msg='Boolean %s not in either boolean list' % arg)
def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
+ for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding)
- # Old systems using simplejson module does not support encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
+ # Old systems using old simplejson module does not support encoding keyword.
+ except TypeError:
+ try:
+ new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ return json.dumps(new_data)
+ except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
@@ -1452,7 +1448,7 @@ class AnsibleModule(object):
msg = None
st_in = None
- # Set a temporart env path if a prefix is passed
+ # Set a temporary env path if a prefix is passed
env=os.environ
if path_prefix:
env['PATH']="%s:%s" % (path_prefix, env['PATH'])
diff --git a/lib/ansible/module_utils/powershell.ps1 b/lib/ansible/module_utils/powershell.ps1
index ee7d3ddeca..57d2c1b101 100644
--- a/lib/ansible/module_utils/powershell.ps1
+++ b/lib/ansible/module_utils/powershell.ps1
@@ -142,14 +142,14 @@ Function ConvertTo-Bool
return
}
-# Helper function to calculate a hash of a file in a way which powershell 3
+# Helper function to calculate md5 of a file in a way which powershell 3
# and above can handle:
-Function Get-FileChecksum($path)
+Function Get-FileMd5($path)
{
$hash = ""
If (Test-Path -PathType Leaf $path)
{
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
diff --git a/lib/ansible/module_utils/vmware.py b/lib/ansible/module_utils/vmware.py
deleted file mode 100644
index e2d8c18ca4..0000000000
--- a/lib/ansible/module_utils/vmware.py
+++ /dev/null
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2015, Joseph Callen <jcallen () csc.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-try:
- import atexit
- import time
- # requests is required for exception handling of the ConnectionError
- import requests
- from pyVim import connect
- from pyVmomi import vim, vmodl
- HAS_PYVMOMI = True
-except ImportError:
- HAS_PYVMOMI = False
-
-
-class TaskError(Exception):
- pass
-
-
-def wait_for_task(task):
-
- while True:
- if task.info.state == vim.TaskInfo.State.success:
- return True, task.info.result
- if task.info.state == vim.TaskInfo.State.error:
- try:
- raise TaskError(task.info.error)
- except AttributeError:
- raise TaskError("An unknown error has occurred")
- if task.info.state == vim.TaskInfo.State.running:
- time.sleep(15)
- if task.info.state == vim.TaskInfo.State.queued:
- time.sleep(15)
-
-
-def find_dvspg_by_name(dv_switch, portgroup_name):
-
- portgroups = dv_switch.portgroup
-
- for pg in portgroups:
- if pg.name == portgroup_name:
- return pg
-
- return None
-
-
-def find_cluster_by_name_datacenter(datacenter, cluster_name):
-
- host_folder = datacenter.hostFolder
- for folder in host_folder.childEntity:
- if folder.name == cluster_name:
- return folder
- return None
-
-
-def find_datacenter_by_name(content, datacenter_name):
-
- datacenters = get_all_objs(content, [vim.Datacenter])
- for dc in datacenters:
- if dc.name == datacenter_name:
- return dc
-
- return None
-
-
-def find_dvs_by_name(content, switch_name):
-
- vmware_distributed_switches = get_all_objs(content, [vim.dvs.VmwareDistributedVirtualSwitch])
- for dvs in vmware_distributed_switches:
- if dvs.name == switch_name:
- return dvs
- return None
-
-
-def find_hostsystem_by_name(content, hostname):
-
- host_system = get_all_objs(content, [vim.HostSystem])
- for host in host_system:
- if host.name == hostname:
- return host
- return None
-
-
-def vmware_argument_spec():
-
- return dict(
- hostname=dict(type='str', required=True),
- username=dict(type='str', aliases=['user', 'admin'], required=True),
- password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
- )
-
-
-def connect_to_api(module, disconnect_atexit=True):
-
- hostname = module.params['hostname']
- username = module.params['username']
- password = module.params['password']
- try:
- service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password)
-
- # Disabling atexit should be used in special cases only.
- # Such as IP change of the ESXi host which removes the connection anyway.
- # Also removal significantly speeds up the return of the module
-
- if disconnect_atexit:
- atexit.register(connect.Disconnect, service_instance)
- return service_instance.RetrieveContent()
- except vim.fault.InvalidLogin as invalid_login:
- module.fail_json(msg=invalid_login.msg, apierror=str(invalid_login))
- except requests.ConnectionError as connection_error:
- module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error))
-
-
-def get_all_objs(content, vimtype):
-
- obj = {}
- container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
- for managed_object_ref in container.view:
- obj.update({managed_object_ref: managed_object_ref.name})
- return obj
diff --git a/lib/ansible/modules/__init__.py b/lib/ansible/modules/__init__.py
index e69de29bb2..ae8ccff595 100644
--- a/lib/ansible/modules/__init__.py
+++ b/lib/ansible/modules/__init__.py
@@ -0,0 +1,20 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/modules/core b/lib/ansible/modules/core
deleted file mode 160000
-Subproject 9028e9d4be8a3dbb96c81a799e18f3adf63d9fd
diff --git a/lib/ansible/modules/extras b/lib/ansible/modules/extras
deleted file mode 160000
-Subproject dd80fa221ce0adb3abd658fbd1aa09bf7cf8a6d
diff --git a/lib/ansible/new_inventory/__init__.py b/lib/ansible/new_inventory/__init__.py
new file mode 100644
index 0000000000..b91d9f05a2
--- /dev/null
+++ b/lib/ansible/new_inventory/__init__.py
@@ -0,0 +1,341 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.inventory.group import Group
+from .host import Host
+from ansible.plugins.inventory.aggregate import InventoryAggregateParser
+
class Inventory:
    '''
    Create hosts and groups from inventory.

    Retrieve the hosts and groups that ansible knows about from this class.

    Retrieve raw variables (non-expanded) from the Group and Host classes
    returned from here.
    '''

    def __init__(self, inventory_list=None):
        '''
        :kwarg inventory_list: A list of inventory sources. This may be file
            names which will be parsed as ini-like files, executable scripts
            which return inventory data as json, directories of both of the
            above, or hostnames. Defaults to C.DEFAULT_HOST_LIST, resolved
            lazily here (rather than in the signature) so the configured
            value is read when the object is created.
        '''
        if inventory_list is None:
            inventory_list = C.DEFAULT_HOST_LIST

        # optional narrowing applied by set_host_restriction()/--limit
        self._restricted_to = None
        # optional pattern applied by set_filter_pattern()
        self._filter_pattern = None

        parser = InventoryAggregateParser(inventory_list)
        parser.parse()

        self._basedir = parser.basedir
        self._hosts = parser.hosts
        self._groups = parser.groups

    def get_hosts(self):
        '''
        Return the list of hosts, after filtering based on any set pattern
        and restricting the results based on the set host restrictions.
        '''

        if self._filter_pattern:
            hosts = self._filter_hosts()
        else:
            hosts = self._hosts[:]

        if self._restricted_to is not None:
            # intersect via a set for O(1) membership, but iterate the
            # original list so host ordering is preserved
            res_set = set(hosts).intersection(self._restricted_to)
            return [h for h in hosts if h in res_set]
        else:
            return hosts[:]

    def get_groups(self):
        '''
        Retrieve the Group objects known to the Inventory (as a copy).
        '''

        return self._groups[:]

    def get_host(self, hostname):
        '''
        Retrieve the Host object for a hostname, or None if not found.
        '''

        for host in self._hosts:
            if host.name == hostname:
                return host

        return None

    def get_group(self, groupname):
        '''
        Retrieve the Group object for a groupname, or None if not found.
        '''

        for group in self._groups:
            # fixed: previously compared against undefined name
            # "group_name" (the parameter is "groupname"), which raised
            # NameError on every call with a non-empty group list
            if group.name == groupname:
                return group

        return None

    def add_group(self, group):
        '''
        Add a new group to the inventory, ignoring duplicates.
        '''

        if group not in self._groups:
            self._groups.append(group)

    def set_filter_pattern(self, pattern='all'):
        '''
        Sets a pattern upon which hosts/groups will be filtered.
        This pattern can contain logical groupings such as unions,
        intersections and negations using special syntax.
        '''

        self._filter_pattern = pattern

    def set_host_restriction(self, restriction):
        '''
        Restrict operations to hosts in the given list.
        '''

        assert isinstance(restriction, list)
        self._restricted_to = restriction[:]

    def remove_host_restriction(self):
        '''
        Remove the restriction on hosts, if any.
        '''

        self._restricted_to = None

    def _filter_hosts(self):
        """
        Limits inventory results to a subset of inventory that matches the
        pattern set via set_filter_pattern(), such as to select a subset of
        a hosts selection that also belongs to a certain geographic group
        or numeric slice.

        Corresponds to --limit parameter to ansible-playbook.

        Multiple patterns may be specified in the stored pattern string as
        a comma, semicolon, or colon separated string.
        """

        hosts = []

        pattern_regular = []
        pattern_intersection = []
        pattern_exclude = []

        # fixed: previously read undefined attribute "self._pattern";
        # the pattern is stored as self._filter_pattern
        patterns = self._filter_pattern.replace(";", ":").split(":")
        for p in patterns:
            if p.startswith("!"):
                pattern_exclude.append(p)
            elif p.startswith("&"):
                pattern_intersection.append(p)
            elif p:
                pattern_regular.append(p)

        # if no regular pattern was given, hence only exclude and/or intersection
        # make that magically work
        if pattern_regular == []:
            pattern_regular = ['all']

        # when applying the host selectors, run those without the "&" or "!"
        # first, then the &s, then the !s.
        patterns = pattern_regular + pattern_intersection + pattern_exclude

        for p in patterns:
            intersect = False
            negate = False
            if p.startswith('&'):
                intersect = True
            elif p.startswith('!'):
                p = p[1:]
                negate = True

            target = self._resolve_pattern(p)
            if isinstance(target, Host):
                if negate and target in hosts:
                    # remove it
                    hosts.remove(target)
                elif target not in hosts:
                    # for both union and intersections, we just append it
                    hosts.append(target)
            else:
                if intersect:
                    hosts = [h for h in hosts if h not in target]
                elif negate:
                    hosts = [h for h in hosts if h in target]
                else:
                    to_append = [h for h in target if h.name not in [y.name for y in hosts]]
                    hosts.extend(to_append)

        return hosts

    def _resolve_pattern(self, pattern):
        '''
        Resolve a single pattern to a Host (exact hostname match) or to a
        list of hosts matching the (possibly enumerated) pattern.
        '''
        target = self.get_host(pattern)
        if target:
            return target
        else:
            (name, enumeration_details) = self._enumeration_info(pattern)
            hpat = self._hosts_in_unenumerated_pattern(name)
            result = self._apply_ranges(pattern, hpat)
            return result

    def _enumeration_info(self, pattern):
        """
        returns (pattern, limits) taking a regular pattern and finding out
        which parts of it correspond to start/stop offsets. limits is
        a tuple of (start, stop) or None
        """
        # local import: "re" is not imported at the top of this module
        import re

        # Do not parse regexes for enumeration info
        if pattern.startswith('~'):
            return (pattern, None)

        # The regex used to match on the range, which can be [x] or [x-y].
        pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
        m = pattern_re.match(pattern)
        if m:
            (target, first, last, rest) = m.groups()
            first = int(first)
            if last:
                if first < 0:
                    # local import: "errors" is not imported at module level
                    from ansible import errors
                    raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
                last = int(last)
            else:
                # [x] is shorthand for the single-element range [x-x]
                last = first
            return (target, (first, last))
        else:
            return (pattern, None)

    def _apply_ranges(self, pat, hosts):
        """
        given a pattern like foo, that matches hosts, return all of hosts
        given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
        """

        # If there are no hosts to select from, just return the
        # empty set. This prevents trying to do selections on an empty set.
        # issue#6258
        if not hosts:
            return hosts

        (loose_pattern, limits) = self._enumeration_info(pat)
        if not limits:
            return hosts

        (left, right) = limits

        # NOTE(review): _enumeration_info always yields ints, so the ''
        # checks below look like dead code carried over from the v1
        # inventory -- kept for behavioral parity; confirm before removing
        if left == '':
            left = 0
        if right == '':
            right = 0
        left = int(left)
        right = int(right)
        try:
            if left != right:
                return hosts[left:right]
            else:
                return [hosts[left]]
        except IndexError:
            # local import: "errors" is not imported at module level
            from ansible import errors
            raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)

    def _hosts_in_unenumerated_pattern(self, pattern):
        """ Get all host names matching the pattern """

        results = []
        hosts = []
        hostnames = set()

        # ignore any negative checks here, this is handled elsewhere
        pattern = pattern.replace("!", "").replace("&", "")

        def __append_host_to_results(host):
            # de-duplicate by identity and by name
            if host not in results and host.name not in hostnames:
                hostnames.add(host.name)
                results.append(host)

        # NOTE(review): self._match / self._match_list are not defined on
        # this class -- presumably ported from the v1 Inventory; confirm
        # they exist (or are added) before this path is exercised
        groups = self.get_groups()
        for group in groups:
            if pattern == 'all':
                for host in group.get_hosts():
                    __append_host_to_results(host)
            else:
                if self._match(group.name, pattern):
                    for host in group.get_hosts():
                        __append_host_to_results(host)
                else:
                    matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
                    for host in matching_hosts:
                        __append_host_to_results(host)

        if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
            new_host = self._create_implicit_localhost(pattern)
            results.append(new_host)
        return results

    def _create_implicit_localhost(self, pattern):
        '''
        Build an implicit local-connection Host for "localhost" patterns
        and attach it to the "ungrouped" group (creating it if needed).
        '''
        # local import: "sys" is not imported at the top of this module
        import sys

        new_host = Host(pattern)
        new_host._connection = 'local'
        new_host.set_variable("ansible_python_interpreter", sys.executable)
        ungrouped = self.get_group("ungrouped")
        if ungrouped is None:
            self.add_group(Group('ungrouped'))
            ungrouped = self.get_group('ungrouped')
            self.get_group('all').add_child_group(ungrouped)
        ungrouped.add_host(new_host)
        return new_host

    def is_file(self):
        '''
        Did inventory come from a file?

        :returns: True if the inventory is file based, False otherwise
        '''
        # not yet implemented for the aggregate parser
        pass

    def src(self):
        '''
        What's the complete path to the inventory file?

        :returns: Complete path to the inventory file. None if inventory is
            not file-based
        '''
        # not yet implemented for the aggregate parser
        pass

    def basedir(self):
        '''
        What directory from which the inventory was read.
        '''

        return self._basedir
diff --git a/lib/ansible/new_inventory/group.py b/lib/ansible/new_inventory/group.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/new_inventory/group.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/new_inventory/host.py b/lib/ansible/new_inventory/host.py
new file mode 100644
index 0000000000..78f190c423
--- /dev/null
+++ b/lib/ansible/new_inventory/host.py
@@ -0,0 +1,51 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
class Host:
    '''
    A minimal inventory host: a name plus its host-level variables.

    Connection details default to a plain SSH-reachable host; callers set
    them (e.g. _connection) directly after construction.
    '''

    def __init__(self, name):
        self._name = name
        self._connection = None
        self._ipv4_address = ''
        self._ipv6_address = ''
        self._port = 22
        self._vars = dict()

    def __repr__(self):
        # a host displays as its bare name
        return self._name

    def get_name(self):
        '''Return the host's name.'''
        return self._name

    def get_groups(self):
        '''Return the groups this host belongs to (none are tracked yet).'''
        return list()

    def set_variable(self, name, value):
        '''Set a single host variable.'''
        self._vars[name] = value

    def get_vars(self):
        '''
        Return a copy of all variables for this host, with the magic
        inventory_hostname variable layered on top.
        '''
        combined = dict(self._vars)
        combined['inventory_hostname'] = self._name
        return combined
diff --git a/lib/ansible/parsing/__init__.py b/lib/ansible/parsing/__init__.py
new file mode 100644
index 0000000000..9551343fbf
--- /dev/null
+++ b/lib/ansible/parsing/__init__.py
@@ -0,0 +1,222 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+
+from yaml import load, YAMLError
+
+from ansible.errors import AnsibleParserError
+from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
+from ansible.parsing.vault import VaultLib
+from ansible.parsing.splitter import unquote
+from ansible.parsing.yaml.loader import AnsibleLoader
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
+from ansible.utils.path import unfrackpath
+
class DataLoader():

    '''
    The DataLoader class is used to load and parse YAML or JSON content,
    either from a given file name or from a string that was previously
    read in through other means. A Vault password can be specified, and
    any vault-encrypted files will be decrypted.

    Data read from files will also be cached, so the file will never be
    read from disk more than once.

    Usage:

        dl = DataLoader()
        (or)
        dl = DataLoader(vault_password='foo')

        ds = dl.load('...')
        ds = dl.load_from_file('/path/to/file')
    '''

    def __init__(self, vault_password=None):
        # base directory used to resolve relative paths in path_dwim()
        self._basedir = '.'
        self._vault_password = vault_password
        # parsed results keyed by resolved file name; ensures each file is
        # read/decrypted/parsed at most once
        self._FILE_CACHE = dict()

        self._vault = VaultLib(password=vault_password)

    def load(self, data, file_name='<string>', show_content=True):
        '''
        Creates a python datastructure from the given data, which can be either
        a JSON or YAML string.

        :arg data: the string to parse
        :kwarg file_name: name reported in parse-error messages
        :kwarg show_content: whether error output may echo the content
            (False for decrypted vault data)
        :returns: the parsed data structure
        :raises AnsibleParserError: if neither JSON nor YAML parsing succeeds
        '''

        try:
            # we first try to load this data as JSON
            return json.loads(data)
        # was a bare "except:" -- narrowed to Exception so the JSON->YAML
        # fallback still triggers on any parse failure, but SystemExit and
        # KeyboardInterrupt are no longer swallowed
        except Exception:
            # if loading JSON failed for any reason, we go ahead
            # and try to parse it as YAML instead

            if isinstance(data, AnsibleUnicode):
                # The PyYAML's libyaml bindings use PyUnicode_CheckExact so
                # they are unable to cope with our subclass.
                # Unwrap and re-wrap the unicode so we can keep track of line
                # numbers
                new_data = unicode(data)
            else:
                new_data = data
            try:
                new_data = self._safe_load(new_data, file_name=file_name)
            except YAMLError as yaml_exc:
                self._handle_error(yaml_exc, file_name, show_content)

            if isinstance(data, AnsibleUnicode):
                # restore the position-tracking wrapper stripped above
                new_data = AnsibleUnicode(new_data)
                new_data.ansible_pos = data.ansible_pos
            return new_data

    def load_from_file(self, file_name):
        ''' Loads data from a file, which can contain either JSON or YAML. '''

        file_name = self.path_dwim(file_name)

        # if the file has already been read in and cached, we'll
        # return those results to avoid more file/vault operations
        if file_name in self._FILE_CACHE:
            return self._FILE_CACHE[file_name]

        # read the file contents and load the data structure from them
        (file_data, show_content) = self._get_file_contents(file_name)
        parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)

        # cache the file contents for next time
        self._FILE_CACHE[file_name] = parsed_data

        return parsed_data

    def path_exists(self, path):
        '''Return True if the path exists on disk.'''
        return os.path.exists(path)

    def is_file(self, path):
        '''Return True if the path is a regular file.'''
        return os.path.isfile(path)

    def is_directory(self, path):
        '''Return True if the path is a directory.'''
        return os.path.isdir(path)

    def list_directory(self, path):
        '''Return the directory's entries (os.listdir passthrough).'''
        return os.listdir(path)

    def _safe_load(self, stream, file_name=None):
        ''' Implements yaml.safe_load(), except using our custom loader class. '''

        loader = AnsibleLoader(stream, file_name)
        try:
            return loader.get_single_data()
        finally:
            # always release parser resources, even on error
            loader.dispose()

    def _get_file_contents(self, file_name):
        '''
        Reads the file contents from the given file name, and will decrypt them
        if they are found to be vault-encrypted.

        :returns: (data, show_content) tuple; show_content is False when the
            data came from a vault (so errors must not echo it)
        :raises AnsibleParserError: if the file is missing or unreadable
        '''

        if not self.path_exists(file_name) or not self.is_file(file_name):
            raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)

        show_content = True
        try:
            with open(file_name, 'r') as f:
                data = f.read()
                if self._vault.is_encrypted(data):
                    data = self._vault.decrypt(data)
                    show_content = False
            return (data, show_content)
        except (IOError, OSError) as e:
            raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))

    def _handle_error(self, yaml_exc, file_name, show_content):
        '''
        Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
        file name/position where a YAML exception occurred, and raises an AnsibleParserError
        to display the syntax exception information.
        '''

        # if the YAML exception contains a problem mark, use it to construct
        # an object the error class can use to display the faulty line
        err_obj = None
        if hasattr(yaml_exc, 'problem_mark'):
            err_obj = AnsibleBaseYAMLObject()
            # problem_mark positions are 0-based; report them 1-based
            err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)

        raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)

    def get_basedir(self):
        ''' returns the current basedir '''
        return self._basedir

    def set_basedir(self, basedir):
        ''' sets the base directory, used to find files when a relative path is given '''

        # None means "leave the current basedir alone"
        if basedir is not None:
            self._basedir = basedir

    def path_dwim(self, given):
        '''
        make relative paths work like folks expect: absolute and ~ paths
        are used as-is, anything else is resolved against the basedir.
        '''

        given = unquote(given)

        if given.startswith("/"):
            return os.path.abspath(given)
        elif given.startswith("~"):
            return os.path.abspath(os.path.expanduser(given))
        else:
            return os.path.abspath(os.path.join(self._basedir, given))

    def path_dwim_relative(self, role_path, dirname, source):
        ''' find one file in a directory one level up in a dir named dirname relative to current '''

        basedir = os.path.dirname(role_path)
        if os.path.islink(basedir):
            basedir = unfrackpath(basedir)
            template2 = os.path.join(basedir, dirname, source)
        else:
            template2 = os.path.join(basedir, '..', dirname, source)

        # 1) look inside the role itself
        source1 = os.path.join(role_path, dirname, source)
        if os.path.exists(source1):
            return source1

        # 2) look one level up from the role (temporarily rebasing so
        #    path_dwim resolves against the role's parent)
        cur_basedir = self._basedir
        self.set_basedir(basedir)
        source2 = self.path_dwim(template2)
        if os.path.exists(source2):
            self.set_basedir(cur_basedir)
            return source2

        # 3) fall back to a plain basedir-relative path
        obvious_local_path = self.path_dwim(source)
        if os.path.exists(obvious_local_path):
            self.set_basedir(cur_basedir)
            return obvious_local_path

        self.set_basedir(cur_basedir)
        return source2  # which does not exist
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
new file mode 100644
index 0000000000..ed527f1b08
--- /dev/null
+++ b/lib/ansible/parsing/mod_args.py
@@ -0,0 +1,278 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+from ansible.errors import AnsibleParserError
+from ansible.plugins import module_loader
+from ansible.parsing.splitter import parse_kv
+
class ModuleArgsParser:

    """
    There are several ways a module and argument set can be expressed:

    # legacy form (for a shell command)
    - action: shell echo hi

    # common shorthand for local actions vs delegate_to
    - local_action: shell echo hi

    # most commonly:
    - copy: src=a dest=b

    # legacy form
    - action: copy src=a dest=b

    # complex args form, for passing structured data
    - copy:
        src: a
        dest: b

    # gross, but technically legal
    - action:
        module: copy
        args:
          src: a
          dest: b

    # extra gross, but also legal. in this case, the args specified
    # will act as 'defaults' and will be overridden by any args specified
    # in one of the other formats (complex args under the action, or
    # parsed from the k=v string)
    - command: 'pwd'
      args:
        chdir: '/tmp'


    This class has some of the logic to canonicalize these into the form

    - module: <module_name>
      delegate_to: <optional>
      args: <args>

    Args may also be munged for certain shell command parameters.
    """

    def __init__(self, task_ds=None):
        '''
        :arg task_ds: the raw task datastructure (a dict) to be parsed
        '''
        # NOTE: a literal ``dict()`` default is evaluated once at definition
        # time and shared by every instance constructed without an argument,
        # so a None sentinel is used instead.
        if task_ds is None:
            task_ds = dict()
        assert isinstance(task_ds, dict)
        self._task_ds = task_ds


    def _split_module_string(self, module_string):
        '''
        when module names are expressed like:
        action: copy src=a dest=b
        the first part of the string is the name of the module
        and the rest are strings pertaining to the arguments.

        :returns: tuple of (module_name, argument_string)
        '''
        # NOTE: the parameter was previously named ``str``, shadowing the builtin
        tokens = module_string.split()
        if len(tokens) > 1:
            return (tokens[0], " ".join(tokens[1:]))
        else:
            return (tokens[0], "")


    def _handle_shell_weirdness(self, action, args):
        '''
        given an action name and an args dictionary, return the
        proper action name and args dictionary. This mostly is due
        to shell/command being treated special and nothing else
        '''

        # don't handle non shell/command modules in this function
        # TODO: in terms of the whole app, should 'raw' also fit here?
        if action not in ('shell', 'command'):
            return (action, args)

        # the shell module really is the command module with an additional
        # parameter
        if action == 'shell':
            action = 'command'
            args['_uses_shell'] = True

        return (action, args)

    def _normalize_parameters(self, thing, action=None, additional_args=None):
        '''
        arguments can be fuzzy. Deal with all the forms.

        :arg thing: the value found under the action/module key
        :arg action: module name, when it is already known (old-style form)
        :arg additional_args: low-priority defaults from a sibling 'args' key
        :returns: tuple of (action, final args dict)
        '''

        # final args are the ones we'll eventually return, so first update
        # them with any additional args specified, which have lower priority
        # than those which may be parsed/normalized next
        # (None sentinel replaces the old shared-mutable dict() default)
        final_args = dict()
        if additional_args:
            final_args.update(additional_args)

        # how we normalize depends if we figured out what the module name is
        # yet. If we have already figured it out, it's an 'old style' invocation.
        # otherwise, it's not

        if action is not None:
            args = self._normalize_old_style_args(thing, action)
        else:
            (action, args) = self._normalize_new_style_args(thing)

        # this can occasionally happen, simplify
        if args and 'args' in args:
            tmp_args = args['args']
            del args['args']
            if isinstance(tmp_args, string_types):
                tmp_args = parse_kv(tmp_args)
            args.update(tmp_args)

        # finally, update the args we're going to return with the ones
        # which were normalized above
        if args:
            final_args.update(args)

        return (action, final_args)

    def _normalize_old_style_args(self, thing, action):
        '''
        deals with fuzziness in old-style (action/local_action) module invocations
        returns tuple of (module_name, dictionary_args)

        possible example inputs:
           { 'local_action' : 'shell echo hi' }
           { 'action' : 'shell echo hi' }
           { 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }}
        standardized outputs like:
           ( 'command', { _raw_params: 'echo hi', _uses_shell: True }
        '''

        if isinstance(thing, dict):
            # form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon!
            args = thing
        elif isinstance(thing, string_types):
            # form is like: local_action: copy src=a dest=b ... pretty common
            # free-form modules keep their unparsed remainder in _raw_params
            check_raw = action in ('command', 'shell', 'script')
            args = parse_kv(thing, check_raw=check_raw)
        elif thing is None:
            # this can happen with modules which take no params, like ping:
            args = None
        else:
            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
        return args

    def _normalize_new_style_args(self, thing):
        '''
        deals with fuzziness in new style module invocations
        accepting key=value pairs and dictionaries, and always returning dictionaries
        returns tuple of (module_name, dictionary_args)

        possible example inputs:
            { 'shell' : 'echo hi' }
            { 'ec2'   : { 'region' : 'xyz' }
            { 'ec2'   : 'region=xyz' }
        standardized outputs like:
            ('ec2', { region: 'xyz'} )
        '''

        action = None
        args = None

        if isinstance(thing, dict):
            # form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
            thing = thing.copy()
            if 'module' in thing:
                action = thing['module']
                args = thing.copy()
                del args['module']

        elif isinstance(thing, string_types):
            # form is like: copy: src=a dest=b ... common shorthand throughout ansible
            (action, args) = self._split_module_string(thing)
            check_raw = action in ('command', 'shell', 'script')
            args = parse_kv(args, check_raw=check_raw)

        else:
            # need a dict or a string, so giving up
            raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)

        return (action, args)

    def parse(self):
        '''
        Given a task in one of the supported forms, parses and returns
        returns the action, arguments, and delegate_to values for the
        task, dealing with all sorts of levels of fuzziness.

        :returns: tuple of (action, args dict, delegate_to)
        :raises AnsibleParserError: on conflicting or missing action statements
        '''

        thing = None

        action = None
        delegate_to = self._task_ds.get('delegate_to', None)
        args = dict()


        #
        # We can have one of action, local_action, or module specified
        #


        # this is the 'extra gross' scenario detailed above, so we grab
        # the args and pass them in as additional arguments, which can/will
        # be overwritten via dict updates from the other arg sources below
        # FIXME: add test cases for this
        additional_args = self._task_ds.get('args', dict())

        # action
        if 'action' in self._task_ds:
            # an old school 'action' statement
            thing = self._task_ds['action']
            action, args = self._normalize_parameters(thing, additional_args=additional_args)

        # local_action
        if 'local_action' in self._task_ds:
            # local_action is similar but also implies a delegate_to
            if action is not None:
                raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
            thing = self._task_ds.get('local_action', '')
            delegate_to = 'localhost'
            action, args = self._normalize_parameters(thing, additional_args=additional_args)

        # module: <stuff> is the more new-style invocation

        # walk the input dictionary to see we recognize a module name
        for (item, value) in iteritems(self._task_ds):
            if item in module_loader or item == 'meta' or item == 'include':
                # finding more than one module name is a problem
                if action is not None:
                    raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
                action = item
                thing = value
                action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)

        # if we didn't see any module in the task at all, it's not a task really
        if action is None:
            raise AnsibleParserError("no action detected in task", obj=self._task_ds)
        # FIXME: disabled for now, as there are other places besides the shell/script modules where
        #        having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
        #elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
        #    raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)

        # shell modules require special handling
        (action, args) = self._handle_shell_weirdness(action, args)

        return (action, args, delegate_to)
diff --git a/lib/ansible/parsing/splitter.py b/lib/ansible/parsing/splitter.py
new file mode 100644
index 0000000000..a1dc051d24
--- /dev/null
+++ b/lib/ansible/parsing/splitter.py
@@ -0,0 +1,273 @@
+# (c) 2014 James Cammarata, <jcammarata@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+import codecs
+
+# Decode escapes adapted from rspeer's answer here:
+# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
+_HEXCHAR = '[a-fA-F0-9]'
+_ESCAPE_SEQUENCE_RE = re.compile(r'''
+ ( \\U{0} # 8-digit hex escapes
+ | \\u{1} # 4-digit hex escapes
+ | \\x{2} # 2-digit hex escapes
+ | \\[0-7]{{1,3}} # Octal escapes
+ | \\N\{{[^}}]+\}} # Unicode characters by name
+ | \\[\\'"abfnrtv] # Single-character escapes
+ )'''.format(_HEXCHAR*8, _HEXCHAR*4, _HEXCHAR*2), re.UNICODE | re.VERBOSE)
+
+def _decode_escapes(s):
+ def decode_match(match):
+ return codecs.decode(match.group(0), 'unicode-escape')
+
+ return _ESCAPE_SEQUENCE_RE.sub(decode_match, s)
+
def parse_kv(args, check_raw=False):
    '''
    Convert a string of key/value items to a dict. If any free-form params
    are found and the check_raw option is set to True, they will be added
    to a new parameter called '_raw_params'. If check_raw is not enabled,
    they will simply be ignored.

    :arg args: the argument string to parse
    :arg check_raw: whether non k=v tokens should be collected as raw params
    :returns: dict of options, possibly including u'_raw_params'
    :raises AnsibleError: on unbalanced quoting or a user-supplied key
        beginning with an underscore
    '''

    ### FIXME: args should already be a unicode string
    from ansible.utils.unicode import to_unicode
    # lazy import, mirroring to_unicode above; this function previously
    # referenced the unimported names ``errors.AnsibleError``/``AnsibleError``
    # and raised NameError instead of the intended error
    from ansible.errors import AnsibleError
    args = to_unicode(args, nonstring='passthru')

    options = {}
    if args is not None:
        try:
            vargs = split_args(args)
        except ValueError as ve:
            if 'no closing quotation' in str(ve).lower():
                raise AnsibleError("error parsing argument string, try quoting the entire line.")
            else:
                raise

        raw_params = []
        for x in vargs:
            x = _decode_escapes(x)
            if "=" in x:
                pos = 0
                try:
                    # find the first '=' that is not escaped with a backslash
                    while True:
                        pos = x.index('=', pos + 1)
                        if pos > 0 and x[pos - 1] != '\\':
                            break
                except ValueError:
                    # ran out of string, but we must have some escaped equals,
                    # so replace those and append this to the list of raw params
                    raw_params.append(x.replace('\\=', '='))
                    continue

                k = x[:pos]
                v = x[pos + 1:]

                # only internal variables can start with an underscore, so
                # we don't allow users to set them directly in arguments
                if k.startswith('_'):
                    raise AnsibleError("invalid parameter specified: '%s'" % k)

                # FIXME: make the retrieval of this list of shell/command
                #        options a function, so the list is centralized
                if check_raw and k not in ('creates', 'removes', 'chdir', 'executable', 'warn'):
                    raw_params.append(x)
                else:
                    options[k.strip()] = unquote(v.strip())
            else:
                raw_params.append(x)

        # recombine the free-form params, if any were found, and assign
        # them to a special option for use later by the shell/command module
        if len(raw_params) > 0:
            options[u'_raw_params'] = ' '.join(raw_params)

    return options
+
+def _get_quote_state(token, quote_char):
+ '''
+ the goal of this block is to determine if the quoted string
+ is unterminated in which case it needs to be put back together
+ '''
+ # the char before the current one, used to see if
+ # the current character is escaped
+ prev_char = None
+ for idx, cur_char in enumerate(token):
+ if idx > 0:
+ prev_char = token[idx-1]
+ if cur_char in '"\'' and prev_char != '\\':
+ if quote_char:
+ if cur_char == quote_char:
+ quote_char = None
+ else:
+ quote_char = cur_char
+ return quote_char
+
+def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
+ '''
+ this function counts the number of opening/closing blocks for a
+ given opening/closing type and adjusts the current depth for that
+ block based on the difference
+ '''
+ num_open = token.count(open_token)
+ num_close = token.count(close_token)
+ if num_open != num_close:
+ cur_depth += (num_open - num_close)
+ if cur_depth < 0:
+ cur_depth = 0
+ return cur_depth
+
def split_args(args):
    '''
    Splits args on whitespace, but intelligently reassembles
    those that may have been split over a jinja2 block or quotes.

    When used in a remote module, we won't ever have to be concerned about
    jinja2 blocks, however this function is/will be used in the
    core portions as well before the args are templated.

    example input: a=b c="foo bar"
    example output: ['a=b', 'c="foo bar"']

    Basically this is a variation shlex that has some more intelligence for
    how Ansible needs to use it.

    :arg args: argument string to split
    :returns: list of tokens, with quoted spans and jinja2 blocks kept whole
    :raises Exception: when quotes or jinja2 blocks are left unbalanced
    '''

    # the list of params parsed out of the arg string
    # this is going to be the result value when we are done
    params = []

    # Initial split on white space
    args = args.strip()
    items = args.strip().split('\n')

    # iterate over the tokens, and reassemble any that may have been
    # split on a space inside a jinja2 block.
    # ex if tokens are "{{", "foo", "}}" these go together

    # These variables are used
    # to keep track of the state of the parsing, since blocks and quotes
    # may be nested within each other.

    quote_char = None
    inside_quotes = False
    print_depth = 0 # used to count nested jinja2 {{ }} blocks
    block_depth = 0 # used to count nested jinja2 {% %} blocks
    comment_depth = 0 # used to count nested jinja2 {# #} blocks

    # now we loop over each split chunk, coalescing tokens if the white space
    # split occurred within quotes or a jinja2 block of some kind
    for itemidx,item in enumerate(items):

        # we split on spaces and newlines separately, so that we
        # can tell which character we split on for reassembly
        # inside quotation characters
        tokens = item.strip().split(' ')

        line_continuation = False
        for idx,token in enumerate(tokens):

            # if we hit a line continuation character, but
            # we're not inside quotes, ignore it and continue
            # on to the next token while setting a flag
            if token == '\\' and not inside_quotes:
                line_continuation = True
                continue

            # store the previous quoting state for checking later
            was_inside_quotes = inside_quotes
            quote_char = _get_quote_state(token, quote_char)
            inside_quotes = quote_char is not None

            # multiple conditions may append a token to the list of params,
            # so we keep track with this flag to make sure it only happens once
            # append means add to the end of the list, don't append means concatenate
            # it to the end of the last token
            appended = False

            # if we're inside quotes now, but weren't before, append the token
            # to the end of the list, since we'll tack on more to it later
            # otherwise, if we're inside any jinja2 block, inside quotes, or we were
            # inside quotes (but aren't now) concat this token to the last param
            if inside_quotes and not was_inside_quotes:
                params.append(token)
                appended = True
            elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
                if idx == 0 and was_inside_quotes:
                    # first token of the line directly continues the open quote
                    params[-1] = "%s%s" % (params[-1], token)
                elif len(tokens) > 1:
                    # re-insert the space character this token was split on
                    spacer = ''
                    if idx > 0:
                        spacer = ' '
                    params[-1] = "%s%s%s" % (params[-1], spacer, token)
                else:
                    # only token on this line: re-insert the newline split
                    params[-1] = "%s\n%s" % (params[-1], token)
                appended = True

            # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
            # and may append the current token to the params (if we haven't previously done so)
            prev_print_depth = print_depth
            print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
            if print_depth != prev_print_depth and not appended:
                params.append(token)
                appended = True

            prev_block_depth = block_depth
            block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
            if block_depth != prev_block_depth and not appended:
                params.append(token)
                appended = True

            prev_comment_depth = comment_depth
            comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
            if comment_depth != prev_comment_depth and not appended:
                params.append(token)
                appended = True

            # finally, if we're at zero depth for all blocks and not inside quotes, and have not
            # yet appended anything to the list of params, we do so now
            if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
                params.append(token)

        # if this was the last token in the list, and we have more than
        # one item (meaning we split on newlines), add a newline back here
        # to preserve the original structure
        if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
            params[-1] += '\n'

        # always clear the line continuation flag
        line_continuation = False

    # If we're done and things are not at zero depth or we're still inside quotes,
    # raise an error to indicate that the args were unbalanced
    # NOTE(review): a bare Exception -- callers (parse_kv) only special-case
    # the ValueError message from shlex-like failures; confirm intent
    if print_depth or block_depth or comment_depth or inside_quotes:
        raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")

    return params
+
def is_quoted(data):
    '''True when *data* begins and ends with the same quote character.'''
    return len(data) > 0 and data[0] == data[-1] and data[0] in '"\''

def unquote(data):
    ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
    return data[1:-1] if is_quoted(data) else data
diff --git a/lib/ansible/parsing/utils/__init__.py b/lib/ansible/parsing/utils/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/parsing/utils/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/parsing/utils/jsonify.py b/lib/ansible/parsing/utils/jsonify.py
new file mode 100644
index 0000000000..59dbf9f8c4
--- /dev/null
+++ b/lib/ansible/parsing/utils/jsonify.py
@@ -0,0 +1,45 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
def jsonify(result, format=False):
    ''' format JSON output (compact or pretty-printed)

    :arg result: a dict of results (or None, which yields "{}")
    :arg format: when True, pretty-print with an indent of 4
    :returns: the JSON string
    '''

    if result is None:
        return "{}"

    result2 = result.copy()
    for key, value in result2.items():
        # bytes values (str on Python 2) are not JSON-serializable, so decode
        # them; the old ``type(value) is str`` check broke on Python 3, where
        # str has no .decode() method
        if isinstance(value, bytes):
            result2[key] = value.decode('utf-8', 'ignore')

    indent = 4 if format else None

    try:
        return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        # fall back to ASCII-escaped output for undecodable data
        return json.dumps(result2, sort_keys=True, indent=indent)
+
diff --git a/lib/ansible/parsing/vault/__init__.py b/lib/ansible/parsing/vault/__init__.py
new file mode 100644
index 0000000000..e45fddc197
--- /dev/null
+++ b/lib/ansible/parsing/vault/__init__.py
@@ -0,0 +1,603 @@
+# (c) 2014, James Tanner <tanner.jc@gmail.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+# ansible-pull is a script that runs ansible in local mode
+# after checking out a playbooks directory from source repo. There is an
+# example playbook to bootstrap this script in the examples/ dir which
+# installs ansible and sets it up to run on cron.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import os
+import shlex
+import shutil
+import tempfile
+from io import BytesIO
+from subprocess import call
+from ansible import errors
+from hashlib import sha256
+# Note: Only used for loading obsolete VaultAES files. All files are written
+# using the newer VaultAES256 which does not require md5
+from hashlib import md5
+from binascii import hexlify
+from binascii import unhexlify
+from six import binary_type, byte2int, PY2, text_type
+from ansible import constants as C
+from ansible.utils.unicode import to_unicode, to_bytes
+
+
+try:
+ from Crypto.Hash import SHA256, HMAC
+ HAS_HASH = True
+except ImportError:
+ HAS_HASH = False
+
+# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
+try:
+ from Crypto.Util import Counter
+ HAS_COUNTER = True
+except ImportError:
+ HAS_COUNTER = False
+
+# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
+try:
+ from Crypto.Protocol.KDF import PBKDF2
+ HAS_PBKDF2 = True
+except ImportError:
+ HAS_PBKDF2 = False
+
+# AES IMPORTS
+try:
+ from Crypto.Cipher import AES as AES
+ HAS_AES = True
+except ImportError:
+ HAS_AES = False
+
+CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
+
+HEADER=u'$ANSIBLE_VAULT'
+CIPHER_WHITELIST=['AES', 'AES256']
+
+
class VaultLib(object):
    '''
    Encrypts and decrypts strings in the $ANSIBLE_VAULT envelope format: a
    header line ``$ANSIBLE_VAULT;<version>;<cipher_name>`` followed by the
    cipher output wrapped at 80 characters per line.
    '''

    def __init__(self, password):
        # the shared secret used by both encrypt() and decrypt()
        self.password = password
        # set explicitly by callers, defaulted in encrypt(), or parsed
        # from the file header during decrypt()
        self.cipher_name = None
        self.version = '1.1'

    def is_encrypted(self, data):
        '''Return True when *data* starts with the vault header.'''
        data = to_unicode(data)
        if data.startswith(HEADER):
            return True
        else:
            return False

    def encrypt(self, data):
        '''Encrypt *data* and wrap the result in the vault envelope.

        Raises AnsibleError if the data is already encrypted or the
        configured cipher is unknown.
        '''
        data = to_unicode(data)

        if self.is_encrypted(data):
            raise errors.AnsibleError("data is already encrypted")

        if not self.cipher_name:
            # default to the strongest supported cipher when none was chosen
            self.cipher_name = "AES256"
            # raise errors.AnsibleError("the cipher must be set before encrypting data")

        # resolve the Vault<cipher> class defined later in this module
        if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
            cipher = globals()['Vault' + self.cipher_name]
            this_cipher = cipher()
        else:
            raise errors.AnsibleError("{} cipher could not be found".format(self.cipher_name))

        """
        # combine sha + data
        this_sha = sha256(data).hexdigest()
        tmp_data = this_sha + "\n" + data
        """

        # encrypt the data with the selected cipher
        enc_data = this_cipher.encrypt(data, self.password)

        # add header
        tmp_data = self._add_header(enc_data)
        return tmp_data

    def decrypt(self, data):
        '''Strip the vault envelope from *data* and decrypt the payload.

        Raises AnsibleError when no password is set, the data is not
        encrypted, the cipher is unknown, or decryption fails.
        '''
        data = to_bytes(data)

        if self.password is None:
            raise errors.AnsibleError("A vault password must be specified to decrypt data")

        if not self.is_encrypted(data):
            raise errors.AnsibleError("data is not encrypted")

        # clean out header; side effect: sets self.version/self.cipher_name
        data = self._split_header(data)

        # create the cipher object
        ciphername = to_unicode(self.cipher_name)
        if 'Vault' + ciphername in globals() and ciphername in CIPHER_WHITELIST:
            cipher = globals()['Vault' + ciphername]
            this_cipher = cipher()
        else:
            raise errors.AnsibleError("{} cipher could not be found".format(ciphername))

        # try to unencrypt data
        data = this_cipher.decrypt(data, self.password)
        if data is None:
            raise errors.AnsibleError("Decryption failed")

        return data

    def _add_header(self, data):
        '''Prefix the vault header and wrap the payload at 80 chars/line.'''
        # combine header and encrypted data in 80 char columns

        #tmpdata = hexlify(data)
        tmpdata = [to_bytes(data[i:i+80]) for i in range(0, len(data), 80)]
        if not self.cipher_name:
            raise errors.AnsibleError("the cipher must be set before adding a header")

        dirty_data = to_bytes(HEADER + ";" + self.version + ";" + self.cipher_name + "\n")
        for l in tmpdata:
            dirty_data += l + b'\n'

        return dirty_data


    def _split_header(self, data):
        '''Parse version and cipher from the header; return the payload.

        Side effect: updates self.version and self.cipher_name from the
        header fields.
        '''
        # used by decrypt

        tmpdata = data.split(b'\n')
        tmpheader = tmpdata[0].strip().split(b';')

        self.version = to_unicode(tmpheader[1].strip())
        self.cipher_name = to_unicode(tmpheader[2].strip())
        clean_data = b'\n'.join(tmpdata[1:])

        """
        # strip out newline, join, unhex
        clean_data = [ x.strip() for x in clean_data ]
        clean_data = unhexlify(''.join(clean_data))
        """

        return clean_data

    def __enter__(self):
        return self

    def __exit__(self, *err):
        pass
+
+class VaultEditor(object):
+ # uses helper methods for write_file(self, filename, data)
+ # to write a file so that code isn't duplicated for simple
+ # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
+ # ... "Don't Repeat Yourself", etc.
+
+ def __init__(self, cipher_name, password, filename):
+ # instantiates a member variable for VaultLib
+ self.cipher_name = cipher_name
+ self.password = password
+ self.filename = filename
+
+ def _edit_file_helper(self, existing_data=None, cipher=None):
+ # make sure the umask is set to a sane value
+ old_umask = os.umask(0o077)
+
+ # Create a tempfile
+ _, tmp_path = tempfile.mkstemp()
+
+ if existing_data:
+ self.write_data(existing_data, tmp_path)
+
+ # drop the user into an editor on the tmp file
+ call(self._editor_shell_command(tmp_path))
+ tmpdata = self.read_data(tmp_path)
+
+ # create new vault
+ this_vault = VaultLib(self.password)
+ if cipher:
+ this_vault.cipher_name = cipher
+
+ # encrypt new data and write out to tmp
+ enc_data = this_vault.encrypt(tmpdata)
+ self.write_data(enc_data, tmp_path)
+
+ # shuffle tmp file into place
+ self.shuffle_files(tmp_path, self.filename)
+
+ # and restore umask
+ os.umask(old_umask)
+
+ def create_file(self):
+ """ create a new encrypted file """
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
+
+ # Let the user specify contents and save file
+ self._edit_file_helper(cipher=self.cipher_name)
+
+ def decrypt_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if not os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s does not exist" % self.filename)
+
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ if this_vault.is_encrypted(tmpdata):
+ dec_data = this_vault.decrypt(tmpdata)
+ if dec_data is None:
+ raise errors.AnsibleError("Decryption failed")
+ else:
+ self.write_data(dec_data, self.filename)
+ else:
+ raise errors.AnsibleError("%s is not encrypted" % self.filename)
+
+ def edit_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt to tmpfile
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+
+ # let the user edit the data and save
+ self._edit_file_helper(existing_data=dec_data)
+ ###we want the cipher to default to AES256 (get rid of files
+ # encrypted with the AES cipher)
+ #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
+
+
+ def view_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt to tmpfile
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+ _, tmp_path = tempfile.mkstemp()
+ self.write_data(dec_data, tmp_path)
+
+ # drop the user into pager on the tmp file
+ call(self._pager_shell_command(tmp_path))
+ os.remove(tmp_path)
+
+ def encrypt_file(self):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ if not os.path.isfile(self.filename):
+ raise errors.AnsibleError("%s does not exist" % self.filename)
+
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ this_vault.cipher_name = self.cipher_name
+ if not this_vault.is_encrypted(tmpdata):
+ enc_data = this_vault.encrypt(tmpdata)
+ self.write_data(enc_data, self.filename)
+ else:
+ raise errors.AnsibleError("%s is already encrypted" % self.filename)
+
+ def rekey_file(self, new_password):
+
+ if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
+ raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+ # decrypt
+ tmpdata = self.read_data(self.filename)
+ this_vault = VaultLib(self.password)
+ dec_data = this_vault.decrypt(tmpdata)
+
+ # create new vault
+ new_vault = VaultLib(new_password)
+
+ # we want to force cipher to the default
+ #new_vault.cipher_name = this_vault.cipher_name
+
+ # re-encrypt data and re-write file
+ enc_data = new_vault.encrypt(dec_data)
+ self.write_data(enc_data, self.filename)
+
+ def read_data(self, filename):
+ f = open(filename, "rb")
+ tmpdata = f.read()
+ f.close()
+ return tmpdata
+
+ def write_data(self, data, filename):
+ if os.path.isfile(filename):
+ os.remove(filename)
+ f = open(filename, "wb")
+ f.write(to_bytes(data))
+ f.close()
+
+ def shuffle_files(self, src, dest):
+ # overwrite dest with src
+ if os.path.isfile(dest):
+ os.remove(dest)
+ shutil.move(src, dest)
+
+    def _editor_shell_command(self, filename):
+        """Build the argv list to open *filename* in the user's $EDITOR.
+
+        Defaults to vim; $EDITOR may contain arguments, so it is shlex-split.
+        """
+        EDITOR = os.environ.get('EDITOR','vim')
+        editor = shlex.split(EDITOR)
+        editor.append(filename)
+
+        return editor
+
+    def _pager_shell_command(self, filename):
+        """Build the argv list to view *filename* in the user's $PAGER.
+
+        Defaults to less; $PAGER may contain arguments, so it is shlex-split.
+        """
+        PAGER = os.environ.get('PAGER','less')
+        pager = shlex.split(PAGER)
+        pager.append(filename)
+
+        return pager
+
+########################################
+# CIPHERS #
+########################################
+
+class VaultAES(object):
+    """Legacy AES-CBC vault cipher.
+
+    Obsoleted by VaultAES256 (encrypt-then-MAC, better KDF); kept only so
+    old vault files can still be decrypted and upgraded.
+    """
+
+    # this version has been obsoleted by the VaultAES256 class
+    # which uses encrypt-then-mac (fixing order) and also improving the KDF used
+    # code remains for upgrade purposes only
+    # http://stackoverflow.com/a/16761459
+
+    def __init__(self):
+        # refuse to construct at all without a usable AES implementation
+        if not HAS_AES:
+            raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+    def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
+
+        """ Create a key and an initialization vector """
+
+        # OpenSSL EVP_BytesToKey-style derivation: repeatedly hash
+        # (previous digest + password + salt) with md5 until enough bytes
+        # are accumulated for both the key and the IV
+        d = d_i = b''
+        while len(d) < key_length + iv_length:
+            text = "{}{}{}".format(d_i, password, salt)
+            d_i = md5(to_bytes(text)).digest()
+            d += d_i
+
+        key = d[:key_length]
+        iv = d[key_length:key_length+iv_length]
+
+        return key, iv
+
+    def encrypt(self, data, password, key_length=32):
+
+        """ Read plaintext data from in_file and write encrypted to out_file """
+
+        # prepend a sha256 of the plaintext so decrypt() can verify the
+        # password was correct (this legacy format has no MAC)
+        # combine sha + data
+        this_sha = sha256(to_bytes(data)).hexdigest()
+        tmp_data = this_sha + "\n" + data
+
+        in_file = BytesIO(to_bytes(tmp_data))
+        in_file.seek(0)
+        out_file = BytesIO()
+
+        bs = AES.block_size
+
+        # Get a block of random data. EL does not have Crypto.Random.new()
+        # so os.urandom is used for cross platform purposes
+        salt = os.urandom(bs - len('Salted__'))
+
+        key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        # openssl-compatible header: b'Salted__' followed by the salt
+        full = to_bytes(b'Salted__' + salt)
+        out_file.write(full)
+        finished = False
+        while not finished:
+            chunk = in_file.read(1024 * bs)
+            if len(chunk) == 0 or len(chunk) % bs != 0:
+                # pad the final chunk; a full block of padding is added
+                # when the data is already block-aligned
+                padding_length = (bs - len(chunk) % bs) or bs
+                chunk += to_bytes(padding_length * chr(padding_length))
+                finished = True
+            out_file.write(cipher.encrypt(chunk))
+
+        out_file.seek(0)
+        enc_data = out_file.read()
+        # the vault file stores hex, not raw ciphertext bytes
+        tmp_data = hexlify(enc_data)
+
+        return tmp_data
+
+
+    def decrypt(self, data, password, key_length=32):
+
+        """ Read encrypted data from in_file and write decrypted to out_file """
+
+        # http://stackoverflow.com/a/14989032
+
+        # strip vault-file line wrapping, then undo the hex encoding
+        # performed by encrypt()
+        data = b''.join(data.split(b'\n'))
+        data = unhexlify(data)
+
+        in_file = BytesIO(data)
+        in_file.seek(0)
+        out_file = BytesIO()
+
+        bs = AES.block_size
+        # skip the b'Salted__' marker and recover the salt
+        tmpsalt = in_file.read(bs)
+        salt = tmpsalt[len('Salted__'):]
+        key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
+        cipher = AES.new(key, AES.MODE_CBC, iv)
+        next_chunk = b''
+        finished = False
+
+        # decrypt one chunk ahead so padding can be stripped from the
+        # final chunk before it is written out
+        while not finished:
+            chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
+            if len(next_chunk) == 0:
+                # indexing bytes yields str on py2 (ord needed), int on py3
+                if PY2:
+                    padding_length = ord(chunk[-1])
+                else:
+                    padding_length = chunk[-1]
+
+                chunk = chunk[:-padding_length]
+                finished = True
+
+            out_file.write(chunk)
+            out_file.flush()
+
+        # reset the stream pointer to the beginning
+        out_file.seek(0)
+        out_data = out_file.read()
+        out_file.close()
+        new_data = to_unicode(out_data)
+
+        # split out sha and verify decryption
+        split_data = new_data.split("\n")
+        this_sha = split_data[0]
+        this_data = '\n'.join(split_data[1:])
+        test_sha = sha256(to_bytes(this_data)).hexdigest()
+
+        if this_sha != test_sha:
+            raise errors.AnsibleError("Decryption failed")
+
+        return this_data
+
+
+class VaultAES256(object):
+
+    """
+    Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
+    Keys are derived using PBKDF2
+    """
+
+    # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
+
+    def __init__(self):
+        # PBKDF2, Counter and SHA256 all require a recent pycrypto
+        if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
+            raise errors.AnsibleError(CRYPTO_UPGRADE)
+
+    def gen_key_initctr(self, password, salt):
+        """Derive (cipher_key, hmac_key, hex_iv) from password+salt via PBKDF2."""
+        # 16 for AES 128, 32 for AES256
+        keylength = 32
+
+        # match the size used for counter.new to avoid extra work
+        ivlength = 16
+
+        hash_function = SHA256
+
+        # make two keys and one iv
+        pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
+
+        # a single derivation yields both keys plus the iv back-to-back
+        derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
+                            count=10000, prf=pbkdf2_prf)
+
+        key1 = derivedkey[:keylength]
+        key2 = derivedkey[keylength:(keylength * 2)]
+        iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
+
+        return key1, key2, hexlify(iv)
+
+
+    def encrypt(self, data, password):
+        """Encrypt *data* under *password*.
+
+        Returns a hexlified message of the form salt \\n hmac \\n ciphertext
+        (each field itself hex-encoded).
+        """
+        salt = os.urandom(32)
+        key1, key2, iv = self.gen_key_initctr(password, salt)
+
+        # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
+        bs = AES.block_size
+        padding_length = (bs - len(data) % bs) or bs
+        data += padding_length * chr(padding_length)
+
+        # COUNTER.new PARAMETERS
+        # 1) nbits (integer) - Length of the counter, in bits.
+        # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
+
+        ctr = Counter.new(128, initial_value=int(iv, 16))
+
+        # AES.new PARAMETERS
+        # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
+        # 2) MODE_CTR, is the recommended mode
+        # 3) counter=<CounterObject>
+
+        cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
+
+        # ENCRYPT PADDED DATA
+        cryptedData = cipher.encrypt(data)
+
+        # COMBINE SALT, DIGEST AND DATA (HMAC over ciphertext: encrypt-then-MAC)
+        hmac = HMAC.new(key2, cryptedData, SHA256)
+        message = b''.join([hexlify(salt), b"\n", to_bytes(hmac.hexdigest()), b"\n", hexlify(cryptedData)])
+        message = hexlify(message)
+        return message
+
+    def decrypt(self, data, password):
+        """Verify the HMAC, then decrypt *data* with *password*.
+
+        Returns the unicode plaintext, or None when authentication fails.
+        """
+        # SPLIT SALT, DIGEST, AND DATA
+        data = b''.join(data.split(b"\n"))
+        data = unhexlify(data)
+        salt, cryptedHmac, cryptedData = data.split(b"\n", 2)
+        salt = unhexlify(salt)
+        cryptedData = unhexlify(cryptedData)
+
+        key1, key2, iv = self.gen_key_initctr(password, salt)
+
+        # EXIT EARLY IF DIGEST DOESN'T MATCH
+        hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
+        if not self.is_equal(cryptedHmac, to_bytes(hmacDecrypt.hexdigest())):
+            return None
+
+        # SET THE COUNTER AND THE CIPHER
+        ctr = Counter.new(128, initial_value=int(iv, 16))
+        cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
+
+        # DECRYPT PADDED DATA
+        decryptedData = cipher.decrypt(cryptedData)
+
+        # UNPAD DATA
+        # indexing bytes yields str on py2 (ord needed), int on py3
+        try:
+            padding_length = ord(decryptedData[-1])
+        except TypeError:
+            padding_length = decryptedData[-1]
+
+        decryptedData = decryptedData[:-padding_length]
+
+        return to_unicode(decryptedData)
+
+    def is_equal(self, a, b):
+        """
+        Comparing 2 byte arrays in constant time
+        to avoid timing attacks.
+
+        It would be nice if there was a library for this but
+        hey.
+        """
+        # http://codahale.com/a-lesson-in-timing-attacks/
+        # leaking the length here is acceptable: both inputs are
+        # fixed-size hex HMAC digests
+        if len(a) != len(b):
+            return False
+
+        result = 0
+        for x, y in zip(a, b):
+            if PY2:
+                result |= ord(x) ^ ord(y)
+            else:
+                result |= x ^ y
+        return result == 0
diff --git a/lib/ansible/parsing/yaml/__init__.py b/lib/ansible/parsing/yaml/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/parsing/yaml/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/parsing/yaml/constructor.py b/lib/ansible/parsing/yaml/constructor.py
new file mode 100644
index 0000000000..d1a2a01bc2
--- /dev/null
+++ b/lib/ansible/parsing/yaml/constructor.py
@@ -0,0 +1,91 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from yaml.constructor import Constructor
+from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
+
+class AnsibleConstructor(Constructor):
+    """YAML constructor producing Ansible* wrapper objects that carry
+    (source, line, column) position info for later error reporting."""
+
+    def __init__(self, file_name=None):
+        # file name to report as the data source when the YAML stream's
+        # own name (e.g. '<string>') is not useful
+        self._ansible_file_name = file_name
+        super(AnsibleConstructor, self).__init__()
+
+    def construct_yaml_map(self, node):
+        # two-step construction (yield the empty container first) is the
+        # PyYAML idiom that makes recursive/anchored structures work
+        data = AnsibleMapping()
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+        data.ansible_pos = self._node_position_info(node)
+
+    def construct_mapping(self, node, deep=False):
+        # NOTE(review): super(Constructor, self) skips Constructor's own
+        # construct_mapping and dispatches to its base class -- looks
+        # intentional, but confirm against PyYAML's class hierarchy
+        ret = AnsibleMapping(super(Constructor, self).construct_mapping(node, deep))
+        ret.ansible_pos = self._node_position_info(node)
+
+        return ret
+
+    def construct_yaml_str(self, node):
+        # Override the default string handling function
+        # to always return unicode objects
+        value = self.construct_scalar(node)
+        ret = AnsibleUnicode(value)
+
+        ret.ansible_pos = self._node_position_info(node)
+
+        return ret
+
+    def construct_yaml_seq(self, node):
+        # same two-step (yield-first) construction as construct_yaml_map
+        data = AnsibleSequence()
+        yield data
+        data.extend(self.construct_sequence(node))
+        data.ansible_pos = self._node_position_info(node)
+
+    def _node_position_info(self, node):
+        """Return a (datasource, line, column) triple for *node*."""
+        # the line number where the previous token has ended (plus empty lines)
+        # Add one so that the first line is line 1 rather than line 0
+        column = node.start_mark.column + 1
+        line = node.start_mark.line + 1
+
+        # in some cases, we may have pre-read the data and then
+        # passed it to the load() call for YAML, in which case we
+        # want to override the default datasource (which would be
+        # '<string>') to the actual filename we read in
+        datasource = self._ansible_file_name or node.start_mark.name
+
+        return (datasource, line, column)
+
+# register the overrides above for the standard YAML tags so that maps,
+# strings and sequences are constructed as Ansible* objects
+AnsibleConstructor.add_constructor(
+    u'tag:yaml.org,2002:map',
+    AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+    u'tag:yaml.org,2002:python/dict',
+    AnsibleConstructor.construct_yaml_map)
+
+AnsibleConstructor.add_constructor(
+    u'tag:yaml.org,2002:str',
+    AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+    u'tag:yaml.org,2002:python/unicode',
+    AnsibleConstructor.construct_yaml_str)
+
+AnsibleConstructor.add_constructor(
+    u'tag:yaml.org,2002:seq',
+    AnsibleConstructor.construct_yaml_seq)
diff --git a/lib/ansible/parsing/yaml/loader.py b/lib/ansible/parsing/yaml/loader.py
new file mode 100644
index 0000000000..e8547ff0d1
--- /dev/null
+++ b/lib/ansible/parsing/yaml/loader.py
@@ -0,0 +1,51 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# prefer libyaml's C parser when the extension is available; fall back
+# to the pure-python PyYAML components otherwise
+try:
+    from _yaml import CParser, CEmitter
+    HAVE_PYYAML_C = True
+except ImportError:
+    HAVE_PYYAML_C = False
+
+from yaml.resolver import Resolver
+
+from ansible.parsing.yaml.constructor import AnsibleConstructor
+
+if HAVE_PYYAML_C:
+    class AnsibleLoader(CParser, AnsibleConstructor, Resolver):
+        """YAML loader backed by libyaml's C parser; mixes in
+        AnsibleConstructor so loaded objects carry position info."""
+        def __init__(self, stream, file_name=None):
+            CParser.__init__(self, stream)
+            AnsibleConstructor.__init__(self, file_name=file_name)
+            Resolver.__init__(self)
+else:
+    from yaml.composer import Composer
+    from yaml.reader import Reader
+    from yaml.scanner import Scanner
+    from yaml.parser import Parser
+
+    class AnsibleLoader(Reader, Scanner, Parser, Composer, AnsibleConstructor, Resolver):
+        """Pure-python YAML loader; same interface as the C-backed
+        variant above."""
+        def __init__(self, stream, file_name=None):
+            Reader.__init__(self, stream)
+            Scanner.__init__(self)
+            Parser.__init__(self)
+            Composer.__init__(self)
+            AnsibleConstructor.__init__(self, file_name=file_name)
+            Resolver.__init__(self)
diff --git a/lib/ansible/parsing/yaml/objects.py b/lib/ansible/parsing/yaml/objects.py
new file mode 100644
index 0000000000..33ea1ad37e
--- /dev/null
+++ b/lib/ansible/parsing/yaml/objects.py
@@ -0,0 +1,65 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import text_type
+
+
+class AnsibleBaseYAMLObject(object):
+    '''
+    the base class used to sub-class python built-in objects
+    so that we can add attributes to them during yaml parsing
+
+    '''
+    # defaults until the constructor assigns ansible_pos
+    _data_source = None
+    _line_number = 0
+    _column_number = 0
+
+    def _get_ansible_position(self):
+        # (source, line, column) triple used in error messages
+        return (self._data_source, self._line_number, self._column_number)
+
+    def _set_ansible_position(self, obj):
+        # accept any 3-element iterable; reject everything else loudly
+        try:
+            (src, line, col) = obj
+        except (TypeError, ValueError):
+            raise AssertionError(
+                'ansible_pos can only be set with a tuple/list '
+                'of three values: source, line number, column number'
+            )
+        self._data_source = src
+        self._line_number = line
+        self._column_number = col
+
+    ansible_pos = property(_get_ansible_position, _set_ansible_position)
+
+
+# concrete position-carrying wrappers for the three YAML container types
+class AnsibleMapping(AnsibleBaseYAMLObject, dict):
+    ''' sub class for dictionaries '''
+    pass
+
+
+class AnsibleUnicode(AnsibleBaseYAMLObject, text_type):
+    ''' sub class for unicode objects '''
+    pass
+
+
+class AnsibleSequence(AnsibleBaseYAMLObject, list):
+    ''' sub class for lists '''
+    pass
diff --git a/lib/ansible/playbook/__init__.py b/lib/ansible/playbook/__init__.py
index 24ba2d3c6e..40e6638f23 100644
--- a/lib/ansible/playbook/__init__.py
+++ b/lib/ansible/playbook/__init__.py
@@ -15,860 +15,71 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import ansible.inventory
-import ansible.constants as C
-import ansible.runner
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.callbacks
-import ansible.cache
-import os
-import shlex
-import collections
-from play import Play
-import StringIO
-import pipes
-
-# the setup cache stores all variables about a host
-# gathered during the setup step, while the vars cache
-# holds all other variables about a host
-SETUP_CACHE = ansible.cache.FactCache()
-VARS_CACHE = collections.defaultdict(dict)
-RESERVED_TAGS = ['all','tagged','untagged','always']
-
-
-class PlayBook(object):
- '''
- runs an ansible playbook, given as a datastructure or YAML filename.
- A playbook is a deployment, config management, or automation based
- set of commands to run in series.
-
- multiple plays/tasks do not execute simultaneously, but tasks in each
- pattern do execute in parallel (according to the number of forks
- requested) among the hosts they address
- '''
-
- # *****************************************************
-
- def __init__(self,
- playbook = None,
- host_list = C.DEFAULT_HOST_LIST,
- module_path = None,
- forks = C.DEFAULT_FORKS,
- timeout = C.DEFAULT_TIMEOUT,
- remote_user = C.DEFAULT_REMOTE_USER,
- remote_pass = C.DEFAULT_REMOTE_PASS,
- remote_port = None,
- transport = C.DEFAULT_TRANSPORT,
- private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
- callbacks = None,
- runner_callbacks = None,
- stats = None,
- extra_vars = None,
- only_tags = None,
- skip_tags = None,
- subset = C.DEFAULT_SUBSET,
- inventory = None,
- check = False,
- diff = False,
- any_errors_fatal = False,
- vault_password = False,
- force_handlers = False,
- # privilege escalation
- become = C.DEFAULT_BECOME,
- become_method = C.DEFAULT_BECOME_METHOD,
- become_user = C.DEFAULT_BECOME_USER,
- become_pass = None,
- ):
-
- """
- playbook: path to a playbook file
- host_list: path to a file like /etc/ansible/hosts
- module_path: path to ansible modules, like /usr/share/ansible/
- forks: desired level of parallelism
- timeout: connection timeout
- remote_user: run as this user if not specified in a particular play
- remote_pass: use this remote password (for all plays) vs using SSH keys
- remote_port: default remote port to use if not specified with the host or play
- transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
- callbacks output callbacks for the playbook
- runner_callbacks: more callbacks, this time for the runner API
- stats: holds aggregrate data about events occurring to each host
- inventory: can be specified instead of host_list to use a pre-existing inventory object
- check: don't change anything, just try to detect some potential changes
- any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
- force_handlers: continue to notify and run handlers even if a task fails
- """
-
- self.SETUP_CACHE = SETUP_CACHE
- self.VARS_CACHE = VARS_CACHE
-
- arguments = []
- if playbook is None:
- arguments.append('playbook')
- if callbacks is None:
- arguments.append('callbacks')
- if runner_callbacks is None:
- arguments.append('runner_callbacks')
- if stats is None:
- arguments.append('stats')
- if arguments:
- raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
-
- if extra_vars is None:
- extra_vars = {}
- if only_tags is None:
- only_tags = [ 'all' ]
- if skip_tags is None:
- skip_tags = []
-
- self.check = check
- self.diff = diff
- self.module_path = module_path
- self.forks = forks
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.transport = transport
- self.callbacks = callbacks
- self.runner_callbacks = runner_callbacks
- self.stats = stats
- self.extra_vars = extra_vars
- self.global_vars = {}
- self.private_key_file = private_key_file
- self.only_tags = only_tags
- self.skip_tags = skip_tags
- self.any_errors_fatal = any_errors_fatal
- self.vault_password = vault_password
- self.force_handlers = force_handlers
-
- self.become = become
- self.become_method = become_method
- self.become_user = become_user
- self.become_pass = become_pass
-
- self.callbacks.playbook = self
- self.runner_callbacks.playbook = self
-
- if inventory is None:
- self.inventory = ansible.inventory.Inventory(host_list)
- self.inventory.subset(subset)
- else:
- self.inventory = inventory
-
- if self.module_path is not None:
- utils.plugins.module_finder.add_directory(self.module_path)
-
- self.basedir = os.path.dirname(playbook) or '.'
- utils.plugins.push_basedir(self.basedir)
-
- # let inventory know the playbook basedir so it can load more vars
- self.inventory.set_playbook_basedir(self.basedir)
-
- vars = extra_vars.copy()
- vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.inventory.basedir() is not None:
- vars['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- vars['inventory_file'] = self.inventory.src()
-
- self.filename = playbook
- (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
- ansible.callbacks.load_callback_plugins()
- ansible.callbacks.set_playbook(self.callbacks, self)
-
- self._ansible_version = utils.version_info(gitinfo=True)
-
- # *****************************************************
-
- def _get_playbook_vars(self, play_ds, existing_vars):
- '''
- Gets the vars specified with the play and blends them
- with any existing vars that have already been read in
- '''
- new_vars = existing_vars.copy()
- if 'vars' in play_ds:
- if isinstance(play_ds['vars'], dict):
- new_vars.update(play_ds['vars'])
- elif isinstance(play_ds['vars'], list):
- for v in play_ds['vars']:
- new_vars.update(v)
- return new_vars
-
- # *****************************************************
-
- def _get_include_info(self, play_ds, basedir, existing_vars={}):
- '''
- Gets any key=value pairs specified with the included file
- name and returns the merged vars along with the path
- '''
- new_vars = existing_vars.copy()
- tokens = split_args(play_ds.get('include', ''))
- for t in tokens[1:]:
- try:
- (k,v) = unquote(t).split("=", 1)
- new_vars[k] = template(basedir, v, new_vars)
- except ValueError, e:
- raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
-
- return (new_vars, unquote(tokens[0]))
-
- # *****************************************************
-
- def _get_playbook_vars_files(self, play_ds, existing_vars_files):
- new_vars_files = list(existing_vars_files)
- if 'vars_files' in play_ds:
- new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
- return new_vars_files
-
- # *****************************************************
-
- def _extend_play_vars(self, play, vars={}):
- '''
- Extends the given play's variables with the additional specified vars.
- '''
-
- if 'vars' not in play or not play['vars']:
- # someone left out or put an empty "vars:" entry in their playbook
- return vars.copy()
-
- play_vars = None
- if isinstance(play['vars'], dict):
- play_vars = play['vars'].copy()
- play_vars.update(vars)
- elif isinstance(play['vars'], list):
- # nobody should really do this, but handle vars: a=1 b=2
- play_vars = play['vars'][:]
- play_vars.extend([{k:v} for k,v in vars.iteritems()])
-
- return play_vars
-
- # *****************************************************
-
- def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
- '''
- run top level error checking on playbooks and allow them to include other playbooks.
- '''
-
- playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
- accumulated_plays = []
- play_basedirs = []
-
- if type(playbook_data) != list:
- raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
-
- basedir = os.path.dirname(path) or '.'
- utils.plugins.push_basedir(basedir)
- for play in playbook_data:
- if type(play) != dict:
- raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
-
- if 'include' in play:
- # a playbook (list of plays) decided to include some other list of plays
- # from another file. The result is a flat list of plays in the end.
-
- play_vars = self._get_playbook_vars(play, vars)
- play_vars_files = self._get_playbook_vars_files(play, vars_files)
- inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
- play_vars.update(inc_vars)
-
- included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
- (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
- for p in plays:
- # support for parameterized play includes works by passing
- # those variables along to the subservient play
- p['vars'] = self._extend_play_vars(p, play_vars)
- # now add in the vars_files
- p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
-
- accumulated_plays.extend(plays)
- play_basedirs.extend(basedirs)
-
- else:
-
- # this is a normal (non-included play)
- accumulated_plays.append(play)
- play_basedirs.append(basedir)
-
- return (accumulated_plays, play_basedirs)
-
- # *****************************************************
-
- def run(self):
- ''' run all patterns in the playbook '''
- plays = []
- matched_tags_all = set()
- unmatched_tags_all = set()
-
- # loop through all patterns and run them
- self.callbacks.on_start()
- for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
- assert play is not None
-
- matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
-
- matched_tags_all = matched_tags_all | matched_tags
- unmatched_tags_all = unmatched_tags_all | unmatched_tags
-
- # Remove tasks we wish to skip
- matched_tags = matched_tags - set(self.skip_tags)
-
- # if we have matched_tags, the play must be run.
- # if the play contains no tasks, assume we just want to gather facts
- # in this case there are actually 3 meta tasks (handler flushes) not 0
- # tasks, so that's why there's a check against 3
- if (len(matched_tags) > 0 or len(play.tasks()) == 3):
- plays.append(play)
-
- # if the playbook is invoked with --tags or --skip-tags that don't
- # exist at all in the playbooks then we need to raise an error so that
- # the user can correct the arguments.
- unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
- (matched_tags_all | unmatched_tags_all))
-
- for t in RESERVED_TAGS:
- unknown_tags.discard(t)
-
- if len(unknown_tags) > 0:
- for t in RESERVED_TAGS:
- unmatched_tags_all.discard(t)
- msg = 'tag(s) not found in playbook: %s. possible values: %s'
- unknown = ','.join(sorted(unknown_tags))
- unmatched = ','.join(sorted(unmatched_tags_all))
- raise errors.AnsibleError(msg % (unknown, unmatched))
-
- for play in plays:
- ansible.callbacks.set_play(self.callbacks, play)
- ansible.callbacks.set_play(self.runner_callbacks, play)
- if not self._run_play(play):
- break
-
- ansible.callbacks.set_play(self.callbacks, None)
- ansible.callbacks.set_play(self.runner_callbacks, None)
-
- # summarize the results
- results = {}
- for host in self.stats.processed.keys():
- results[host] = self.stats.summarize(host)
- return results
-
- # *****************************************************
-
- def _async_poll(self, poller, async_seconds, async_poll_interval):
- ''' launch an async job, if poll_interval is set, wait for completion '''
-
- results = poller.wait(async_seconds, async_poll_interval)
-
- # mark any hosts that are still listed as started as failed
- # since these likely got killed by async_wrapper
- for host in poller.hosts_to_poll:
- reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
- self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
- results['contacted'][host] = reason
-
- return results
-
- # *****************************************************
-
- def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
- ''' returns a list of hosts that haven't failed and aren't dark '''
-
- return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
-
- # *****************************************************
-
- def _run_task_internal(self, task, include_failed=False):
- ''' run a particular module step in a playbook '''
-
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
- self.inventory.restrict_to(hosts)
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
- runner = ansible.runner.Runner(
- pattern=task.play.hosts,
- inventory=self.inventory,
- module_name=task.module_name,
- module_args=task.module_args,
- forks=self.forks,
- remote_pass=self.remote_pass,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=task.remote_user,
- remote_port=task.play.remote_port,
- module_vars=task.module_vars,
- play_vars=task.play_vars,
- play_file_vars=task.play_file_vars,
- role_vars=task.role_vars,
- role_params=task.role_params,
- default_vars=task.default_vars,
- extra_vars=self.extra_vars,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- basedir=task.play.basedir,
- conditional=task.when,
- callbacks=self.runner_callbacks,
- transport=task.transport,
- is_playbook=True,
- check=self.check,
- diff=self.diff,
- environment=task.environment,
- complex_args=task.args,
- accelerate=task.play.accelerate,
- accelerate_port=task.play.accelerate_port,
- accelerate_ipv6=task.play.accelerate_ipv6,
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- vault_pass = self.vault_password,
- run_hosts=hosts,
- no_log=task.no_log,
- run_once=task.run_once,
- become=task.become,
- become_method=task.become_method,
- become_user=task.become_user,
- become_pass=task.become_pass,
- )
-
- runner.module_vars.update({'play_hosts': hosts})
- runner.module_vars.update({'ansible_version': self._ansible_version})
-
- if task.async_seconds == 0:
- results = runner.run()
- else:
- results, poller = runner.run_async(task.async_seconds)
- self.stats.compute(results)
- if task.async_poll_interval > 0:
- # if not polling, playbook requested fire and forget, so don't poll
- results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
- else:
- for (host, res) in results.get('contacted', {}).iteritems():
- self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
-
- contacted = results.get('contacted',{})
- dark = results.get('dark', {})
-
- self.inventory.lift_restriction()
-
- if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
- return None
-
- return results
-
- # *****************************************************
-
- def _run_task(self, play, task, is_handler):
- ''' run a single task in the playbook and recursively run any subtasks. '''
-
- ansible.callbacks.set_task(self.callbacks, task)
- ansible.callbacks.set_task(self.runner_callbacks, task)
-
- if task.role_name:
- name = '%s | %s' % (task.role_name, task.name)
- else:
- name = task.name
-
- try:
- # v1 HACK: we don't have enough information to template many names
- # at this point. Rather than making this work for all cases in
- # v1, just make this degrade gracefully. Will fix in v2
- name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
- except:
- pass
-
- self.callbacks.on_task_start(name, is_handler)
- if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return True
-
- # template ignore_errors
- # TODO: Is this needed here? cond is templated again in
- # check_conditional after some more manipulations.
- # TODO: we don't have enough information here to template cond either
- # (see note on templating name above)
- cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
- task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
-
- # load up an appropriate ansible runner to run the task in parallel
- include_failed = is_handler and play.force_handlers
- results = self._run_task_internal(task, include_failed=include_failed)
-
- # if no hosts are matched, carry on
- hosts_remaining = True
- if results is None:
- hosts_remaining = False
- results = {}
-
- contacted = results.get('contacted', {})
- self.stats.compute(results, ignore_errors=task.ignore_errors)
-
- def _register_play_vars(host, result):
- # when 'register' is used, persist the result in the vars cache
- # rather than the setup cache - vars should be transient between
- # playbook executions
- if 'stdout' in result and 'stdout_lines' not in result:
- result['stdout_lines'] = result['stdout'].splitlines()
- utils.update_hash(self.VARS_CACHE, host, {task.register: result})
-
- def _save_play_facts(host, facts):
- # saves play facts in SETUP_CACHE, unless the module executed was
- # set_fact, in which case we add them to the VARS_CACHE
- if task.module_name in ('set_fact', 'include_vars'):
- utils.update_hash(self.VARS_CACHE, host, facts)
- else:
- utils.update_hash(self.SETUP_CACHE, host, facts)
-
- # add facts to the global setup cache
- for host, result in contacted.iteritems():
- if 'results' in result:
- # task ran with_ lookup plugin, so facts are encapsulated in
- # multiple list items in the results key
- for res in result['results']:
- if type(res) == dict:
- facts = res.get('ansible_facts', {})
- _save_play_facts(host, facts)
- else:
- # when facts are returned, persist them in the setup cache
- facts = result.get('ansible_facts', {})
- _save_play_facts(host, facts)
-
- # if requested, save the result into the registered variable name
- if task.register:
- _register_play_vars(host, result)
-
- # also have to register some failed, but ignored, tasks
- if task.ignore_errors and task.register:
- failed = results.get('failed', {})
- for host, result in failed.iteritems():
- _register_play_vars(host, result)
-
- # flag which notify handlers need to be run
- if len(task.notify) > 0:
- for host, results in results.get('contacted',{}).iteritems():
- if results.get('changed', False):
- for handler_name in task.notify:
- self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return hosts_remaining
-
- # *****************************************************
-
- def _flag_handler(self, play, handler_name, host):
- '''
- if a task has any notify elements, flag handlers for run
- at end of execution cycle for hosts that have indicated
- changes have been made
- '''
-
- found = False
- for x in play.handlers():
- if handler_name == template(play.basedir, x.name, x.module_vars):
- found = True
- self.callbacks.on_notify(host, x.name)
- x.notified_by.append(host)
- if not found:
- raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
-
- # *****************************************************
-
- def _do_setup_step(self, play):
- ''' get facts from the remote system '''
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
- host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
- if len(host_list) == 0:
- return {}
- elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
- return {}
-
- self.callbacks.on_setup()
- self.inventory.restrict_to(host_list)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
-
- # push any variables down to the system
- setup_results = ansible.runner.Runner(
- basedir=self.basedir,
- pattern=play.hosts,
- module_name='setup',
- module_args={},
- inventory=self.inventory,
- forks=self.forks,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=play.remote_user,
- remote_pass=self.remote_pass,
- remote_port=play.remote_port,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- callbacks=self.runner_callbacks,
- become=play.become,
- become_method=play.become_method,
- become_user=play.become_user,
- become_pass=self.become_pass,
- vault_pass=self.vault_password,
- transport=play.transport,
- is_playbook=True,
- module_vars=play.vars,
- play_vars=play.vars,
- play_file_vars=play.vars_file_vars,
- role_vars=play.role_vars,
- default_vars=play.default_vars,
- check=self.check,
- diff=self.diff,
- accelerate=play.accelerate,
- accelerate_port=play.accelerate_port,
- ).run()
- self.stats.compute(setup_results, setup=True)
-
- self.inventory.lift_restriction()
-
- # now for each result, load into the setup cache so we can
- # let runner template out future commands
- setup_ok = setup_results.get('contacted', {})
- for (host, result) in setup_ok.iteritems():
- utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
- utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
- return setup_results
-
- # *****************************************************
-
-
- def generate_retry_inventory(self, replay_hosts):
- '''
- called by /usr/bin/ansible when a playbook run fails. It generates an inventory
- that allows re-running on ONLY the failed hosts. This may duplicate some
- variable information in group_vars/host_vars but that is ok, and expected.
- '''
-
- buf = StringIO.StringIO()
- for x in replay_hosts:
- buf.write("%s\n" % x)
- basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
- filename = "%s.retry" % os.path.basename(self.filename)
- filename = filename.replace(".yml","")
- filename = os.path.join(basedir, filename)
-
- try:
- if not os.path.exists(basedir):
- os.makedirs(basedir)
-
- fd = open(filename, 'w')
- fd.write(buf.getvalue())
- fd.close()
- except:
- ansible.callbacks.display(
- "\nERROR: could not create retry file. Check the value of \n"
- + "the configuration variable 'retry_files_save_path' or set \n"
- + "'retry_files_enabled' to False to avoid this message.\n",
- color='red'
- )
- return None
-
- return filename
-
- # *****************************************************
- def tasks_to_run_in_play(self, play):
-
- tasks = []
-
- for task in play.tasks():
- # only run the task if the requested tags match or has 'always' tag
- u = set(['untagged'])
- task_set = set(task.tags)
-
- if 'always' in task.tags:
- should_run = True
- else:
- if 'all' in self.only_tags:
- should_run = True
- else:
- should_run = False
- if 'tagged' in self.only_tags:
- if task_set != u:
- should_run = True
- elif 'untagged' in self.only_tags:
- if task_set == u:
- should_run = True
- else:
- if task_set.intersection(self.only_tags):
- should_run = True
-
- # Check for tags that we need to skip
- if 'all' in self.skip_tags:
- should_run = False
- else:
- if 'tagged' in self.skip_tags:
- if task_set != u:
- should_run = False
- elif 'untagged' in self.skip_tags:
- if task_set == u:
- should_run = False
- else:
- if should_run:
- if task_set.intersection(self.skip_tags):
- should_run = False
-
- if should_run:
- tasks.append(task)
+import os
- return tasks
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.parsing import DataLoader
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.play import Play
+from ansible.playbook.playbook_include import PlaybookInclude
+from ansible.plugins import push_basedir
- # *****************************************************
- def _run_play(self, play):
- ''' run a list of tasks for a given pattern, in order '''
- self.callbacks.on_play_start(play.name)
- # Get the hosts for this play
- play._play_hosts = self.inventory.list_hosts(play.hosts)
- # if no hosts matches this play, drop out
- if not play._play_hosts:
- self.callbacks.on_no_hosts_matched()
- return True
+__all__ = ['Playbook']
- # get facts from system
- self._do_setup_step(play)
- # now with that data, handle contentional variable file imports!
- all_hosts = self._trim_unavailable_hosts(play._play_hosts)
- play.update_vars_files(all_hosts, vault_password=self.vault_password)
- hosts_count = len(all_hosts)
+class Playbook:
- if play.serial.endswith("%"):
+ def __init__(self, loader):
+ # Entries in the datastructure of a playbook may
+ # be either a play or an include statement
+ self._entries = []
+ self._basedir = os.getcwd()
+ self._loader = loader
- # This is a percentage, so calculate it based on the
- # number of hosts
- serial_pct = int(play.serial.replace("%",""))
- serial = int((serial_pct/100.0) * len(all_hosts))
+ @staticmethod
+ def load(file_name, variable_manager=None, loader=None):
+ pb = Playbook(loader=loader)
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+ return pb
- # Ensure that no matter how small the percentage, serial
- # can never fall below 1, so that things actually happen
- serial = max(serial, 1)
- else:
- serial = int(play.serial)
+ def _load_playbook_data(self, file_name, variable_manager):
- serialized_batch = []
- if serial <= 0:
- serialized_batch = [all_hosts]
+ if os.path.isabs(file_name):
+ self._basedir = os.path.dirname(file_name)
else:
- # do N forks all the way through before moving to next
- while len(all_hosts) > 0:
- play_hosts = []
- for x in range(serial):
- if len(all_hosts) > 0:
- play_hosts.append(all_hosts.pop(0))
- serialized_batch.append(play_hosts)
-
- task_errors = False
- for on_hosts in serialized_batch:
-
- # restrict the play to just the hosts we have in our on_hosts block that are
- # available.
- play._play_hosts = self._trim_unavailable_hosts(on_hosts)
- self.inventory.also_restrict_to(on_hosts)
-
- for task in self.tasks_to_run_in_play(play):
-
- if task.meta is not None:
- # meta tasks can force handlers to run mid-play
- if task.meta == 'flush_handlers':
- self.run_handlers(play)
-
- # skip calling the handler till the play is finished
- continue
-
- if not self._run_task(play, task, False):
- # whether no hosts matched is fatal or not depends if it was on the initial step.
- # if we got exactly no hosts on the first step (setup!) then the host group
- # just didn't match anything and that's ok
- return False
+ self._basedir = os.path.normpath(os.path.join(self._basedir, os.path.dirname(file_name)))
- # Get a new list of what hosts are left as available, the ones that
- # did not go fail/dark during the task
- host_list = self._trim_unavailable_hosts(play._play_hosts)
+ # set the loaders basedir
+ self._loader.set_basedir(self._basedir)
- # Set max_fail_pct to 0, So if any hosts fails, bail out
- if task.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
+ # also add the basedir to the list of module directories
+ push_basedir(self._basedir)
- # If threshold for max nodes failed is exceeded, bail out.
- if play.serial > 0:
- # if serial is set, we need to shorten the size of host_count
- play_count = len(play._play_hosts)
- if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
- host_list = None
- else:
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
+ ds = self._loader.load_from_file(os.path.basename(file_name))
+ if not isinstance(ds, list):
+ raise AnsibleParserError("playbooks must be a list of plays", obj=ds)
- # if no hosts remain, drop out
- if not host_list:
- if play.force_handlers:
- task_errors = True
- break
- else:
- self.callbacks.on_no_hosts_remaining()
- return False
+ # Parse the playbook entries. For plays, we simply parse them
+ # using the Play() object, and includes are parsed using the
+ # PlaybookInclude() object
+ for entry in ds:
+ if not isinstance(entry, dict):
+ raise AnsibleParserError("playbook entries must be either a valid play or an include statement", obj=entry)
- # lift restrictions after each play finishes
- self.inventory.lift_also_restriction()
-
- if task_errors and not play.force_handlers:
- # if there were failed tasks and handler execution
- # is not forced, quit the play with an error
- return False
+ if 'include' in entry:
+ pb = PlaybookInclude.load(entry, basedir=self._basedir, variable_manager=variable_manager, loader=self._loader)
+ self._entries.extend(pb._entries)
else:
- # no errors, go ahead and execute all handlers
- if not self.run_handlers(play):
- return False
-
- return True
-
-
- def run_handlers(self, play):
- on_hosts = play._play_hosts
- hosts_count = len(on_hosts)
- for task in play.tasks():
- if task.meta is not None:
-
- fired_names = {}
- for handler in play.handlers():
- if len(handler.notified_by) > 0:
- self.inventory.restrict_to(handler.notified_by)
-
- # Resolve the variables first
- handler_name = template(play.basedir, handler.name, handler.module_vars)
- if handler_name not in fired_names:
- self._run_task(play, handler, True)
- # prevent duplicate handler includes from running more than once
- fired_names[handler_name] = 1
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
- if handler.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
- if not host_list and not play.force_handlers:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- self.inventory.lift_restriction()
- new_list = handler.notified_by[:]
- for host in handler.notified_by:
- if host in on_hosts:
- while host in new_list:
- new_list.remove(host)
- handler.notified_by = new_list
+ entry_obj = Play.load(entry, variable_manager=variable_manager, loader=self._loader)
+ self._entries.append(entry_obj)
- continue
+ def get_loader(self):
+ return self._loader
- return True
+ def get_plays(self):
+ return self._entries[:]
diff --git a/lib/ansible/errors.py b/lib/ansible/playbook/attribute.py
index 65edbc294a..8a727a0193 100644
--- a/lib/ansible/errors.py
+++ b/lib/ansible/playbook/attribute.py
@@ -15,21 +15,18 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-class AnsibleError(Exception):
- ''' The base Ansible exception from which all others should subclass '''
- pass
-
-class AnsibleFileNotFound(AnsibleError):
- pass
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class AnsibleConnectionFailed(AnsibleError):
- pass
+class Attribute:
-class AnsibleYAMLValidationFailed(AnsibleError):
- pass
+ def __init__(self, isa=None, private=False, default=None, required=False):
-class AnsibleUndefinedVariable(AnsibleError):
- pass
+ self.isa = isa
+ self.private = private
+ self.default = default
+ self.required = required
-class AnsibleFilterError(AnsibleError):
+class FieldAttribute(Attribute):
pass
diff --git a/lib/ansible/playbook/base.py b/lib/ansible/playbook/base.py
new file mode 100644
index 0000000000..ecd217c1e8
--- /dev/null
+++ b/lib/ansible/playbook/base.py
@@ -0,0 +1,345 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import uuid
+
+from functools import partial
+from inspect import getmembers
+from io import FileIO
+
+from six import iteritems, string_types
+
+from jinja2.exceptions import UndefinedError
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing import DataLoader
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
+
+from ansible.utils.debug import debug
+
+from ansible.template import template
+
+class Base:
+
+ # connection/transport
+ _connection = FieldAttribute(isa='string')
+ _port = FieldAttribute(isa='int')
+ _remote_user = FieldAttribute(isa='string')
+
+ # vars and flags
+ _vars = FieldAttribute(isa='dict', default=dict())
+ _environment = FieldAttribute(isa='dict', default=dict())
+ _no_log = FieldAttribute(isa='bool', default=False)
+
+ def __init__(self):
+
+ # initialize the data loader and variable manager, which will be provided
+ # later when the object is actually loaded
+ self._loader = None
+ self._variable_manager = None
+
+ # every object gets a random uuid:
+ self._uuid = uuid.uuid4()
+
+ # and initialize the base attributes
+ self._initialize_base_attributes()
+
+    # The following three functions are used to programmatically define data
+ # descriptors (aka properties) for the Attributes of all of the playbook
+ # objects (tasks, blocks, plays, etc).
+ #
+ # The function signature is a little strange because of how we define
+ # them. We use partial to give each method the name of the Attribute that
+ # it is for. Since partial prefills the positional arguments at the
+ # beginning of the function we end up with the first positional argument
+ # being allocated to the name instead of to the class instance (self) as
+ # normal. To deal with that we make the property name field the first
+ # positional argument and self the second arg.
+ #
+ # Because these methods are defined inside of the class, they get bound to
+ # the instance when the object is created. After we run partial on them
+ # and put the result back into the class as a property, they get bound
+ # a second time. This leads to self being placed in the arguments twice.
+ # To work around that, we mark the functions as @staticmethod so that the
+ # first binding to the instance doesn't happen.
+
+ @staticmethod
+ def _generic_g(prop_name, self):
+ method = "_get_attr_%s" % prop_name
+ if method in dir(self):
+ return getattr(self, method)()
+
+ return self._attributes[prop_name]
+
+ @staticmethod
+ def _generic_s(prop_name, self, value):
+ self._attributes[prop_name] = value
+
+ @staticmethod
+ def _generic_d(prop_name, self):
+ del self._attributes[prop_name]
+
+ def _get_base_attributes(self):
+ '''
+ Returns the list of attributes for this class (or any subclass thereof).
+ If the attribute name starts with an underscore, it is removed
+ '''
+ base_attributes = dict()
+ for (name, value) in getmembers(self.__class__):
+ if isinstance(value, Attribute):
+ if name.startswith('_'):
+ name = name[1:]
+ base_attributes[name] = value
+ return base_attributes
+
+ def _initialize_base_attributes(self):
+ # each class knows attributes set upon it, see Task.py for example
+ self._attributes = dict()
+
+ for (name, value) in self._get_base_attributes().items():
+ getter = partial(self._generic_g, name)
+ setter = partial(self._generic_s, name)
+ deleter = partial(self._generic_d, name)
+
+ # Place the property into the class so that cls.name is the
+ # property functions.
+ setattr(Base, name, property(getter, setter, deleter))
+
+ # Place the value into the instance so that the property can
+        # process and hold that value.
+ setattr(self, name, value.default)
+
+ def preprocess_data(self, ds):
+ ''' infrequently used method to do some pre-processing of legacy terms '''
+
+ for base_class in self.__class__.mro():
+ method = getattr(self, "_preprocess_data_%s" % base_class.__name__.lower(), None)
+ if method:
+ return method(ds)
+ return ds
+
+ def load_data(self, ds, variable_manager=None, loader=None):
+ ''' walk the input datastructure and assign any values '''
+
+ assert ds is not None
+
+ # the variable manager class is used to manage and merge variables
+ # down to a single dictionary for reference in templating, etc.
+ self._variable_manager = variable_manager
+
+ # the data loader class is used to parse data from strings and files
+ if loader is not None:
+ self._loader = loader
+ else:
+ self._loader = DataLoader()
+
+ if isinstance(ds, string_types) or isinstance(ds, FileIO):
+ ds = self._loader.load(ds)
+
+ # call the preprocess_data() function to massage the data into
+ # something we can more easily parse, and then call the validation
+ # function on it to ensure there are no incorrect key values
+ ds = self.preprocess_data(ds)
+ self._validate_attributes(ds)
+
+ # Walk all attributes in the class.
+ #
+ # FIXME: we currently don't do anything with private attributes but
+ # may later decide to filter them out of 'ds' here.
+
+ for name in self._get_base_attributes():
+ # copy the value over unless a _load_field method is defined
+ if name in ds:
+ method = getattr(self, '_load_%s' % name, None)
+ if method:
+ self._attributes[name] = method(name, ds[name])
+ else:
+ self._attributes[name] = ds[name]
+
+ # run early, non-critical validation
+ self.validate()
+
+ # cache the datastructure internally
+ setattr(self, '_ds', ds)
+
+ # return the constructed object
+ return self
+
+ def get_ds(self):
+ try:
+ return getattr(self, '_ds')
+ except AttributeError:
+ return None
+
+ def get_loader(self):
+ return self._loader
+
+ def get_variable_manager(self):
+ return self._variable_manager
+
+ def _validate_attributes(self, ds):
+ '''
+ Ensures that there are no keys in the datastructure which do
+ not map to attributes for this object.
+ '''
+
+ valid_attrs = frozenset(name for name in self._get_base_attributes())
+ for key in ds:
+ if key not in valid_attrs:
+ raise AnsibleParserError("'%s' is not a valid attribute for a %s" % (key, self.__class__.__name__), obj=ds)
+
+ def validate(self, all_vars=dict()):
+ ''' validation that is done at parse time, not load time '''
+
+ # walk all fields in the object
+ for (name, attribute) in iteritems(self._get_base_attributes()):
+
+ # run validator only if present
+ method = getattr(self, '_validate_%s' % name, None)
+ if method:
+ method(attribute, name, getattr(self, name))
+
+ def copy(self):
+ '''
+ Create a copy of this object and return it.
+ '''
+
+ new_me = self.__class__()
+
+ for name in self._get_base_attributes():
+ setattr(new_me, name, getattr(self, name))
+
+ new_me._loader = self._loader
+ new_me._variable_manager = self._variable_manager
+
+ return new_me
+
+ def post_validate(self, templar):
+ '''
+ we can't tell that everything is of the right type until we have
+ all the variables. Run basic types (from isa) as well as
+ any _post_validate_<foo> functions.
+ '''
+
+ basedir = None
+ if self._loader is not None:
+ basedir = self._loader.get_basedir()
+
+ for (name, attribute) in iteritems(self._get_base_attributes()):
+
+ if getattr(self, name) is None:
+ if not attribute.required:
+ continue
+ else:
+ raise AnsibleParserError("the field '%s' is required but was not set" % name)
+
+ try:
+ # if the attribute contains a variable, template it now
+ value = templar.template(getattr(self, name))
+
+ # run the post-validator if present
+ method = getattr(self, '_post_validate_%s' % name, None)
+ if method:
+ value = method(attribute, value, all_vars, templar._fail_on_undefined_errors)
+ else:
+ # otherwise, just make sure the attribute is of the type it should be
+ if attribute.isa == 'string':
+ value = unicode(value)
+ elif attribute.isa == 'int':
+ value = int(value)
+ elif attribute.isa == 'bool':
+ value = boolean(value)
+ elif attribute.isa == 'list':
+ if not isinstance(value, list):
+ value = [ value ]
+ elif attribute.isa == 'dict' and not isinstance(value, dict):
+ raise TypeError()
+
+ # and assign the massaged value back to the attribute field
+ setattr(self, name, value)
+
+ except (TypeError, ValueError) as e:
+ raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s. Error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds())
+ except UndefinedError as e:
+ if templar._fail_on_undefined_errors:
+ raise AnsibleParserError("the field '%s' has an invalid value, which appears to include a variable that is undefined. The error was: %s" % (name,e), obj=self.get_ds())
+
+ def serialize(self):
+ '''
+ Serializes the object derived from the base object into
+ a dictionary of values. This only serializes the field
+ attributes for the object, so this may need to be overridden
+ for any classes which wish to add additional items not stored
+ as field attributes.
+ '''
+
+ repr = dict()
+
+ for name in self._get_base_attributes():
+ repr[name] = getattr(self, name)
+
+ # serialize the uuid field
+ repr['uuid'] = getattr(self, '_uuid')
+
+ return repr
+
+ def deserialize(self, data):
+ '''
+ Given a dictionary of values, load up the field attributes for
+ this object. As with serialize(), if there are any non-field
+ attribute data members, this method will need to be overridden
+ and extended.
+ '''
+
+ assert isinstance(data, dict)
+
+ for (name, attribute) in iteritems(self._get_base_attributes()):
+ if name in data:
+ setattr(self, name, data[name])
+ else:
+ setattr(self, name, attribute.default)
+
+ # restore the UUID field
+ setattr(self, '_uuid', data.get('uuid'))
+
+ def _extend_value(self, value, new_value):
+ '''
+ Will extend the value given with new_value (and will turn both
+ into lists if they are not so already). The values are run through
+ a set to remove duplicate values.
+ '''
+
+ if not isinstance(value, list):
+ value = [ value ]
+ if not isinstance(new_value, list):
+ new_value = [ new_value ]
+
+ return list(set(value + new_value))
+
+ def __getstate__(self):
+ return self.serialize()
+
+ def __setstate__(self, data):
+ self.__init__()
+ self.deserialize(data)
+
diff --git a/lib/ansible/playbook/become.py b/lib/ansible/playbook/become.py
new file mode 100644
index 0000000000..daa8c80ba9
--- /dev/null
+++ b/lib/ansible/playbook/become.py
@@ -0,0 +1,141 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.attribute import Attribute, FieldAttribute
+#from ansible.utils.display import deprecated
+
+class Become:
+
+    # Privilege escalation
+ _become = FieldAttribute(isa='bool', default=False)
+ _become_method = FieldAttribute(isa='string')
+ _become_user = FieldAttribute(isa='string')
+ _become_pass = FieldAttribute(isa='string')
+
+ def __init__(self):
+ return super(Become, self).__init__()
+
+ def _detect_privilege_escalation_conflict(self, ds):
+
+ # Fail out if user specifies conflicting privilege escalations
+ has_become = 'become' in ds or 'become_user'in ds
+ has_sudo = 'sudo' in ds or 'sudo_user' in ds
+ has_su = 'su' in ds or 'su_user' in ds
+
+ if has_become:
+ msg = 'The become params ("become", "become_user") and'
+ if has_sudo:
+ raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
+ elif has_su:
+ raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
+ elif has_sudo and has_su:
+ raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
+
+ def _preprocess_data_become(self, ds):
+ """Preprocess the playbook data for become attributes
+
+ This is called from the Base object's preprocess_data() method which
+ in turn is called pretty much anytime any sort of playbook object
+ (plays, tasks, blocks, etc) are created.
+ """
+
+ self._detect_privilege_escalation_conflict(ds)
+
+ # Setting user implies setting become/sudo/su to true
+ if 'become_user' in ds and not ds.get('become', False):
+ ds['become'] = True
+
+ # Privilege escalation, backwards compatibility for sudo/su
+ if 'sudo' in ds or 'sudo_user' in ds:
+ ds['become_method'] = 'sudo'
+ if 'sudo' in ds:
+ ds['become'] = ds['sudo']
+ del ds['sudo']
+ else:
+ ds['become'] = True
+ if 'sudo_user' in ds:
+ ds['become_user'] = ds['sudo_user']
+ del ds['sudo_user']
+
+ #deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
+
+ elif 'su' in ds or 'su_user' in ds:
+ ds['become_method'] = 'su'
+ if 'su' in ds:
+ ds['become'] = ds['su']
+ del ds['su']
+ else:
+ ds['become'] = True
+ if 'su_user' in ds:
+ ds['become_user'] = ds['su_user']
+ del ds['su_user']
+
+ #deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
+
+ # if we are becoming someone else, but some fields are unset,
+ # make sure they're initialized to the default config values
+ if ds.get('become', False):
+ if ds.get('become_method', None) is None:
+ ds['become_method'] = C.DEFAULT_BECOME_METHOD
+ if ds.get('become_user', None) is None:
+ ds['become_user'] = C.DEFAULT_BECOME_USER
+
+ return ds
+
+ def _get_attr_become(self):
+ '''
+ Override for the 'become' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become')
+ else:
+ return self._attributes['become']
+
+ def _get_attr_become_method(self):
+ '''
+ Override for the 'become_method' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_method')
+ else:
+ return self._attributes['become_method']
+
+ def _get_attr_become_user(self):
+ '''
+ Override for the 'become_user' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_user')
+ else:
+ return self._attributes['become_user']
+
+ def _get_attr_become_password(self):
+ '''
+ Override for the 'become_password' getattr fetcher, used from Base.
+ '''
+ if hasattr(self, '_get_parent_attribute'):
+ return self._get_parent_attribute('become_password')
+ else:
+ return self._attributes['become_password']
+
+
diff --git a/lib/ansible/playbook/block.py b/lib/ansible/playbook/block.py
new file mode 100644
index 0000000000..e6ad8e5745
--- /dev/null
+++ b/lib/ansible/playbook/block.py
@@ -0,0 +1,319 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.helpers import load_list_of_tasks
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+
+class Block(Base, Become, Conditional, Taggable):
+
+    '''
+    Groups tasks into 'block', 'rescue', and 'always' lists.
+    NOTE(review): judging from the attribute names and the exception
+    analogy in the comment below, this mirrors try/except/finally
+    semantics; the executor behavior is not visible in this file.
+    '''
+
+    _block = FieldAttribute(isa='list', default=[])
+    _rescue = FieldAttribute(isa='list', default=[])
+    _always = FieldAttribute(isa='list', default=[])
+
+    # for future consideration? this would be functionally
+    # similar to the 'else' clause for exceptions
+    #_otherwise = FieldAttribute(isa='list')
+
+    def __init__(self, play=None, parent_block=None, role=None, task_include=None, use_handlers=False):
+        # parent containers; consulted later for variable and attribute
+        # inheritance (see get_vars/_get_parent_attribute)
+        self._play = play
+        self._role = role
+        self._task_include = task_include
+        self._parent_block = parent_block
+        # when True, contained tasks are loaded as Handler objects
+        # instead of plain Tasks (see _load_block)
+        self._use_handlers = use_handlers
+        # chain of role dependencies, used by evaluate_conditional,
+        # set_loader and _get_parent_attribute
+        self._dep_chain = []
+
+        super(Block, self).__init__()
+
+ def get_vars(self):
+ '''
+ Blocks do not store variables directly, however they may be a member
+ of a role or task include which does, so return those if present.
+ '''
+
+ all_vars = dict()
+
+ if self._role:
+ all_vars.update(self._role.get_vars())
+ if self._parent_block:
+ all_vars.update(self._parent_block.get_vars())
+ if self._task_include:
+ all_vars.update(self._task_include.get_vars())
+
+ all_vars.update(self.vars)
+ return all_vars
+
+ @staticmethod
+ def load(data, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ b = Block(play=play, parent_block=parent_block, role=role, task_include=task_include, use_handlers=use_handlers)
+ return b.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def preprocess_data(self, ds):
+ '''
+ If a simple task is given, an implicit block for that single task
+ is created, which goes in the main portion of the block
+ '''
+
+ is_block = False
+ for attr in ('block', 'rescue', 'always'):
+ if attr in ds:
+ is_block = True
+ break
+
+ if not is_block:
+ if isinstance(ds, list):
+ return super(Block, self).preprocess_data(dict(block=ds))
+ else:
+ return super(Block, self).preprocess_data(dict(block=[ds]))
+
+ return super(Block, self).preprocess_data(ds)
+
+    def _load_block(self, attr, ds):
+        # Parse the raw 'block' task list into Task/Handler (or nested
+        # Block) objects, passing along this block's full context.
+        # use_handlers decides Task vs Handler inside load_list_of_tasks.
+        return load_list_of_tasks(
+            ds,
+            play=self._play,
+            block=self,
+            role=self._role,
+            task_include=self._task_include,
+            variable_manager=self._variable_manager,
+            loader=self._loader,
+            use_handlers=self._use_handlers,
+        )
+
+ def _load_rescue(self, attr, ds):
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=self._task_include,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+
+ def _load_always(self, attr, ds):
+ return load_list_of_tasks(
+ ds,
+ play=self._play,
+ block=self,
+ role=self._role,
+ task_include=self._task_include,
+ variable_manager=self._variable_manager,
+ loader=self._loader,
+ use_handlers=self._use_handlers,
+ )
+
+ # not currently used
+ #def _load_otherwise(self, attr, ds):
+ # return load_list_of_tasks(
+ # ds,
+ # play=self._play,
+ # block=self,
+ # role=self._role,
+ # task_include=self._task_include,
+ # variable_manager=self._variable_manager,
+ # loader=self._loader,
+ # use_handlers=self._use_handlers,
+ # )
+
+    def copy(self, exclude_parent=False):
+        '''
+        Duplicate this block. Contained tasks/sub-blocks are copied and
+        re-parented onto the new block; the play and role references are
+        shared, while the parent block and task include are copied
+        (parent copy skipped when exclude_parent is True).
+        '''
+        def _dupe_task_list(task_list, new_block):
+            # copy each entry and point its parent link at new_block
+            new_task_list = []
+            for task in task_list:
+                if isinstance(task, Block):
+                    # exclude_parent=True: the parent link is set manually below
+                    new_task = task.copy(exclude_parent=True)
+                    new_task._parent_block = new_block
+                else:
+                    new_task = task.copy(exclude_block=True)
+                    new_task._block = new_block
+                new_task_list.append(new_task)
+            return new_task_list
+
+        new_me = super(Block, self).copy()
+        new_me._play = self._play
+        new_me._use_handlers = self._use_handlers
+        new_me._dep_chain = self._dep_chain[:]
+
+        new_me.block = _dupe_task_list(self.block or [], new_me)
+        new_me.rescue = _dupe_task_list(self.rescue or [], new_me)
+        new_me.always = _dupe_task_list(self.always or [], new_me)
+
+        new_me._parent_block = None
+        if self._parent_block and not exclude_parent:
+            new_me._parent_block = self._parent_block.copy()
+
+        new_me._role = None
+        if self._role:
+            new_me._role = self._role
+
+        new_me._task_include = None
+        if self._task_include:
+            new_me._task_include = self._task_include.copy()
+
+        return new_me
+
+ def serialize(self):
+ '''
+ Override of the default serialize method, since when we're serializing
+ a task we don't want to include the attribute list of tasks.
+ '''
+
+ data = dict()
+ for attr in self._get_base_attributes():
+ if attr not in ('block', 'rescue', 'always'):
+ data[attr] = getattr(self, attr)
+
+ data['dep_chain'] = self._dep_chain
+
+ if self._role is not None:
+ data['role'] = self._role.serialize()
+ if self._task_include is not None:
+ data['task_include'] = self._task_include.serialize()
+
+ return data
+
+    def deserialize(self, data):
+        '''
+        Override of the default deserialize method, to match the above overridden
+        serialize method
+        '''
+
+        # deferred import to avoid a circular dependency at module load
+        from ansible.playbook.task import Task
+
+        # we don't want the full set of attributes (the task lists), as that
+        # would lead to a serialize/deserialize loop
+        for attr in self._get_base_attributes():
+            if attr in data and attr not in ('block', 'rescue', 'always'):
+                setattr(self, attr, data.get(attr))
+
+        self._dep_chain = data.get('dep_chain', [])
+
+        # if there was a serialized role, unpack it too
+        role_data = data.get('role')
+        if role_data:
+            r = Role()
+            r.deserialize(role_data)
+            self._role = r
+
+        # if there was a serialized task include, unpack it too
+        ti_data = data.get('task_include')
+        if ti_data:
+            ti = Task()
+            ti.deserialize(ti_data)
+            self._task_include = ti
+
+    def evaluate_conditional(self, all_vars):
+        # every link in the role dependency chain must pass its own
+        # conditionals for this block to run
+        if len(self._dep_chain):
+            for dep in self._dep_chain:
+                if not dep.evaluate_conditional(all_vars):
+                    return False
+        if self._task_include is not None:
+            if not self._task_include.evaluate_conditional(all_vars):
+                return False
+        # a nested block's parent takes precedence over the role:
+        # only one of the two is consulted (note the elif)
+        if self._parent_block is not None:
+            if not self._parent_block.evaluate_conditional(all_vars):
+                return False
+        elif self._role is not None:
+            if not self._role.evaluate_conditional(all_vars):
+                return False
+        return super(Block, self).evaluate_conditional(all_vars)
+
+    def set_loader(self, loader):
+        # Propagate the loader to this block and everything it is
+        # attached to. The parent block takes precedence over the role
+        # here (elif): only one of the two is updated.
+        self._loader = loader
+        if self._parent_block:
+            self._parent_block.set_loader(loader)
+        elif self._role:
+            self._role.set_loader(loader)
+
+        if self._task_include:
+            self._task_include.set_loader(loader)
+
+        for dep in self._dep_chain:
+            dep.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a block value.
+ '''
+
+ value = self._attributes[attr]
+ if self._parent_block and (not value or extend):
+ parent_value = getattr(self._parent_block, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
+ else:
+ value = parent_value
+ if self._task_include and (not value or extend):
+ parent_value = getattr(self._task_include, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
+ else:
+ value = parent_value
+ if self._role and (not value or extend):
+ parent_value = getattr(self._role, attr)
+ if len(self._dep_chain) and (not value or extend):
+ reverse_dep_chain = self._dep_chain[:]
+ reverse_dep_chain.reverse()
+ for dep in reverse_dep_chain:
+ dep_value = getattr(dep, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
+ else:
+ value = parent_value
+
+ if value and not extend:
+ break
+ if self._play and (not value or extend):
+ parent_value = getattr(self._play, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
+ else:
+ value = parent_value
+
+ return value
+
+ def filter_tagged_tasks(self, connection_info, all_vars):
+ '''
+ Creates a new block, with task lists filtered based on the tags contained
+ within the connection_info object.
+ '''
+
+ def evaluate_and_append_task(target):
+ tmp_list = []
+ for task in target:
+ if task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, all_vars=all_vars):
+ tmp_list.append(task)
+ return tmp_list
+
+ new_block = self.copy()
+ new_block.block = evaluate_and_append_task(self.block)
+ new_block.rescue = evaluate_and_append_task(self.rescue)
+ new_block.always = evaluate_and_append_task(self.always)
+
+ return new_block
+
+ def has_tasks(self):
+ return len(self.block) > 0 or len(self.rescue) > 0 or len(self.always) > 0
diff --git a/lib/ansible/playbook/conditional.py b/lib/ansible/playbook/conditional.py
new file mode 100644
index 0000000000..2233f3fa9e
--- /dev/null
+++ b/lib/ansible/playbook/conditional.py
@@ -0,0 +1,102 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import *
+from ansible.playbook.attribute import FieldAttribute
+from ansible.template import Templar
+
+class Conditional:
+
+    '''
+    This is a mix-in class, to be used with Base to allow the object
+    to be run conditionally when a condition is met or skipped.
+    '''
+
+    # list of raw 'when' conditions attached to the object
+    _when = FieldAttribute(isa='list', default=[])
+
+    def __init__(self, loader=None):
+        # when used directly, this class needs a loader, but we want to
+        # make sure we don't trample on the existing one if this class
+        # is used as a mix-in with a playbook base class
+        if not hasattr(self, '_loader'):
+            if loader is None:
+                raise AnsibleError("a loader must be specified when using Conditional() directly")
+            else:
+                self._loader = loader
+        super(Conditional, self).__init__()
+
+    def _validate_when(self, attr, name, value):
+        # normalize a scalar 'when' into a single-element list
+        if not isinstance(value, list):
+            setattr(self, name, [ value ])
+
+    def evaluate_conditional(self, all_vars):
+        '''
+        Loops through the conditionals set on this object, returning
+        False if any of them evaluate as such.
+        '''
+
+        # fail_on_undefined=False: undefined variables are handled
+        # explicitly inside _check_conditional below
+        templar = Templar(loader=self._loader, variables=all_vars, fail_on_undefined=False)
+        for conditional in self.when:
+            if not self._check_conditional(conditional, templar, all_vars):
+                return False
+        return True
+
+    def _check_conditional(self, conditional, templar, all_vars):
+        '''
+        This method does the low-level evaluation of each conditional
+        set on this object, using jinja2 to wrap the conditionals for
+        evaluation.
+        '''
+
+        original = conditional
+        # an empty condition is trivially true
+        if conditional is None or conditional == '':
+            return True
+
+        # a bare variable name may be given as the condition; the '-'
+        # guard presumably avoids substituting values jinja2 would parse
+        # as subtraction expressions -- TODO confirm intent
+        if conditional in all_vars and '-' not in unicode(all_vars[conditional]):
+            conditional = all_vars[conditional]
+
+        # template the raw condition first, in case it contains variables
+        conditional = templar.template(conditional)
+        if not isinstance(conditional, basestring) or conditional == "":
+            # already reduced to a non-string (e.g. a bool); use it as-is
+            return conditional
+
+        # a Jinja2 evaluation that results in something Python can eval!
+        presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
+        conditional = templar.template(presented)
+
+        val = conditional.strip()
+        if val == presented:
+            # the templating failed, meaning most likely a
+            # variable was undefined. If we happened to be
+            # looking for an undefined variable, return True,
+            # otherwise fail
+            if "is undefined" in original:
+                return True
+            elif "is defined" in original:
+                return False
+            else:
+                raise AnsibleError("error while evaluating conditional: %s (%s)" % (original, presented))
+        elif val == "True":
+            return True
+        elif val == "False":
+            return False
+        else:
+            raise AnsibleError("unable to evaluate conditional: %s" % original)
+
diff --git a/lib/ansible/playbook/handler.py b/lib/ansible/playbook/handler.py
new file mode 100644
index 0000000000..c8c1572e48
--- /dev/null
+++ b/lib/ansible/playbook/handler.py
@@ -0,0 +1,53 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+#from ansible.inventory.host import Host
+from ansible.playbook.task import Task
+
+class Handler(Task):
+
+ def __init__(self, block=None, role=None, task_include=None):
+ self._flagged_hosts = []
+
+ super(Handler, self).__init__(block=block, role=role, task_include=task_include)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the handler '''
+ return "HANDLER: %s" % self.get_name()
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Handler(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def flag_for_host(self, host):
+ #assert instanceof(host, Host)
+ if host not in self._flagged_hosts:
+ self._flagged_hosts.append(host)
+
+ def has_triggered(self, host):
+ return host in self._flagged_hosts
+
+ def serialize(self):
+ result = super(Handler, self).serialize()
+ result['is_handler'] = True
+ return result
diff --git a/lib/ansible/playbook/helpers.py b/lib/ansible/playbook/helpers.py
new file mode 100644
index 0000000000..302e14a6e0
--- /dev/null
+++ b/lib/ansible/playbook/helpers.py
@@ -0,0 +1,116 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from types import NoneType
+
+from ansible.errors import AnsibleParserError
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleSequence
+
+
+def load_list_of_blocks(ds, play, parent_block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+ '''
+ Given a list of mixed task/block data (parsed from YAML),
+ return a list of Block() objects, where implicit blocks
+ are created for each bare Task.
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.block import Block
+
+ assert ds is None or isinstance(ds, list), 'block has bad type: %s' % type(ds)
+
+ block_list = []
+ if ds:
+ for block in ds:
+ b = Block.load(
+ block,
+ play=play,
+ parent_block=parent_block,
+ role=role,
+ task_include=task_include,
+ use_handlers=use_handlers,
+ variable_manager=variable_manager,
+ loader=loader
+ )
+ block_list.append(b)
+
+ return block_list
+
+
+def load_list_of_tasks(ds, play, block=None, role=None, task_include=None, use_handlers=False, variable_manager=None, loader=None):
+    '''
+    Given a list of task datastructures (parsed from YAML),
+    return a list of Task() or TaskInclude() objects.
+    '''
+
+    # we import here to prevent a circular dependency with imports
+    from ansible.playbook.block import Block
+    from ansible.playbook.handler import Handler
+    from ansible.playbook.task import Task
+
+    assert isinstance(ds, list), 'task has bad type: %s' % type(ds)
+
+    task_list = []
+    for task in ds:
+        if not isinstance(task, dict):
+            raise AnsibleParserError("task/handler entries must be dictionaries (got a %s)" % type(task), obj=ds)
+
+        # a 'block' key marks this entry as a nested block, not a task
+        if 'block' in task:
+            t = Block.load(
+                task,
+                play=play,
+                parent_block=block,
+                role=role,
+                task_include=task_include,
+                use_handlers=use_handlers,
+                variable_manager=variable_manager,
+                loader=loader,
+            )
+        else:
+            # handlers and tasks share the same datastructure;
+            # use_handlers selects which class is built
+            if use_handlers:
+                t = Handler.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+            else:
+                t = Task.load(task, block=block, role=role, task_include=task_include, variable_manager=variable_manager, loader=loader)
+
+        task_list.append(t)
+
+    return task_list
+
+
+def load_list_of_roles(ds, current_role_path=None, variable_manager=None, loader=None):
+ '''
+ Loads and returns a list of RoleInclude objects from the datastructure
+ list of role definitions
+ '''
+
+ # we import here to prevent a circular dependency with imports
+ from ansible.playbook.role.include import RoleInclude
+
+ assert isinstance(ds, list), 'roles has bad type: %s' % type(ds)
+
+ roles = []
+ for role_def in ds:
+ i = RoleInclude.load(role_def, current_role_path=current_role_path, variable_manager=variable_manager, loader=loader)
+ roles.append(i)
+
+ return roles
+
diff --git a/lib/ansible/playbook/play.py b/lib/ansible/playbook/play.py
index 6ee85e0bf4..b99c01fdf7 100644
--- a/lib/ansible/playbook/play.py
+++ b/lib/ansible/playbook/play.py
@@ -15,935 +15,249 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#############################################
-
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.playbook.task import Task
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.constants as C
-import pipes
-import shlex
-import os
-import sys
-import uuid
-
-
-class Play(object):
-
- _pb_common = [
- 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
- 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
- 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
- 'vault_password',
- ]
-
- __slots__ = _pb_common + [
- '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
- 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
- 'role_vars', 'transport', 'vars_file_vars',
- ]
-
- # to catch typos and so forth -- these are userland names
- # and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset(_pb_common + [
- 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
- 'pre_tasks', 'role_names', 'tasks', 'user',
- ])
-
- # *************************************************
-
- def __init__(self, playbook, ds, basedir, vault_password=None):
- ''' constructor loads from a play datastructure '''
-
- for x in ds.keys():
- if not x in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
-
- # allow all playbook keys to be set by --extra-vars
- self.vars = ds.get('vars', {})
- self.vars_prompt = ds.get('vars_prompt', {})
- self.playbook = playbook
- self.vars = self._get_vars()
- self.vars_file_vars = dict() # these are vars read in from vars_files:
- self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
- self.basedir = basedir
- self.roles = ds.get('roles', None)
- self.tags = ds.get('tags', None)
- self.vault_password = vault_password
- self.environment = ds.get('environment', {})
-
- if self.tags is None:
- self.tags = []
- elif type(self.tags) in [ str, unicode ]:
- self.tags = self.tags.split(",")
- elif type(self.tags) != list:
- self.tags = []
-
- # make sure we have some special internal variables set, which
- # we use later when loading tasks and handlers
- load_vars = dict()
- load_vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.playbook.inventory.basedir() is not None:
- load_vars['inventory_dir'] = self.playbook.inventory.basedir()
- if self.playbook.inventory.src() is not None:
- load_vars['inventory_file'] = self.playbook.inventory.src()
-
- # We first load the vars files from the datastructure
- # so we have the default variables to pass into the roles
- self.vars_files = ds.get('vars_files', [])
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
- processed_vars_files = self._update_vars_files_for_host(None)
-
- # now we load the roles into the datastructure
- self.included_roles = []
- ds = self._load_roles(self.roles, ds)
-
- # and finally re-process the vars files as they may have been updated
- # by the included roles, but exclude any which have been processed
- self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
-
- self._update_vars_files_for_host(None)
-
- # template everything to be efficient, but do not pre-mature template
- # tasks/handlers as they may have inventory scope overrides. We also
- # create a set of temporary variables for templating, so we don't
- # trample on the existing vars structures
- _tasks = ds.pop('tasks', [])
- _handlers = ds.pop('handlers', [])
-
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
- try:
- ds = template(basedir, ds, temp_vars)
- except errors.AnsibleError, e:
- utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
-
- ds['tasks'] = _tasks
- ds['handlers'] = _handlers
-
- self._ds = ds
-
- hosts = ds.get('hosts')
- if hosts is None:
- raise errors.AnsibleError('hosts declaration is required')
- elif isinstance(hosts, list):
- try:
- hosts = ';'.join(hosts)
- except TypeError,e:
- raise errors.AnsibleError('improper host declaration: %s' % str(e))
-
- self.serial = str(ds.get('serial', 0))
- self.hosts = hosts
- self.name = ds.get('name', self.hosts)
- self._tasks = ds.get('tasks', [])
- self._handlers = ds.get('handlers', [])
- self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
- self.remote_port = ds.get('port', self.playbook.remote_port)
- self.transport = ds.get('connection', self.playbook.transport)
- self.remote_port = self.remote_port
- self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
- self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
- self.accelerate_port = ds.get('accelerate_port', None)
- self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
- self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.no_log = utils.boolean(ds.get('no_log', 'false'))
- self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
-
- # Fail out if user specifies conflicting privilege escalations
- if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
- if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
-
- # become settings are inherited and updated normally
- self.become = ds.get('become', self.playbook.become)
- self.become_method = ds.get('become_method', self.playbook.become_method)
- self.become_user = ds.get('become_user', self.playbook.become_user)
-
- # Make sure current play settings are reflected in become fields
- if 'sudo' in ds:
- self.become=ds['sudo']
- self.become_method='sudo'
- if 'sudo_user' in ds:
- self.become_user=ds['sudo_user']
- elif 'su' in ds:
- self.become=True
- self.become=ds['su']
- self.become_method='su'
- if 'su_user' in ds:
- self.become_user=ds['su_user']
-
- # gather_facts is not a simple boolean, as None means that a 'smart'
- # fact gathering mode will be used, so we need to be careful here as
- # calling utils.boolean(None) returns False
- self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts is not None:
- self.gather_facts = utils.boolean(self.gather_facts)
-
- load_vars['role_names'] = ds.get('role_names', [])
-
- self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
- self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
-
- # apply any missing tags to role tasks
- self._late_merge_role_tags()
-
- # place holder for the discovered hosts to be used in this play
- self._play_hosts = None
-
- # *************************************************
-
- def _get_role_path(self, role):
- """
- Returns the path on disk to the directory containing
- the role directories like tasks, templates, etc. Also
- returns any variables that were included with the role
- """
- orig_path = template(self.basedir,role,self.vars)
-
- role_vars = {}
- if type(orig_path) == dict:
- # what, not a path?
- role_name = orig_path.get('role', None)
- if role_name is None:
- raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
- role_vars = orig_path
- else:
- role_name = utils.role_spec_parse(orig_path)["name"]
-
- role_path = None
-
- possible_paths = [
- utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
- utils.path_dwim(self.basedir, role_name)
- ]
-
- if C.DEFAULT_ROLES_PATH:
- search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- for loc in search_locations:
- loc = os.path.expanduser(loc)
- possible_paths.append(utils.path_dwim(loc, role_name))
-
- for path_option in possible_paths:
- if os.path.isdir(path_option):
- role_path = path_option
- break
-
- if role_path is None:
- raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
-
- return (role_path, role_vars)
-
- def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
- # this number is arbitrary, but it seems sane
- if level > 20:
- raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
- for role in roles:
- role_path,role_vars = self._get_role_path(role)
-
- # save just the role params for this role, which exclude the special
- # keywords 'role', 'tags', and 'when'.
- role_params = role_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in role_params:
- del role_params[item]
-
- role_vars = utils.combine_vars(passed_vars, role_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- if not isinstance(vars_data, dict):
- raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
- role_vars = utils.combine_vars(vars_data, role_vars)
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
- defaults_data = {}
- if os.path.isfile(defaults):
- defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
-
- # the meta directory contains the yaml that should
- # hold the list of dependencies (if any)
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
- if os.path.isfile(meta):
- data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if data:
- dependencies = data.get('dependencies',[])
- if dependencies is None:
- dependencies = []
- for dep in dependencies:
- allow_dupes = False
- (dep_path,dep_vars) = self._get_role_path(dep)
-
- # save the dep params, just as we did above
- dep_params = dep_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in dep_params:
- del dep_params[item]
-
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
- if os.path.isfile(meta):
- meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if meta_data:
- allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
-
- # if any tags were specified as role/dep variables, merge
- # them into the current dep_vars so they're passed on to any
- # further dependencies too, and so we only have one place
- # (dep_vars) to look for tags going forward
- def __merge_tags(var_obj):
- old_tags = dep_vars.get('tags', [])
- if isinstance(old_tags, basestring):
- old_tags = [old_tags, ]
- if isinstance(var_obj, dict):
- new_tags = var_obj.get('tags', [])
- if isinstance(new_tags, basestring):
- new_tags = [new_tags, ]
- else:
- new_tags = []
- return list(set(old_tags).union(set(new_tags)))
-
- dep_vars['tags'] = __merge_tags(role_vars)
- dep_vars['tags'] = __merge_tags(passed_vars)
-
- # if tags are set from this role, merge them
- # into the tags list for the dependent role
- if "tags" in passed_vars:
- for included_role_dep in dep_stack:
- included_dep_name = included_role_dep[0]
- included_dep_vars = included_role_dep[2]
- if included_dep_name == dep:
- if "tags" in included_dep_vars:
- included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
- else:
- included_dep_vars["tags"] = passed_vars["tags"][:]
-
- dep_vars = utils.combine_vars(passed_vars, dep_vars)
- dep_vars = utils.combine_vars(role_vars, dep_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- dep_vars = utils.combine_vars(dep_vars, vars_data)
- pass
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
- dep_defaults_data = {}
- if os.path.isfile(defaults):
- dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
- if 'role' in dep_vars:
- del dep_vars['role']
-
- if not allow_dupes:
- if dep in self.included_roles:
- # skip back to the top, since we don't want to
- # do anything else with this role
- continue
- else:
- self.included_roles.append(dep)
-
- def _merge_conditional(cur_conditionals, new_conditionals):
- if isinstance(new_conditionals, (basestring, bool)):
- cur_conditionals.append(new_conditionals)
- elif isinstance(new_conditionals, list):
- cur_conditionals.extend(new_conditionals)
-
- # pass along conditionals from roles to dep roles
- passed_when = passed_vars.get('when')
- role_when = role_vars.get('when')
- dep_when = dep_vars.get('when')
-
- tmpcond = []
- _merge_conditional(tmpcond, passed_when)
- _merge_conditional(tmpcond, role_when)
- _merge_conditional(tmpcond, dep_when)
-
- if len(tmpcond) > 0:
- dep_vars['when'] = tmpcond
-
- self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
- dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
-
- # only add the current role when we're at the top level,
- # otherwise we'll end up in a recursive loop
- if level == 0:
- self.included_roles.append(role)
- dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
- return dep_stack
-
- def _load_role_vars_files(self, vars_files):
- # process variables stored in vars/main.yml files
- role_vars = {}
- for filename in vars_files:
- if os.path.exists(filename):
- new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_vars:
- if type(new_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
- role_vars = utils.combine_vars(role_vars, new_vars)
-
- return role_vars
-
- def _load_role_defaults(self, defaults_files):
- # process default variables
- default_vars = {}
- for filename in defaults_files:
- if os.path.exists(filename):
- new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_default_vars:
- if type(new_default_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
- default_vars = utils.combine_vars(default_vars, new_default_vars)
-
- return default_vars
-
- def _load_roles(self, roles, ds):
- # a role is a name that auto-includes the following if they exist
- # <rolename>/tasks/main.yml
- # <rolename>/handlers/main.yml
- # <rolename>/vars/main.yml
- # <rolename>/library
- # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
-
- if roles is None:
- roles = []
- if type(roles) != list:
- raise errors.AnsibleError("value of 'roles:' must be a list")
-
- new_tasks = []
- new_handlers = []
- role_vars_files = []
- defaults_files = []
-
- pre_tasks = ds.get('pre_tasks', None)
- if type(pre_tasks) != list:
- pre_tasks = []
- for x in pre_tasks:
- new_tasks.append(x)
-
- # flush handlers after pre_tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- roles = self._build_role_dependencies(roles, [], {})
-
- # give each role an uuid and
- # make role_path available as variable to the task
- for idx, val in enumerate(roles):
- this_uuid = str(uuid.uuid4())
- roles[idx][-3]['role_uuid'] = this_uuid
- roles[idx][-3]['role_path'] = roles[idx][1]
-
- role_names = []
-
- for (role, role_path, role_vars, role_params, default_vars) in roles:
- # special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
- special_vars = {}
- for k in special_keys:
- if k in role_vars:
- special_vars[k] = role_vars[k]
-
- task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
- handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
- vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
- meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
- defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
-
- task = self._resolve_main(task_basepath)
- handler = self._resolve_main(handler_basepath)
- vars_file = self._resolve_main(vars_basepath)
- meta_file = self._resolve_main(meta_basepath)
- defaults_file = self._resolve_main(defaults_basepath)
-
- library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
-
- missing = lambda f: not os.path.isfile(f)
- if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
- raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
-
- if isinstance(role, dict):
- role_name = role['role']
- else:
- role_name = utils.role_spec_parse(role)["name"]
-
- role_names.append(role_name)
- if os.path.isfile(task):
- nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_tasks.append(nt)
- if os.path.isfile(handler):
- nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_handlers.append(nt)
- if os.path.isfile(vars_file):
- role_vars_files.append(vars_file)
- if os.path.isfile(defaults_file):
- defaults_files.append(defaults_file)
- if os.path.isdir(library):
- utils.plugins.module_finder.add_directory(library)
-
- tasks = ds.get('tasks', None)
- post_tasks = ds.get('post_tasks', None)
- handlers = ds.get('handlers', None)
- vars_files = ds.get('vars_files', None)
-
- if type(tasks) != list:
- tasks = []
- if type(handlers) != list:
- handlers = []
- if type(vars_files) != list:
- vars_files = []
- if type(post_tasks) != list:
- post_tasks = []
-
- new_tasks.extend(tasks)
- # flush handlers after tasks + role tasks
- new_tasks.append(dict(meta='flush_handlers'))
- new_tasks.extend(post_tasks)
- # flush handlers after post tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- new_handlers.extend(handlers)
-
- ds['tasks'] = new_tasks
- ds['handlers'] = new_handlers
- ds['role_names'] = role_names
-
- self.role_vars = self._load_role_vars_files(role_vars_files)
- self.default_vars = self._load_role_defaults(defaults_files)
-
- return ds
-
- # *************************************************
-
- def _resolve_main(self, basepath):
- ''' flexibly handle variations in main filenames '''
- # these filenames are acceptable:
- mains = (
- os.path.join(basepath, 'main'),
- os.path.join(basepath, 'main.yml'),
- os.path.join(basepath, 'main.yaml'),
- os.path.join(basepath, 'main.json'),
- )
- if sum([os.path.isfile(x) for x in mains]) > 1:
- raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
- else:
- for m in mains:
- if os.path.isfile(m):
- return m # exactly one main file
- return mains[0] # zero mains (we still need to return something)
-
- # *************************************************
-
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
- additional_conditions=None, original_file=None, role_name=None):
- ''' handle task and handler include statements '''
-
- results = []
- if tasks is None:
- # support empty handler files, and the like.
- tasks = []
- if additional_conditions is None:
- additional_conditions = []
- if vars is None:
- vars = {}
- if role_params is None:
- role_params = {}
- if default_vars is None:
- default_vars = {}
- if become_vars is None:
- become_vars = {}
-
- old_conditions = list(additional_conditions)
-
- for x in tasks:
-
- # prevent assigning the same conditions to each task on an include
- included_additional_conditions = list(old_conditions)
-
- if not isinstance(x, dict):
- raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
-
- # evaluate privilege escalation vars for current and child tasks
- included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
- if k in x:
- included_become_vars[k] = x[k]
- elif k in become_vars:
- included_become_vars[k] = become_vars[k]
- x[k] = become_vars[k]
-
- task_vars = vars.copy()
- if original_file:
- task_vars['_original_file'] = original_file
-
- if 'meta' in x:
- if x['meta'] == 'flush_handlers':
- if role_name and 'role_name' not in x:
- x['role_name'] = role_name
- results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
- continue
-
- if 'include' in x:
- tokens = split_args(str(x['include']))
- included_additional_conditions = list(additional_conditions)
- include_vars = {}
- for k in x:
- if k.startswith("with_"):
- if original_file:
- offender = " (in %s)" % original_file
- else:
- offender = ""
- utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
- elif k.startswith("when_"):
- utils.deprecated("\"when_<criteria>:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
- elif k == 'when':
- if isinstance(x[k], (basestring, bool)):
- included_additional_conditions.append(x[k])
- elif type(x[k]) is list:
- included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
- continue
- else:
- include_vars[k] = x[k]
-
- # get any role parameters specified
- role_params = x.get('role_params', {})
-
- # get any role default variables specified
- default_vars = x.get('default_vars', {})
- if not default_vars:
- default_vars = self.default_vars
- else:
- default_vars = utils.combine_vars(self.default_vars, default_vars)
-
- # append the vars defined with the include (from above)
- # as well as the old-style 'vars' element. The old-style
- # vars are given higher precedence here (just in case)
- task_vars = utils.combine_vars(task_vars, include_vars)
- if 'vars' in x:
- task_vars = utils.combine_vars(task_vars, x['vars'])
-
- new_role = None
- if 'role_name' in x:
- new_role = x['role_name']
-
- mv = task_vars.copy()
- for t in tokens[1:]:
- (k,v) = t.split("=", 1)
- v = unquote(v)
- mv[k] = template(self.basedir, v, mv)
- dirname = self.basedir
- if original_file:
- dirname = os.path.dirname(original_file)
-
- # temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- include_file = template(dirname, tokens[0], temp_vars)
- include_filename = utils.path_dwim(dirname, include_file)
-
- data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
- if 'role_name' in x and data is not None:
- for y in data:
- if isinstance(y, dict) and 'include' in y:
- y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
- results += loaded
- elif type(x) == dict:
- task = Task(
- self, x,
- module_vars=task_vars,
- play_vars=self.vars,
- play_file_vars=self.vars_file_vars,
- role_vars=self.role_vars,
- role_params=role_params,
- default_vars=default_vars,
- additional_conditions=list(additional_conditions),
- role_name=role_name
- )
- results.append(task)
- else:
- raise Exception("unexpected task type")
+from ansible.errors import AnsibleError, AnsibleParserError
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.helpers import load_list_of_blocks, load_list_of_roles
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+from ansible.playbook.block import Block
- for x in results:
- if self.tags is not None:
- x.tags.extend(self.tags)
+from ansible.utils.vars import combine_vars
- return results
- # *************************************************
+__all__ = ['Play']
- def tasks(self):
- ''' return task objects for this play '''
- return self._tasks
- def handlers(self):
- ''' return handler objects for this play '''
- return self._handlers
+class Play(Base, Taggable, Become):
- # *************************************************
+ """
+ A play is a language feature that represents a list of roles and/or
+ task/handler blocks to execute on a given set of hosts.
- def _get_vars(self):
- ''' load the vars section from a play, accounting for all sorts of variable features
- including loading from yaml files, prompting, and conditional includes of the first
- file found in a list. '''
+ Usage:
- if self.vars is None:
- self.vars = {}
+ Play.load(datastructure) -> Play
+ Play.something(...)
+ """
- if type(self.vars) not in [dict, list]:
- raise errors.AnsibleError("'vars' section must contain only key/value pairs")
+ # =================================================================================
+ # Connection-Related Attributes
- vars = {}
+ # TODO: generalize connection
+ _accelerate = FieldAttribute(isa='bool', default=False)
+ _accelerate_ipv6 = FieldAttribute(isa='bool', default=False)
+ _accelerate_port = FieldAttribute(isa='int', default=5099) # should be alias of port
- # translate a list of vars into a dict
- if type(self.vars) == list:
- for item in self.vars:
- if getattr(item, 'items', None) is None:
- raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
- k, v = item.items()[0]
- vars[k] = v
- else:
- vars.update(self.vars)
+ # Connection
+ _gather_facts = FieldAttribute(isa='string', default='smart')
+ _hosts = FieldAttribute(isa='list', default=[], required=True)
+ _name = FieldAttribute(isa='string', default='<no name specified>')
- if type(self.vars_prompt) == list:
- for var in self.vars_prompt:
- if not 'name' in var:
- raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
+ # Variable Attributes
+ _vars_files = FieldAttribute(isa='list', default=[])
+ _vars_prompt = FieldAttribute(isa='dict', default=dict())
+ _vault_password = FieldAttribute(isa='string')
- vname = var['name']
- prompt = var.get("prompt", vname)
- default = var.get("default", None)
- private = var.get("private", True)
+ # Block (Task) Lists Attributes
+ _handlers = FieldAttribute(isa='list', default=[])
+ _pre_tasks = FieldAttribute(isa='list', default=[])
+ _post_tasks = FieldAttribute(isa='list', default=[])
+ _tasks = FieldAttribute(isa='list', default=[])
- confirm = var.get("confirm", False)
- encrypt = var.get("encrypt", None)
- salt_size = var.get("salt_size", None)
- salt = var.get("salt", None)
-
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- vname, private, prompt, encrypt, confirm, salt_size, salt, default
- )
+ # Role Attributes
+ _roles = FieldAttribute(isa='list', default=[])
- elif type(self.vars_prompt) == dict:
- for (vname, prompt) in self.vars_prompt.iteritems():
- prompt_msg = "%s: " % prompt
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- varname=vname, private=False, prompt=prompt_msg, default=None
- )
+ # Flag/Setting Attributes
+ _any_errors_fatal = FieldAttribute(isa='bool', default=False)
+ _max_fail_percentage = FieldAttribute(isa='string', default='0')
+ _serial = FieldAttribute(isa='int', default=0)
+ _strategy = FieldAttribute(isa='string', default='linear')
- else:
- raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
+ # =================================================================================
- if type(self.playbook.extra_vars) == dict:
- vars = utils.combine_vars(vars, self.playbook.extra_vars)
+ def __init__(self):
+ super(Play, self).__init__()
- return vars
+ def __repr__(self):
+ return self.get_name()
- # *************************************************
+ def get_name(self):
+ ''' return the name of the Play '''
+ return "PLAY: %s" % self._attributes.get('name')
- def update_vars_files(self, hosts, vault_password=None):
- ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
-
- # now loop through all the hosts...
- for h in hosts:
- self._update_vars_files_for_host(h, vault_password=vault_password)
-
- # *************************************************
-
- def compare_tags(self, tags):
- ''' given a list of tags that the user has specified, return two lists:
- matched_tags: tags were found within the current play and match those given
- by the user
- unmatched_tags: tags that were found within the current play but do not match
- any provided by the user '''
-
- # gather all the tags in all the tasks and handlers into one list
- # FIXME: isn't this in self.tags already?
-
- all_tags = []
- for task in self._tasks:
- if not task.meta:
- all_tags.extend(task.tags)
- for handler in self._handlers:
- all_tags.extend(handler.tags)
-
- # compare the lists of tags using sets and return the matched and unmatched
- all_tags_set = set(all_tags)
- tags_set = set(tags)
-
- matched_tags = all_tags_set.intersection(tags_set)
- unmatched_tags = all_tags_set.difference(tags_set)
-
- a = set(['always'])
- u = set(['untagged'])
- if 'always' in all_tags_set:
- matched_tags = matched_tags.union(a)
- unmatched_tags = all_tags_set.difference(a)
-
- if 'all' in tags_set:
- matched_tags = matched_tags.union(all_tags_set)
- unmatched_tags = set()
-
- if 'tagged' in tags_set:
- matched_tags = all_tags_set.difference(u)
- unmatched_tags = u
-
- if 'untagged' in tags_set and 'untagged' in all_tags_set:
- matched_tags = matched_tags.union(u)
- unmatched_tags = unmatched_tags.difference(u)
-
- return matched_tags, unmatched_tags
-
- # *************************************************
-
- def _late_merge_role_tags(self):
- # build a local dict of tags for roles
- role_tags = {}
- for task in self._ds['tasks']:
- if 'role_name' in task:
- this_role = task['role_name'] + "-" + task['vars']['role_uuid']
-
- if this_role not in role_tags:
- role_tags[this_role] = []
-
- if 'tags' in task['vars']:
- if isinstance(task['vars']['tags'], basestring):
- role_tags[this_role] += shlex.split(task['vars']['tags'])
- else:
- role_tags[this_role] += task['vars']['tags']
-
- # apply each role's tags to its tasks
- for idx, val in enumerate(self._tasks):
- if getattr(val, 'role_name', None) is not None:
- this_role = val.role_name + "-" + val.module_vars['role_uuid']
- if this_role in role_tags:
- self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
-
- # *************************************************
-
- def _update_vars_files_for_host(self, host, vault_password=None):
+ @staticmethod
+ def load(data, variable_manager=None, loader=None):
+ p = Play()
+ return p.load_data(data, variable_manager=variable_manager, loader=loader)
- def generate_filenames(host, inject, filename):
-
- """ Render the raw filename into 3 forms """
+ def preprocess_data(self, ds):
+ '''
+        Adjusts play datastructure to clean up old/legacy items
+ '''
- # filename2 is the templated version of the filename, which will
- # be fully rendered if any variables contained within it are
- # non-inventory related
- filename2 = template(self.basedir, filename, self.vars)
+ assert isinstance(ds, dict)
- # filename3 is the same as filename2, but when the host object is
- # available, inventory variables will be expanded as well since the
- # name is templated with the injected variables
- filename3 = filename2
- if host is not None:
- filename3 = template(self.basedir, filename2, inject)
+ # The use of 'user' in the Play datastructure was deprecated to
+ # line up with the same change for Tasks, due to the fact that
+ # 'user' conflicted with the user module.
+ if 'user' in ds:
+ # this should never happen, but error out with a helpful message
+ # to the user if it does...
+ if 'remote_user' in ds:
+ raise AnsibleParserError("both 'user' and 'remote_user' are set for %s. The use of 'user' is deprecated, and should be removed" % self.get_name(), obj=ds)
- # filename4 is the dwim'd path, but may also be mixed-scope, so we use
- # both play scoped vars and host scoped vars to template the filepath
- if utils.contains_vars(filename3) and host is not None:
- inject.update(self.vars)
- filename4 = template(self.basedir, filename3, inject)
- filename4 = utils.path_dwim(self.basedir, filename4)
+ ds['remote_user'] = ds['user']
+ del ds['user']
+
+ return super(Play, self).preprocess_data(ds)
+
+ def _load_vars(self, attr, ds):
+ '''
+ Vars in a play can be specified either as a dictionary directly, or
+        as a list of dictionaries. If the latter, this method will turn the
+ list into a single dictionary.
+ '''
+
+ try:
+ if isinstance(ds, dict):
+ return ds
+ elif isinstance(ds, list):
+ all_vars = dict()
+ for item in ds:
+ if not isinstance(item, dict):
+ raise ValueError
+ all_vars = combine_vars(all_vars, item)
+ return all_vars
else:
- filename4 = utils.path_dwim(self.basedir, filename3)
-
- return filename2, filename3, filename4
-
-
- def update_vars_cache(host, data, target_filename=None):
-
- """ update a host's varscache with new var data """
-
- self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
- if target_filename:
- self.playbook.callbacks.on_import_for_host(host, target_filename)
-
- def process_files(filename, filename2, filename3, filename4, host=None):
-
- """ pseudo-algorithm for deciding where new vars should go """
-
- data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
- if data:
- if type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
- if host is not None:
- target_filename = None
- if utils.contains_vars(filename2):
- if not utils.contains_vars(filename3):
- target_filename = filename3
- else:
- target_filename = filename4
- update_vars_cache(host, data, target_filename=target_filename)
- else:
- self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
- # we did process this file
- return True
- # we did not process this file
- return False
-
- # Enforce that vars_files is always a list
- if type(self.vars_files) != list:
- self.vars_files = [ self.vars_files ]
-
- # Build an inject if this is a host run started by self.update_vars_files
- if host is not None:
- inject = {}
- inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
- inject.update(self.playbook.SETUP_CACHE.get(host, {}))
- inject.update(self.playbook.VARS_CACHE.get(host, {}))
- else:
- inject = None
-
- processed = []
- for filename in self.vars_files:
- if type(filename) == list:
- # loop over all filenames, loading the first one, and failing if none found
- found = False
- sequence = []
- for real_filename in filename:
- filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
- sequence.append(filename4)
- if os.path.exists(filename4):
- found = True
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
- elif host is not None:
- self.playbook.callbacks.on_not_import_for_host(host, filename4)
- if found:
- break
- if not found and host is not None:
- raise errors.AnsibleError(
- "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
- )
+ raise ValueError
+ except ValueError:
+ raise AnsibleParserError("Vars in a playbook must be specified as a dictionary, or a list of dictionaries", obj=ds)
+
+ def _load_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_pre_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_post_tasks(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed tasks/blocks.
+ Bare tasks outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_handlers(self, attr, ds):
+ '''
+ Loads a list of blocks from a list which may be mixed handlers/blocks.
+ Bare handlers outside of a block are given an implicit block.
+ '''
+ return load_list_of_blocks(ds=ds, play=self, use_handlers=True, variable_manager=self._variable_manager, loader=self._loader)
+
+ def _load_roles(self, attr, ds):
+ '''
+ Loads and returns a list of RoleInclude objects from the datastructure
+ list of role definitions and creates the Role from those objects
+ '''
+
+ role_includes = load_list_of_roles(ds, variable_manager=self._variable_manager, loader=self._loader)
+
+ roles = []
+ for ri in role_includes:
+ roles.append(Role.load(ri))
+ return roles
+
+ # FIXME: post_validation needs to ensure that become/su/sudo have only 1 set
+
+ def _compile_roles(self):
+ '''
+ Handles the role compilation step, returning a flat list of tasks
+ with the lowest level dependencies first. For example, if a role R
+ has a dependency D1, which also has a dependency D2, the tasks from
+ D2 are merged first, followed by D1, and lastly by the tasks from
+ the parent role R last. This is done for all roles in the Play.
+ '''
+
+ block_list = []
+
+ if len(self.roles) > 0:
+ for r in self.roles:
+ block_list.extend(r.compile(play=self))
+
+ return block_list
+
+ def compile(self):
+ '''
+ Compiles and returns the task list for this play, compiled from the
+ roles (which are themselves compiled recursively) and/or the list of
+ tasks specified in the play.
+ '''
+
+ block_list = []
+
+ block_list.extend(self.pre_tasks)
+ block_list.extend(self._compile_roles())
+ block_list.extend(self.tasks)
+ block_list.extend(self.post_tasks)
+
+ return block_list
+
+ def get_vars(self):
+ return self.vars.copy()
+
+ def get_vars_files(self):
+ return self.vars_files
+
+ def get_handlers(self):
+ return self.handlers[:]
+
+ def get_roles(self):
+ return self.roles[:]
+
+ def get_tasks(self):
+ tasklist = []
+ for task in self.pre_tasks + self.tasks + self.post_tasks:
+ if isinstance(task, Block):
+ tasklist.append(task.block + task.rescue + task.always)
else:
- # just one filename supplied, load it!
- filename2, filename3, filename4 = generate_filenames(host, inject, filename)
- if utils.contains_vars(filename4):
- continue
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
-
- return processed
+ tasklist.append(task)
+ return tasklist
+
+ def serialize(self):
+ data = super(Play, self).serialize()
+
+ roles = []
+ for role in self.get_roles():
+ roles.append(role.serialize())
+ data['roles'] = roles
+
+ return data
+
+ def deserialize(self, data):
+ super(Play, self).deserialize(data)
+
+ if 'roles' in data:
+ role_data = data.get('roles', [])
+ roles = []
+ for role in role_data:
+ r = Role()
+ r.deserialize(role)
+ roles.append(r)
+
+ setattr(self, 'roles', roles)
+ del data['roles']
+
diff --git a/lib/ansible/playbook/playbook_include.py b/lib/ansible/playbook/playbook_include.py
new file mode 100644
index 0000000000..5c91dd14ad
--- /dev/null
+++ b/lib/ansible/playbook/playbook_include.py
@@ -0,0 +1,125 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.parsing.splitter import split_args, parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.errors import AnsibleParserError
+
+class PlaybookInclude(Base):
+
+ _name = FieldAttribute(isa='string')
+ _include = FieldAttribute(isa='string')
+ _vars = FieldAttribute(isa='dict', default=dict())
+
+ @staticmethod
+ def load(data, basedir, variable_manager=None, loader=None):
+ return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)
+
+ def load_data(self, ds, basedir, variable_manager=None, loader=None):
+ '''
+ Overrides the base load_data(), as we're actually going to return a new
+ Playbook() object rather than a PlaybookInclude object
+ '''
+
+ # import here to avoid a dependency loop
+ from ansible.playbook import Playbook
+
+ # first, we use the original parent method to correctly load the object
+ # via the load_data/preprocess_data system we normally use for other
+ # playbook objects
+ new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)
+
+ # then we use the object to load a Playbook
+ pb = Playbook(loader=loader)
+
+ file_name = new_obj.include
+ if not os.path.isabs(file_name):
+ file_name = os.path.join(basedir, file_name)
+
+ pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)
+
+ # finally, playbook includes can specify a list of variables, which are simply
+ # used to update the vars of each play in the playbook
+ for entry in pb._entries:
+ entry.vars.update(new_obj.vars)
+
+ return pb
+
+ def preprocess_data(self, ds):
+ '''
+        Reorganizes the data for a PlaybookInclude datastructure to line
+ up with what we expect the proper attributes to be
+ '''
+
+ assert isinstance(ds, dict)
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ for (k,v) in ds.iteritems():
+ if k == 'include':
+ self._preprocess_include(ds, new_ds, k, v)
+ else:
+ # some basic error checking, to make sure vars are properly
+ # formatted and do not conflict with k=v parameters
+ # FIXME: we could merge these instead, but controlling the order
+ # in which they're encountered could be difficult
+ if k == 'vars':
+ if 'vars' in new_ds:
+ raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
+ elif not isinstance(v, dict):
+ raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
+ new_ds[k] = v
+
+ return super(PlaybookInclude, self).preprocess_data(new_ds)
+
+ def _preprocess_include(self, ds, new_ds, k, v):
+ '''
+ Splits the include line up into filename and parameters
+ '''
+
+ # The include line must include at least one item, which is the filename
+ # to include. Anything after that should be regarded as a parameter to the include
+ items = split_args(v)
+ if len(items) == 0:
+ raise AnsibleParserError("include statements must specify the file name to include", obj=ds)
+ else:
+ # FIXME/TODO: validate that items[0] is a file, which also
+ # exists and is readable
+ new_ds['include'] = items[0]
+ if len(items) > 1:
+ # rejoin the parameter portion of the arguments and
+ # then use parse_kv() to get a dict of params back
+ params = parse_kv(" ".join(items[1:]))
+ if 'vars' in new_ds:
+ # FIXME: see fixme above regarding merging vars
+ raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
+ new_ds['vars'] = params
+
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
new file mode 100644
index 0000000000..bea61147ae
--- /dev/null
+++ b/lib/ansible/playbook/role/__init__.py
@@ -0,0 +1,396 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+import inspect
+import os
+
+from hashlib import sha1
+from types import NoneType
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.parsing import DataLoader
+from ansible.playbook.attribute import FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.role.include import RoleInclude
+from ansible.playbook.role.metadata import RoleMetadata
+from ansible.playbook.taggable import Taggable
+from ansible.plugins import get_all_plugin_loaders
+from ansible.utils.vars import combine_vars
+
+
+__all__ = ['Role', 'ROLE_CACHE', 'hash_params']
+
# FIXME: this should be a utility function, but can't be a member of
#        the role due to the fact that it would require the use of self
#        in a static method. This is also used in the base class for
#        strategies (ansible/plugins/strategies/__init__.py)
def hash_params(params):
    '''
    Reduces a role-params dictionary to a hashable value suitable for use
    as a ROLE_CACHE key.

    Non-dict values are returned unchanged; dicts become a frozenset of
    (key, value) pairs, with nested dicts hashed recursively and lists
    converted to tuples (of recursively-hashed items) so they are hashable.
    '''
    if not isinstance(params, dict):
        return params

    s = set()
    # items() works on both py2 and py3 (iteritems() is py2-only)
    for k, v in params.items():
        if isinstance(v, dict):
            # recurse so the nested dict becomes hashable
            s.add((k, hash_params(v)))
        elif isinstance(v, list):
            # lists are unhashable; convert each element and tuple-ize
            s.add((k, tuple(hash_params(item) for item in v)))
        else:
            # NOTE: add() keeps the (key, value) pair together as one set
            # member; the previous update() call wrongly added the key and
            # the value as two *separate* members, which made e.g.
            # {'a': 1, 'b': 2} and {'a': 2, 'b': 1} hash identically and
            # could return the wrong cached role
            s.add((k, v))
    return frozenset(s)

# The role cache is used to prevent re-loading roles, which
# may already exist. Keys into this cache are the SHA1 hash
# of the role definition (for dictionary definitions, this
# will be based on the repr() of the dictionary object)
ROLE_CACHE = dict()
+
class Role(Base, Become, Conditional, Taggable):
    '''
    Represents a role loaded from disk: its tasks, handlers, variables,
    defaults, metadata and dependencies. Instances are cached in
    ROLE_CACHE (keyed by role name plus a hash of the role params) so a
    given role/params combination is only loaded once per run.
    '''

    def __init__(self):
        self._role_name = None
        self._role_path = None
        self._role_params = dict()
        self._loader = None

        self._metadata = None
        self._play = None
        self._parents = []
        self._dependencies = []
        self._task_blocks = []
        self._handler_blocks = []
        self._default_vars = dict()
        self._role_vars = dict()
        # execution-state flags consumed by has_run()
        self._had_task_run = False
        self._completed = False

        super(Role, self).__init__()

    def __repr__(self):
        return self.get_name()

    def get_name(self):
        return self._role_name

    @staticmethod
    def load(role_include, parent_role=None):
        '''
        Returns the Role described by the given RoleInclude, reusing a
        cached instance when the same role/params combination has been
        loaded before. When parent_role is given, it is recorded as a
        parent of the (new or cached) role.
        '''
        try:
            # The ROLE_CACHE is a dictionary of role names, with each entry
            # containing another dictionary corresponding to a set of parameters
            # specified for a role as the key and the Role() object itself.
            # We use frozenset (via hash_params) to make the params hashable.
            hashed_params = hash_params(role_include.get_role_params())
            if role_include.role in ROLE_CACHE:
                # items() instead of the py2-only dict.iteritems()
                for (entry, role_obj) in ROLE_CACHE[role_include.role].items():
                    if hashed_params == entry:
                        if parent_role:
                            role_obj.add_parent(parent_role)
                        return role_obj

            r = Role()
            r._load_role_data(role_include, parent_role=parent_role)

            if role_include.role not in ROLE_CACHE:
                ROLE_CACHE[role_include.role] = dict()
            ROLE_CACHE[role_include.role][hashed_params] = r
            return r

        except RuntimeError:
            # a RuntimeError here almost certainly means the recursion limit
            # was hit while loading circular role dependencies
            # FIXME: needs a better way to access the ds in the role include
            raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles", obj=role_include._ds)

    def _load_role_data(self, role_include, parent_role=None):
        '''
        Populates this role from a RoleInclude: copies field attributes,
        registers any plugins shipped inside the role directory, and reads
        the meta/tasks/handlers/vars/defaults files.
        '''
        self._role_name = role_include.role
        self._role_path = role_include.get_role_path()
        self._role_params = role_include.get_role_params()
        self._variable_manager = role_include.get_variable_manager()
        self._loader = role_include.get_loader()

        if parent_role:
            self.add_parent(parent_role)

        # copy over all field attributes, except for when and tags, which
        # are special cases and need to preserve pre-existing values
        for (attr_name, _) in iteritems(self._get_base_attributes()):
            if attr_name not in ('when', 'tags'):
                setattr(self, attr_name, getattr(role_include, attr_name))

        # extend (not replace) the inherited when/tags with the include's
        current_when = getattr(self, 'when')[:]
        current_when.extend(role_include.when)
        setattr(self, 'when', current_when)

        current_tags = getattr(self, 'tags')[:]
        current_tags.extend(role_include.tags)
        setattr(self, 'tags', current_tags)

        # dynamically load any plugins from the role directory
        for name, obj in get_all_plugin_loaders():
            if obj.subdir:
                plugin_path = os.path.join(self._role_path, obj.subdir)
                if os.path.isdir(plugin_path):
                    obj.add_directory(plugin_path)

        # load the role's other files, if they exist
        metadata = self._load_role_yaml('meta')
        if metadata:
            self._metadata = RoleMetadata.load(metadata, owner=self, loader=self._loader)
            self._dependencies = self._load_dependencies()

        task_data = self._load_role_yaml('tasks')
        if task_data:
            self._task_blocks = load_list_of_blocks(task_data, play=None, role=self, loader=self._loader)

        handler_data = self._load_role_yaml('handlers')
        if handler_data:
            self._handler_blocks = load_list_of_blocks(handler_data, play=None, role=self, loader=self._loader)

        # vars and default vars are regular dictionaries; a missing file
        # (None) is normalized to an empty dict. Testing None explicitly
        # avoids the py2-only types.NoneType import.
        self._role_vars = self._load_role_yaml('vars')
        if self._role_vars is None:
            self._role_vars = dict()
        elif not isinstance(self._role_vars, dict):
            raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)

        self._default_vars = self._load_role_yaml('defaults')
        if self._default_vars is None:
            self._default_vars = dict()
        elif not isinstance(self._default_vars, dict):
            raise AnsibleParserError("The default/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)

    def _load_role_yaml(self, subdir):
        '''
        Loads <role_path>/<subdir>/main(.yml|.yaml|.json), returning None
        when the subdirectory or main file does not exist.
        '''
        file_path = os.path.join(self._role_path, subdir)
        if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
            main_file = self._resolve_main(file_path)
            if self._loader.path_exists(main_file):
                return self._loader.load_from_file(main_file)
        return None

    def _resolve_main(self, basepath):
        ''' flexibly handle variations in main filenames '''
        possible_mains = (
            os.path.join(basepath, 'main.yml'),
            os.path.join(basepath, 'main.yaml'),
            os.path.join(basepath, 'main.json'),
            os.path.join(basepath, 'main'),
        )

        if sum([self._loader.is_file(x) for x in possible_mains]) > 1:
            raise AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
        else:
            for m in possible_mains:
                if self._loader.is_file(m):
                    return m  # exactly one main file
            return possible_mains[0]  # zero mains (we still need to return something)

    def _load_dependencies(self):
        '''
        Recursively loads role dependencies from the metadata list of
        dependencies, if it exists
        '''
        deps = []
        if self._metadata:
            for role_include in self._metadata.dependencies:
                r = Role.load(role_include, parent_role=self)
                deps.append(r)
        return deps

    #------------------------------------------------------------------------------
    # other functions

    def add_parent(self, parent_role):
        ''' adds a role to the list of this roles parents '''
        # validate explicitly rather than with assert, which is a no-op
        # when python runs with -O
        if not isinstance(parent_role, Role):
            raise AnsibleError("parent role must be a Role object, got: %s" % type(parent_role))

        if parent_role not in self._parents:
            self._parents.append(parent_role)

    def get_parents(self):
        return self._parents

    def get_default_vars(self):
        '''
        Returns this role's default vars, with defaults from all
        dependencies merged in (lowest precedence first).
        '''
        default_vars = dict()
        for dep in self.get_all_dependencies():
            default_vars = combine_vars(default_vars, dep.get_default_vars())
        default_vars = combine_vars(default_vars, self._default_vars)
        return default_vars

    def get_inherited_vars(self):
        ''' Returns the vars/params inherited recursively from parent roles. '''
        inherited_vars = dict()
        for parent in self._parents:
            inherited_vars = combine_vars(inherited_vars, parent.get_inherited_vars())
            inherited_vars = combine_vars(inherited_vars, parent._role_vars)
            inherited_vars = combine_vars(inherited_vars, parent._role_params)
        return inherited_vars

    def get_vars(self):
        '''
        Returns the merged vars for this role: inherited vars, then
        dependency vars, then this role's own vars and params (highest
        precedence last).
        '''
        all_vars = self.get_inherited_vars()

        for dep in self.get_all_dependencies():
            all_vars = combine_vars(all_vars, dep.get_vars())

        all_vars = combine_vars(all_vars, self._role_vars)
        all_vars = combine_vars(all_vars, self._role_params)

        return all_vars

    def get_direct_dependencies(self):
        return self._dependencies[:]

    def get_all_dependencies(self):
        '''
        Returns a list of all deps, built recursively from all child dependencies,
        in the proper order in which they should be executed or evaluated.
        '''
        child_deps = []

        for dep in self.get_direct_dependencies():
            for child_dep in dep.get_all_dependencies():
                child_deps.append(child_dep)
            child_deps.append(dep)

        return child_deps

    def get_task_blocks(self):
        return self._task_blocks[:]

    def get_handler_blocks(self):
        return self._handler_blocks[:]

    def has_run(self):
        '''
        Returns true if this role has been iterated over completely and
        at least one task was run
        '''
        return self._had_task_run and self._completed

    def compile(self, play, dep_chain=None):
        '''
        Returns the task list for this role, which is created by first
        recursively compiling the tasks for all direct dependencies, and
        then adding on the tasks for this role.

        The role compile() also remembers and saves the dependency chain
        with each task, so tasks know by which route they were found, and
        can correctly take their parent's tags/conditionals into account.
        '''

        block_list = []

        # use a None default instead of a mutable ([]) default argument,
        # which would be shared across calls; then update the chain here
        if dep_chain is None:
            dep_chain = []
        new_dep_chain = dep_chain + [self]

        deps = self.get_direct_dependencies()
        for dep in deps:
            dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
            for dep_block in dep_blocks:
                new_dep_block = dep_block.copy()
                new_dep_block._dep_chain = new_dep_chain
                new_dep_block._play = play
                block_list.append(new_dep_block)

        block_list.extend(self._task_blocks)

        return block_list

    def serialize(self, include_deps=True):
        '''
        Returns a plain-dict representation of this role; dependencies are
        serialized recursively unless include_deps is False (parents are
        always serialized without their own deps to avoid cycles).
        '''
        res = super(Role, self).serialize()

        res['_role_name'] = self._role_name
        res['_role_path'] = self._role_path
        res['_role_vars'] = self._role_vars
        res['_role_params'] = self._role_params
        res['_default_vars'] = self._default_vars
        res['_had_task_run'] = self._had_task_run
        res['_completed'] = self._completed

        if self._metadata:
            res['_metadata'] = self._metadata.serialize()

        if include_deps:
            deps = []
            for role in self.get_direct_dependencies():
                deps.append(role.serialize())
            res['_dependencies'] = deps

        parents = []
        for parent in self._parents:
            parents.append(parent.serialize(include_deps=False))
        res['_parents'] = parents

        return res

    def deserialize(self, data, include_deps=True):
        '''
        Restores role state from a dict produced by serialize(). The
        loader is NOT restored here; callers must use set_loader().
        '''
        self._role_name = data.get('_role_name', '')
        self._role_path = data.get('_role_path', '')
        self._role_vars = data.get('_role_vars', dict())
        self._role_params = data.get('_role_params', dict())
        self._default_vars = data.get('_default_vars', dict())
        self._had_task_run = data.get('_had_task_run', False)
        self._completed = data.get('_completed', False)

        if include_deps:
            deps = []
            for dep in data.get('_dependencies', []):
                r = Role()
                r.deserialize(dep)
                deps.append(r)
            setattr(self, '_dependencies', deps)

        parent_data = data.get('_parents', [])
        parents = []
        for parent in parent_data:
            r = Role()
            r.deserialize(parent, include_deps=False)
            parents.append(r)
        setattr(self, '_parents', parents)

        metadata_data = data.get('_metadata')
        if metadata_data:
            m = RoleMetadata()
            m.deserialize(metadata_data)
            self._metadata = m

        super(Role, self).deserialize(data)

    def set_loader(self, loader):
        ''' Propagates the DataLoader to this role, its parents and deps. '''
        self._loader = loader
        for parent in self._parents:
            parent.set_loader(loader)
        for dep in self.get_direct_dependencies():
            dep.set_loader(loader)
+
diff --git a/lib/ansible/playbook/role/definition.py b/lib/ansible/playbook/role/definition.py
new file mode 100644
index 0000000000..0cb1e45760
--- /dev/null
+++ b/lib/ansible/playbook/role/definition.py
@@ -0,0 +1,175 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+import os
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.taggable import Taggable
+from ansible.utils.path import unfrackpath
+
+
+__all__ = ['RoleDefinition']
+
+
class RoleDefinition(Base, Become, Conditional, Taggable):
    '''
    Parses a role entry from a play (either a bare name/path string or a
    dict), resolving the role's on-disk path and separating extra role
    parameters from the known field attributes.
    '''

    _role = FieldAttribute(isa='string')

    def __init__(self, role_basedir=None):
        # filled in by preprocess_data() once the role path is resolved
        self._role_path = None
        # optional extra directory to search for dependent roles, relative
        # to the role that declared the dependency
        self._role_basedir = role_basedir
        self._role_params = dict()
        super(RoleDefinition, self).__init__()

    #def __repr__(self):
    #    return 'ROLEDEF: ' + self._attributes.get('role', '<no name set>')

    @staticmethod
    def load(data, variable_manager=None, loader=None):
        # subclasses (RoleInclude, RoleRequirement) provide real loaders
        raise AnsibleError("not implemented")

    def preprocess_data(self, ds):
        '''
        Normalizes the raw role definition into an AnsibleMapping holding
        only field attributes, stashing the resolved role path and any
        extra role params on this instance.
        '''

        # validate explicitly rather than with assert, which is stripped
        # when python runs with -O
        if not (isinstance(ds, dict) or isinstance(ds, string_types)):
            raise AnsibleError("role definitions must be strings or dictionaries, got: %s" % type(ds))

        if isinstance(ds, dict):
            ds = super(RoleDefinition, self).preprocess_data(ds)

        # we create a new data structure here, using the same
        # object used internally by the YAML parsing code so we
        # can preserve file:line:column information if it exists
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            new_ds.ansible_pos = ds.ansible_pos

        # first we pull the role name out of the data structure,
        # and then use that to determine the role path (which may
        # result in a new role name, if it was a file path)
        role_name = self._load_role_name(ds)
        (role_name, role_path) = self._load_role_path(role_name)

        # next, we split the role params out from the valid role
        # attributes and update the new datastructure with that
        # result and the role name
        if isinstance(ds, dict):
            (new_role_def, role_params) = self._split_role_params(ds)
            new_ds.update(new_role_def)
            self._role_params = role_params

        # set the role name in the new ds
        new_ds['role'] = role_name

        # we store the role path internally
        self._role_path = role_path

        # save the original ds for use later
        self._ds = ds

        # and return the cleaned-up data structure
        return new_ds

    def _load_role_name(self, ds):
        '''
        Returns the role name (either the role: or name: field) from
        the role definition, or (when the role definition is a simple
        string), just that string
        '''

        if isinstance(ds, string_types):
            return ds

        role_name = ds.get('role', ds.get('name'))
        if not role_name:
            raise AnsibleError('role definitions must contain a role name', obj=ds)

        return role_name

    def _load_role_path(self, role_name):
        '''
        the 'role', as specified in the ds (or as a bare string), can either
        be a simple name or a full path. If it is a full path, we use the
        basename as the role name, otherwise we take the name as-given and
        append it to the default role path
        '''

        role_path = unfrackpath(role_name)

        if self._loader.path_exists(role_path):
            role_name = os.path.basename(role_name)
            return (role_name, role_path)
        else:
            # we always start the search for roles in the base directory of the playbook
            role_search_paths = [os.path.join(self._loader.get_basedir(), 'roles'), './roles', './']

            # also search in the configured roles path
            if C.DEFAULT_ROLES_PATH:
                configured_paths = C.DEFAULT_ROLES_PATH.split(os.pathsep)
                role_search_paths.extend(configured_paths)

            # finally, append the roles basedir, if it was set, so we can
            # search relative to that directory for dependent roles
            if self._role_basedir:
                role_search_paths.append(self._role_basedir)

            # now iterate through the possible paths and return the first one we find
            for path in role_search_paths:
                role_path = unfrackpath(os.path.join(path, role_name))
                if self._loader.path_exists(role_path):
                    return (role_name, role_path)

        # FIXME: make the parser smart about list/string entries in
        #        the yaml so the error line/file can be reported here

        raise AnsibleError("the role '%s' was not found" % role_name)

    def _split_role_params(self, ds):
        '''
        Splits any random role params off from the role spec and store
        them in a dictionary of params for parsing later
        '''

        role_def = dict()
        role_params = dict()

        # compute the set of valid field-attribute names once, instead of
        # rebuilding the list on every loop iteration; use six's
        # iteritems() helper (already imported) rather than the py2-only
        # dict.iteritems() method
        base_attribute_names = frozenset(attr_name for (attr_name, attr_value) in iteritems(self._get_base_attributes()))

        for (key, value) in iteritems(ds):
            if key not in base_attribute_names:
                # this key does not match a field attribute, so it must be a role param
                role_params[key] = value
            else:
                # this is a field attribute, so copy it over directly
                role_def[key] = value

        return (role_def, role_params)

    def get_role_params(self):
        # return a copy so callers cannot mutate our internal state
        return self._role_params.copy()

    def get_role_path(self):
        return self._role_path
diff --git a/lib/ansible/playbook/role/include.py b/lib/ansible/playbook/role/include.py
new file mode 100644
index 0000000000..b063aecc35
--- /dev/null
+++ b/lib/ansible/playbook/role/include.py
@@ -0,0 +1,49 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.role.definition import RoleDefinition
+
+
+__all__ = ['RoleInclude']
+
+
class RoleInclude(RoleDefinition):

    """
    A RoleDefinition subclass representing a role as included from within
    a play (or as a dependency in role metadata), optionally searching
    for the role relative to the including role's directory.
    """

    def __init__(self, role_basedir=None):
        super(RoleInclude, self).__init__(role_basedir=role_basedir)

    @staticmethod
    def load(data, current_role_path=None, parent_role=None, variable_manager=None, loader=None):
        '''
        Builds a RoleInclude from a role name/path string or a dict and
        returns the object populated via load_data().
        '''

        # validate explicitly rather than with assert, which is stripped
        # when python runs with -O
        if not (isinstance(data, string_types) or isinstance(data, dict)):
            raise AnsibleParserError("Invalid role definition: %s" % data)

        ri = RoleInclude(role_basedir=current_role_path)
        return ri.load_data(data, variable_manager=variable_manager, loader=loader)
+
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
new file mode 100644
index 0000000000..461a9a4a62
--- /dev/null
+++ b/lib/ansible/playbook/role/metadata.py
@@ -0,0 +1,91 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from six import iteritems, string_types
+
+from ansible.errors import AnsibleParserError
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.helpers import load_list_of_roles
+from ansible.playbook.role.include import RoleInclude
+
+
+__all__ = ['RoleMetadata']
+
+
class RoleMetadata(Base):
    '''
    This class wraps the parsing and validation of the optional metadata
    within each Role (meta/main.yml).
    '''

    _allow_duplicates = FieldAttribute(isa='bool', default=False)
    _dependencies = FieldAttribute(isa='list', default=[])
    _galaxy_info = FieldAttribute(isa='GalaxyInfo')

    def __init__(self, owner=None):
        # the Role which owns this metadata; used to resolve dependency
        # paths relative to the owning role's directory
        self._owner = owner
        super(RoleMetadata, self).__init__()

    @staticmethod
    def load(data, owner, variable_manager=None, loader=None):
        '''
        Returns a new RoleMetadata object based on the datastructure passed in.
        '''

        if not isinstance(data, dict):
            raise AnsibleParserError("the 'meta/main.yml' for role %s is not a dictionary" % owner.get_name())

        m = RoleMetadata(owner=owner).load_data(data, variable_manager=variable_manager, loader=loader)
        return m

    def _load_dependencies(self, attr, ds):
        '''
        This is a helper loading function for the dependencies list,
        which returns a list of RoleInclude objects
        '''

        current_role_path = None
        if self._owner:
            current_role_path = os.path.dirname(self._owner._role_path)

        return load_list_of_roles(ds, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)

    def _load_galaxy_info(self, attr, ds):
        '''
        This is a helper loading function for the galaxy info entry
        in the metadata, which returns a GalaxyInfo object rather than
        a simple dictionary.
        '''
        # FIXME: currently returns the raw datastructure unchanged
        return ds

    def serialize(self):
        # read the attribute *values* via the non-underscore accessors
        # (matching deserialize() below); self._allow_duplicates and
        # self._dependencies resolve to the class-level FieldAttribute
        # descriptor objects, not the loaded values
        return dict(
            allow_duplicates=self.allow_duplicates,
            dependencies=self.dependencies,
        )

    def deserialize(self, data):
        setattr(self, 'allow_duplicates', data.get('allow_duplicates', False))
        setattr(self, 'dependencies', data.get('dependencies', []))
diff --git a/lib/ansible/playbook/role/requirement.py b/lib/ansible/playbook/role/requirement.py
new file mode 100644
index 0000000000..03ffc3d710
--- /dev/null
+++ b/lib/ansible/playbook/role/requirement.py
@@ -0,0 +1,166 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+import os
+
+from ansible.errors import AnsibleError, AnsibleParserError
+from ansible.playbook.role.definition import RoleDefinition
+
+__all__ = ['RoleRequirement']
+
+
class RoleRequirement(RoleDefinition):

    """
    Parses role requirement specs as used by ansible-galaxy: either an
    old-style {role: "src,version,name"} dict, a new-style dict with
    src/scm/version/name keys, or a bare role name string.
    """

    def __init__(self):
        # initialize the base RoleDefinition state (_role_path,
        # _role_params, etc.) that _split_role_params() relies on
        super(RoleRequirement, self).__init__()

    def _get_valid_spec_keys(self):
        return (
            'name',
            'role',
            'scm',
            'src',
            'version',
        )

    def parse(self, ds):
        '''
        Parses a requirement spec (dict or string) and returns a tuple of
        (new_ds, role_name, role_params).
        '''

        # validate explicitly rather than with assert, which is stripped
        # when python runs with -O
        if not (isinstance(ds, dict) or isinstance(ds, string_types)):
            raise AnsibleParserError("Invalid role requirement: %s" % ds)

        role_name = ''
        role_params = dict()
        new_ds = dict()

        if isinstance(ds, string_types):
            role_name = ds
        else:
            ds = self._preprocess_role_spec(ds)
            (new_ds, role_params) = self._split_role_params(ds)

            # 'role_name' is not a field attribute, so _split_role_params()
            # routes it into role_params rather than new_ds; new-style specs
            # carry the name in the 'role' attribute instead. (The previous
            # unconditional "del ds['role_name']" raised KeyError for
            # new-style specs, which never create that key.)
            role_name = role_params.pop('role_name', new_ds.get('role', ''))
            ds.pop('role_name', None)

        return (new_ds, role_name, role_params)

    def _preprocess_role_spec(self, ds):
        '''
        Normalizes old- and new-style role specs into a dict with
        src/scm/version/role keys. Mutates and returns ds.
        '''
        if 'role' in ds:
            # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
            role_info = self._role_spec_parse(ds['role'])
            if isinstance(role_info, dict):
                # Warning: Slight change in behaviour here. name may be being
                # overloaded. Previously, name was only a parameter to the role.
                # Now it is both a parameter to the role and the name that
                # ansible-galaxy will install under on the local system.
                if 'name' in ds and 'name' in role_info:
                    del role_info['name']
                ds.update(role_info)
        else:
            # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
            # NOTE(review): assumes 'src' is present; a dict with neither
            # 'role' nor 'src' raises KeyError here -- confirm with callers
            if 'github.com' in ds["src"] and 'http' in ds["src"] and '+' not in ds["src"] and not ds["src"].endswith('.tar.gz'):
                ds["src"] = "git+" + ds["src"]

            if '+' in ds["src"]:
                (scm, src) = ds["src"].split('+')
                ds["scm"] = scm
                ds["src"] = src

            if 'name' in ds:
                ds["role"] = ds["name"]
                del ds["name"]
            else:
                ds["role"] = self._repo_url_to_role_name(ds["src"])

        # set some values to a default value, if none were specified
        ds.setdefault('version', '')
        ds.setdefault('scm', None)

        return ds

    def _repo_url_to_role_name(self, repo_url):
        '''
        Extracts the role name from a repo URL, e.g.
        "http://git.example.com/repos/repo.git" -> "repo".
        '''

        # urls without a scheme or user@host are assumed to already be
        # bare role names
        if '://' not in repo_url and '@' not in repo_url:
            return repo_url
        trailing_path = repo_url.split('/')[-1]
        if trailing_path.endswith('.git'):
            trailing_path = trailing_path[:-4]
        if trailing_path.endswith('.tar.gz'):
            trailing_path = trailing_path[:-7]
        if ',' in trailing_path:
            trailing_path = trailing_path.split(',')[0]
        return trailing_path

    def _role_spec_parse(self, role_spec):
        '''
        Takes a repo+version spec like:
            git+http://git.example.com/repos/repo.git,v1.0
        and returns a dict of properties such as:
            {
               'scm': 'git',
               'src': 'http://git.example.com/repos/repo.git',
               'version': 'v1.0',
               'role_name': 'repo'
            }
        '''

        # fall back to the scm's default branch when no version is given
        default_role_versions = dict(git='master', hg='tip')

        role_spec = role_spec.strip()
        role_version = ''
        if role_spec == "" or role_spec.startswith("#"):
            # NOTE(review): inconsistent return type (tuple vs dict);
            # callers only isinstance-check for dict, so this acts as a
            # "not a valid spec" marker -- confirm before changing
            return (None, None, None, None)

        tokens = [s.strip() for s in role_spec.split(',')]

        # assume https://github.com URLs are git+https:// URLs and not
        # tarballs unless they end in '.zip'
        if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
            tokens[0] = 'git+' + tokens[0]

        if '+' in tokens[0]:
            (scm, role_url) = tokens[0].split('+')
        else:
            scm = None
            role_url = tokens[0]

        if len(tokens) >= 2:
            role_version = tokens[1]

        if len(tokens) == 3:
            role_name = tokens[2]
        else:
            role_name = self._repo_url_to_role_name(tokens[0])

        if scm and not role_version:
            role_version = default_role_versions.get(scm, '')

        return dict(scm=scm, src=role_url, version=role_version, role_name=role_name)
+
+
diff --git a/lib/ansible/playbook/taggable.py b/lib/ansible/playbook/taggable.py
new file mode 100644
index 0000000000..40e05d1817
--- /dev/null
+++ b/lib/ansible/playbook/taggable.py
@@ -0,0 +1,95 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
from six import string_types

from ansible.errors import AnsibleError
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
+
class Taggable:
    '''
    Mixin providing the 'tags' attribute plus tag loading/evaluation
    for playbook objects (plays, tasks, roles, blocks).
    '''

    # sentinel tag set applied to anything with no explicit tags
    untagged = set(['untagged'])
    _tags = FieldAttribute(isa='list', default=[])

    def __init__(self):
        super(Taggable, self).__init__()

    def _load_tags(self, attr, ds):
        '''
        Helper loader: accepts tags as a list, or a single string which is
        normalized into a one-element list.
        '''
        if isinstance(ds, list):
            return ds
        elif isinstance(ds, string_types):
            # string_types (from six, matching the rest of this package)
            # covers both str and unicode on py2 and plain str on py3;
            # basestring is py2-only
            return [ ds ]
        else:
            raise AnsibleError('tags must be specified as a list', obj=ds)

    def _get_attr_tags(self):
        '''
        Override for the 'tags' getattr fetcher, used from Base.
        '''
        tags = self._attributes['tags']
        if tags is None:
            tags = []
        if hasattr(self, '_get_parent_attribute'):
            tags = self._get_parent_attribute('tags', extend=True)
        return tags

    def evaluate_tags(self, only_tags, skip_tags, all_vars):
        ''' this checks if the current item should be executed depending on tag options '''

        should_run = True

        if self.tags:
            # tags may contain template expressions, so render them first
            templar = Templar(loader=self._loader, variables=all_vars)
            tags = templar.template(self.tags)

            # normalize into a set: a comma-separated string becomes
            # multiple tags, a plain string becomes a single-entry set
            if not isinstance(tags, list):
                if tags.find(',') != -1:
                    tags = set(tags.split(','))
                else:
                    tags = set([tags])
            else:
                tags = set(tags)
        else:
            # this makes intersection work for untagged
            tags = self.__class__.untagged

        if only_tags:

            should_run = False

            if 'always' in tags or 'all' in only_tags:
                should_run = True
            elif tags.intersection(only_tags):
                should_run = True
            elif 'tagged' in only_tags and tags != self.__class__.untagged:
                should_run = True

        if should_run and skip_tags:

            # Check for tags that we need to skip
            if 'all' in skip_tags:
                # 'always'-tagged items survive --skip-tags all unless
                # 'always' itself is explicitly skipped
                if 'always' not in tags or 'always' in skip_tags:
                    should_run = False
            elif tags.intersection(skip_tags):
                should_run = False
            elif 'tagged' in skip_tags and tags != self.__class__.untagged:
                should_run = False

        return should_run
diff --git a/lib/ansible/playbook/task.py b/lib/ansible/playbook/task.py
index 70c1bc8df6..0606025798 100644
--- a/lib/ansible/playbook/task.py
+++ b/lib/ansible/playbook/task.py
@@ -15,332 +15,296 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from ansible import errors
-from ansible import utils
-from ansible.module_utils.splitter import split_args
-import os
-import ansible.utils.template as template
-import sys
-
-class Task(object):
-
- _t_common = [
- 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
- 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
- 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
- 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
- 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
- ]
-
- __slots__ = [
- 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
- 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
- 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
- ] + _t_common
-
- # to prevent typos and such
- VALID_KEYS = frozenset([
- 'async', 'connection', 'include', 'poll',
- ] + _t_common)
-
- def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
- ''' constructor loads from a task or handler datastructure '''
-
- # meta directives are used to tell things like ansible/playbook to run
- # operations like handler execution. Meta tasks are not executed
- # normally.
- if 'meta' in ds:
- self.meta = ds['meta']
- self.tags = []
- self.module_vars = module_vars
- self.role_name = role_name
- return
- else:
- self.meta = None
-
-
- library = os.path.join(play.basedir, 'library')
- if os.path.exists(library):
- utils.plugins.module_finder.add_directory(library)
-
- for x in ds.keys():
-
- # code to allow for saying "modulename: args" versus "action: modulename args"
- if x in utils.plugins.module_finder:
-
- if 'action' in ds:
- raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
- if isinstance(ds[x], dict):
- if 'args' in ds:
- raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
- ds['args'] = ds[x]
- ds[x] = ''
- elif ds[x] is None:
- ds[x] = ''
- if not isinstance(ds[x], basestring):
- raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
- ds['action'] = x + " " + ds[x]
- ds.pop(x)
-
- # code to allow "with_glob" and to reference a lookup plugin named glob
- elif x.startswith("with_"):
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
-
- plugin_name = x.replace("with_","")
- if plugin_name in utils.plugins.lookup_loader:
- ds['items_lookup_plugin'] = plugin_name
- ds['items_lookup_terms'] = ds[x]
- ds.pop(x)
- else:
- raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
-
- elif x in [ 'changed_when', 'failed_when', 'when']:
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
- elif x.startswith("when_"):
- utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
-
- if 'when' in ds:
- raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
- when_name = x.replace("when_","")
- ds['when'] = "%s %s" % (when_name, ds[x])
- ds.pop(x)
- elif not x in Task.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
-
- self.module_vars = module_vars
- self.play_vars = play_vars
- self.play_file_vars = play_file_vars
- self.role_vars = role_vars
- self.role_params = role_params
- self.default_vars = default_vars
- self.play = play
-
- # load various attributes
- self.name = ds.get('name', None)
- self.tags = [ 'untagged' ]
- self.register = ds.get('register', None)
- self.environment = ds.get('environment', play.environment)
- self.role_name = role_name
- self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
- self.run_once = utils.boolean(ds.get('run_once', 'false'))
-
- #Code to allow do until feature in a Task
- if 'until' in ds:
- if not ds.get('register'):
- raise errors.AnsibleError("register keyword is mandatory when using do until feature")
- self.module_vars['delay'] = ds.get('delay', 5)
- self.module_vars['retries'] = ds.get('retries', 3)
- self.module_vars['register'] = ds.get('register', None)
- self.until = ds.get('until')
- self.module_vars['until'] = self.until
-
- # rather than simple key=value args on the options line, these represent structured data and the values
- # can be hashes and lists, not just scalars
- self.args = ds.get('args', {})
-
- # get remote_user for task, then play, then playbook
- if ds.get('remote_user') is not None:
- self.remote_user = ds.get('remote_user')
- elif ds.get('remote_user', play.remote_user) is not None:
- self.remote_user = ds.get('remote_user', play.remote_user)
- else:
- self.remote_user = ds.get('remote_user', play.playbook.remote_user)
-
- # Fail out if user specifies privilege escalation params in conflict
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- self.become = utils.boolean(ds.get('become', play.become))
- self.become_method = ds.get('become_method', play.become_method)
- self.become_user = ds.get('become_user', play.become_user)
- self.become_pass = ds.get('become_pass', play.playbook.become_pass)
-
- # set only if passed in current task data
- if 'sudo' in ds or 'sudo_user' in ds:
- self.become_method='sudo'
-
- if 'sudo' in ds:
- self.become=ds['sudo']
- del ds['sudo']
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+
+from ansible.parsing.mod_args import ModuleArgsParser
+from ansible.parsing.splitter import parse_kv
+from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
+
+from ansible.plugins import module_loader, lookup_loader
+
+from ansible.playbook.attribute import Attribute, FieldAttribute
+from ansible.playbook.base import Base
+from ansible.playbook.become import Become
+from ansible.playbook.block import Block
+from ansible.playbook.conditional import Conditional
+from ansible.playbook.role import Role
+from ansible.playbook.taggable import Taggable
+
+__all__ = ['Task']
+
+class Task(Base, Conditional, Taggable, Become):
+
+ """
+ A task is a language feature that represents a call to a module, with given arguments and other parameters.
+ A handler is a subclass of a task.
+
+ Usage:
+
+ Task.load(datastructure) -> Task
+ Task.something(...)
+ """
+
+ # =================================================================================
+ # ATTRIBUTES
+ # load_<attribute_name> and
+ # validate_<attribute_name>
+ # will be used if defined
+ # might be possible to define others
+
+ _args = FieldAttribute(isa='dict', default=dict())
+ _action = FieldAttribute(isa='string')
+
+ _always_run = FieldAttribute(isa='bool')
+ _any_errors_fatal = FieldAttribute(isa='bool')
+ _async = FieldAttribute(isa='int', default=0)
+ _changed_when = FieldAttribute(isa='string')
+ _delay = FieldAttribute(isa='int', default=5)
+ _delegate_to = FieldAttribute(isa='string')
+ _failed_when = FieldAttribute(isa='string')
+ _first_available_file = FieldAttribute(isa='list')
+ _ignore_errors = FieldAttribute(isa='bool')
+
+ _loop = FieldAttribute(isa='string', private=True)
+ _loop_args = FieldAttribute(isa='list', private=True)
+ _local_action = FieldAttribute(isa='string')
+
+ # FIXME: this should not be a Task
+ _meta = FieldAttribute(isa='string')
+
+ _name = FieldAttribute(isa='string', default='')
+
+ _notify = FieldAttribute(isa='list')
+ _poll = FieldAttribute(isa='int')
+ _register = FieldAttribute(isa='string')
+ _retries = FieldAttribute(isa='int', default=1)
+ _run_once = FieldAttribute(isa='bool')
+ _until = FieldAttribute(isa='list') # ?
+
+ def __init__(self, block=None, role=None, task_include=None):
+        ''' constructs a task; without the Task.load classmethod, it will be pretty blank '''
+
+ self._block = block
+ self._role = role
+ self._task_include = task_include
+
+ super(Task, self).__init__()
+
+ def get_name(self):
+ ''' return the name of the task '''
+
+ if self._role and self.name:
+ return "%s : %s" % (self._role.get_name(), self.name)
+ elif self.name:
+ return self.name
+ else:
+ flattened_args = self._merge_kv(self.args)
+ if self._role:
+ return "%s : %s %s" % (self._role.get_name(), self.action, flattened_args)
+ else:
+ return "%s %s" % (self.action, flattened_args)
+
+ def _merge_kv(self, ds):
+ if ds is None:
+ return ""
+ elif isinstance(ds, basestring):
+ return ds
+ elif isinstance(ds, dict):
+ buf = ""
+ for (k,v) in ds.iteritems():
+ if k.startswith('_'):
+ continue
+ buf = buf + "%s=%s " % (k,v)
+ buf = buf.strip()
+ return buf
+
+ @staticmethod
+ def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
+ t = Task(block=block, role=role, task_include=task_include)
+ return t.load_data(data, variable_manager=variable_manager, loader=loader)
+
+ def __repr__(self):
+ ''' returns a human readable representation of the task '''
+ return "TASK: %s" % self.get_name()
+
+ def _preprocess_loop(self, ds, new_ds, k, v):
+ ''' take a lookup plugin name and store it correctly '''
+
+ loop_name = k.replace("with_", "")
+ if new_ds.get('loop') is not None:
+ raise AnsibleError("duplicate loop in task: %s" % loop_name)
+ new_ds['loop'] = loop_name
+ new_ds['loop_args'] = v
+
+ def preprocess_data(self, ds):
+ '''
+ tasks are especially complex arguments so need pre-processing.
+ keep it short.
+ '''
+
+ assert isinstance(ds, dict)
+
+ # the new, cleaned datastructure, which will have legacy
+ # items reduced to a standard structure suitable for the
+ # attributes of the task class
+ new_ds = AnsibleMapping()
+ if isinstance(ds, AnsibleBaseYAMLObject):
+ new_ds.ansible_pos = ds.ansible_pos
+
+ # use the args parsing class to determine the action, args,
+ # and the delegate_to value from the various possible forms
+ # supported as legacy
+ args_parser = ModuleArgsParser(task_ds=ds)
+ (action, args, delegate_to) = args_parser.parse()
+
+ new_ds['action'] = action
+ new_ds['args'] = args
+ new_ds['delegate_to'] = delegate_to
+
+ for (k,v) in ds.iteritems():
+ if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
+ # we don't want to re-assign these values, which were
+ # determined by the ModuleArgsParser() above
+ continue
+ elif k.replace("with_", "") in lookup_loader:
+ self._preprocess_loop(ds, new_ds, k, v)
else:
- self.become=True
- if 'sudo_user' in ds:
- self.become_user = ds['sudo_user']
- del ds['sudo_user']
- if 'sudo_pass' in ds:
- self.become_pass = ds['sudo_pass']
- del ds['sudo_pass']
-
- elif 'su' in ds or 'su_user' in ds:
- self.become_method='su'
-
- if 'su' in ds:
- self.become=ds['su']
+ new_ds[k] = v
+
+ return super(Task, self).preprocess_data(new_ds)
+
+ def post_validate(self, templar):
+ '''
+ Override of base class post_validate, to also do final validation on
+ the block and task include (if any) to which this task belongs.
+ '''
+
+ if self._block:
+ self._block.post_validate(templar)
+ if self._task_include:
+ self._task_include.post_validate(templar)
+
+ super(Task, self).post_validate(templar)
+
+ def get_vars(self):
+ all_vars = self.vars.copy()
+ if self._block:
+ all_vars.update(self._block.get_vars())
+ if self._task_include:
+ all_vars.update(self._task_include.get_vars())
+
+ all_vars.update(self.serialize())
+
+ if 'tags' in all_vars:
+ del all_vars['tags']
+ if 'when' in all_vars:
+ del all_vars['when']
+ return all_vars
+
+ def copy(self, exclude_block=False):
+ new_me = super(Task, self).copy()
+
+ new_me._block = None
+ if self._block and not exclude_block:
+ new_me._block = self._block.copy()
+
+ new_me._role = None
+ if self._role:
+ new_me._role = self._role
+
+ new_me._task_include = None
+ if self._task_include:
+ new_me._task_include = self._task_include.copy()
+
+ return new_me
+
+ def serialize(self):
+ data = super(Task, self).serialize()
+
+ if self._block:
+ data['block'] = self._block.serialize()
+
+ if self._role:
+ data['role'] = self._role.serialize()
+
+ if self._task_include:
+ data['task_include'] = self._task_include.serialize()
+
+ return data
+
+ def deserialize(self, data):
+
+ # import is here to avoid import loops
+ #from ansible.playbook.task_include import TaskInclude
+
+ block_data = data.get('block')
+
+ if block_data:
+ b = Block()
+ b.deserialize(block_data)
+ self._block = b
+ del data['block']
+
+ role_data = data.get('role')
+ if role_data:
+ r = Role()
+ r.deserialize(role_data)
+ self._role = r
+ del data['role']
+
+ ti_data = data.get('task_include')
+ if ti_data:
+ #ti = TaskInclude()
+ ti = Task()
+ ti.deserialize(ti_data)
+ self._task_include = ti
+ del data['task_include']
+
+ super(Task, self).deserialize(data)
+
+ def evaluate_conditional(self, all_vars):
+ if self._block is not None:
+ if not self._block.evaluate_conditional(all_vars):
+ return False
+ if self._task_include is not None:
+ if not self._task_include.evaluate_conditional(all_vars):
+ return False
+ return super(Task, self).evaluate_conditional(all_vars)
+
+ def set_loader(self, loader):
+ '''
+ Sets the loader on this object and recursively on parent, child objects.
+ This is used primarily after the Task has been serialized/deserialized, which
+ does not preserve the loader.
+ '''
+
+ self._loader = loader
+
+ if self._block:
+ self._block.set_loader(loader)
+ if self._task_include:
+ self._task_include.set_loader(loader)
+
+ def _get_parent_attribute(self, attr, extend=False):
+ '''
+ Generic logic to get the attribute or parent attribute for a task value.
+ '''
+ value = self._attributes[attr]
+ if self._block and (not value or extend):
+ parent_value = getattr(self._block, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- self.become=True
- del ds['su']
- if 'su_user' in ds:
- self.become_user = ds['su_user']
- del ds['su_user']
- if 'su_pass' in ds:
- self.become_pass = ds['su_pass']
- del ds['su_pass']
-
- # Both are defined
- if ('action' in ds) and ('local_action' in ds):
- raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
- # Both are NOT defined
- elif (not 'action' in ds) and (not 'local_action' in ds):
- raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
- # Only one of them is defined
- elif 'local_action' in ds:
- self.action = ds.get('local_action', '')
- self.delegate_to = '127.0.0.1'
- else:
- self.action = ds.get('action', '')
- self.delegate_to = ds.get('delegate_to', None)
- self.transport = ds.get('connection', ds.get('transport', play.transport))
-
- if isinstance(self.action, dict):
- if 'module' not in self.action:
- raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
- if self.args:
- raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
- self.args = self.action
- self.action = self.args.pop('module')
-
- # delegate_to can use variables
- if not (self.delegate_to is None):
- # delegate_to: localhost should use local transport
- if self.delegate_to in ['127.0.0.1', 'localhost']:
- self.transport = 'local'
-
- # notified by is used by Playbook code to flag which hosts
- # need to run a notifier
- self.notified_by = []
-
- # if no name is specified, use the action line as the name
- if self.name is None:
- self.name = self.action
-
- # load various attributes
- self.when = ds.get('when', None)
- self.changed_when = ds.get('changed_when', None)
- self.failed_when = ds.get('failed_when', None)
-
- # combine the default and module vars here for use in templating
- all_vars = self.default_vars.copy()
- all_vars = utils.combine_vars(all_vars, self.play_vars)
- all_vars = utils.combine_vars(all_vars, self.play_file_vars)
- all_vars = utils.combine_vars(all_vars, self.role_vars)
- all_vars = utils.combine_vars(all_vars, self.module_vars)
- all_vars = utils.combine_vars(all_vars, self.role_params)
-
- self.async_seconds = ds.get('async', 0) # not async by default
- self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
- self.async_seconds = int(self.async_seconds)
- self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
- self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
- self.async_poll_interval = int(self.async_poll_interval)
- self.notify = ds.get('notify', [])
- self.first_available_file = ds.get('first_available_file', None)
-
- self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
- self.items_lookup_terms = ds.get('items_lookup_terms', None)
-
-
- self.ignore_errors = ds.get('ignore_errors', False)
- self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
-
- self.always_run = ds.get('always_run', False)
-
- # action should be a string
- if not isinstance(self.action, basestring):
- raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
-
- # notify can be a string or a list, store as a list
- if isinstance(self.notify, basestring):
- self.notify = [ self.notify ]
-
- # split the action line into a module name + arguments
- try:
- tokens = split_args(self.action)
- except Exception, e:
- if "unbalanced" in str(e):
- raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
- "Make sure quotes are matched or escaped properly")
+ value = parent_value
+ if self._task_include and (not value or extend):
+ parent_value = getattr(self._task_include, attr)
+ if extend:
+ value = self._extend_value(value, parent_value)
else:
- raise
- if len(tokens) < 1:
- raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
- self.module_name = tokens[0]
- self.module_args = ''
- if len(tokens) > 1:
- self.module_args = " ".join(tokens[1:])
-
- import_tags = self.module_vars.get('tags',[])
- if type(import_tags) in [int,float]:
- import_tags = str(import_tags)
- elif type(import_tags) in [str,unicode]:
- # allow the user to list comma delimited tags
- import_tags = import_tags.split(",")
-
- # handle mutually incompatible options
- incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
- if len(incompatibles) > 1:
- raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
-
- # make first_available_file accessible to Runner code
- if self.first_available_file:
- self.module_vars['first_available_file'] = self.first_available_file
- # make sure that the 'item' variable is set when using
- # first_available_file (issue #8220)
- if 'item' not in self.module_vars:
- self.module_vars['item'] = ''
-
- if self.items_lookup_plugin is not None:
- self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
- self.module_vars['items_lookup_terms'] = self.items_lookup_terms
-
- # allow runner to see delegate_to option
- self.module_vars['delegate_to'] = self.delegate_to
-
- # make some task attributes accessible to Runner code
- self.module_vars['ignore_errors'] = self.ignore_errors
- self.module_vars['register'] = self.register
- self.module_vars['changed_when'] = self.changed_when
- self.module_vars['failed_when'] = self.failed_when
- self.module_vars['always_run'] = self.always_run
-
- # tags allow certain parts of a playbook to be run without running the whole playbook
- apply_tags = ds.get('tags', None)
- if apply_tags is not None:
- if type(apply_tags) in [ str, unicode ]:
- self.tags.append(apply_tags)
- elif type(apply_tags) in [ int, float ]:
- self.tags.append(str(apply_tags))
- elif type(apply_tags) == list:
- self.tags.extend(apply_tags)
- self.tags.extend(import_tags)
-
- if len(self.tags) > 1:
- self.tags.remove('untagged')
-
- if additional_conditions:
- new_conditions = additional_conditions[:]
- if self.when:
- new_conditions.append(self.when)
- self.when = new_conditions
+ value = parent_value
+ return value
+
diff --git a/lib/ansible/playbook/vars.py b/lib/ansible/playbook/vars.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/playbook/vars.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/playbook/vars_file.py b/lib/ansible/playbook/vars_file.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/playbook/vars_file.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/utils/plugins.py b/lib/ansible/plugins/__init__.py
index 14953d8f44..5791677bd2 100644
--- a/lib/ansible/utils/plugins.py
+++ b/lib/ansible/plugins/__init__.py
@@ -1,4 +1,5 @@
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> and others
#
# This file is part of Ansible
#
@@ -15,12 +16,19 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import glob
+import imp
+import inspect
import os
import os.path
import sys
-import glob
-import imp
+
from ansible import constants as C
+from ansible.utils.display import Display
from ansible import errors
MODULE_CACHE = {}
@@ -34,7 +42,10 @@ def push_basedir(basedir):
if basedir not in _basedirs:
_basedirs.insert(0, basedir)
-class PluginLoader(object):
+def get_all_plugin_loaders():
+ return [(name, obj) for (name, obj) in inspect.getmembers(sys.modules[__name__]) if isinstance(obj, PluginLoader)]
+
+class PluginLoader:
'''
PluginLoader loads plugins from the configured plugin directories.
@@ -108,7 +119,6 @@ class PluginLoader(object):
for basedir in _basedirs:
fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
if os.path.isdir(fullpath):
-
files = glob.glob("%s/*" % fullpath)
# allow directories to be two levels deep
@@ -173,7 +183,11 @@ class PluginLoader(object):
found = None
for path in [p for p in self._get_paths() if p not in self._searched_paths]:
if os.path.isdir(path):
- full_paths = (os.path.join(path, f) for f in os.listdir(path))
+ try:
+ full_paths = (os.path.join(path, f) for f in os.listdir(path))
+ except OSError as e:
+                Display().warning("Error accessing plugin paths: %s" % str(e))
+                continue
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):
@@ -214,6 +228,9 @@ class PluginLoader(object):
path = self.find_plugin(name)
if path is None:
return None
+        elif kwargs.get('class_only', False):
+            self._module_cache.setdefault(path, imp.load_source('.'.join([self.package, name]), path))
+            return getattr(self._module_cache[path], self.class_name)
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
return getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
@@ -230,32 +247,38 @@ class PluginLoader(object):
continue
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+ if kwargs.get('class_only', False):
+ obj = getattr(self._module_cache[path], self.class_name)
+ else:
+ obj = getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
+ # set extra info on the module, in case we want it later
+ setattr(obj, '_original_path', path)
+ yield obj
action_loader = PluginLoader(
'ActionModule',
- 'ansible.runner.action_plugins',
+ 'ansible.plugins.action',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins'
)
cache_loader = PluginLoader(
'CacheModule',
- 'ansible.cache',
+ 'ansible.plugins.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins'
)
callback_loader = PluginLoader(
'CallbackModule',
- 'ansible.callback_plugins',
+ 'ansible.plugins.callback',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins'
)
connection_loader = PluginLoader(
'Connection',
- 'ansible.runner.connection_plugins',
+ 'ansible.plugins.connections',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'}
@@ -263,12 +286,12 @@ connection_loader = PluginLoader(
shell_loader = PluginLoader(
'ShellModule',
- 'ansible.runner.shell_plugins',
+ 'ansible.plugins.shell',
'shell_plugins',
'shell_plugins',
)
-module_finder = PluginLoader(
+module_loader = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
@@ -277,21 +300,21 @@ module_finder = PluginLoader(
lookup_loader = PluginLoader(
'LookupModule',
- 'ansible.runner.lookup_plugins',
+ 'ansible.plugins.lookup',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins'
)
vars_loader = PluginLoader(
'VarsModule',
- 'ansible.inventory.vars_plugins',
+ 'ansible.plugins.vars',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins'
)
filter_loader = PluginLoader(
'FilterModule',
- 'ansible.runner.filter_plugins',
+ 'ansible.plugins.filter',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins'
)
@@ -302,3 +325,10 @@ fragment_loader = PluginLoader(
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
'',
)
+
+strategy_loader = PluginLoader(
+ 'StrategyModule',
+ 'ansible.plugins.strategies',
+ None,
+ 'strategy_plugins',
+)
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
new file mode 100644
index 0000000000..62036cc706
--- /dev/null
+++ b/lib/ansible/plugins/action/__init__.py
@@ -0,0 +1,471 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six.moves import StringIO
+import json
+import os
+import random
+import sys # FIXME: probably not needed
+import tempfile
+import time
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.executor.module_common import modify_module
+from ansible.parsing.utils.jsonify import jsonify
+from ansible.plugins import shell_loader
+
+from ansible.utils.debug import debug
+
class ActionBase:

    '''
    This class is the base class for all action plugins, and defines
    code common to all actions. The base class handles the connection
    by putting/getting files and executing commands based on the current
    action in use.
    '''

    def __init__(self, task, connection, connection_info, loader, shared_loader_obj):
        # all collaborators are project objects supplied by the executor;
        # they are stored here and used only by the helper methods below
        self._task = task
        self._connection = connection
        self._connection_info = connection_info
        self._loader = loader
        self._shared_loader_obj = shared_loader_obj
        self._shell = self.get_shell()

        # subclasses that cannot honor check mode set this to False;
        # consulted in _execute_module()
        self._supports_check_mode = True

    def get_shell(self):
        '''
        Return the shell plugin for this action: the connection's own shell
        if it provides one, otherwise the plugin matching the basename of
        the configured default executable, falling back to plain 'sh'.
        '''

        if hasattr(self._connection, '_shell'):
            shell_plugin = getattr(self._connection, '_shell', '')
        else:
            shell_plugin = shell_loader.get(os.path.basename(C.DEFAULT_EXECUTABLE))
            if shell_plugin is None:
                shell_plugin = shell_loader.get('sh')

        return shell_plugin

    def _configure_module(self, module_name, module_args):
        '''
        Handles the loading and templating of the module code through the
        modify_module() function. Returns (style, shebang, data).
        '''

        # Search module path(s) for named module.
        module_suffixes = getattr(self._connection, 'default_suffixes', None)
        module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, module_suffixes)
        if module_path is None:
            # 'ping' acts as a canary: if even it cannot be found, the core
            # module tree itself is missing and deserves a different message
            module_path2 = self._shared_loader_obj.module_loader.find_plugin('ping', module_suffixes)
            if module_path2 is not None:
                raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
            else:
                raise AnsibleError("The module %s was not found in configured module paths. " \
                    "Additionally, core modules are missing. If this is a checkout, " \
                    "run 'git submodule update --init --recursive' to correct this problem." % (module_name))

        # insert shared code and arguments into the module
        (module_data, module_style, module_shebang) = modify_module(module_path, module_args)

        return (module_style, module_shebang, module_data)

    def _compute_environment_string(self):
        '''
        Builds the environment string to be used when executing the remote task.
        Currently always produces an empty environment prefix (see FIXME).
        '''

        enviro = {}

        # FIXME: not sure where this comes from, probably task but maybe also the play?
        #if self.environment:
        #    enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
        #    enviro = utils.safe_eval(enviro)
        #    if type(enviro) != dict:
        #        raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)

        return self._shell.env_prefix(**enviro)

    def _early_needs_tmp_path(self):
        '''
        Determines if a temp path should be created before the action is executed.
        '''

        # FIXME: modified from original, needs testing? Since this is now inside
        #        the action plugin, it should make it just this simple
        return getattr(self, 'TRANSFERS_FILES', False)

    def _late_needs_tmp_path(self, tmp, module_style):
        '''
        Determines if a temp path is required after some early actions have already taken place.
        '''
        if tmp and "tmp" in tmp:
            # tmp has already been created
            return False
        if not self._connection.__class__.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._connection_info.become:
            # tmp is necessary to store the module source code, or we want
            # to keep the files on the target system.
            # NOTE: this single test already covers "not has_pipelining"; a
            # second, identical check that followed here in the original
            # was unreachable and has been removed.
            return True
        if module_style != "new":
            # even when conn has pipelining, old style modules need tmp to store arguments
            return True
        return False

    # FIXME: return a datastructure in this function instead of raising errors -
    #        the new executor pipeline handles it much better that way
    def _make_tmp_path(self):
        '''
        Create and return a temporary path on a remote box.
        '''

        basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
        use_system_tmp = False

        if self._connection_info.become and self._connection_info.become_user != 'root':
            # the become user may not be able to reach the connecting
            # user's home directory, so use the system-wide temp location
            use_system_tmp = True

        tmp_mode = None
        if self._connection_info.remote_user != 'root' or self._connection_info.become and self._connection_info.become_user != 'root':
            # NOTE(review): 'or' binds looser than 'and' here, so the mode is
            # opened up whenever the remote user is not root OR we become a
            # non-root user -- confirm that precedence is intended
            tmp_mode = 'a+rx'

        cmd = self._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
        debug("executing _low_level_execute_command to create the tmp path")
        result = self._low_level_execute_command(cmd, None, sudoable=False)
        debug("done with creation of tmp path")

        # error handling on this seems a little aggressive?
        if result['rc'] != 0:
            if result['rc'] == 5:
                output = 'Authentication failure.'
            elif result['rc'] == 255 and self._connection.transport in ('ssh',):
                # FIXME: more utils.VERBOSITY
                #if utils.VERBOSITY > 3:
                #    output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
                #else:
                #    output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
                output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
            elif 'No space left on device' in result['stderr']:
                output = result['stderr']
            else:
                output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
            if 'stdout' in result and result['stdout'] != '':
                output = output + ": %s" % result['stdout']
            raise AnsibleError(output)

        # FIXME: do we still need to do this?
        #rc = self._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
        # take the last line of output (ssh/motd noise may precede it) as the
        # created directory, with a trailing separator appended by join_path
        rc = self._shell.join_path(result['stdout'].strip(), '').splitlines()[-1]

        # Catch failure conditions, files should never be
        # written to locations in /.
        if rc == '/':
            raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))

        return rc

    def _remove_tmp_path(self, tmp_path):
        '''Remove a temporary path we created. '''

        # only remove paths that look like ones we made, as a safety net
        if tmp_path and "-tmp-" in tmp_path:
            cmd = self._shell.remove(tmp_path, recurse=True)
            # If we have gotten here we have a working ssh configuration.
            # If ssh breaks we could leave tmp directories out on the remote system.
            debug("calling _low_level_execute_command to remove the tmp path")
            self._low_level_execute_command(cmd, None, sudoable=False)
            debug("done removing the tmp path")

    def _transfer_data(self, remote_path, data):
        '''
        Copies the module data out to the temporary module path.
        Dict data is serialized to JSON first. Returns remote_path.
        '''

        if type(data) == dict:
            data = jsonify(data)

        afd, afile = tempfile.mkstemp()
        afo = os.fdopen(afd, 'w')
        try:
            # FIXME: is this still necessary?
            #if not isinstance(data, unicode):
            #    #ensure the data is valid UTF-8
            #    data = data.decode('utf-8')
            #else:
            #    data = data.encode('utf-8')
            afo.write(data)
            afo.flush()
        except Exception as e:
            # don't leak the descriptor or the local temp file if the
            # write fails (the original left both behind here)
            afo.close()
            os.unlink(afile)
            raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))

        afo.close()

        try:
            self._connection.put_file(afile, remote_path)
        finally:
            os.unlink(afile)

        return remote_path

    def _remote_chmod(self, tmp, mode, path, sudoable=False):
        '''
        Issue a remote chmod command. Note the argument order: (tmp, mode, path).
        '''

        cmd = self._shell.chmod(mode, path)
        debug("calling _low_level_execute_command to chmod the remote path")
        res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
        debug("done with chmod call")
        return res

    def _remote_checksum(self, tmp, path):
        '''
        Return the checksum of a remote file. Per the original note the
        remote helper yields '1' when the file is missing (TODO confirm);
        "INVALIDCHECKSUM" is returned when the output cannot be parsed.
        '''

        # FIXME: figure out how this will work, probably pulled from the
        #        variable manager data
        #python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
        python_interp = 'python'
        cmd = self._shell.checksum(path, python_interp)
        debug("calling _low_level_execute_command to get the remote checksum")
        data = self._low_level_execute_command(cmd, tmp, sudoable=True)
        debug("done getting the remote checksum")
        # FIXME: implement this function?
        #data2 = utils.last_non_blank_line(data['stdout'])
        try:
            data2 = data['stdout'].strip().splitlines()[-1]
            if data2 == '':
                # this may happen if the connection to the remote server
                # failed, so just return "INVALIDCHECKSUM" to avoid errors
                return "INVALIDCHECKSUM"
            else:
                return data2.split()[0]
        except IndexError:
            # FIXME: this should probably not print to sys.stderr, but should instead
            #        fail in a more normal way?
            sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
            sys.stderr.write("command: %s\n" % cmd)
            sys.stderr.write("----\n")
            sys.stderr.write("output: %s\n" % data)
            sys.stderr.write("----\n")
            # this will signal that it changed and allow things to keep going
            return "INVALIDCHECKSUM"

    def _remote_expand_user(self, path, tmp):
        ''' takes a remote path and performs tilde expansion on the remote host '''
        if not path.startswith('~'):
            return path

        split_path = path.split(os.path.sep, 1)
        expand_path = split_path[0]
        if expand_path == '~':
            # a bare '~' expands as the become user when one is in effect
            if self._connection_info.become and self._connection_info.become_user:
                expand_path = '~%s' % self._connection_info.become_user

        cmd = self._shell.expand_user(expand_path)
        debug("calling _low_level_execute_command to expand the remote user path")
        data = self._low_level_execute_command(cmd, tmp, sudoable=False)
        debug("done expanding the remote user path")
        #initial_fragment = utils.last_non_blank_line(data['stdout'])
        initial_fragment = data['stdout'].strip().splitlines()[-1]

        if not initial_fragment:
            # Something went wrong trying to expand the path remotely. Return
            # the original string
            return path

        if len(split_path) > 1:
            return self._shell.join_path(initial_fragment, *split_path[1:])
        else:
            return initial_fragment

    def _filter_leading_non_json_lines(self, data):
        '''
        Used to avoid random output from SSH at the top of JSON output, like messages from
        tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

        Filter only leading lines since multiline JSON is valid. (The code
        currently only recognizes '{' and '[' as the start of JSON.)
        '''

        filtered_lines = StringIO()
        stop_filtering = False
        for line in data.splitlines():
            if stop_filtering or line.startswith('{') or line.startswith('['):
                stop_filtering = True
                filtered_lines.write(line + '\n')
        return filtered_lines.getvalue()

    def _execute_module(self, module_name=None, module_args=None, tmp=None, persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.
        Returns the parsed module result as a dict (with failed/parsed
        markers when the output was not valid JSON).
        '''

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # work on a shallow copy so the internal flags injected below do
        # not pollute the caller's dict (self._task.args is shared state
        # and was mutated in place by the original code)
        module_args = module_args.copy()

        # set check mode in the module arguments, if required
        if self._connection_info.check_mode and not self._task.always_run:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True

        # set no log in the module arguments, if required
        if self._connection_info.no_log:
            module_args['_ansible_no_log'] = True

        debug("in _execute_module (%s, %s)" % (module_name, module_args))

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args)
        if not shebang:
            raise AnsibleError("module is missing interpreter line")

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path()
        if tmp:
            # set the module path whenever a tmp dir exists, whether we just
            # made it or the caller passed one in (the original only set it
            # in the former case, so the chmod below could run on None)
            remote_module_path = self._shell.join_path(tmp, module_name)

        # FIXME: async stuff here?
        #if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
        if remote_module_path:
            debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and "tmp" in tmp and self._connection_info.become and self._connection_info.become_user != 'root':
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod(tmp, 'a+r', remote_module_path)

        cmd = ""
        in_data = None

        # FIXME: all of the old-module style and async stuff has been removed from here, and
        #        might need to be re-added (unless we decide to drop support for old-style modules
        #        at this point and rework things to support non-python modules specifically)
        if self._connection.__class__.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
            # feed the module source over stdin instead of executing a file
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._connection_info.become or self._connection_info.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        debug("calling _low_level_execute_command() for command %s" % cmd)
        res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
        debug("_low_level_execute_command returned ok")

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._connection_info.become and self._connection_info.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = self._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, tmp, sudoable=False)

        try:
            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', '')))
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, parsed=False)
            if 'stderr' in res and res['stderr'].startswith('Traceback'):
                data['traceback'] = res['stderr']
            else:
                data['msg'] = res.get('stdout', '')
                if 'stderr' in res:
                    data['msg'] += res['stderr']

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', '').splitlines()

        # store the module invocation details back into the result
        data['invocation'] = dict(
            module_args = module_args,
            module_name = module_name,
        )

        debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data

    def _low_level_execute_command(self, cmd, tmp, executable=None, sudoable=True, in_data=None):
        '''
        This is the function which executes the low level shell command, which
        may be commands to create/remove directories for temporary files, or to
        run the module code or python directly when pipelining.
        '''

        debug("in _low_level_execute_command() (%s)" % (cmd,))
        if not cmd:
            # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
            debug("no command, exiting _low_level_execute_command()")
            return dict(stdout='', stderr='')

        if executable is None:
            executable = C.DEFAULT_EXECUTABLE

        prompt = None
        success_key = None

        if sudoable:
            cmd, prompt, success_key = self._connection_info.make_become_cmd(cmd, executable)

        debug("executing the command %s through the connection" % cmd)
        rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, executable=executable, in_data=in_data)
        debug("command execution done")

        # connections may hand back either strings or file-like objects;
        # drain the latter. (Testing for readlines instead of the py2-only
        # basestring keeps this working under the py3-style __future__
        # imports this file already declares.)
        if hasattr(stdout, 'readlines'):
            out = ''.join(stdout.readlines())
        else:
            out = stdout

        if hasattr(stderr, 'readlines'):
            err = ''.join(stderr.readlines())
        else:
            err = stderr

        debug("done with _low_level_execute_command() (%s)" % (cmd,))
        if rc is not None:
            return dict(rc=rc, stdout=out, stderr=err)
        else:
            return dict(stdout=out, stderr=err)
diff --git a/lib/ansible/plugins/action/add_host.py b/lib/ansible/plugins/action/add_host.py
new file mode 100644
index 0000000000..e28361b714
--- /dev/null
+++ b/lib/ansible/plugins/action/add_host.py
@@ -0,0 +1,62 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    ''' Create inventory hosts and groups in the memory inventory'''

    ### We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        '''
        Build the add_host result consumed by the executor: the new host's
        name, its group list, and any extra host variables. No remote
        work is performed.
        '''

        # FIXME: is this necessary in v2?
        #if self.runner.noop_on_check(inject):
        #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))

        # Parse out any hostname:port patterns
        new_name = self._task.args.get('name', self._task.args.get('hostname', None))
        if new_name is None:
            # previously this fell through to the ":" test below and
            # crashed with a TypeError on None
            return dict(failed=True, msg="'name' (or 'hostname') is a required argument")

        # NOTE: a bare ":" split mangles IPv6 addresses; fine for the
        # common host:port case but should eventually use a real parser
        if ":" in new_name:
            new_name, new_port = new_name.split(":")
            self._task.args['ansible_ssh_port'] = new_port

        groups = self._task.args.get('groupname', self._task.args.get('groups', self._task.args.get('group', '')))
        # add it to the group if that was specified; strip BEFORE the
        # duplicate check (the original checked the un-stripped name, so
        # "a, a" produced two identical entries), and drop empty segments
        new_groups = []
        if groups:
            for group_name in groups.split(","):
                group_name = group_name.strip()
                if group_name and group_name not in new_groups:
                    new_groups.append(group_name)

        # Add any variables to the new_host ('group' was missing from this
        # exclusion list even though it is consumed for groups above, so it
        # used to leak into host_vars)
        host_vars = dict()
        for k in self._task.args.keys():
            if k not in [ 'name', 'hostname', 'groupname', 'groups', 'group' ]:
                host_vars[k] = self._task.args[k]

        return dict(changed=True, add_host=dict(host_name=new_name, groups=new_groups, host_vars=host_vars))
diff --git a/lib/ansible/plugins/action/assemble.py b/lib/ansible/plugins/action/assemble.py
new file mode 100644
index 0000000000..4e796bddb6
--- /dev/null
+++ b/lib/ansible/plugins/action/assemble.py
@@ -0,0 +1,156 @@
+# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# Stephen Fromm <sfromm@gmail.com>
+# Brian Coca <briancoca+dev@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import pipes
+import shutil
+import tempfile
+import base64
+import re
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+from ansible.utils.hashing import checksum_s
+
class ActionModule(ActionBase):

    TRANSFERS_FILES = True

    def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
        '''
        Assemble a file from the sorted fragments in src_path, optionally
        filtering fragment names through compiled_regexp and writing
        delimiter between fragments. Returns the path of a local temp file
        holding the result; the caller is responsible for removing it.
        '''

        tmpfd, temp_path = tempfile.mkstemp()
        tmp = os.fdopen(tmpfd, 'w')
        delimit_me = False
        add_newline = False

        # un-escape things like "\n" once, up front (the original re-decoded
        # the already-decoded delimiter on every fragment)
        if delimiter:
            delimiter = delimiter.decode('unicode-escape')

        for f in sorted(os.listdir(src_path)):
            if compiled_regexp and not compiled_regexp.search(f):
                continue
            fragment = "%s/%s" % (src_path, f)
            if not os.path.isfile(fragment):
                continue
            # open() rather than the python2-only file() builtin, and close
            # the handle deterministically
            with open(fragment) as fragment_fd:
                fragment_content = fragment_fd.read()

            # always put a newline between fragments if the previous fragment didn't end with a newline.
            if add_newline:
                tmp.write('\n')

            # delimiters should only appear between fragments
            if delimit_me and delimiter:
                tmp.write(delimiter)
                # always make sure there's a newline after the
                # delimiter, so lines don't run together
                if delimiter[-1] != '\n':
                    tmp.write('\n')

            tmp.write(fragment_content)
            delimit_me = True
            add_newline = not fragment_content.endswith('\n')

        tmp.close()
        return temp_path

    def run(self, tmp=None, task_vars=None):
        '''
        Assemble the local fragments and, when the result differs from the
        remote dest, push it out via the copy module; otherwise just let
        the file module enforce attributes on dest.
        '''

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        delimiter = self._task.args.get('delimiter', None)
        remote_src = self._task.args.get('remote_src', 'yes')
        regexp = self._task.args.get('regexp', None)

        if src is None or dest is None:
            return dict(failed=True, msg="src and dest are required")

        if boolean(remote_src):
            # the fragments already live on the remote box; just run the module there
            return self._execute_module(tmp=tmp)
        elif self._task._role is not None:
            src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
        else:
            # the source is local, so expand it here
            src = self._loader.path_dwim(os.path.expanduser(src))

        _re = None
        if regexp is not None:
            _re = re.compile(regexp)

        # Does all work assembling the file
        path = self._assemble_from_fragments(src, delimiter, _re)

        try:
            path_checksum = checksum_s(path)
            dest = self._remote_expand_user(dest, tmp)
            remote_checksum = self._remote_checksum(tmp, dest)

            if path_checksum != remote_checksum:
                with open(path) as assembled_fd:
                    resultant = assembled_fd.read()
                # FIXME: diff needs to be moved somewhere else
                #if self.runner.diff:
                #    dest_result = self._execute_module(module_name='slurp', module_args=dict(path=dest), tmp=tmp, persist_files=True)
                #    if 'content' in dest_result:
                #        dest_contents = dest_result['content']
                #        if dest_result['encoding'] == 'base64':
                #            dest_contents = base64.b64decode(dest_contents)
                #        else:
                #            raise Exception("unknown encoding, failed: %s" % dest_result)

                # stage the assembled file inside the remote tmp dir (the
                # original passed the literal relative path 'src' here)
                if tmp is None or "-tmp-" not in tmp:
                    tmp = self._make_tmp_path()
                xfered = self._transfer_data(self._shell.join_path(tmp, 'src'), resultant)

                # fix file permissions when the copy is done as a different user
                if self._connection_info.become and self._connection_info.become_user != 'root':
                    # NOTE: _remote_chmod takes (tmp, mode, path); the
                    # original passed (mode, path, tmp)
                    self._remote_chmod(tmp, 'a+r', xfered)

                # run the copy module
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=xfered,
                        dest=dest,
                        original_basename=os.path.basename(src),
                    )
                )

                # FIXME: checkmode stuff
                #if self.runner.noop_on_check(inject):
                #    return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
                #else:
                #    res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
                #    res.diff = dict(after=resultant)
                #    return res
                res = self._execute_module(module_name='copy', module_args=new_module_args, tmp=tmp)
                #res.diff = dict(after=resultant)
                return res
            else:
                # contents already match; run the file module so attributes
                # are still enforced on dest. The original referenced
                # 'xfered' here, which is never assigned on this branch
                # (NameError), and the local fragment dir must not leak
                # through as 'src' either.
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        dest=dest,
                        original_basename=os.path.basename(src),
                    )
                )
                new_module_args.pop('src', None)

                return self._execute_module(module_name='file', module_args=new_module_args, tmp=tmp)
        finally:
            # remove the local scratch file (previously leaked)
            os.remove(path)
diff --git a/lib/ansible/plugins/action/assert.py b/lib/ansible/plugins/action/assert.py
new file mode 100644
index 0000000000..5c4fdd7b89
--- /dev/null
+++ b/lib/ansible/plugins/action/assert.py
@@ -0,0 +1,65 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.playbook.conditional import Conditional
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):
        '''
        Evaluate every conditional listed under 'that' and fail (with the
        optional custom 'msg') on the first one that is not true.
        '''

        args = self._task.args

        if 'that' not in args:
            raise AnsibleError('conditional required in "that" string')

        msg = args.get('msg', None)

        # normalize 'that' into a list of conditional expressions
        assertions = args['that']
        if not isinstance(assertions, list):
            assertions = [assertions]

        # Reuse the regular conditional machinery: each assertion is run
        # exactly as a task-level 'when' clause would be. The task's own
        # 'when' has already been consumed by this point, so overwriting
        # the Conditional's when list per iteration is harmless.
        cond = Conditional(loader=self._loader)
        for assertion in assertions:
            cond.when = [assertion]
            outcome = cond.evaluate_conditional(all_vars=task_vars)
            if outcome:
                continue

            failure = dict(
                failed = True,
                evaluated_to = outcome,
                assertion = assertion,
            )
            if msg:
                failure['msg'] = msg
            return failure

        return dict(changed=False, msg='all assertions passed')
diff --git a/lib/ansible/plugins/action/async.py b/lib/ansible/plugins/action/async.py
new file mode 100644
index 0000000000..7c02e09757
--- /dev/null
+++ b/lib/ansible/plugins/action/async.py
@@ -0,0 +1,70 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import random
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=dict()):
        ''' transfer the given module name, plus the async module, then run it '''

        # FIXME: noop stuff needs to be sorted out
        #if self.runner.noop_on_check(inject):
        #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))

        # async always needs a remote tmp dir: both the wrapper and the
        # target module are uploaded into it
        if not tmp:
            tmp = self._make_tmp_path()

        module_name = self._task.action
        async_module_path = self._shell.join_path(tmp, 'async_wrapper')
        remote_module_path = self._shell.join_path(tmp, module_name)

        # NOTE(review): env_string is computed but never applied to the
        # command built below -- the module runs without these settings
        env_string = self._compute_environment_string()

        # configure, upload, and chmod the target module
        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=self._task.args)
        self._transfer_data(remote_module_path, module_data)
        self._remote_chmod(tmp, 'a+rx', remote_module_path)

        # configure, upload, and chmod the async_wrapper module
        # (NOTE(review): this rebinds 'shebang' from the target module above)
        (async_module_style, shebang, async_module_data) = self._configure_module(module_name='async_wrapper', module_args=dict())
        self._transfer_data(async_module_path, async_module_data)
        self._remote_chmod(tmp, 'a+rx', async_module_path)

        # the wrapper reads the real module's arguments from this file
        argsfile = self._transfer_data(self._shell.join_path(tmp, 'arguments'), json.dumps(self._task.args))

        # NOTE(review): 'async' becomes a reserved word in python 3.7+, so
        # this task attribute will eventually need renaming
        async_limit = self._task.async
        async_jid = str(random.randint(0, 999999999999))

        # invocation shape: async_wrapper <jid> <time_limit> <module> <argsfile>
        async_cmd = " ".join([str(x) for x in [async_module_path, async_jid, async_limit, remote_module_path, argsfile]])
        result = self._low_level_execute_command(cmd=async_cmd, tmp=None)

        # clean up after
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
            self._remove_tmp_path(tmp)

        # launching an async job always counts as a change
        result['changed'] = True

        return result
diff --git a/lib/ansible/plugins/action/copy.py b/lib/ansible/plugins/action/copy.py
new file mode 100644
index 0000000000..6db130ad7f
--- /dev/null
+++ b/lib/ansible/plugins/action/copy.py
@@ -0,0 +1,349 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import pipes
+import stat
+import tempfile
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+from ansible.utils.hashing import checksum
+from ansible.utils.unicode import to_bytes
+from ansible.parsing.vault import VaultLib
+
class ActionModule(ActionBase):
    # Action plugin for the 'copy' module: resolves the local source (a file,
    # a directory tree, or inline 'content'), transfers data to the remote
    # host, and delegates final placement/attribute handling to the remote
    # 'copy'/'file' modules.

    def run(self, tmp=None, task_vars=dict()):
        ''' handler for file transfer operations '''
        # NOTE(review): task_vars=dict() is a shared mutable default; it is not
        # mutated here, but task_vars=None with a guard would be safer -- confirm.

        source = self._task.args.get('src', None)
        content = self._task.args.get('content', None)
        dest = self._task.args.get('dest', None)
        raw = boolean(self._task.args.get('raw', 'no'))
        force = boolean(self._task.args.get('force', 'yes'))

        # FIXME: first available file needs to be reworked somehow...
        #if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
        #    result=dict(failed=True, msg="src (or content) and dest are required")
        #    return ReturnData(conn=conn, result=result)
        #elif (source is not None or 'first_available_file' in inject) and content is not None:
        #    result=dict(failed=True, msg="src and content are mutually exclusive")
        #    return ReturnData(conn=conn, result=result)

        # Check if the source ends with a "/"
        source_trailing_slash = False
        if source:
            source_trailing_slash = source.endswith(os.sep)

        # Define content_tempfile in case we set it after finding content populated.
        content_tempfile = None

        # If content is defined make a temp file and write the content into it.
        if content is not None:
            try:
                # If content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string to write it out.
                if isinstance(content, dict):
                    content_tempfile = self._create_content_tempfile(json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                return dict(failed=True, msg="could not write content temp file: %s" % err)

        ###############################################################################################
        # FIXME: first_available_file needs to be reworked?
        ###############################################################################################
        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        #elif 'first_available_file' in inject:
        #    found = False
        #    for fn in inject.get('first_available_file'):
        #        fn_orig = fn
        #        fnt = template.template(self.runner.basedir, fn, inject)
        #        fnd = utils.path_dwim(self.runner.basedir, fnt)
        #        if not os.path.exists(fnd) and '_original_file' in inject:
        #            fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
        #        if os.path.exists(fnd):
        #            source = fnd
        #            found = True
        #            break
        #    if not found:
        #        results = dict(failed=True, msg="could not find src in first_available_file list")
        #        return ReturnData(conn=conn, result=results)
        ###############################################################################################
        else:
            # resolve 'src' relative to the role's files/ dir when inside a role
            if self._task._role is not None:
                source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
            else:
                source = self._loader.path_dwim(source)

        # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
        source_files = []

        # If source is a directory populate our list else source is a file and translate it to a tuple.
        if os.path.isdir(source):
            # Get the amount of spaces to remove to get the relative path.
            if source_trailing_slash:
                sz = len(source)
            else:
                sz = len(source.rsplit('/', 1)[0]) + 1

            # Walk the directory and append the file tuples to source_files.
            for base_path, sub_folders, files in os.walk(source):
                for file in files:
                    full_path = os.path.join(base_path, file)
                    rel_path = full_path[sz:]
                    source_files.append((full_path, rel_path))

            # If it's recursive copy, destination is always a dir,
            # explicitly mark it so (note - copy module relies on this).
            if not self._shell.path_has_trailing_slash(dest):
                dest = self._shell.join_path(dest, '')
        else:
            source_files.append((source, os.path.basename(source)))

        changed = False
        diffs = []
        module_result = {"changed": False}

        # A register for if we executed a module.
        # Used to cut down on command calls when not recursive.
        module_executed = False

        # Tell _execute_module to delete the file if there is one file.
        delete_remote_tmp = (len(source_files) == 1)

        # If this is a recursive action create a tmp path that we can share as the _exec_module create is too late.
        if not delete_remote_tmp:
            if tmp is None or "-tmp-" not in tmp:
                tmp = self._make_tmp_path()

        # expand any user home dir specifier
        dest = self._remote_expand_user(dest, tmp)

        for source_full, source_rel in source_files:

            # Generate a hash of the local file.
            local_checksum = checksum(source_full)

            # If local_checksum is not defined we can't find the file so we should fail out.
            if local_checksum is None:
                return dict(failed=True, msg="could not find src=%s" % source_full)

            # This is kind of optimization - if user told us destination is
            # dir, do path manipulation right away, otherwise we still check
            # for dest being a dir via remote call below.
            if self._shell.path_has_trailing_slash(dest):
                dest_file = self._shell.join_path(dest, source_rel)
            else:
                dest_file = self._shell.join_path(dest)

            # Attempt to get the remote checksum
            remote_checksum = self._remote_checksum(tmp, dest_file)

            # presumably '3' is the sentinel the checksum helper returns for a
            # directory (see the comment below) -- TODO confirm against _remote_checksum
            if remote_checksum == '3':
                # The remote_checksum was executed on a directory.
                if content is not None:
                    # If source was defined as content remove the temporary file and fail out.
                    self._remove_tempfile_if_content_defined(content, content_tempfile)
                    return dict(failed=True, msg="can not use content with a dir as dest")
                else:
                    # Append the relative source location to the destination and retry remote_checksum
                    dest_file = self._shell.join_path(dest, source_rel)
                    remote_checksum = self._remote_checksum(tmp, dest_file)

            if remote_checksum != '1' and not force:
                # remote_file does not exist so continue to next iteration.
                continue

            if local_checksum != remote_checksum:
                # The checksums don't match and we will change or error out.
                changed = True

                # Create a tmp path if missing only if this is not recursive.
                # If this is recursive we already have a tmp path.
                if delete_remote_tmp:
                    if tmp is None or "-tmp-" not in tmp:
                        tmp = self._make_tmp_path()

                # FIXME: runner shouldn't have the diff option there
                #if self.runner.diff and not raw:
                #    diff = self._get_diff_data(tmp, dest_file, source_full)
                #else:
                #    diff = {}
                diff = {}

                # FIXME: noop stuff
                #if self.runner.noop_on_check(inject):
                #    self._remove_tempfile_if_content_defined(content, content_tempfile)
                #    diffs.append(diff)
                #    changed = True
                #    module_result = dict(changed=True)
                #    continue

                # Define a remote directory that we will copy the file to.
                tmp_src = tmp + 'source'

                if not raw:
                    self._connection.put_file(source_full, tmp_src)
                else:
                    # raw mode transfers straight to the final destination
                    self._connection.put_file(source_full, dest_file)

                # We have copied the file remotely and no longer require our content_tempfile
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                # fix file permissions when the copy is done as a different user
                if self._connection_info.become and self._connection_info.become_user != 'root':
                    self._remote_chmod('a+r', tmp_src, tmp)

                if raw:
                    # Continue to next iteration if raw is defined.
                    continue

                # Run the copy module

                # src and dest here come after original and override them
                # we pass dest only to make sure it includes trailing slash in case of recursive copy
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=tmp_src,
                        dest=dest,
                        original_basename=source_rel,
                    )
                )

                module_return = self._execute_module(module_name='copy', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            else:
                # no need to transfer the file, already correct hash, but still need to call
                # the file module in case we want to change attributes
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                if raw:
                    # Continue to next iteration if raw is defined.
                    # self._remove_tmp_path(tmp)
                    continue

                # Build temporary module_args.
                new_module_args = self._task.args.copy()
                new_module_args.update(
                    dict(
                        src=source_rel,
                        dest=dest,
                        original_basename=source_rel
                    )
                )

                # Execute the file module.
                module_return = self._execute_module(module_name='file', module_args=new_module_args, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            if not module_return.get('checksum'):
                module_return['checksum'] = local_checksum
            if module_return.get('failed') == True:
                return module_return
            if module_return.get('changed') == True:
                changed = True

            # the file module returns the file path as 'path', but
            # the copy module uses 'dest', so add it if it's not there
            if 'path' in module_return and 'dest' not in module_return:
                module_return['dest'] = module_return['path']

        # Delete tmp path if we were recursive or if we did not execute a module.
        if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
            self._remove_tmp_path(tmp)

        # TODO: Support detailed status/diff for multiple files
        if module_executed and len(source_files) == 1:
            result = module_return
        else:
            result = dict(dest=dest, src=source, changed=changed)

        if len(diffs) == 1:
            result['diff']=diffs[0]

        return result

    def _create_content_tempfile(self, content):
        ''' Create a tempfile containing defined content '''
        fd, content_tempfile = tempfile.mkstemp()
        f = os.fdopen(fd, 'wb')
        content = to_bytes(content)
        try:
            f.write(content)
        except Exception as err:
            # don't leave a half-written temp file behind on failure
            os.remove(content_tempfile)
            raise Exception(err)
        finally:
            f.close()
        return content_tempfile

    def _get_diff_data(self, tmp, destination, source):
        '''
        Build a before/after diff dict comparing the remote *destination*
        with the local *source*; returns {} when the remote state cannot
        be peeked (failed result or nonzero rc).
        '''
        peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), persist_files=True)
        if 'failed' in peek_result and peek_result['failed'] or peek_result.get('rc', 0) != 0:
            return {}

        diff = {}
        if peek_result['state'] == 'absent':
            diff['before'] = ''
        elif peek_result['appears_binary']:
            diff['dst_binary'] = 1
        # FIXME: this should not be in utils..
        #elif peek_result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
        #    diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
        else:
            # slurp the remote file content to build the 'before' side
            dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), tmp=tmp, persist_files=True)
            if 'content' in dest_result:
                dest_contents = dest_result['content']
                if dest_result['encoding'] == 'base64':
                    dest_contents = base64.b64decode(dest_contents)
                else:
                    raise Exception("unknown encoding, failed: %s" % dest_result)
                diff['before_header'] = destination
                diff['before'] = dest_contents

        # read the first 8k of the local file to detect binary data
        src = open(source)
        src_contents = src.read(8192)
        st = os.stat(source)
        if "\x00" in src_contents:
            diff['src_binary'] = 1
        # FIXME: this should not be in utils
        #elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
        #    diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
        else:
            src.seek(0)
            diff['after_header'] = source
            diff['after'] = src.read()

        return diff

    def _remove_tempfile_if_content_defined(self, content, content_tempfile):
        # the tempfile only exists when inline 'content' was supplied
        if content is not None:
            os.remove(content_tempfile)
+
diff --git a/lib/ansible/plugins/action/debug.py b/lib/ansible/plugins/action/debug.py
new file mode 100644
index 0000000000..04db3c9cc1
--- /dev/null
+++ b/lib/ansible/plugins/action/debug.py
@@ -0,0 +1,48 @@
+# Copyright 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+from ansible.template import Templar
+
class ActionModule(ActionBase):
    ''' Print statements during execution '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        '''
        Return a message (optionally as a failure when 'fail' is truthy),
        dump a templated variable, or emit a default placeholder message.
        '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        if 'msg' in self._task.args:
            # 'fail' turns the same message into a failed result
            if 'fail' in self._task.args and boolean(self._task.args['fail']):
                result = dict(failed=True, msg=self._task.args['msg'])
            else:
                result = dict(msg=self._task.args['msg'])
        # FIXME: move the LOOKUP_REGEX somewhere else
        elif 'var' in self._task.args: # and not utils.LOOKUP_REGEX.search(self._task.args['var']):
            # template the named variable and echo it back keyed by its name
            templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=task_vars)
            results = templar.template(self._task.args['var'], convert_bare=True)
            result = dict()
            result[self._task.args['var']] = results
        else:
            result = dict(msg='here we are')

        # force flag to make debug output module always verbose
        result['verbose_always'] = True

        return result
diff --git a/lib/ansible/plugins/action/fail.py b/lib/ansible/plugins/action/fail.py
new file mode 100644
index 0000000000..b7845c95c5
--- /dev/null
+++ b/lib/ansible/plugins/action/fail.py
@@ -0,0 +1,35 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2012, Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    ''' Fail with custom message '''

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        ''' Return a failed result, using the task's 'msg' arg when supplied. '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        msg = 'Failed as requested from task'
        if self._task.args and 'msg' in self._task.args:
            msg = self._task.args.get('msg')

        return dict(failed=True, msg=msg)
+
diff --git a/lib/ansible/runner/action_plugins/fetch.py b/lib/ansible/plugins/action/fetch.py
index 27d2f6b3c6..c242c8739d 100644
--- a/lib/ansible/runner/action_plugins/fetch.py
+++ b/lib/ansible/plugins/action/fetch.py
@@ -14,6 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import pwd
@@ -22,95 +24,81 @@ import traceback
import tempfile
import base64
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
+from ansible import constants as C
+from ansible.errors import *
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
-class ActionModule(object):
+class ActionModule(ActionBase):
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
+ def run(self, tmp=None, task_vars=dict()):
''' handler for fetch operations '''
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- flat = options.get('flat', False)
- flat = utils.boolean(flat)
- fail_on_missing = options.get('fail_on_missing', False)
- fail_on_missing = utils.boolean(fail_on_missing)
- validate_checksum = options.get('validate_checksum', None)
- if validate_checksum is not None:
- validate_checksum = utils.boolean(validate_checksum)
- # Alias for validate_checksum (old way of specifying it)
- validate_md5 = options.get('validate_md5', None)
- if validate_md5 is not None:
- validate_md5 = utils.boolean(validate_md5)
- if validate_md5 is None and validate_checksum is None:
- # Default
- validate_checksum = True
- elif validate_checksum is None:
- validate_checksum = validate_md5
- elif validate_md5 is not None and validate_checksum is not None:
- results = dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
- return ReturnData(conn, result=results)
+ # FIXME: is this even required anymore?
+ #if self.runner.noop_on_check(inject):
+ # return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
+
+ source = self._task.args.get('src', None)
+ dest = self._task.args.get('dest', None)
+ flat = boolean(self._task.args.get('flat'))
+ fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
+ validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
+
+ if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
+ return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
if source is None or dest is None:
- results = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, result=results)
+ return dict(failed=True, msg="src and dest are required")
- source = conn.shell.join_path(source)
- source = self.runner._remote_expand_user(conn, source, tmp)
+ source = self._shell.join_path(source)
+ source = self._remote_expand_user(source, tmp)
# calculate checksum for the remote file
- remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject)
+ remote_checksum = self._remote_checksum(tmp, source)
# use slurp if sudo and permissions are lacking
remote_data = None
- if remote_checksum in ('1', '2') or self.runner.become:
- slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
- if slurpres.is_successful():
- if slurpres.result['encoding'] == 'base64':
- remote_data = base64.b64decode(slurpres.result['content'])
+ if remote_checksum in ('1', '2') or self._connection_info.become:
+ slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), tmp=tmp)
+ if slurpres.get('rc') == 0:
+ if slurpres['encoding'] == 'base64':
+ remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
- remote_checksum = utils.checksum_s(remote_data)
+ remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
- remote_source = slurpres.result.get('source')
+ remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
+ else:
+ # FIXME: should raise an error here? the old code did nothing
+ pass
# calculate the destination name
- if os.path.sep not in conn.shell.join_path('a', ''):
+ if os.path.sep not in self._shell.join_path('a', ''):
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
- if dest.endswith("/"):
+ if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
- dest = utils.path_dwim(self.runner.basedir, dest)
+ dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
- dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), inject['inventory_hostname'], source_local)
+ if 'inventory_hostname' in task_vars:
+ target_name = task_vars['inventory_hostname']
+ else:
+ target_name = self._connection_info.remote_addr
+ dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
@@ -130,10 +118,10 @@ class ActionModule(object):
result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
elif remote_checksum == '4':
result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
- return ReturnData(conn=conn, result=result)
+ return result
# calculate checksum for the local file
- local_checksum = utils.checksum(dest)
+ local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
@@ -142,32 +130,29 @@ class ActionModule(object):
# fetch the file and check for changes
if remote_data is None:
- conn.fetch_file(source, dest)
+ self._connection.fetch_file(source, dest)
else:
f = open(dest, 'w')
f.write(remote_data)
f.close()
- new_checksum = utils.secure_hash(dest)
+ new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
- new_md5 = utils.md5(dest)
+ new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
- result = dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
- result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
+ return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
+ return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
else:
# For backwards compatibility. We'll return None on FIPS enabled
# systems
try:
- local_md5 = utils.md5(dest)
+ local_md5 = md5(dest)
except ValueError:
local_md5 = None
- result = dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
- return ReturnData(conn=conn, result=result)
+ return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
diff --git a/lib/ansible/plugins/action/group_by.py b/lib/ansible/plugins/action/group_by.py
new file mode 100644
index 0000000000..95db33aa43
--- /dev/null
+++ b/lib/ansible/plugins/action/group_by.py
@@ -0,0 +1,39 @@
+# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import *
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    ''' Create inventory groups based on variables '''

    ### We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        ''' Validate the 'key' arg and hand the sanitized group name back. '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        # idiomatic membership test ('x not in y' instead of 'not x in y')
        if 'key' not in self._task.args:
            return dict(failed=True, msg="the 'key' param is required when using group_by")

        # group names cannot contain spaces, so replace them with dashes
        group_name = self._task.args.get('key')
        group_name = group_name.replace(' ','-')

        # the executor performs the actual group addition from this result
        return dict(changed=True, add_group=group_name)
+
diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
new file mode 100644
index 0000000000..8a7a74d870
--- /dev/null
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -0,0 +1,50 @@
+# (c) 2013-2014, Benno Joy <benno@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from types import NoneType
+
+from ansible.errors import AnsibleError
+from ansible.parsing import DataLoader
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        '''
        Load a vars file (resolved relative to the role's 'vars' directory
        when running inside a role) and return its contents as ansible_facts.
        '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('_raw_params')

        # resolve the path relative to the role, if any, else the play dir
        if self._task._role:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'vars', source)
        else:
            source = self._loader.path_dwim(source)

        if os.path.exists(source):
            data = self._loader.load_from_file(source)
            if data is None:
                # an empty file loads as None; treat that as "no new facts"
                data = {}
            if not isinstance(data, dict):
                raise AnsibleError("%s must be stored as a dictionary/hash" % source)
            return dict(ansible_facts=data)
        else:
            return dict(failed=True, msg="Source file not found.", file=source)
+
diff --git a/lib/ansible/plugins/action/normal.py b/lib/ansible/plugins/action/normal.py
new file mode 100644
index 0000000000..431d9b0eeb
--- /dev/null
+++ b/lib/ansible/plugins/action/normal.py
@@ -0,0 +1,29 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        ''' Execute the task's module on the remote host with no extra handling. '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        #vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
        # pass tmp by keyword: every other call site in this tree uses
        # keywords with _execute_module, and positionally 'tmp' would be
        # bound to its first parameter (module_name) -- keyword is unambiguous
        return self._execute_module(tmp=tmp)
+
+
diff --git a/lib/ansible/plugins/action/patch.py b/lib/ansible/plugins/action/patch.py
new file mode 100644
index 0000000000..bf2af1be1e
--- /dev/null
+++ b/lib/ansible/plugins/action/patch.py
@@ -0,0 +1,66 @@
+# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+
class ActionModule(ActionBase):

    def run(self, tmp=None, task_vars=None):
        '''
        Copy the patch file to the remote host (unless remote_src is set)
        and then run the 'patch' module against it.
        '''

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        remote_src = boolean(self._task.args.get('remote_src', 'no'))

        if src is None:
            return dict(failed=True, msg="src is required")
        elif remote_src:
            # everything is remote, so we just execute the module
            # without changing any of the module arguments
            return self._execute_module()

        # resolve the local path to the patch, relative to the role if any
        if self._task._role is not None:
            src = self._loader.path_dwim_relative(self._task._role._role_path, 'files', src)
        else:
            src = self._loader.path_dwim(src)

        # create the remote tmp dir if needed, and put the source file there
        if tmp is None or "-tmp-" not in tmp:
            tmp = self._make_tmp_path()

        tmp_src = self._shell.join_path(tmp, os.path.basename(src))
        self._connection.put_file(src, tmp_src)

        if self._connection_info.become and self._connection_info.become_user != 'root':
            # FIXME: noop stuff here
            #if not self.runner.noop_on_check(inject):
            #    self._remote_chmod('a+r', tmp_src, tmp)
            # make the uploaded patch readable by the become user
            self._remote_chmod('a+r', tmp_src, tmp)

        # point the patch module at the uploaded copy of the file
        new_module_args = self._task.args.copy()
        new_module_args.update(
            dict(
                src=tmp_src,
            )
        )

        return self._execute_module('patch', module_args=new_module_args)
diff --git a/lib/ansible/plugins/action/pause.py b/lib/ansible/plugins/action/pause.py
new file mode 100644
index 0000000000..c5a97d5366
--- /dev/null
+++ b/lib/ansible/plugins/action/pause.py
@@ -0,0 +1,136 @@
+# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import sys
+import time
+
+from termios import tcflush, TCIFLUSH
+
+from ansible.errors import *
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    ''' pauses execution for a length or time, or until input is received '''

    PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
    BYPASS_HOST_LOOP = True

    def run(self, tmp=None, task_vars=None):
        ''' run the pause action module '''
        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        duration_unit = 'minutes'
        prompt = None
        seconds = None
        result = dict(
            changed = False,
            rc = 0,
            stderr = '',
            stdout = '',
            start = None,
            stop = None,
            delta = None,
        )

        # FIXME: not sure if we can get this info directly like this anymore?
        #hosts = ', '.join(self.runner.host_set)

        # Is 'args' empty, then this is the default prompted pause
        if self._task.args is None or len(self._task.args.keys()) == 0:
            pause_type = 'prompt'
            #prompt = "[%s]\nPress enter to continue:\n" % hosts
            prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()

        # Are 'minutes' or 'seconds' keys that exist in 'args'?
        elif 'minutes' in self._task.args or 'seconds' in self._task.args:
            try:
                if 'minutes' in self._task.args:
                    pause_type = 'minutes'
                    # The time() command operates in seconds so we need to
                    # recalculate for minutes=X values.
                    seconds = int(self._task.args['minutes']) * 60
                else:
                    pause_type = 'seconds'
                    seconds = int(self._task.args['seconds'])
                    duration_unit = 'seconds'

            except ValueError as e:
                return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))

        # Is 'prompt' a key in 'args'?
        elif 'prompt' in self._task.args:
            pause_type = 'prompt'
            #prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
            prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])

        # unrecognized argument combination
        else:
            return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))

        #vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
        #    (self.pause_type, self.duration_unit, self.seconds, self.prompt))

        ########################################################################
        # Begin the hard work!

        start = time.time()
        result['start'] = str(datetime.datetime.now())

        # FIXME: this is all very broken right now, as prompting from the worker side
        #        is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
        #        probably should not be run through the executor engine at all. Also, ctrl+c
        #        is now captured on the parent thread, so it can't be caught here via the
        #        KeyboardInterrupt exception.

        try:
            if not pause_type == 'prompt':
                print("(^C-c = continue early, ^C-a = abort)")
                #print("[%s]\nPausing for %s seconds" % (hosts, seconds))
                print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
                time.sleep(seconds)
            else:
                # Clear out any unflushed buffered input which would
                # otherwise be consumed by raw_input() prematurely.
                #tcflush(sys.stdin, TCIFLUSH)
                result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
        except KeyboardInterrupt:
            while True:
                print('\nAction? (a)bort/(c)ontinue: ')
                # the original called an undefined getch(); read a single
                # character from stdin instead
                c = sys.stdin.read(1)
                if c == 'c':
                    # continue playbook evaluation
                    break
                elif c == 'a':
                    # abort further playbook evaluation; the original raised
                    # an undefined name 'ae' here, which was itself a NameError
                    raise AnsibleError('user requested abort!')
        finally:
            # always record timing information, even on abort
            duration = time.time() - start
            result['stop'] = str(datetime.datetime.now())
            result['delta'] = int(duration)

            if duration_unit == 'minutes':
                duration = round(duration / 60.0, 2)
            else:
                duration = round(duration, 2)

            result['stdout'] = "Paused for %s %s" % (duration, duration_unit)

        return result
+
diff --git a/lib/ansible/plugins/action/raw.py b/lib/ansible/plugins/action/raw.py
new file mode 100644
index 0000000000..f9cd56572b
--- /dev/null
+++ b/lib/ansible/plugins/action/raw.py
@@ -0,0 +1,41 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    """Action plugin for the ``raw`` module: run a raw command remotely."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # 're' was used below but never imported anywhere in this file,
        # so the SUDO-SUCCESS filtering raised a NameError when triggered
        import re

        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        # FIXME: need to rework the noop stuff still
        #if self.runner.noop_on_check(inject):
        #    # in --check mode, always skip this module execution
        #    return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))

        executable = self._task.args.get('executable')
        result = self._low_level_execute_command(self._task.args.get('_raw_params'), tmp=tmp, executable=executable)

        # for some modules (script, raw), the sudo success key
        # may leak into the stdout due to the way the sudo/su
        # command is constructed, so we filter that out here
        if result.get('stdout','').strip().startswith('SUDO-SUCCESS-'):
            result['stdout'] = re.sub(r'^((\r)?\n)?SUDO-SUCCESS.*(\r)?\n', '', result['stdout'])

        return result
diff --git a/lib/ansible/plugins/action/script.py b/lib/ansible/plugins/action/script.py
new file mode 100644
index 0000000000..3ca7dc6a34
--- /dev/null
+++ b/lib/ansible/plugins/action/script.py
@@ -0,0 +1,98 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+
class ActionModule(ActionBase):
    """Action plugin for the ``script`` module.

    Transfers a local script to the remote host, makes it executable, and
    runs it with any remaining raw parameters as its arguments. Honors
    ``creates``/``removes`` for idempotence.
    """

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        ''' handler for file transfer operations '''

        # FIXME: noop stuff still needs to be sorted out
        #if self.runner.noop_on_check(inject):
        #    # in check mode, always skip this module
        #    return ReturnData(conn=conn, comm_ok=True,
        #                      result=dict(skipped=True, msg='check mode not supported for this module'))

        if not tmp:
            tmp = self._make_tmp_path()

        creates = self._task.args.get('creates')
        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=creates), tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s exists" % creates))

        removes = self._task.args.get('removes')
        if removes:
            # do not run the command if the line contains removes=filename
            # and the filename does not exist. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=removes), tmp=tmp, persist_files=True)
            stat = result.get('stat', None)
            if stat and not stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))

        # the script name is the first item in the raw params, so we split it
        # out now so we know the file name we need to transfer to the remote,
        # and everything else is an argument to the script which we need later
        # to append to the remote command
        parts = self._task.args.get('_raw_params', '').strip().split()
        if not parts:
            # guard against an empty action line, which previously raised
            # an IndexError on parts[0]
            return dict(failed=True, msg="missing script name in _raw_params")
        source = parts[0]
        args = ' '.join(parts[1:])

        # resolve the script path role-relative when applicable
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
        else:
            source = self._loader.path_dwim(source)

        # transfer the file to a remote tmp location
        tmp_src = self._shell.join_path(tmp, os.path.basename(source))
        self._connection.put_file(source, tmp_src)

        sudoable = True
        # set file permissions, more permissive when the copy is done as a different user
        if self._connection_info.become and self._connection_info.become_user != 'root':
            chmod_mode = 'a+rx'
            sudoable = False
        else:
            chmod_mode = '+rx'
        # NOTE(review): argument order here (tmp, mode, path) disagrees with
        # other action plugins in this tree which call
        # _remote_chmod(mode, path, tmp) — confirm the actual signature
        self._remote_chmod(tmp, chmod_mode, tmp_src, sudoable=sudoable)

        # add preparation steps to one ssh roundtrip executing the script
        env_string = self._compute_environment_string()
        script_cmd = ' '.join([env_string, tmp_src, args])

        result = self._low_level_execute_command(cmd=script_cmd, tmp=None, sudoable=sudoable)

        # clean up after
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
            self._remove_tmp_path(tmp)

        # scripts are always assumed to change state on the target
        result['changed'] = True

        return result
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
new file mode 100644
index 0000000000..6086ee6e8b
--- /dev/null
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -0,0 +1,38 @@
+# Copyright 2013 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.action import ActionBase
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
+
class ActionModule(ActionBase):
    """Action plugin for ``set_fact``: template the task args into facts."""

    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=None):
        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        # iteritems()/basestring only exist on Python 2; this file declares
        # a python3-ish intent, so use items() and a small string-type shim
        try:
            string_types = basestring  # Python 2
        except NameError:
            string_types = str  # Python 3

        templar = Templar(loader=self._loader, variables=task_vars)
        facts = dict()
        if self._task.args:
            for (k, v) in self._task.args.items():
                # fact names themselves may be templated
                k = templar.template(k)
                # normalize boolean-looking strings so e.g. set_fact: x=yes
                # produces a real boolean instead of the literal string
                if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v)
                facts[k] = v
        return dict(changed=False, ansible_facts=facts)
diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
new file mode 100644
index 0000000000..1bc64ff4d5
--- /dev/null
+++ b/lib/ansible/plugins/action/synchronize.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os.path
+
+from ansible.plugins.action import ActionBase
+from ansible.utils.boolean import boolean
+
class ActionModule(ActionBase):
    """Action plugin for ``synchronize``: build rsync-style src/dest specs
    (``user@host:path``) from inventory data, then delegate to the
    ``synchronize`` module, which runs rsync locally by default.
    """

    def _get_absolute_path(self, path):
        # resolve a relative path against the role's files/ directory,
        # preserving a trailing "/" (significant to rsync semantics)
        if self._task._role is not None:
            original_path = path
            path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
            if original_path and original_path[-1] == '/' and path[-1] != '/':
                # make sure the dwim'd path ends in a trailing "/"
                # if the original path did
                path += '/'

        return path

    def _process_origin(self, host, path, user):
        # format the "local" side of the transfer; remote hosts get a
        # [user@]host:path spec, local paths are made absolute
        if not host in ['127.0.0.1', 'localhost']:
            if user:
                return '%s@%s:%s' % (user, host, path)
            else:
                return '%s:%s' % (host, path)
        else:
            if not ':' in path:
                if not path.startswith('/'):
                    path = self._get_absolute_path(path=path)
            return path

    def _process_remote(self, host, path, user):
        # format the "remote" side of the transfer; the original declared
        # an unused 'task' parameter here but was called with only
        # (host, path, user), which raised a TypeError — dropped it
        transport = self._connection_info.connection
        return_data = None
        if not host in ['127.0.0.1', 'localhost'] or transport != "local":
            if user:
                return_data = '%s@%s:%s' % (user, host, path)
            else:
                return_data = '%s:%s' % (host, path)
        else:
            return_data = path

        if not ':' in return_data:
            if not return_data.startswith('/'):
                return_data = self._get_absolute_path(path=return_data)

        return return_data

    def run(self, tmp=None, task_vars=None):
        ''' generates params and passes them on to the rsync module '''
        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        original_transport = task_vars.get('ansible_connection', self._connection_info.connection)
        transport_overridden = False
        if task_vars.get('delegate_to') is None:
            task_vars['delegate_to'] = '127.0.0.1'
            # IF original transport is not local, override transport and disable sudo.
            if original_transport != 'local':
                task_vars['ansible_connection'] = 'local'
                transport_overridden = True
                # FIXME(review): self.runner does not exist on v2 action
                # plugins; this line will raise AttributeError and needs
                # to be ported to the v2 equivalent
                self.runner.sudo = False

        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)

        # FIXME: this doesn't appear to be used anywhere?
        local_rsync_path = task_vars.get('ansible_rsync_path')

        # from the perspective of the rsync call the delegate is the localhost
        src_host = '127.0.0.1'
        dest_host = task_vars.get('ansible_ssh_host', task_vars.get('inventory_hostname'))

        # allow ansible_ssh_host to be templated
        dest_is_local = dest_host in ['127.0.0.1', 'localhost']

        # CHECK FOR NON-DEFAULT SSH PORT
        # NOTE(review): inventory_hostname is used here as a "not set"
        # sentinel for ansible_ssh_port — confirm that is intentional
        dest_port = self._task.args.get('dest_port')
        inv_port = task_vars.get('ansible_ssh_port', task_vars.get('inventory_hostname'))
        if inv_port != dest_port and inv_port != task_vars.get('inventory_hostname'):
            dest_port = inv_port

        # edge case: explicit delegate and dest_host are the same
        if dest_host == task_vars.get('delegate_to'):
            dest_host = '127.0.0.1'

        # SWITCH SRC AND DEST PER MODE
        if self._task.args.get('mode', 'push') == 'pull':
            (dest_host, src_host) = (src_host, dest_host)

        # CHECK DELEGATE HOST INFO
        use_delegate = False
        # FIXME: not sure if this is in connection info yet or not...
        #if conn.delegate != conn.host:
        #    if 'hostvars' in task_vars:
        #        if conn.delegate in task_vars['hostvars'] and original_transport != 'local':
        #            # use a delegate host instead of localhost
        #            use_delegate = True

        # COMPARE DELEGATE, HOST AND TRANSPORT
        process_args = False
        if not dest_host is src_host and original_transport != 'local':
            # interpret and task_vars remote host info into src or dest
            process_args = True

        # MUNGE SRC AND DEST PER REMOTE_HOST INFO
        if process_args or use_delegate:

            user = None
            if boolean(task_vars.get('set_remote_user', 'yes')):
                if use_delegate:
                    # FIXME(review): 'conn' is undefined here (v1 leftover);
                    # unreachable while use_delegate is always False above
                    user = task_vars['hostvars'][conn.delegate].get('ansible_ssh_user')

                if not use_delegate or not user:
                    # FIXME(review): self.runner is a stale v1 reference
                    user = task_vars.get('ansible_ssh_user', self.runner.remote_user)

            if use_delegate:
                # FIXME
                private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)
            else:
                private_key = task_vars.get('ansible_ssh_private_key_file', self.runner.private_key_file)

            if private_key is not None:
                private_key = os.path.expanduser(private_key)

            # use the mode to define src and dest's url
            if self._task.args.get('mode', 'push') == 'pull':
                # src is a remote path: <user>@<host>, dest is a local path
                src = self._process_remote(src_host, src, user)
                dest = self._process_origin(dest_host, dest, user)
            else:
                # src is a local path, dest is a remote path: <user>@<host>
                src = self._process_origin(src_host, src, user)
                dest = self._process_remote(dest_host, dest, user)

        # Allow custom rsync path argument.
        rsync_path = self._task.args.get('rsync_path', None)

        # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
        if not rsync_path and transport_overridden and self._connection_info.become and self._connection_info.become_method == 'sudo' and not dest_is_local:
            rsync_path = 'sudo rsync'

        # make sure rsync path is quoted.
        if rsync_path:
            self._task.args['rsync_path'] = '"%s"' % rsync_path

        # run the module and store the result
        result = self._execute_module('synchronize')

        return result
+
diff --git a/lib/ansible/plugins/action/template.py b/lib/ansible/plugins/action/template.py
new file mode 100644
index 0000000000..a234ef2eee
--- /dev/null
+++ b/lib/ansible/plugins/action/template.py
@@ -0,0 +1,186 @@
+# (c) 2015, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import os
+
+from ansible.plugins.action import ActionBase
+from ansible.template import Templar
+from ansible.utils.hashing import checksum_s
+
class ActionModule(ActionBase):
    """Action plugin for the ``template`` module.

    Renders the template locally with Jinja2, compares checksums with the
    remote destination, and only transfers/copies when the content differs;
    otherwise runs the ``file`` module to enforce metadata.
    """

    TRANSFERS_FILES = True

    def get_checksum(self, tmp, dest, try_directory=False, source=None):
        """Return the remote checksum string for dest, or a ``failed``
        result dict when the remote file could not be checksummed.

        When try_directory is set and dest turns out to be a directory
        (code '3'), retry against dest/<basename(source)>.
        """
        remote_checksum = self._remote_checksum(tmp, dest)

        if remote_checksum in ('0', '2', '3', '4'):
            # Note: 1 means the file is not present which is fine; template
            # will create it. 3 means directory was specified instead of file
            if try_directory and remote_checksum == '3' and source:
                base = os.path.basename(source)
                dest = os.path.join(dest, base)
                remote_checksum = self.get_checksum(tmp, dest, try_directory=False)
                if remote_checksum not in ('0', '2', '3', '4'):
                    return remote_checksum

            result = dict(failed=True, msg="failed to checksum remote file."
                        " Checksum error code: %s" % remote_checksum)
            return result

        return remote_checksum

    def run(self, tmp=None, task_vars=None):
        ''' handler for template operations '''
        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('src', None)
        dest   = self._task.args.get('dest', None)

        if (source is None and 'first_available_file' not in task_vars) or dest is None:
            return dict(failed=True, msg="src and dest are required")

        if tmp is None:
            tmp = self._make_tmp_path()

        ##################################################################################################
        # FIXME: this all needs to be sorted out
        ##################################################################################################
        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        #if 'first_available_file' in task_vars:
        #    found = False
        #    for fn in task_vars.get('first_available_file'):
        #        fn_orig = fn
        #        fnt = template.template(self.runner.basedir, fn, task_vars)
        #        fnd = utils.path_dwim(self.runner.basedir, fnt)
        #        if not os.path.exists(fnd) and '_original_file' in task_vars:
        #            fnd = utils.path_dwim_relative(task_vars['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
        #        if os.path.exists(fnd):
        #            source = fnd
        #            found = True
        #            break
        #    if not found:
        #        result = dict(failed=True, msg="could not find src in first_available_file list")
        #        return ReturnData(conn=conn, comm_ok=False, result=result)
        #else:

        # (the leftover "if 1:" placeholder for the commented-out branch
        # above has been removed; this is the unconditional path for now)
        if self._task._role is not None:
            source = self._loader.path_dwim_relative(self._task._role._role_path, 'templates', source)
        else:
            source = self._loader.path_dwim(source)
        ##################################################################################################
        # END FIXME
        ##################################################################################################

        # Expand any user home dir specification
        dest = self._remote_expand_user(dest, tmp)

        # a trailing separator means dest is a directory; append the
        # template's basename to get the real destination file
        directory_prepended = False
        if dest.endswith(os.sep):
            directory_prepended = True
            base = os.path.basename(source)
            dest = os.path.join(dest, base)

        # template the source data locally & get ready to transfer
        templar = Templar(loader=self._loader, variables=task_vars)
        try:
            with open(source, 'r') as f:
                template_data = f.read()
            resultant = templar.template(template_data, preserve_trailing_newlines=True)
        except Exception as e:
            return dict(failed=True, msg=type(e).__name__ + ": " + str(e))

        local_checksum = checksum_s(resultant)
        remote_checksum = self.get_checksum(tmp, dest, not directory_prepended, source=source)
        if isinstance(remote_checksum, dict):
            # Error from remote_checksum is a dict.  Valid return is a str
            return remote_checksum

        if local_checksum != remote_checksum:
            # if showing diffs, we need to get the remote value
            dest_contents = ''

            # FIXME: still need to implement diff mechanism
            #if self.runner.diff:
            #    # using persist_files to keep the temp directory around to avoid needing to grab another
            #    dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, task_vars=task_vars, persist_files=True)
            #    if 'content' in dest_result.result:
            #        dest_contents = dest_result.result['content']
            #        if dest_result.result['encoding'] == 'base64':
            #            dest_contents = base64.b64decode(dest_contents)
            #        else:
            #            raise Exception("unknown encoding, failed: %s" % dest_result.result)

            xfered = self._transfer_data(self._shell.join_path(tmp, 'source'), resultant)

            # fix file permissions when the copy is done as a different user
            if self._connection_info.become and self._connection_info.become_user != 'root':
                self._remote_chmod('a+r', xfered, tmp)

            # run the copy module
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=xfered,
                    dest=dest,
                    original_basename=os.path.basename(source),
                    follow=True,
                ),
            )

            # FIXME: noop stuff needs to be sorted out
            #if self.runner.noop_on_check(task_vars):
            #    return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
            #else:
            #    res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, task_vars=task_vars, complex_args=complex_args)
            #    if res.result.get('changed', False):
            #        res.diff = dict(before=dest_contents, after=resultant)
            #    return res

            result = self._execute_module(module_name='copy', module_args=new_module_args)
            if result.get('changed', False):
                result['diff'] = dict(before=dest_contents, after=resultant)
            return result

        else:
            # when running the file module based on the template data, we do
            # not want the source filename (the name of the template) to be used,
            # since this would mess up links, so we clear the src param and tell
            # the module to follow links.  When doing that, we have to set
            # original_basename to the template just in case the dest is
            # a directory.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=None,
                    original_basename=os.path.basename(source),
                    follow=True,
                ),
            )

            # FIXME: this may not be required anymore, as the checkmod params
            #        should be in the regular module args?
            # be sure to task_vars the check mode param into the module args and
            # rely on the file module to report its changed status
            #if self.runner.noop_on_check(task_vars):
            #    new_module_args['CHECKMODE'] = True

            return self._execute_module(module_name='file', module_args=new_module_args)
+
diff --git a/lib/ansible/plugins/action/unarchive.py b/lib/ansible/plugins/action/unarchive.py
new file mode 100644
index 0000000000..b7601ed910
--- /dev/null
+++ b/lib/ansible/plugins/action/unarchive.py
@@ -0,0 +1,114 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import pipes
+
+from ansible.plugins.action import ActionBase
+
+
class ActionModule(ActionBase):
    """Action plugin for the ``unarchive`` module.

    Optionally copies the archive from the control machine to the remote
    temporary directory (``copy=True``, the default), validates that
    ``dest`` is an existing remote directory, and then runs the
    ``unarchive`` module.
    """

    TRANSFERS_FILES = True

    def run(self, tmp=None, task_vars=None):
        ''' handler for unarchive operations '''
        # avoid a shared mutable default argument
        if task_vars is None:
            task_vars = dict()

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        # NOTE(review): a string value like copy=no would be truthy here;
        # presumably this should go through boolean() — confirm
        copy = self._task.args.get('copy', True)
        creates = self._task.args.get('creates', None)

        if source is None or dest is None:
            return dict(failed=True, msg="src (or content) and dest are required")

        if not tmp:
            tmp = self._make_tmp_path()

        if creates:
            # do not run the command if the line contains creates=filename
            # and the filename already exists. This allows idempotence
            # of command executions.
            result = self._execute_module(module_name='stat', module_args=dict(path=creates))
            stat = result.get('stat', None)
            if stat and stat.get('exists', False):
                return dict(skipped=True, msg=("skipped, since %s exists" % creates))

        dest = self._remote_expand_user(dest, tmp) # CCTODO: Fix path for Windows hosts.
        source = os.path.expanduser(source)

        if copy:
            # FIXME: the original file stuff needs to be reworked
            if '_original_file' in task_vars:
                source = self._loader.path_dwim_relative(task_vars['_original_file'], 'files', source)
            else:
                source = self._loader.path_dwim(source)

        remote_checksum = self._remote_checksum(tmp, dest)
        # test the "no python on remote" code first: the original ordering
        # (remote_checksum != '3' first) made the '4' branch unreachable,
        # since '4' != '3' already matched the first test
        if remote_checksum == '4':
            return dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
        elif remote_checksum != '3':
            return dict(failed=True, msg="dest '%s' must be an existing dir" % dest)

        if copy:
            # transfer the file to a remote tmp location; use the shell's
            # join_path for consistency with the other action plugins
            tmp_src = self._shell.join_path(tmp, 'source')
            self._connection.put_file(source, tmp_src)

        # handle diff mode client side
        # handle check mode client side
        # fix file permissions when the copy is done as a different user
        if copy:
            if self._connection_info.become and self._connection_info.become_user != 'root':
                # FIXME: noop stuff needs to be reworked
                #if not self.runner.noop_on_check(task_vars):
                #    self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
                self._remote_chmod(tmp, 'a+r', tmp_src)

            # Build temporary module_args.
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    src=tmp_src,
                    original_basename=os.path.basename(source),
                ),
            )

            # make sure checkmod is passed on correctly
            # FIXME: noop again, probably doesn't need to be done here anymore?
            #if self.runner.noop_on_check(task_vars):
            #    new_module_args['CHECKMODE'] = True

        else:
            new_module_args = self._task.args.copy()
            new_module_args.update(
                dict(
                    original_basename=os.path.basename(source),
                ),
            )
            # make sure checkmod is passed on correctly
            # FIXME: noop again, probably doesn't need to be done here anymore?
            #if self.runner.noop_on_check(task_vars):
            #    module_args += " CHECKMODE=True"

        # execute the unarchive module now, with the updated args
        return self._execute_module(module_args=new_module_args)
+
diff --git a/lib/ansible/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
index 4100861c14..8ffe554cc6 100644
--- a/lib/ansible/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -14,19 +14,20 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
from collections import MutableMapping
-from ansible import utils
from ansible import constants as C
-from ansible import errors
-
+from ansible.plugins import cache_loader
class FactCache(MutableMapping):
def __init__(self, *args, **kwargs):
- self._plugin = utils.plugins.cache_loader.get(C.CACHE_PLUGIN)
+ self._plugin = cache_loader.get(C.CACHE_PLUGIN)
if self._plugin is None:
+ # FIXME: this should be an exception
return
def __getitem__(self, key):
diff --git a/lib/ansible/cache/base.py b/lib/ansible/plugins/cache/base.py
index b6254cdfd4..051f02d0b0 100644
--- a/lib/ansible/cache/base.py
+++ b/lib/ansible/plugins/cache/base.py
@@ -15,27 +15,41 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import exceptions
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class BaseCacheModule(object):
+from abc import ABCMeta, abstractmethod
+from six import add_metaclass
+
+
@add_metaclass(ABCMeta)
class BaseCacheModule:
    """Abstract interface that every fact-cache plugin must implement.

    Each method is declared with @abstractmethod, so a subclass missing
    any of them cannot be instantiated (ABCMeta enforces this at
    construction time instead of the old runtime NotImplementedError).
    """

    @abstractmethod
    def get(self, key):
        # return the cached value stored under key
        pass

    @abstractmethod
    def set(self, key, value):
        # store value under key
        pass

    @abstractmethod
    def keys(self):
        # return the keys currently present in the cache
        pass

    @abstractmethod
    def contains(self, key):
        # return whether key is present in the cache
        pass

    @abstractmethod
    def delete(self, key):
        # remove key (and its value) from the cache
        pass

    @abstractmethod
    def flush(self):
        # remove all entries from the cache
        pass

    @abstractmethod
    def copy(self):
        # return a copy of the cache's contents
        pass
diff --git a/lib/ansible/cache/memcached.py b/lib/ansible/plugins/cache/memcached.py
index ea922434b5..e7321a5a6b 100644
--- a/lib/ansible/cache/memcached.py
+++ b/lib/ansible/plugins/cache/memcached.py
@@ -14,6 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import collections
import os
@@ -23,12 +25,12 @@ import threading
from itertools import chain
from ansible import constants as C
-from ansible.cache.base import BaseCacheModule
+from ansible.plugins.cache.base import BaseCacheModule
try:
import memcache
except ImportError:
- print 'python-memcached is required for the memcached fact cache'
+ print('python-memcached is required for the memcached fact cache')
sys.exit(1)
diff --git a/lib/ansible/cache/memory.py b/lib/ansible/plugins/cache/memory.py
index 735ed32893..1562836151 100644
--- a/lib/ansible/cache/memory.py
+++ b/lib/ansible/plugins/cache/memory.py
@@ -14,8 +14,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.cache.base import BaseCacheModule
+from ansible.plugins.cache.base import BaseCacheModule
class CacheModule(BaseCacheModule):
diff --git a/lib/ansible/cache/redis.py b/lib/ansible/plugins/cache/redis.py
index 7ae5ef74c1..287c14bd2a 100644
--- a/lib/ansible/cache/redis.py
+++ b/lib/ansible/plugins/cache/redis.py
@@ -14,26 +14,21 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from __future__ import absolute_import
-import collections
# FIXME: can we store these as something else before we ship it?
import sys
import time
-
-try:
- import simplejson as json
-except ImportError:
- import json
+import json
from ansible import constants as C
-from ansible.utils import jsonify
-from ansible.cache.base import BaseCacheModule
+from ansible.plugins.cache.base import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
- print "The 'redis' python module is required, 'pip install redis'"
+ print("The 'redis' python module is required, 'pip install redis'")
sys.exit(1)
class CacheModule(BaseCacheModule):
@@ -70,7 +65,7 @@ class CacheModule(BaseCacheModule):
return json.loads(value)
def set(self, key, value):
- value2 = jsonify(value)
+ value2 = json.dumps(value)
if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
self._cache.setex(self._make_key(key), int(self._timeout), value2)
else:
diff --git a/lib/ansible/callback_plugins/noop.py b/lib/ansible/plugins/callback/__init__.py
index b5d5886874..2c2e7e74c6 100644
--- a/lib/ansible/callback_plugins/noop.py
+++ b/lib/ansible/plugins/callback/__init__.py
@@ -1,5 +1,5 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -15,22 +15,32 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-class CallbackModule(object):
+#from ansible.utils.display import Display
- """
- this is an example ansible callback file that does nothing. You can drop
- other classes in the same directory to define your own handlers. Methods
- you do not use can be omitted. If self.disabled is set to True, the plugin
- methods will not be called.
+__all__ = ["CallbackBase"]
- example uses include: logging, emailing, storing info, etc
- """
+class CallbackBase:
- def __init__(self):
- #if foo:
- # self.disabled = True
- pass
+ '''
+ This is a base ansible callback class that does nothing. New callbacks should
+ use this class as a base and override any callback methods they wish to execute
+ custom actions.
+ '''
+
+ # FIXME: the list of functions here needs to be updated once we have
+ # finalized the list of callback methods used in the default callback
+
+ def __init__(self, display):
+ self._display = display
+
+ def set_connection_info(self, conn_info):
+ # FIXME: this is a temporary hack, as the connection info object
+ # should be created early and passed down through objects
+ self._display._verbosity = conn_info.verbosity
def on_any(self, *args, **kwargs):
pass
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
new file mode 100644
index 0000000000..de6548ef18
--- /dev/null
+++ b/lib/ansible/plugins/callback/default.py
@@ -0,0 +1,136 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.plugins.callback import CallbackBase
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+
+ def v2_on_any(self, *args, **kwargs):
+ pass
+
+    def v2_runner_on_failed(self, result, ignore_errors=False):
+        if 'exception' in result._result and self._display.verbosity < 3:  # NOTE(review): other methods here read self._display._verbosity and set_connection_info writes _verbosity -- confirm which attribute Display exposes
+            del result._result['exception']
+        self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), json.dumps(result._result, ensure_ascii=False)), color='red')
+
+ def v2_runner_on_ok(self, result):
+
+ if result._task.action == 'include':
+ msg = 'included: %s for %s' % (result._task.args.get('_raw_params'), result._host.name)
+ color = 'cyan'
+ elif result._result.get('changed', False):
+ msg = "changed: [%s]" % result._host.get_name()
+ color = 'yellow'
+ else:
+ msg = "ok: [%s]" % result._host.get_name()
+ color = 'green'
+
+ if (self._display._verbosity > 0 or 'verbose_always' in result._result) and result._task.action not in ('setup', 'include'):
+ indent = None
+ if 'verbose_always' in result._result:
+ indent = 4
+ del result._result['verbose_always']
+ msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
+ self._display.display(msg, color=color)
+
+ def v2_runner_on_skipped(self, result):
+ msg = "skipping: [%s]" % result._host.get_name()
+ if self._display._verbosity > 0 or 'verbose_always' in result._result:
+ indent = None
+ if 'verbose_always' in result._result:
+ indent = 4
+ del result._result['verbose_always']
+ msg += " => %s" % json.dumps(result._result, indent=indent, ensure_ascii=False)
+ self._display.display(msg, color='cyan')
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), result._result), color='red')
+
+ def v2_runner_on_no_hosts(self, task):
+ pass
+
+ def v2_runner_on_async_poll(self, result):
+ pass
+
+ def v2_runner_on_async_ok(self, result):
+ pass
+
+ def v2_runner_on_async_failed(self, result):
+ pass
+
+ def v2_runner_on_file_diff(self, result, diff):
+ pass
+
+ def v2_playbook_on_start(self):
+ pass
+
+ def v2_playbook_on_notify(self, result, handler):
+ pass
+
+ def v2_playbook_on_no_hosts_matched(self):
+ self._display.display("skipping: no hosts matched", color='cyan')
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ self._display.banner("NO MORE HOSTS LEFT")
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ self._display.banner("TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_handler_task_start(self, task):
+ self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def v2_playbook_on_setup(self):
+ pass
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ pass
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ pass
+
+    def v2_playbook_on_play_start(self, play):
+        name = play.get_name().strip()
+        if not name:
+            msg = "PLAY"
+        else:
+            msg = "PLAY [%s]" % name
+
+        self._display.banner(msg)  # use the formatted msg; banner(name) printed an empty banner for unnamed plays
+
+ def v2_playbook_on_stats(self, stats):
+ pass
+
diff --git a/lib/ansible/plugins/callback/minimal.py b/lib/ansible/plugins/callback/minimal.py
new file mode 100644
index 0000000000..c6b2282e62
--- /dev/null
+++ b/lib/ansible/plugins/callback/minimal.py
@@ -0,0 +1,104 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.plugins.callback import CallbackBase
+
+
+class CallbackModule(CallbackBase):
+
+ '''
+ This is the default callback interface, which simply prints messages
+ to stdout when new callback events are received.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'stdout'
+
+ def v2_on_any(self, *args, **kwargs):
+ pass
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ if 'exception' in result._result and self._display.verbosity < 3:
+ del result._result['exception']
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), result._result), color='red')
+
+ def v2_runner_on_ok(self, result):
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), json.dumps(result._result, indent=4)), color='green')
+
+ def v2_runner_on_skipped(self, result):
+ pass
+
+ def v2_runner_on_unreachable(self, result):
+ self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
+
+ def v2_runner_on_no_hosts(self, task):
+ pass
+
+ def v2_runner_on_async_poll(self, host, res, jid, clock):
+ pass
+
+ def v2_runner_on_async_ok(self, host, res, jid):
+ pass
+
+ def v2_runner_on_async_failed(self, host, res, jid):
+ pass
+
+ def v2_playbook_on_start(self):
+ pass
+
+ def v2_playbook_on_notify(self, host, handler):
+ pass
+
+ def v2_playbook_on_no_hosts_matched(self):
+ pass
+
+ def v2_playbook_on_no_hosts_remaining(self):
+ pass
+
+ def v2_playbook_on_task_start(self, task, is_conditional):
+ pass
+
+ def v2_playbook_on_cleanup_task_start(self, task):
+ pass
+
+ def v2_playbook_on_handler_task_start(self, task):
+ pass
+
+ def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
+ pass
+
+ def v2_playbook_on_setup(self):
+ pass
+
+ def v2_playbook_on_import_for_host(self, result, imported_file):
+ pass
+
+ def v2_playbook_on_not_import_for_host(self, result, missing_file):
+ pass
+
+ def v2_playbook_on_play_start(self, play):
+ pass
+
+ def v2_playbook_on_stats(self, stats):
+ pass
+
diff --git a/lib/ansible/plugins/connections/__init__.py b/lib/ansible/plugins/connections/__init__.py
new file mode 100644
index 0000000000..d11f365182
--- /dev/null
+++ b/lib/ansible/plugins/connections/__init__.py
@@ -0,0 +1,95 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import ABCMeta, abstractmethod, abstractproperty
+
+from six import add_metaclass
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+
+# FIXME: this object should be created upfront and passed through
+# the entire chain of calls to here, as there are other things
+# which may want to output display/logs too
+from ansible.utils.display import Display
+
+__all__ = ['ConnectionBase']
+
+@add_metaclass(ABCMeta)
+class ConnectionBase:
+ '''
+ A base class for connections to contain common code.
+ '''
+
+ has_pipelining = False
+ become_methods = C.BECOME_METHODS
+
+ def __init__(self, connection_info, new_stdin, *args, **kwargs):
+ # All these hasattrs allow subclasses to override these parameters
+ if not hasattr(self, '_connection_info'):
+ self._connection_info = connection_info
+ if not hasattr(self, '_new_stdin'):
+ self._new_stdin = new_stdin
+ if not hasattr(self, '_display'):
+ self._display = Display(verbosity=connection_info.verbosity)
+ if not hasattr(self, '_connected'):
+ self._connected = False
+
+ self._connect()
+
+ def _become_method_supported(self, become_method):
+ ''' Checks if the current class supports this privilege escalation method '''
+
+ if become_method in self.__class__.become_methods:
+ return True
+
+ raise AnsibleError("Internal Error: this connection module does not support running commands via %s" % become_method)
+
+ @abstractproperty
+ def transport(self):
+ """String used to identify this Connection class from other classes"""
+ pass
+
+ @abstractmethod
+ def _connect(self):
+ """Connect to the host we've been initialized with"""
+ pass
+
+ @abstractmethod
+ def exec_command(self, cmd, tmp_path, executable=None, in_data=None):
+ """Run a command on the remote host"""
+ pass
+
+ @abstractmethod
+ def put_file(self, in_path, out_path):
+ """Transfer a file from local to remote"""
+ pass
+
+ @abstractmethod
+ def fetch_file(self, in_path, out_path):
+ """Fetch a file from remote to local"""
+ pass
+
+ @abstractmethod
+ def close(self):
+ """Terminate the connection"""
+ pass
diff --git a/lib/ansible/runner/connection_plugins/accelerate.py b/lib/ansible/plugins/connections/accelerate.py
index 0627267c16..d0bd5ad3d1 100644
--- a/lib/ansible/runner/connection_plugins/accelerate.py
+++ b/lib/ansible/plugins/connections/accelerate.py
@@ -15,6 +15,9 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
import json
import os
import base64
@@ -23,8 +26,9 @@ import struct
import time
from ansible.callbacks import vvv, vvvv
from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
-from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
+from . import ConnectionBase
+from .ssh import Connection as SSHConnection
+from .paramiko_ssh import Connection as ParamikoConnection
from ansible import utils
from ansible import constants
@@ -35,7 +39,7 @@ from ansible import constants
# multiple of the value to speed up file reads.
CHUNK_SIZE=1044*20
-class Connection(object):
+class Connection(ConnectionBase):
''' raw socket accelerated connection '''
def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
@@ -88,6 +92,11 @@ class Connection(object):
if getattr(self.runner, 'aes_keys', None):
utils.AES_KEYS = self.runner.aes_keys
+ @property
+ def transport(self):
+ """String used to identify this Connection class from other classes"""
+ return 'accelerate'
+
def _execute_accelerate_module(self):
args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
base64.b64encode(self.key.__str__()),
@@ -141,7 +150,7 @@ class Connection(object):
# shutdown, so we'll reconnect.
wrong_user = True
- except AnsibleError, e:
+ except AnsibleError as e:
if allow_ssh:
if "WRONG_USER" in e:
vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
diff --git a/lib/ansible/runner/connection_plugins/chroot.py b/lib/ansible/plugins/connections/chroot.py
index 3e96047287..3ecc0f7030 100644
--- a/lib/ansible/runner/connection_plugins/chroot.py
+++ b/lib/ansible/plugins/connections/chroot.py
@@ -15,6 +15,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import distutils.spawn
import traceback
diff --git a/lib/ansible/runner/connection_plugins/funcd.py b/lib/ansible/plugins/connections/funcd.py
index 92b7f53605..92bda4bb34 100644
--- a/lib/ansible/runner/connection_plugins/funcd.py
+++ b/lib/ansible/plugins/connections/funcd.py
@@ -21,6 +21,8 @@
# The func transport permit to use ansible over func. For people who have already setup
# func and that wish to play with ansible, this permit to move gradually to ansible
# without having to redo completely the setup of the network.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
HAVE_FUNC=False
try:
diff --git a/lib/ansible/runner/connection_plugins/jail.py b/lib/ansible/plugins/connections/jail.py
index c7b61bc638..f7623b3938 100644
--- a/lib/ansible/runner/connection_plugins/jail.py
+++ b/lib/ansible/plugins/connections/jail.py
@@ -16,6 +16,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import distutils.spawn
import traceback
diff --git a/lib/ansible/runner/connection_plugins/libvirt_lxc.py b/lib/ansible/plugins/connections/libvirt_lxc.py
index 832b78251c..1905eb6a66 100644
--- a/lib/ansible/runner/connection_plugins/libvirt_lxc.py
+++ b/lib/ansible/plugins/connections/libvirt_lxc.py
@@ -16,6 +16,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import distutils.spawn
import os
diff --git a/lib/ansible/plugins/connections/local.py b/lib/ansible/plugins/connections/local.py
new file mode 100644
index 0000000000..1dc6076b0d
--- /dev/null
+++ b/lib/ansible/plugins/connections/local.py
@@ -0,0 +1,132 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+import os
+import shutil
+import subprocess
+#import select
+#import fcntl
+
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.plugins.connections import ConnectionBase
+
+from ansible.utils.debug import debug
+
+class Connection(ConnectionBase):
+ ''' Local based connections '''
+
+ @property
+ def transport(self):
+ ''' used to identify this connection object '''
+ return 'local'
+
+ def _connect(self, port=None):
+ ''' connect to the local host; nothing to do here '''
+
+ if not self._connected:
+ self._display.vvv("ESTABLISH LOCAL CONNECTION FOR USER: {0}".format(self._connection_info.remote_user, host=self._connection_info.remote_addr))
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
+ ''' run a command on the local host '''
+
+ debug("in local.exec_command()")
+ # su requires to be run from a terminal, and therefore isn't supported here (yet?)
+ #if self._connection_info.su:
+ # raise AnsibleError("Internal Error: this module does not support running commands via su")
+
+ if in_data:
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
+
+ executable = executable.split()[0] if executable else None
+
+ self._display.vvv("{0} EXEC {1}".format(self._connection_info.remote_addr, cmd))
+ # FIXME: cwd= needs to be set to the basedir of the playbook
+ debug("opening command with Popen()")
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, basestring),
+ executable=executable, #cwd=...
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ debug("done running command with Popen()")
+
+ # FIXME: more su/sudo stuff
+ #if self.runner.sudo and sudoable and self.runner.sudo_pass:
+ # fcntl.fcntl(p.stdout, fcntl.F_SETFL,
+ # fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ # fcntl.fcntl(p.stderr, fcntl.F_SETFL,
+ # fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ # sudo_output = ''
+ # while not sudo_output.endswith(prompt) and success_key not in sudo_output:
+ # rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
+ # [p.stdout, p.stderr], self.runner.timeout)
+ # if p.stdout in rfd:
+ # chunk = p.stdout.read()
+ # elif p.stderr in rfd:
+ # chunk = p.stderr.read()
+ # else:
+ # stdout, stderr = p.communicate()
+ # raise AnsibleError('timeout waiting for sudo password prompt:\n' + sudo_output)
+ # if not chunk:
+ # stdout, stderr = p.communicate()
+ # raise AnsibleError('sudo output closed while waiting for password prompt:\n' + sudo_output)
+ # sudo_output += chunk
+ # if success_key not in sudo_output:
+ # p.stdin.write(self.runner.sudo_pass + '\n')
+ # fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ # fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ debug("getting output with communicate()")
+ stdout, stderr = p.communicate()
+ debug("done communicating")
+
+ debug("done with local.exec_command()")
+ return (p.returncode, '', stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to local '''
+
+ #vvv("PUT {0} TO {1}".format(in_path, out_path), host=self.host)
+ self._display.vvv("{0} PUT {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path))
+ if not os.path.exists(in_path):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
+ try:
+ shutil.copyfile(in_path, out_path)
+ except shutil.Error:
+ traceback.print_exc()
+ raise AnsibleError("failed to copy: {0} and {1} are the same".format(in_path, out_path))
+ except IOError:
+ traceback.print_exc()
+ raise AnsibleError("failed to transfer file to {0}".format(out_path))
+
+ def fetch_file(self, in_path, out_path):
+        ''' fetch a file from local to local -- for compatibility '''
+ #vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self.host)
+ self._display.vvv("{0} FETCH {1} TO {2}".format(self._connection_info.remote_addr, in_path, out_path))
+ self.put_file(in_path, out_path)
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
diff --git a/lib/ansible/runner/connection_plugins/paramiko_ssh.py b/lib/ansible/plugins/connections/paramiko_ssh.py
index 8eaf97c3f6..01e95451b8 100644
--- a/lib/ansible/runner/connection_plugins/paramiko_ssh.py
+++ b/lib/ansible/plugins/connections/paramiko_ssh.py
@@ -14,7 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
# ---
# The paramiko transport is provided because many distributions, in particular EL6 and before
@@ -34,12 +35,13 @@ import traceback
import fcntl
import re
import sys
+
from termios import tcflush, TCIFLUSH
from binascii import hexlify
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
+
from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.plugins.connections import ConnectionBase
AUTHENTICITY_MSG="""
paramiko: The authenticity of host '%s' can't be established.
@@ -67,33 +69,38 @@ class MyAddPolicy(object):
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
- def __init__(self, runner):
- self.runner = runner
+ def __init__(self, new_stdin):
+ self._new_stdin = new_stdin
def missing_host_key(self, client, hostname, key):
if C.HOST_KEY_CHECKING:
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
+ # FIXME: need to fix lock file stuff
+ #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
+ #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
old_stdin = sys.stdin
- sys.stdin = self.runner._new_stdin
- fingerprint = hexlify(key.get_fingerprint())
- ktype = key.get_name()
+ sys.stdin = self._new_stdin
# clear out any premature input on sys.stdin
tcflush(sys.stdin, TCIFLUSH)
+ fingerprint = hexlify(key.get_fingerprint())
+ ktype = key.get_name()
+
inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
sys.stdin = old_stdin
+
if inp not in ['yes','y','']:
- fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
- raise errors.AnsibleError("host connection rejected by user")
+ # FIXME: lock file stuff
+ #fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
+ #fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
+ raise AnsibleError("host connection rejected by user")
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
+ # FIXME: lock file stuff
+ #fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
+ #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
key._added_by_ansible_this_time = True
@@ -110,28 +117,18 @@ class MyAddPolicy(object):
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
-class Connection(object):
+class Connection(ConnectionBase):
''' SSH based connections with Paramiko '''
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-
- self.ssh = None
- self.sftp = None
- self.runner = runner
- self.host = host
- self.port = port or 22
- self.user = user
- self.password = password
- self.private_key_file = private_key_file
- self.has_pipelining = False
-
- # TODO: add pbrun, pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
+ @property
+ def transport(self):
+ ''' used to identify this connection object from other classes '''
+ return 'paramiko'
def _cache_key(self):
- return "%s__%s__" % (self.host, self.user)
+ return "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
- def connect(self):
+ def _connect(self):
cache_key = self._cache_key()
if cache_key in SSH_CONNECTION_CACHE:
self.ssh = SSH_CONNECTION_CACHE[cache_key]
@@ -143,9 +140,10 @@ class Connection(object):
''' activates the connection object '''
if not HAVE_PARAMIKO:
- raise errors.AnsibleError("paramiko is not installed")
+ raise AnsibleError("paramiko is not installed")
- vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host)
+ port = self._connection_info.port or 22
+ self._display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr)
ssh = paramiko.SSHClient()
@@ -154,122 +152,95 @@ class Connection(object):
if C.HOST_KEY_CHECKING:
ssh.load_system_host_keys()
- ssh.set_missing_host_key_policy(MyAddPolicy(self.runner))
+ ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin))
allow_agent = True
- if self.password is not None:
+ if self._connection_info.password is not None:
allow_agent = False
try:
-
- if self.private_key_file:
- key_filename = os.path.expanduser(self.private_key_file)
- elif self.runner.private_key_file:
- key_filename = os.path.expanduser(self.runner.private_key_file)
- else:
- key_filename = None
- ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True,
- key_filename=key_filename, password=self.password,
- timeout=self.runner.timeout, port=self.port)
-
- except Exception, e:
-
+ key_filename = None
+ if self._connection_info.private_key_file:
+ key_filename = os.path.expanduser(self._connection_info.private_key_file)
+
+ ssh.connect(
+ self._connection_info.remote_addr,
+ username=self._connection_info.remote_user,
+ allow_agent=allow_agent,
+ look_for_keys=True,
+ key_filename=key_filename,
+ password=self._connection_info.password,
+ timeout=self._connection_info.timeout,
+ port=port,
+ )
+ except Exception as e:
msg = str(e)
if "PID check failed" in msg:
- raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
+ raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif "Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
- self.user, self.host, self.port, msg)
- raise errors.AnsibleConnectionFailed(msg)
+ self._connection_info.remote_user, self._connection_info.remote_addr, port, msg)
+ raise AnsibleConnectionFailure(msg)
else:
- raise errors.AnsibleConnectionFailed(msg)
+ raise AnsibleConnectionFailure(msg)
return ssh
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
+ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
''' run a command on the remote host '''
- if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
+ raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
bufsize = 4096
try:
-
self.ssh.get_transport().set_keepalive(5)
chan = self.ssh.get_transport().open_session()
-
- except Exception, e:
-
+ except Exception as e:
msg = "Failed to open session"
if len(str(e)) > 0:
msg += ": %s" % str(e)
- raise errors.AnsibleConnectionFailed(msg)
+ raise AnsibleConnectionFailure(msg)
- no_prompt_out = ''
- no_prompt_err = ''
- if not (self.runner.become and sudoable):
-
- if executable:
- quoted_command = executable + ' -c ' + pipes.quote(cmd)
- else:
- quoted_command = cmd
- vvv("EXEC %s" % quoted_command, host=self.host)
- chan.exec_command(quoted_command)
-
- else:
-
- # sudo usually requires a PTY (cf. requiretty option), therefore
- # we give it one by default (pty=True in ansble.cfg), and we try
- # to initialise from the calling environment
- if C.PARAMIKO_PTY:
- chan.get_pty(term=os.getenv('TERM', 'vt100'),
- width=int(os.getenv('COLUMNS', 0)),
- height=int(os.getenv('LINES', 0)))
- if self.runner.become and sudoable:
- shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
-
- vvv("EXEC %s" % shcmd, host=self.host)
- become_output = ''
-
- try:
+ # sudo usually requires a PTY (cf. requiretty option), therefore
+ # we give it one by default (pty=True in ansble.cfg), and we try
+ # to initialise from the calling environment
+ if C.PARAMIKO_PTY:
+ chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
- chan.exec_command(shcmd)
+ self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr)
- if self.runner.become_pass:
-
- while True:
-
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
- chunk = chan.recv(bufsize)
-
- if not chunk:
- if 'unknown user' in become_output:
- raise errors.AnsibleError(
- 'user %s does not exist' % become_user)
- else:
- raise errors.AnsibleError('ssh connection ' +
- 'closed waiting for password prompt')
- become_output += chunk
-
- if success_key not in become_output:
-
- if sudoable:
- chan.sendall(self.runner.become_pass + '\n')
- else:
- no_prompt_out += become_output
- no_prompt_err += become_output
-
- except socket.timeout:
+ no_prompt_out = ''
+ no_prompt_err = ''
+ become_output = ''
- raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
+ try:
+ chan.exec_command(cmd)
+ if self._connection_info.become_pass:
+ while True:
+ if success_key in become_output or \
+ (prompt and become_output.endswith(prompt)) or \
+ utils.su_prompts.check_su_prompt(become_output):
+ break
+ chunk = chan.recv(bufsize)
+ if not chunk:
+ if 'unknown user' in become_output:
+ raise AnsibleError(
+ 'user %s does not exist' % become_user)
+ else:
+ raise AnsibleError('ssh connection ' +
+ 'closed waiting for password prompt')
+ become_output += chunk
+ if success_key not in become_output:
+ if self._connection_info.become:
+ chan.sendall(self._connection_info.become_pass + '\n')
+ else:
+ no_prompt_out += become_output
+ no_prompt_err += become_output
+ except socket.timeout:
+ raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
stdout = ''.join(chan.makefile('rb', bufsize))
stderr = ''.join(chan.makefile_stderr('rb', bufsize))
@@ -279,24 +250,24 @@ class Connection(object):
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+ raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
self.sftp = self.ssh.open_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)" % e)
try:
self.sftp.put(in_path, out_path)
except IOError:
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ raise AnsibleError("failed to transfer file to %s" % out_path)
def _connect_sftp(self):
- cache_key = "%s__%s__" % (self.host, self.user)
+ cache_key = "%s__%s__" % (self._connection_info.remote_addr, self._connection_info.remote_user)
if cache_key in SFTP_CONNECTION_CACHE:
return SFTP_CONNECTION_CACHE[cache_key]
else:
@@ -306,17 +277,17 @@ class Connection(object):
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
try:
self.sftp = self._connect_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)", e)
+ except Exception as e:
+ raise AnsibleError("failed to open a SFTP connection (%s)", e)
try:
self.sftp.get(in_path, out_path)
except IOError:
- raise errors.AnsibleError("failed to transfer file from %s" % in_path)
+ raise AnsibleError("failed to transfer file from %s" % in_path)
def _any_keys_added(self):
diff --git a/lib/ansible/plugins/connections/ssh.py b/lib/ansible/plugins/connections/ssh.py
new file mode 100644
index 0000000000..49e1e3b966
--- /dev/null
+++ b/lib/ansible/plugins/connections/ssh.py
@@ -0,0 +1,462 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import subprocess
+import shlex
+import pipes
+import random
+import select
+import fcntl
+import hmac
+import pwd
+import gettext
+import pty
+from hashlib import sha1
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.plugins.connections import ConnectionBase
+
+class Connection(ConnectionBase):
+ ''' ssh based connections '''
+
    def __init__(self, *args, **kwargs):
        """Set up SSH-plugin state, then defer to ConnectionBase.__init__."""
        # SSH connection specific init stuff
        self.HASHED_KEY_MAGIC = "|1|"   # prefix marking hashed entries in known_hosts
        self._has_pipelining = True     # this transport can pipe module code over stdin

        # FIXME: move the lockfile locations to ActionBase?
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        #self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
        # Directory for the OpenSSH ControlPath sockets (connection multiplexing).
        # NOTE(review): hard-coded to /tmp until the cp-dir/lockfile handling above is ported.
        self._cp_dir = '/tmp'
        #fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)

        super(Connection, self).__init__(*args, **kwargs)
+
+ @property
+ def transport(self):
+ ''' used to identify this connection object from other classes '''
+ return 'ssh'
+
    def _connect(self):
        ''' connect to the remote host '''
        # "Connecting" here only assembles self._common_args, the shared ssh
        # option list; the actual network connections are made per-command by
        # the openssh binaries in exec_command/put_file/fetch_file.

        self._display.vvv("ESTABLISH SSH CONNECTION FOR USER: {0}".format(self._connection_info.remote_user), host=self._connection_info.remote_addr)

        if self._connected:
            return self

        self._common_args = []
        extra_args = C.ANSIBLE_SSH_ARGS
        if extra_args is not None:
            # make sure there is no empty string added as this can produce weird errors
            self._common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
        else:
            # no user-supplied args: default to ControlMaster multiplexing
            self._common_args += (
                "-o", "ControlMaster=auto",
                "-o", "ControlPersist=60s",
                "-o", "ControlPath=\"{0}\"".format(C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir)),
            )

        # if the user asked for ControlPersist in their own args but did not
        # provide a ControlPath, supply one for them
        cp_in_use = False
        cp_path_set = False
        for arg in self._common_args:
            if "ControlPersist" in arg:
                cp_in_use = True
            if "ControlPath" in arg:
                cp_path_set = True

        if cp_in_use and not cp_path_set:
            self._common_args += ("-o", "ControlPath=\"{0}\"".format(
                C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self._cp_dir))
            )

        if not C.HOST_KEY_CHECKING:
            self._common_args += ("-o", "StrictHostKeyChecking=no")

        if self._connection_info.port is not None:
            self._common_args += ("-o", "Port={0}".format(self._connection_info.port))
        # FIXME: need to get this from connection info
        #if self.private_key_file is not None:
        #    self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.private_key_file)))
        #elif self.runner.private_key_file is not None:
        #    self._common_args += ("-o", "IdentityFile=\"{0}\"".format(os.path.expanduser(self.runner.private_key_file)))
        if self._connection_info.password:
            # password auth via sshpass: disable mechanisms it cannot drive
            self._common_args += ("-o", "GSSAPIAuthentication=no",
                                  "-o", "PubkeyAuthentication=no")
        else:
            # key/GSSAPI auth only; never fall back to interactive prompts
            self._common_args += ("-o", "KbdInteractiveAuthentication=no",
                                  "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
                                  "-o", "PasswordAuthentication=no")
        # only pass -o User= when it differs from the local effective user
        if self._connection_info.remote_user is not None and self._connection_info.remote_user != pwd.getpwuid(os.geteuid())[0]:
            self._common_args += ("-o", "User={0}".format(self._connection_info.remote_user))
        # FIXME: figure out where this goes
        #self._common_args += ("-o", "ConnectTimeout={0}".format(self.runner.timeout))
        self._common_args += ("-o", "ConnectTimeout=15")

        self._connected = True

        return self
+
+ def _run(self, cmd, indata):
+ if indata:
+ # do not use pseudo-pty
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = p.stdin
+ else:
+ # try to use upseudo-pty
+ try:
+ # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
+ master, slave = pty.openpty()
+ p = subprocess.Popen(cmd, stdin=slave, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = os.fdopen(master, 'w', 0)
+ os.close(slave)
+ except:
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdin = p.stdin
+
+ return (p, stdin)
+
+ def _password_cmd(self):
+ if self._connection_info.password:
+ try:
+ p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+ except OSError:
+ raise AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
+ (self.rfd, self.wfd) = os.pipe()
+ return ("sshpass", "-d{0}".format(self.rfd))
+ return []
+
+ def _send_password(self):
+ if self._connection_info.password:
+ os.close(self.rfd)
+ os.write(self.wfd, "{0}\n".format(self._connection_info.password))
+ os.close(self.wfd)
+
    def _communicate(self, p, stdin, indata, su=False, sudoable=False, prompt=None):
        """Feed *indata* (if any) to child process *p* and drain stdout/stderr.

        Returns a ``(returncode, stdout, stderr)`` tuple. The su/sudoable/prompt
        parameters are accepted but currently unused -- the privilege-escalation
        handling is still commented out below pending the v2 port.
        """
        # put the child's pipes back into blocking mode for the select() loop
        fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
        # We can't use p.communicate here because the ControlMaster may have stdout open as well
        stdout = ''
        stderr = ''
        rpipes = [p.stdout, p.stderr]
        if indata:
            try:
                stdin.write(indata)
                stdin.close()
            except:
                raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
        # Read stdout/stderr from process
        while True:
            rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)

            # FIXME: su/sudo stuff
            # fail early if the sudo/su password is wrong
            #if self.runner.sudo and sudoable:
            #    if self.runner.sudo_pass:
            #        incorrect_password = gettext.dgettext(
            #            "sudo", "Sorry, try again.")
            #        if stdout.endswith("%s\r\n%s" % (incorrect_password,
            #                prompt)):
            #            raise AnsibleError('Incorrect sudo password')
            #
            #        if stdout.endswith(prompt):
            #            raise AnsibleError('Missing sudo password')
            #
            #if self.runner.su and su and self.runner.su_pass:
            #    incorrect_password = gettext.dgettext(
            #        "su", "Sorry")
            #    if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
            #        raise AnsibleError('Incorrect su password')

            if p.stdout in rfd:
                dat = os.read(p.stdout.fileno(), 9000)
                stdout += dat
                # empty read means the child closed its end of the pipe
                if dat == '':
                    rpipes.remove(p.stdout)
            if p.stderr in rfd:
                dat = os.read(p.stderr.fileno(), 9000)
                stderr += dat
                if dat == '':
                    rpipes.remove(p.stderr)
            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfd) and p.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually p.poll() is always None here if rpipes is empty
            elif not rpipes and p.poll() == None:
                p.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break
        # close stdin after process is terminated and stdout/stderr are read
        # completely (see also issue #848)
        stdin.close()
        return (p.returncode, stdout, stderr)
+
+ def not_in_host_file(self, host):
+ if 'USER' in os.environ:
+ user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
+ else:
+ user_host_file = "~/.ssh/known_hosts"
+ user_host_file = os.path.expanduser(user_host_file)
+
+ host_file_list = []
+ host_file_list.append(user_host_file)
+ host_file_list.append("/etc/ssh/ssh_known_hosts")
+ host_file_list.append("/etc/ssh/ssh_known_hosts2")
+
+ hfiles_not_found = 0
+ for hf in host_file_list:
+ if not os.path.exists(hf):
+ hfiles_not_found += 1
+ continue
+ try:
+ host_fh = open(hf)
+ except IOError as e:
+ hfiles_not_found += 1
+ continue
+ else:
+ data = host_fh.read()
+ host_fh.close()
+
+ for line in data.split("\n"):
+ if line is None or " " not in line:
+ continue
+ tokens = line.split()
+ if not tokens:
+ continue
+ if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
+ # this is a hashed known host entry
+ try:
+ (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
+ hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
+ hash.update(host)
+ if hash.digest() == kn_host.decode('base64'):
+ return False
+ except:
+ # invalid hashed host key, skip it
+ continue
+ else:
+ # standard host file entry
+ if host in tokens[0]:
+ return False
+
+ if (hfiles_not_found == len(host_file_list)):
+ self._display.vvv("EXEC previous known host file not found for {0}".format(host))
+ return True
+
    def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
        ''' run a command on the remote host '''
        # Assemble the full command line: optional sshpass prefix, the ssh
        # binary with compression, tty/verbosity flags, the shared option list
        # built in _connect(), the target host, and the remote command string.
        # NOTE(review): the .append() calls below assume _password_cmd()
        # returned a list -- confirm it does not hand back a tuple.

        ssh_cmd = self._password_cmd()
        ssh_cmd += ("ssh", "-C")
        if not in_data:
            # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
            # inside a tty automatically invokes the python interactive-mode but the modules are not
            # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
            ssh_cmd.append("-tt")
        if self._connection_info.verbosity > 3:
            ssh_cmd.append("-vvv")
        else:
            ssh_cmd.append("-q")
        ssh_cmd += self._common_args

        # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
        # not sure if it's all working yet so this remains commented out
        #if self._ipv6:
        #    ssh_cmd += ['-6']
        ssh_cmd.append(self._connection_info.remote_addr)

        ssh_cmd.append(cmd)
        self._display.vvv("EXEC {0}".format(' '.join(ssh_cmd)), host=self._connection_info.remote_addr)

        not_in_host_file = self.not_in_host_file(self._connection_info.remote_addr)

        # FIXME: move the locations of these lock files, same as init above
        #if C.HOST_KEY_CHECKING and not_in_host_file:
        #    # lock around the initial SSH connectivity so the user prompt about whether to add
        #    # the host to known hosts is not intermingled with multiprocess output.
        #    fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
        #    fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)

        # create process
        (p, stdin) = self._run(ssh_cmd, in_data)

        # hand the password (if any) to the sshpass prefix
        self._send_password()

        no_prompt_out = ''
        no_prompt_err = ''
        # FIXME: su/sudo stuff
        #if (self.runner.sudo and sudoable and self.runner.sudo_pass) or \
        #        (self.runner.su and su and self.runner.su_pass):
        #    # several cases are handled for sudo privileges with password
        #    # * NOPASSWD (tty & no-tty): detect success_key on stdout
        #    # * without NOPASSWD:
        #    #   * detect prompt on stdout (tty)
        #    #   * detect prompt on stderr (no-tty)
        #    fcntl.fcntl(p.stdout, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    fcntl.fcntl(p.stderr, fcntl.F_SETFL,
        #                fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
        #    sudo_output = ''
        #    sudo_errput = ''
        #
        #    while True:
        #        if success_key in sudo_output or \
        #            (self.runner.sudo_pass and sudo_output.endswith(prompt)) or \
        #            (self.runner.su_pass and utils.su_prompts.check_su_prompt(sudo_output)):
        #            break
        #
        #        rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
        #                                      [p.stdout], self.runner.timeout)
        #        if p.stderr in rfd:
        #            chunk = p.stderr.read()
        #            if not chunk:
        #                raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
        #            sudo_errput += chunk
        #            incorrect_password = gettext.dgettext(
        #                "sudo", "Sorry, try again.")
        #            if sudo_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
        #                raise AnsibleError('Incorrect sudo password')
        #            elif sudo_errput.endswith(prompt):
        #                stdin.write(self.runner.sudo_pass + '\n')
        #
        #        if p.stdout in rfd:
        #            chunk = p.stdout.read()
        #            if not chunk:
        #                raise AnsibleError('ssh connection closed waiting for sudo or su password prompt')
        #            sudo_output += chunk
        #
        #        if not rfd:
        #            # timeout. wrap up process communication
        #            stdout = p.communicate()
        #            raise AnsibleError('ssh connection error waiting for sudo or su password prompt')
        #
        #    if success_key not in sudo_output:
        #        if sudoable:
        #            stdin.write(self.runner.sudo_pass + '\n')
        #        elif su:
        #            stdin.write(self.runner.su_pass + '\n')
        #    else:
        #        no_prompt_out += sudo_output
        #        no_prompt_err += sudo_errput

        #(returncode, stdout, stderr) = self._communicate(p, stdin, in_data, su=su, sudoable=sudoable, prompt=prompt)
        # FIXME: the prompt won't be here anymore
        prompt=""
        (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, prompt=prompt)

        #if C.HOST_KEY_CHECKING and not_in_host_file:
        #    # lock around the initial SSH connectivity so the user prompt about whether to add
        #    # the host to known hosts is not intermingled with multiprocess output.
        #    fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
        #    fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
        # detect ssh versions too old to understand ControlPersist
        controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or 'unknown configuration option: ControlPersist' in stderr

        if C.HOST_KEY_CHECKING:
            # sshpass exits 6 when host key verification fails
            if ssh_cmd[0] == "sshpass" and p.returncode == 6:
                raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')

        if p.returncode != 0 and controlpersisterror:
            raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
        # FIXME: module name isn't in runner
        #if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
        if p.returncode == 255 and in_data:
            raise AnsibleConnectionFailure('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')

        return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
+
+ def put_file(self, in_path, out_path):
+ ''' transfer a file from local to remote '''
+ self._display.vvv("PUT {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr)
+ if not os.path.exists(in_path):
+ raise AnsibleFileNotFound("file or module does not exist: {0}".format(in_path))
+ cmd = self._password_cmd()
+
+ # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
+ host = self._connection_info.remote_addr
+
+ # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
+ # not sure if it's all working yet so this remains commented out
+ #if self._ipv6:
+ # host = '[%s]' % host
+
+ if C.DEFAULT_SCP_IF_SSH:
+ cmd.append('scp')
+ cmd += self._common_args
+ cmd.append(in_path,host + ":" + pipes.quote(out_path))
+ indata = None
+ else:
+ cmd.append('sftp')
+ cmd += self._common_args
+ cmd.append(host)
+ indata = "put {0} {1}\n".format(pipes.quote(in_path), pipes.quote(out_path))
+
+ (p, stdin) = self._run(cmd, indata)
+
+ self._send_password()
+
+ (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
+
+ if returncode != 0:
+ raise AnsibleError("failed to transfer file to {0}:\n{1}\n{2}".format(out_path, stdout, stderr))
+
+ def fetch_file(self, in_path, out_path):
+ ''' fetch a file from remote to local '''
+ self._display.vvv("FETCH {0} TO {1}".format(in_path, out_path), host=self._connection_info.remote_addr)
+ cmd = self._password_cmd()
+
+ # FIXME: make a function, used in all 3 methods EXEC/PUT/FETCH
+ host = self._connection_info.remote_addr
+
+ # FIXME: ipv6 stuff needs to be figured out. It's in the connection info, however
+ # not sure if it's all working yet so this remains commented out
+ #if self._ipv6:
+ # host = '[%s]' % self._connection_info.remote_addr
+
+ if C.DEFAULT_SCP_IF_SSH:
+ cmd.append('scp')
+ cmd += self._common_args
+ cmd += ('{0}:{1}'.format(host, in_path), out_path)
+ indata = None
+ else:
+ cmd.append('sftp')
+ cmd += self._common_args
+ cmd.append(host)
+ indata = "get {0} {1}\n".format(in_path, out_path)
+
+ p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ self._send_password()
+ stdout, stderr = p.communicate(indata)
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file from {0}:\n{1}\n{2}".format(in_path, stdout, stderr))
+
+ def close(self):
+ ''' not applicable since we're executing openssh binaries '''
+ self._connected = False
+
diff --git a/lib/ansible/runner/connection_plugins/winrm.py b/lib/ansible/plugins/connections/winrm.py
index b41a74c8e1..8a42da2534 100644
--- a/lib/ansible/runner/connection_plugins/winrm.py
+++ b/lib/ansible/plugins/connections/winrm.py
@@ -15,25 +15,23 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from __future__ import absolute_import
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import base64
import os
import re
import shlex
import traceback
-import urlparse
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv, vvvv, verbose
-from ansible.runner.shell_plugins import powershell
+
+from six.moves.urllib import parse
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
from winrm.protocol import Protocol
except ImportError:
- raise errors.AnsibleError("winrm is not installed")
+ raise AnsibleError("winrm is not installed")
HAVE_KERBEROS = False
try:
@@ -42,10 +40,12 @@ try:
except ImportError:
pass
-def vvvvv(msg, host=None):
- verbose(msg, host=host, caplevel=4)
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
+from ansible.plugins.connections import ConnectionBase
+from ansible.plugins import shell_loader
-class Connection(object):
+class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport_schemes = {
@@ -53,69 +53,79 @@ class Connection(object):
'https': [('kerberos', 'https'), ('plaintext', 'https')],
}
- def __init__(self, runner, host, port, user, password, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.port = port
- self.user = user
- self.password = password
- self.has_pipelining = False
- self.default_shell = 'powershell'
+ def __init__(self, *args, **kwargs):
+
+ self.has_pipelining = False
self.default_suffixes = ['.ps1', '']
- self.protocol = None
- self.shell_id = None
- self.delegate = None
+ self.protocol = None
+ self.shell_id = None
+ self.delegate = None
+
+ self._shell = shell_loader.get('powershell')
- # Add runas support
- #self.become_methods_supported=['runas']
+ # TODO: Add runas support
self.become_methods_supported=[]
+ super(Connection, self).__init__(*args, **kwargs)
+
+ @property
+ def transport(self):
+ ''' used to identify this connection object from other classes '''
+ return 'winrm'
+
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
- port = self.port or 5986
- vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
- (self.user, port, self.host), host=self.host)
- netloc = '%s:%d' % (self.host, port)
+ port = self._connection_info.port or 5986
+ self._display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
+ (self._connection_info.remote_user, port, self._connection_info.remote_addr), host=self._connection_info.remote_addr)
+ netloc = '%s:%d' % (self._connection_info.remote_addr, port)
exc = None
for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
- if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user):
+ if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self._connection_info.remote_user):
continue
+
if transport == 'kerberos':
- realm = self.user.split('@', 1)[1].strip() or None
+ realm = self._connection_info.remote_user.split('@', 1)[1].strip() or None
else:
realm = None
- endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
- vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
- host=self.host)
- protocol = Protocol(endpoint, transport=transport,
- username=self.user, password=self.password,
- realm=realm)
+
+ endpoint = parse.urlunsplit((scheme, netloc, '/wsman', '', ''))
+
+ self._display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._connection_info.remote_addr)
+ protocol = Protocol(
+ endpoint,
+ transport=transport,
+ username=self._connection_info.remote_user,
+ password=self._connection_info.password,
+ realm=realm
+ )
+
try:
protocol.send_message('')
return protocol
- except WinRMTransportError, exc:
+ except WinRMTransportError as exc:
err_msg = str(exc)
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
- raise errors.AnsibleError("the connection attempt timed out")
+ raise AnsibleError("the connection attempt timed out")
m = re.search(r'Code\s+?(\d{3})', err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
- raise errors.AnsibleError("the username/password specified for this server was incorrect")
+ raise AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
return protocol
- vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
+ self._display.vvvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self._connection_info.remote_addr)
continue
if exc:
- raise errors.AnsibleError(str(exc))
+ raise AnsibleError(str(exc))
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
- vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
+ self._display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr)
else:
- vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
+ self._display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._connection_info.remote_addr)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
@@ -125,49 +135,46 @@ class Connection(object):
command_id = self.protocol.run_command(self.shell_id, command, args)
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
- vvvv('WINRM RESULT %r' % response, host=self.host)
+ self._display.vvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr)
else:
- vvvvv('WINRM RESULT %r' % response, host=self.host)
- vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
- vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
+ self._display.vvvvvv('WINRM RESULT %r' % response, host=self._connection_info.remote_addr)
+ self._display.vvvvvv('WINRM STDOUT %s' % response.std_out, host=self._connection_info.remote_addr)
+ self._display.vvvvvv('WINRM STDERR %s' % response.std_err, host=self._connection_info.remote_addr)
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
- def connect(self):
+ def _connect(self):
if not self.protocol:
self.protocol = self._winrm_connect()
return self
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
+ def exec_command(self, cmd, tmp_path, executable='/bin/sh', in_data=None):
cmd = cmd.encode('utf-8')
cmd_parts = shlex.split(cmd, posix=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = base64.b64decode(encoded_cmd)
- vvv("EXEC %s" % decoded_cmd, host=self.host)
+ self._display.vvv("EXEC %s" % decoded_cmd, host=self._connection_info.remote_addr)
else:
- vvv("EXEC %s" % cmd, host=self.host)
+ self._display.vvv("EXEC %s" % cmd, host=self._connection_info.remote_addr)
# For script/raw support.
if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
- script = powershell._build_file_cmd(cmd_parts, quote_args=False)
- cmd_parts = powershell._encode_script(script, as_list=True)
+ script = self._shell._build_file_cmd(cmd_parts, quote_args=False)
+ cmd_parts = self._shell._encode_script(script, as_list=True)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
- except Exception, e:
+ except Exception as e:
traceback.print_exc()
- raise errors.AnsibleError("failed to exec cmd %s" % cmd)
+ raise AnsibleError("failed to exec cmd %s" % cmd)
return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
def put_file(self, in_path, out_path):
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
+ raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
with open(in_path) as in_file:
in_size = os.path.getsize(in_path)
script_template = '''
@@ -179,8 +186,8 @@ class Connection(object):
[void]$s.Close();
'''
# Determine max size of data we can pass per command.
- script = script_template % (powershell._escape(out_path), in_size, '', in_size)
- cmd = powershell._encode_script(script)
+ script = script_template % (self._shell._escape(out_path), in_size, '', in_size)
+ cmd = self._shell._encode_script(script)
# Encode script with no data, subtract its length from 8190 (max
# windows command length), divide by 2.67 (UTF16LE base64 command
# encoding), then by 1.35 again (data base64 encoding).
@@ -192,19 +199,19 @@ class Connection(object):
if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
- script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
- vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
+ script = script_template % (self._shell._escape(out_path), offset, b64_data, in_size)
+ self._display.vvvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self._connection_info.remote_addr)
+ cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
except Exception:
traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ raise AnsibleError("failed to transfer file to %s" % out_path)
def fetch_file(self, in_path, out_path):
out_path = out_path.replace('\\', '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
+ self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._connection_info.remote_addr)
buffer_size = 2**19 # 0.5MB chunks
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
@@ -233,9 +240,9 @@ class Connection(object):
Write-Error "%(path)s does not exist";
Exit 1;
}
- ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
- vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
+ ''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
+ self._display.vvvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self._connection_info.remote_addr)
+ cmd_parts = self._shell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
@@ -259,7 +266,7 @@ class Connection(object):
offset += len(data)
except Exception:
traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
+ raise AnsibleError("failed to transfer file to %s" % out_path)
finally:
if out_file:
out_file.close()
diff --git a/lib/ansible/runner/connection_plugins/zone.py b/lib/ansible/plugins/connections/zone.py
index fd3242cb6e..f7e19c3bb4 100644
--- a/lib/ansible/runner/connection_plugins/zone.py
+++ b/lib/ansible/plugins/connections/zone.py
@@ -17,6 +17,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import distutils.spawn
import traceback
diff --git a/lib/ansible/plugins/filter b/lib/ansible/plugins/filter
new file mode 120000
index 0000000000..fa1d588570
--- /dev/null
+++ b/lib/ansible/plugins/filter
@@ -0,0 +1 @@
+../../../lib/ansible/runner/filter_plugins \ No newline at end of file
diff --git a/lib/ansible/plugins/inventory/__init__.py b/lib/ansible/plugins/inventory/__init__.py
new file mode 100644
index 0000000000..03fd89429b
--- /dev/null
+++ b/lib/ansible/plugins/inventory/__init__.py
@@ -0,0 +1,82 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from abc import ABCMeta, abstractmethod
+
+from six import add_metaclass
+
+@add_metaclass(ABCMeta)
+class InventoryParser:
+    '''Abstract Base Class for retrieving inventory information
+
+    Any InventoryParser functions by taking an inven_source. The caller then
+    calls the parse() method. Once parse is called, the caller can access
+    InventoryParser.hosts for a mapping of Host objects and
+    InventoryParser.groups for a mapping of Group objects.
+    '''
+
+    def __init__(self, inven_source):
+        '''
+        InventoryParser constructors take a source of inventory information
+        that they will parse the host and group information from.
+        '''
+        self.inven_source = inven_source
+        self.reset_parser()
+
+    @abstractmethod
+    def reset_parser(self):
+        '''
+        InventoryParsers generally cache their data once parse() is
+        called. This method initializes any parser state before calling parse
+        again.
+        '''
+        # hosts/groups are name -> object mappings; parsed flags whether a
+        # successful parse() has already populated them
+        self.hosts = dict()
+        self.groups = dict()
+        self.parsed = False
+
+    def _merge(self, target, addition):
+        '''
+        This method is provided to InventoryParsers to merge host or group
+        dicts since it may take several passes to get all of the data
+
+        Example usage:
+            self.hosts = self.from_ini(filename)
+            new_hosts = self.from_script(scriptname)
+            self._merge(self.hosts, new_hosts)
+        '''
+        # entries present in both are merged object-wise; new entries are
+        # simply adopted
+        for i in addition:
+            if i in target:
+                target[i].merge(addition[i])
+            else:
+                target[i] = addition[i]
+
+    @abstractmethod
+    def parse(self, refresh=False):
+        '''
+        Parse self.inven_source, populating hosts and groups. Returns the
+        cached result when already parsed unless refresh is True.
+        '''
+        if refresh:
+            self.reset_parser()
+        if self.parsed:
+            return self.parsed
+
+        # Parse self.inven_sources here
+        pass
+
diff --git a/lib/ansible/plugins/inventory/aggregate.py b/lib/ansible/plugins/inventory/aggregate.py
new file mode 100644
index 0000000000..6bdf2ddcb6
--- /dev/null
+++ b/lib/ansible/plugins/inventory/aggregate.py
@@ -0,0 +1,61 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import InventoryParser
+#from . ini import InventoryIniParser
+#from . script import InventoryScriptParser
+
+class InventoryAggregateParser(InventoryParser):
+
+ def __init__(self, inven_sources):
+ self.inven_source = inven_sources
+ self.hosts = dict()
+ self.groups = dict()
+
+ def reset_parser(self):
+ super(InventoryAggregateParser, self).reset_parser()
+
+ def parse(self, refresh=False):
+ # InventoryDirectoryParser is a InventoryAggregateParser so we avoid
+ # a circular import by importing here
+ from . directory import InventoryAggregateParser
+ if super(InventoryAggregateParser, self).parse(refresh):
+ return self.parsed
+
+ for entry in self.inven_sources:
+ if os.path.sep in entry:
+ # file or directory
+ if os.path.isdir(entry):
+ parser = directory.InventoryDirectoryParser(filename=entry)
+ elif utils.is_executable(entry):
+ parser = InventoryScriptParser(filename=entry)
+ else:
+ parser = InventoryIniParser(filename=entry)
+ else:
+ # hostname
+ parser = HostnameParser(hostname=entry)
+ hosts, groups = parser.parse()
+ self._merge(self.hosts, hosts)
+ self._merge(self.groups, groups)
diff --git a/lib/ansible/plugins/inventory/directory.py b/lib/ansible/plugins/inventory/directory.py
new file mode 100644
index 0000000000..a75ad44ea6
--- /dev/null
+++ b/lib/ansible/plugins/inventory/directory.py
@@ -0,0 +1,52 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . aggregate import InventoryAggregateParser
+
+class InventoryDirectoryParser(InventoryAggregateParser):
+
+ def __init__(self, inven_directory):
+ directory = inven_directory
+ names = os.listdir(inven_directory)
+ filtered_names = []
+
+ # Clean up the list of filenames
+ for filename in names:
+ # Skip files that end with certain extensions or characters
+ if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
+ continue
+ # Skip hidden files
+ if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
+ continue
+ # These are things inside of an inventory basedir
+ if filename in ("host_vars", "group_vars", "vars_plugins"):
+ continue
+ fullpath = os.path.join(directory, filename)
+ new_names.append(fullpath)
+
+ super(InventoryDirectoryParser, self).__init__(new_names)
+
+ def parse(self):
+ return super(InventoryDirectoryParser, self).parse()
diff --git a/lib/ansible/plugins/inventory/ini.py b/lib/ansible/plugins/inventory/ini.py
new file mode 100644
index 0000000000..e185c1a785
--- /dev/null
+++ b/lib/ansible/plugins/inventory/ini.py
@@ -0,0 +1,60 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+#############################################
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from . import InventoryParser
+
+class InventoryIniParser(InventoryAggregateParser):
+
+ def __init__(self, inven_directory):
+ directory = inven_directory
+ names = os.listdir(inven_directory)
+ filtered_names = []
+
+ # Clean up the list of filenames
+ for filename in names:
+ # Skip files that end with certain extensions or characters
+ if any(filename.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
+ continue
+ # Skip hidden files
+ if filename.startswith('.') and not filename.startswith('.{0}'.format(os.path.sep)):
+ continue
+ # These are things inside of an inventory basedir
+ if filename in ("host_vars", "group_vars", "vars_plugins"):
+ continue
+ fullpath = os.path.join(directory, filename)
+ new_names.append(fullpath)
+
+ super(InventoryDirectoryParser, self).__init__(new_names)
+
+ def parse(self):
+ return super(InventoryDirectoryParser, self).parse()
+
+ def _before_comment(self, msg):
+ ''' what's the part of a string before a comment? '''
+ msg = msg.replace("\#","**NOT_A_COMMENT**")
+ msg = msg.split("#")[0]
+ msg = msg.replace("**NOT_A_COMMENT**","#")
+ return msg
+
diff --git a/lib/ansible/plugins/lookup/__init__.py b/lib/ansible/plugins/lookup/__init__.py
new file mode 100644
index 0000000000..8c841c81d2
--- /dev/null
+++ b/lib/ansible/plugins/lookup/__init__.py
@@ -0,0 +1,49 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+__all__ = ['LookupBase']
+
+class LookupBase:
+ def __init__(self, loader=None, **kwargs):
+ self._loader = loader
+
+ def _flatten(self, terms):
+ ret = []
+ for term in terms:
+ if isinstance(term, (list, tuple)):
+ ret.extend(term)
+ else:
+ ret.append(term)
+ return ret
+
+ def _combine(self, a, b):
+ results = []
+ for x in a:
+ for y in b:
+ results.append(self._flatten([x,y]))
+ return results
+
+ def _flatten_hash_to_list(self, terms):
+ ret = []
+ for key in terms:
+ ret.append({'key': key, 'value': terms[key]})
+ return ret
+
diff --git a/lib/ansible/runner/lookup_plugins/cartesian.py b/lib/ansible/plugins/lookup/cartesian.py
index ab7bba0f0f..7d8e08cb94 100644
--- a/lib/ansible/runner/lookup_plugins/cartesian.py
+++ b/lib/ansible/plugins/lookup/cartesian.py
@@ -14,46 +14,35 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.utils as utils
-import ansible.errors as errors
from itertools import product
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+class LookupModule(LookupBase):
"""
Create the cartesian product of lists
[1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
"""
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
+ def __lookup_variabless(self, terms, variables):
results = []
for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
+ intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
results.append(intermediate)
return results
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
+ terms = self.__lookup_variabless(terms, variables)
my_list = terms[:]
if len(my_list) == 0:
- raise errors.AnsibleError("with_cartesian requires at least one element in each list")
- return [flatten(x) for x in product(*my_list)]
+ raise AnsibleError("with_cartesian requires at least one element in each list")
+ return [self._flatten(x) for x in product(*my_list, fillvalue=None)]
diff --git a/lib/ansible/runner/lookup_plugins/csvfile.py b/lib/ansible/plugins/lookup/csvfile.py
index ce5a2b77d2..2a98d19fe4 100644
--- a/lib/ansible/runner/lookup_plugins/csvfile.py
+++ b/lib/ansible/plugins/lookup/csvfile.py
@@ -14,16 +14,17 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
import os
import codecs
import csv
-class LookupModule(object):
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, dflt=None, col=1):
@@ -34,14 +35,12 @@ class LookupModule(object):
for row in creader:
if row[0] == key:
return row[int(col)]
- except Exception, e:
- raise errors.AnsibleError("csvfile: %s" % str(e))
+ except Exception as e:
+ raise AnsibleError("csvfile: %s" % str(e))
return dflt
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
@@ -64,13 +63,13 @@ class LookupModule(object):
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
- path = utils.path_dwim(self.basedir, paramvals['file'])
+ path = self._loader.path_dwim(paramvals['file'])
var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
if var is not None:
diff --git a/lib/ansible/runner/lookup_plugins/dict.py b/lib/ansible/plugins/lookup/dict.py
index cda1546598..1b54f3db93 100644
--- a/lib/ansible/runner/lookup_plugins/dict.py
+++ b/lib/ansible/plugins/lookup/dict.py
@@ -14,26 +14,17 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
-def flatten_hash_to_list(terms):
- ret = []
- for key in terms:
- ret.append({'key': key, 'value': terms[key]})
- return ret
+class LookupModule(LookupBase):
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, varibles=None, **kwargs):
if not isinstance(terms, dict):
- raise errors.AnsibleError("with_dict expects a dict")
+ raise AnsibleError("with_dict expects a dict")
- return flatten_hash_to_list(terms)
+ return self._flatten_hash_to_list(terms)
diff --git a/lib/ansible/runner/lookup_plugins/dnstxt.py b/lib/ansible/plugins/lookup/dnstxt.py
index 4fa47bf4ee..e9dd27bfb6 100644
--- a/lib/ansible/runner/lookup_plugins/dnstxt.py
+++ b/lib/ansible/plugins/lookup/dnstxt.py
@@ -14,9 +14,11 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
import os
+
HAVE_DNS=False
try:
import dns.resolver
@@ -25,6 +27,9 @@ try:
except ImportError:
pass
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
+
# ==============================================================
# DNSTXT: DNS TXT records
#
@@ -32,17 +37,12 @@ except ImportError:
# TODO: configurable resolver IPs
# --------------------------------------------------------------
-class LookupModule(object):
+class LookupModule(LookupBase):
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+ def run(self, terms, variables=None, **kwargs):
if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
if isinstance(terms, basestring):
terms = [ terms ]
@@ -61,8 +61,10 @@ class LookupModule(object):
string = 'NXDOMAIN'
except dns.resolver.Timeout:
string = ''
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
+ except dns.exception.DNSException as e:
+ raise AnsibleError("dns.resolver unhandled exception", e)
ret.append(''.join(string))
+
return ret
+
diff --git a/lib/ansible/runner/lookup_plugins/env.py b/lib/ansible/plugins/lookup/env.py
index d4f85356ed..55847dd777 100644
--- a/lib/ansible/runner/lookup_plugins/env.py
+++ b/lib/ansible/plugins/lookup/env.py
@@ -14,22 +14,16 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
-from ansible.utils import template
import os
-class LookupModule(object):
+from ansible.plugins.lookup import LookupBase
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
-
- try:
- terms = template.template(self.basedir, terms, inject)
- except Exception, e:
- pass
+ def run(self, terms, variables, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
@@ -38,4 +32,5 @@ class LookupModule(object):
for term in terms:
var = term.split()[0]
ret.append(os.getenv(var, ''))
+
return ret
diff --git a/lib/ansible/runner/lookup_plugins/etcd.py b/lib/ansible/plugins/lookup/etcd.py
index a758a2fb0b..002068389f 100644
--- a/lib/ansible/runner/lookup_plugins/etcd.py
+++ b/lib/ansible/plugins/lookup/etcd.py
@@ -14,8 +14,9 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils
import os
import urllib2
try:
@@ -23,6 +24,8 @@ try:
except ImportError:
import simplejson as json
+from ansible.plugins.lookup import LookupBase
+
# this can be made configurable, not should not use ansible.cfg
ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
if os.getenv('ANSIBLE_ETCD_URL') is not None:
@@ -57,22 +60,18 @@ class etcd():
return value
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- self.etcd = etcd()
+class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
+ etcd = etcd()
+
ret = []
for term in terms:
key = term.split()[0]
- value = self.etcd.get(key)
+ value = etcd.get(key)
ret.append(value)
return ret
diff --git a/lib/ansible/runner/lookup_plugins/file.py b/lib/ansible/plugins/lookup/file.py
index 70bae6653a..efb039497d 100644
--- a/lib/ansible/runner/lookup_plugins/file.py
+++ b/lib/ansible/plugins/lookup/file.py
@@ -14,28 +14,25 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
import os
import codecs
-class LookupModule(object):
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- ret = []
-
- # this can happen if the variable contains a string, strictly not desired for lookup
- # plugins, but users may try it, so make it work.
if not isinstance(terms, list):
terms = [ terms ]
+ ret = []
for term in terms:
- basedir_path = utils.path_dwim(self.basedir, term)
+ basedir_path = self._loader.path_dwim(term)
relative_path = None
playbook_path = None
@@ -44,16 +41,20 @@ class LookupModule(object):
# basedir of the current file, use dwim_relative to look in the
# role/files/ directory, and finally the playbook directory
# itself (which will be relative to the current working dir)
- if '_original_file' in inject:
- relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
- if 'playbook_dir' in inject:
- playbook_path = os.path.join(inject['playbook_dir'], term)
+
+ # FIXME: the original file stuff still needs to be worked out, but the
+ # playbook_dir stuff should be able to be removed as it should
+ # be covered by the fact that the loader contains that info
+ #if '_original_file' in variables:
+ # relative_path = self._loader.path_dwim_relative(variables['_original_file'], 'files', term, self.basedir, check=False)
+ #if 'playbook_dir' in variables:
+ # playbook_path = os.path.join(variables['playbook_dir'], term)
for path in (basedir_path, relative_path, playbook_path):
if path and os.path.exists(path):
ret.append(codecs.open(path, encoding="utf8").read().rstrip())
break
else:
- raise errors.AnsibleError("could not locate file in lookup: %s" % term)
+ raise AnsibleError("could not locate file in lookup: %s" % term)
return ret
diff --git a/lib/ansible/runner/lookup_plugins/fileglob.py b/lib/ansible/plugins/lookup/fileglob.py
index 7d3cbb92be..8985906715 100644
--- a/lib/ansible/runner/lookup_plugins/fileglob.py
+++ b/lib/ansible/plugins/lookup/fileglob.py
@@ -14,26 +14,21 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import glob
-from ansible import utils
-class LookupModule(object):
+from ansible.plugins.lookup import LookupBase
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables=None, **kwargs):
ret = []
-
for term in terms:
-
- dwimmed = utils.path_dwim(self.basedir, term)
+ dwimmed = self._loader.path_dwim(term)
globbed = glob.glob(dwimmed)
ret.extend(g for g in globbed if os.path.isfile(g))
-
return ret
diff --git a/lib/ansible/runner/lookup_plugins/first_found.py b/lib/ansible/plugins/lookup/first_found.py
index a48b56a3c2..091f104c62 100644
--- a/lib/ansible/runner/lookup_plugins/first_found.py
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -14,7 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
# take a list of files and (optionally) a list of paths
# return the first existing file found in the paths
@@ -118,17 +119,18 @@
# ignore_errors: true
-from ansible import utils, errors
import os
-class LookupModule(object):
+from jinja2.exceptions import UndefinedError
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+from ansible.errors import AnsibleUndefinedVariable
+from ansible.plugins.lookup import LookupBase
+from ansible.template import Templar
+from ansible.utils.boolean import boolean
- def run(self, terms, inject=None, **kwargs):
+class LookupModule(LookupBase):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
result = None
anydict = False
@@ -144,7 +146,7 @@ class LookupModule(object):
if isinstance(term, dict):
files = term.get('files', [])
paths = term.get('paths', [])
- skip = utils.boolean(term.get('skip', False))
+ skip = boolean(term.get('skip', False))
filelist = files
if isinstance(files, basestring):
@@ -172,20 +174,30 @@ class LookupModule(object):
else:
total_search = terms
+ templar = Templar(loader=self._loader, variables=variables)
+ roledir = variables.get('roledir')
for fn in total_search:
- if inject and '_original_file' in inject:
- # check the templates and vars directories too,
- # if they exist
- for roledir in ('templates', 'vars'):
- path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
- if os.path.exists(path):
- return [path]
- # if none of the above were found, just check the
- # current filename against the basedir (this will already
- # have ../files from runner, if it's a role task
- path = utils.path_dwim(self.basedir, fn)
- if os.path.exists(path):
- return [path]
+ try:
+ fn = templar.template(fn)
+ except (AnsibleUndefinedVariable, UndefinedError) as e:
+ continue
+
+ if os.path.isabs(fn) and os.path.exists(fn):
+ return [fn]
+ else:
+ if roledir is not None:
+ # check the templates and vars directories too,if they exist
+ for subdir in ('templates', 'vars'):
+ path = self._loader.path_dwim_relative(roledir, subdir, fn)
+ if os.path.exists(path):
+ return [path]
+
+ # if none of the above were found, just check the
+ # current filename against the basedir (this will already
+ # have ../files from runner, if it's a role task
+ path = self._loader.path_dwim(fn)
+ if os.path.exists(path):
+ return [path]
else:
if skip:
return []
diff --git a/lib/ansible/runner/lookup_plugins/flattened.py b/lib/ansible/plugins/lookup/flattened.py
index 6d9dd613be..f0a8adaf5e 100644
--- a/lib/ansible/runner/lookup_plugins/flattened.py
+++ b/lib/ansible/plugins/lookup/flattened.py
@@ -14,35 +14,31 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.utils as utils
-import ansible.errors as errors
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+class LookupModule(LookupBase):
-def check_list_of_one_list(term):
- # make sure term is not a list of one (list of one..) item
- # return the final non list item if so
+ def _check_list_of_one_list(self, term):
+ # make sure term is not a list of one (list of one..) item
+ # return the final non list item if so
- if isinstance(term,list) and len(term) == 1:
- term = term[0]
- if isinstance(term,list):
- term = check_list_of_one_list(term)
+ if isinstance(term,list) and len(term) == 1:
+ term = term[0]
+ if isinstance(term,list):
+ term = self._check_list_of_one_list(term)
- return term
+ return term
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def flatten(self, terms, inject):
+ def _do_flatten(self, terms, variables):
ret = []
for term in terms:
- term = check_list_of_one_list(term)
+ term = self._check_list_of_one_list(term)
if term == 'None' or term == 'null':
# ignore undefined items
@@ -50,14 +46,14 @@ class LookupModule(object):
if isinstance(term, basestring):
# convert a variable to a list
- term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
+ term2 = listify_lookup_plugin_terms(term, variables, loader=self._loader)
# but avoid converting a plain string to a list of one string
if term2 != [ term ]:
term = term2
if isinstance(term, list):
# if it's a list, check recursively for items that are a list
- term = self.flatten(term, inject)
+ term = self._do_flatten(term, variables)
ret.extend(term)
else:
ret.append(term)
@@ -65,14 +61,10 @@ class LookupModule(object):
return ret
- def run(self, terms, inject=None, **kwargs):
-
- # see if the string represents a list and convert to list if so
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
if not isinstance(terms, list):
- raise errors.AnsibleError("with_flattened expects a list")
+ raise AnsibleError("with_flattened expects a list")
- ret = self.flatten(terms, inject)
- return ret
+ return self._do_flatten(terms, variables)
diff --git a/lib/ansible/runner/lookup_plugins/template.py b/lib/ansible/plugins/lookup/indexed_items.py
index e009b6b76b..9e242ac6bf 100644
--- a/lib/ansible/runner/lookup_plugins/template.py
+++ b/lib/ansible/plugins/lookup/indexed_items.py
@@ -14,20 +14,22 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.utils import template
-import ansible.utils as utils
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
-class LookupModule(object):
+class LookupModule(LookupBase):
def __init__(self, basedir=None, **kwargs):
self.basedir = basedir
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ if not isinstance(terms, list):
+ raise AnsibleError("with_indexed_items expects a list")
+
+ items = self._flatten(terms)
+ return zip(range(len(items)), items)
- ret = []
- for term in terms:
- ret.append(template.template_from_file(self.basedir, term, inject))
- return ret
diff --git a/lib/ansible/runner/lookup_plugins/indexed_items.py b/lib/ansible/plugins/lookup/inventory_hostnames.py
index c1db1fdee2..d09dec0c7b 100644
--- a/lib/ansible/runner/lookup_plugins/indexed_items.py
+++ b/lib/ansible/plugins/lookup/inventory_hostnames.py
@@ -1,4 +1,5 @@
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+# (c) 2013, Steven Dossett <sdossett@panath.com>
#
# This file is part of Ansible
#
@@ -15,30 +16,22 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
if not isinstance(terms, list):
- raise errors.AnsibleError("with_indexed_items expects a list")
-
- items = flatten(terms)
- return zip(range(len(items)), items)
+ raise AnsibleError("with_inventory_hostnames expects a list")
+
+ # FIXME: the inventory is no longer available this way, so we may have
+ # to dump the host list into the list of variables and read it back
+ # in here (or the inventory sources, so we can recreate the list
+ # of hosts)
+ #return self._flatten(inventory.Inventory(self.host_list).list_hosts(terms))
+ return terms
diff --git a/lib/ansible/plugins/lookup/items.py b/lib/ansible/plugins/lookup/items.py
new file mode 100644
index 0000000000..65ff66d854
--- /dev/null
+++ b/lib/ansible/plugins/lookup/items.py
@@ -0,0 +1,30 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.plugins.lookup import LookupBase
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, **kwargs):
+
+ if not isinstance(terms, list):
+ terms = [ terms ]
+
+ return self._flatten(terms)
+
diff --git a/lib/ansible/runner/lookup_plugins/lines.py b/lib/ansible/plugins/lookup/lines.py
index 5d4b70a857..0d842bf148 100644
--- a/lib/ansible/runner/lookup_plugins/lines.py
+++ b/lib/ansible/plugins/lookup/lines.py
@@ -15,24 +15,23 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import subprocess
-from ansible import utils, errors
-
-class LookupModule(object):
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+import subprocess
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
- def run(self, terms, inject=None, **kwargs):
+class LookupModule(LookupBase):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.extend(stdout.splitlines())
else:
- raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
+ raise AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
return ret
diff --git a/lib/ansible/plugins/lookup/nested.py b/lib/ansible/plugins/lookup/nested.py
new file mode 100644
index 0000000000..52f4bed1d5
--- /dev/null
+++ b/lib/ansible/plugins/lookup/nested.py
@@ -0,0 +1,51 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+
+class LookupModule(LookupBase):
+
+ def __lookup_variabless(self, terms, variables):
+ results = []
+ for x in terms:
+ intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
+ results.append(intermediate)
+ return results
+
+ def run(self, terms, variables=None, **kwargs):
+
+ terms = self.__lookup_variabless(terms, variables)
+
+ my_list = terms[:]
+ my_list.reverse()
+ result = []
+ if len(my_list) == 0:
+ raise AnsibleError("with_nested requires at least one element in the nested list")
+ result = my_list.pop()
+ while len(my_list) > 0:
+ result2 = self._combine(result, my_list.pop())
+ result = result2
+ new_result = []
+ for x in result:
+ new_result.append(self._flatten(x))
+ return new_result
+
+
diff --git a/lib/ansible/runner/lookup_plugins/password.py b/lib/ansible/plugins/lookup/password.py
index a066887e2c..2e7633a067 100644
--- a/lib/ansible/runner/lookup_plugins/password.py
+++ b/lib/ansible/plugins/lookup/password.py
@@ -16,39 +16,58 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
import os
import errno
-from string import ascii_letters, digits
import string
import random
+from string import ascii_letters, digits
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.encrypt import do_encrypt
+
+DEFAULT_LENGTH = 20
-class LookupModule(object):
+class LookupModule(LookupBase):
- LENGTH = 20
+ def random_password(self, length=DEFAULT_LENGTH, chars=C.DEFAULT_PASSWORD_CHARS):
+ '''
+ Return a random password string of length containing only chars.
+ NOTE: this was moved from the old ansible utils code, as nothing
+ else appeared to use it.
+ '''
- def __init__(self, length=None, encrypt=None, basedir=None, **kwargs):
- self.basedir = basedir
+ password = []
+ while len(password) < length:
+ new_char = os.urandom(1)
+ if new_char in chars:
+ password.append(new_char)
+
+ return ''.join(password)
def random_salt(self):
salt_chars = ascii_letters + digits + './'
- return utils.random_password(length=8, chars=salt_chars)
-
- def run(self, terms, inject=None, **kwargs):
+ return self.random_password(length=8, chars=salt_chars)
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
ret = []
+ if not isinstance(terms, list):
+ terms = [ terms ]
+
for term in terms:
# you can't have escaped spaces in yor pathname
params = term.split()
relpath = params[0]
paramvals = {
- 'length': LookupModule.LENGTH,
+ 'length': DEFAULT_LENGTH,
'encrypt': None,
'chars': ['ascii_letters','digits',".,:-_"],
}
@@ -68,22 +87,22 @@ class LookupModule(object):
paramvals['chars'] = use_chars
else:
paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
+ except (ValueError, AssertionError) as e:
+ raise AnsibleError(e)
length = paramvals['length']
encrypt = paramvals['encrypt']
use_chars = paramvals['chars']
# get password or create it if file doesn't exist
- path = utils.path_dwim(self.basedir, relpath)
+ path = self._loader.path_dwim(relpath)
if not os.path.exists(path):
pathdir = os.path.dirname(path)
if not os.path.isdir(pathdir):
try:
- os.makedirs(pathdir, mode=0700)
- except OSError, e:
- raise errors.AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
+ os.makedirs(pathdir, mode=0o700)
+ except OSError as e:
+ raise AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
password = ''.join(random.choice(chars) for _ in range(length))
@@ -94,7 +113,7 @@ class LookupModule(object):
else:
content = password
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(content + '\n')
else:
content = open(path).read().rstrip()
@@ -112,16 +131,16 @@ class LookupModule(object):
salt = self.random_salt()
content = '%s salt=%s' % (password, salt)
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(content + '\n')
# crypt not requested, remove salt if present
elif (encrypt is None and salt):
with open(path, 'w') as f:
- os.chmod(path, 0600)
+ os.chmod(path, 0o600)
f.write(password + '\n')
if encrypt:
- password = utils.do_encrypt(password, encrypt, salt=salt)
+ password = do_encrypt(password, encrypt, salt=salt)
ret.append(password)
diff --git a/lib/ansible/runner/lookup_plugins/pipe.py b/lib/ansible/plugins/lookup/pipe.py
index 0cd9e1cda5..d9f74708b2 100644
--- a/lib/ansible/runner/lookup_plugins/pipe.py
+++ b/lib/ansible/plugins/lookup/pipe.py
@@ -14,18 +14,17 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import subprocess
-from ansible import utils, errors
-class LookupModule(object):
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ def run(self, terms, variables, **kwargs):
if isinstance(terms, basestring):
terms = [ terms ]
@@ -43,10 +42,10 @@ class LookupModule(object):
'''
term = str(term)
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ p = subprocess.Popen(term, cwd=self._loader.get_basedir(), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
ret.append(stdout.decode("utf-8").rstrip())
else:
- raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
+ raise AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
return ret
diff --git a/lib/ansible/runner/lookup_plugins/random_choice.py b/lib/ansible/plugins/lookup/random_choice.py
index 9b32c2f119..de4f31cd0e 100644
--- a/lib/ansible/runner/lookup_plugins/random_choice.py
+++ b/lib/ansible/plugins/lookup/random_choice.py
@@ -14,9 +14,12 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import random
-from ansible import utils
+
+from ansible.plugins.lookup import LookupBase
# useful for introducing chaos ... or just somewhat reasonably fair selection
# amongst available mirrors
@@ -28,14 +31,9 @@ from ansible import utils
# - two
# - three
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
return [ random.choice(terms) ]
diff --git a/lib/ansible/runner/lookup_plugins/redis_kv.py b/lib/ansible/plugins/lookup/redis_kv.py
index 22c5c3754f..e499e83f93 100644
--- a/lib/ansible/runner/lookup_plugins/redis_kv.py
+++ b/lib/ansible/plugins/lookup/redis_kv.py
@@ -14,16 +14,21 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils, errors
import os
+import re
+
HAVE_REDIS=False
try:
import redis # https://github.com/andymccurdy/redis-py/
HAVE_REDIS=True
except ImportError:
pass
-import re
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
@@ -31,17 +36,15 @@ import re
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
+class LookupModule(LookupBase):
- if HAVE_REDIS == False:
- raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
+ def run(self, terms, variables, **kwargs):
- def run(self, terms, inject=None, **kwargs):
+ if not HAVE_REDIS:
+ raise AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+ if not isinstance(terms, list):
+ terms = [ terms ]
ret = []
for term in terms:
@@ -59,7 +62,7 @@ class LookupModule(object):
host = m.group('host')
port = int(m.group('port'))
except AttributeError:
- raise errors.AnsibleError("Bad URI in redis lookup")
+ raise AnsibleError("Bad URI in redis lookup")
try:
conn = redis.Redis(host=host, port=port)
diff --git a/lib/ansible/runner/lookup_plugins/sequence.py b/lib/ansible/plugins/lookup/sequence.py
index b162b3069e..1ddeba932f 100644
--- a/lib/ansible/runner/lookup_plugins/sequence.py
+++ b/lib/ansible/plugins/lookup/sequence.py
@@ -14,11 +14,16 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.errors import AnsibleError
-import ansible.utils as utils
from re import compile as re_compile, IGNORECASE
+from ansible.errors import *
+from ansible.parsing.splitter import parse_kv
+from ansible.plugins.lookup import LookupBase
+from ansible.template import Templar
+
# shortcut format
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
@@ -34,7 +39,7 @@ SHORTCUT = re_compile(
)
-class LookupModule(object):
+class LookupModule(LookupBase):
"""
sequence lookup module
@@ -73,10 +78,6 @@ class LookupModule(object):
calculating the number of entries in a sequence when a stride is specified.
"""
- def __init__(self, basedir, **kwargs):
- """absorb any keyword args"""
- self.basedir = basedir
-
def reset(self):
"""set sensible defaults"""
self.start = 1
@@ -170,26 +171,24 @@ class LookupModule(object):
"problem formatting %r with %r" % self.format
)
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables, **kwargs):
results = []
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
if isinstance(terms, basestring):
terms = [ terms ]
+ templar = Templar(loader=self._loader, variables=variables)
+
for term in terms:
try:
self.reset() # clear out things for this iteration
+ term = templar.template(term)
try:
if not self.parse_simple_args(term):
- self.parse_kv_args(utils.parse_kv(term))
- except Exception:
- raise AnsibleError(
- "unknown error parsing with_sequence arguments: %r"
- % term
- )
+ self.parse_kv_args(parse_kv(term))
+ except Exception, e:
+ raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
self.sanity_check()
diff --git a/lib/ansible/runner/lookup_plugins/subelements.py b/lib/ansible/plugins/lookup/subelements.py
index f33aae717d..09a2ca306a 100644
--- a/lib/ansible/runner/lookup_plugins/subelements.py
+++ b/lib/ansible/plugins/lookup/subelements.py
@@ -14,28 +14,21 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.utils as utils
-import ansible.errors as errors
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
+class LookupModule(LookupBase):
-class LookupModule(object):
+ def run(self, terms, variables, **kwargs):
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject)
+ terms[0] = listify_lookup_plugin_terms(terms[0], variables, loader=self._loader)
if not isinstance(terms, list) or not len(terms) == 2:
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
- terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject)
- if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
+ raise AnsibleError("subelements lookup expects a list of two items, first a dict or a list, and second a string")
if isinstance(terms[0], dict): # convert to list:
if terms[0].get('skipped',False) != False:
@@ -46,19 +39,20 @@ class LookupModule(object):
elementlist.append(terms[0][key])
else:
elementlist = terms[0]
+
subelement = terms[1]
ret = []
for item0 in elementlist:
if not isinstance(item0, dict):
- raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0)
- if item0.get('skipped',False) != False:
+ raise AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0)
+ if item0.get('skipped', False) != False:
# this particular item is to be skipped
continue
if not subelement in item0:
- raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0))
+ raise AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0))
if not isinstance(item0[subelement], list):
- raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement]))
+ raise AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement]))
sublist = item0.pop(subelement, [])
for item1 in sublist:
ret.append((item0, item1))
diff --git a/lib/ansible/plugins/lookup/template.py b/lib/ansible/plugins/lookup/template.py
new file mode 100644
index 0000000000..e53e1990a0
--- /dev/null
+++ b/lib/ansible/plugins/lookup/template.py
@@ -0,0 +1,45 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from ansible.errors import AnsibleError
+from ansible.plugins.lookup import LookupBase
+from ansible.template import Templar
+
+class LookupModule(LookupBase):
+
+ def run(self, terms, variables, **kwargs):
+
+ if not isinstance(terms, list):
+ terms = [ terms ]
+
+ templar = Templar(loader=self._loader, variables=variables)
+
+ ret = []
+ for term in terms:
+ path = self._loader.path_dwim(term)
+ if os.path.exists(path):
+ with open(path, 'r') as f:
+ template_data = f.read()
+ res = templar.template(template_data, preserve_trailing_newlines=True)
+ ret.append(res)
+ else:
+ raise AnsibleError("the template file %s could not be found for the lookup" % term)
+ return ret
diff --git a/lib/ansible/runner/lookup_plugins/together.py b/lib/ansible/plugins/lookup/together.py
index 07332c9fb9..2f53121cc8 100644
--- a/lib/ansible/runner/lookup_plugins/together.py
+++ b/lib/ansible/plugins/lookup/together.py
@@ -14,24 +14,16 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
from itertools import izip_longest
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
+from ansible.errors import *
+from ansible.plugins.lookup import LookupBase
+from ansible.utils.listify import listify_lookup_plugin_terms
-class LookupModule(object):
+class LookupModule(LookupBase):
"""
Transpose a list of arrays:
[1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
@@ -39,26 +31,20 @@ class LookupModule(object):
[1, 2], [3] -> [1, 3], [2, None]
"""
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
+ def __lookup_variabless(self, terms, variables):
results = []
for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
+ intermediate = listify_lookup_plugin_terms(x, variables, loader=self._loader)
results.append(intermediate)
return results
- def run(self, terms, inject=None, **kwargs):
-
- # this code is common with 'items.py' consider moving to utils if we need it again
+ def run(self, terms, variables=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
+ terms = self.__lookup_variabless(terms, variables)
my_list = terms[:]
if len(my_list) == 0:
raise errors.AnsibleError("with_together requires at least one element in each list")
- return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
+ return [self._flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
diff --git a/lib/ansible/runner/lookup_plugins/url.py b/lib/ansible/plugins/lookup/url.py
index b42b3b14da..9f1a89f772 100644
--- a/lib/ansible/runner/lookup_plugins/url.py
+++ b/lib/ansible/plugins/lookup/url.py
@@ -14,19 +14,17 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible import utils
+from ansible.plugins.lookup import LookupBase
import urllib2
-class LookupModule(object):
+class LookupModule(LookupBase):
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
if isinstance(terms, basestring):
terms = [ terms ]
@@ -35,10 +33,10 @@ class LookupModule(object):
try:
r = urllib2.Request(term)
response = urllib2.urlopen(r)
- except URLError, e:
+ except URLError as e:
utils.warnings("Failed lookup url for %s : %s" % (term, str(e)))
continue
- except HTTPError, e:
+ except HTTPError as e:
utils.warnings("Received HTTP error for %s : %s" % (term, str(e)))
continue
diff --git a/lib/ansible/plugins/shell/__init__.py b/lib/ansible/plugins/shell/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/plugins/shell/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/runner/shell_plugins/csh.py b/lib/ansible/plugins/shell/csh.py
index 4e9f8c8af7..29751f73ee 100644
--- a/lib/ansible/runner/shell_plugins/csh.py
+++ b/lib/ansible/plugins/shell/csh.py
@@ -14,8 +14,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
+from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
diff --git a/lib/ansible/runner/shell_plugins/fish.py b/lib/ansible/plugins/shell/fish.py
index 137c013c12..ff78941e19 100644
--- a/lib/ansible/runner/shell_plugins/fish.py
+++ b/lib/ansible/plugins/shell/fish.py
@@ -14,8 +14,10 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
+from ansible.plugins.shell.sh import ShellModule as ShModule
class ShellModule(ShModule):
diff --git a/lib/ansible/runner/shell_plugins/powershell.py b/lib/ansible/plugins/shell/powershell.py
index 850b380edd..e4331e46c6 100644
--- a/lib/ansible/runner/shell_plugins/powershell.py
+++ b/lib/ansible/plugins/shell/powershell.py
@@ -14,6 +14,8 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import base64
import os
@@ -30,35 +32,6 @@ _powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
_common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
-def _escape(value, include_vars=False):
- '''Return value escaped for use in PowerShell command.'''
- # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
- # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
- subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
- ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
- ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
- if include_vars:
- subs.append(('$', '`$'))
- pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
- substs = [s for p, s in subs]
- replace = lambda m: substs[m.lastindex - 1]
- return re.sub(pattern, replace, value)
-
-def _encode_script(script, as_list=False):
- '''Convert a PowerShell script to a single base64-encoded command.'''
- script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
- encoded_script = base64.b64encode(script.encode('utf-16-le'))
- cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
- if as_list:
- return cmd_parts
- return ' '.join(cmd_parts)
-
-def _build_file_cmd(cmd_parts, quote_args=True):
- '''Build command line to run a file, given list of file name plus args.'''
- if quote_args:
- cmd_parts = ['"%s"' % x for x in cmd_parts]
- return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + cmd_parts)
-
class ShellModule(object):
def env_prefix(self, **kwargs):
@@ -75,35 +48,23 @@ class ShellModule(object):
return ''
def remove(self, path, recurse=False):
- path = _escape(path)
+ path = self._escape(path)
if recurse:
- return _encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
+ return self._encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
- return _encode_script('''Remove-Item "%s" -Force;''' % path)
+ return self._encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile, system=False, mode=None):
- basefile = _escape(basefile)
+ basefile = self._escape(basefile)
# FIXME: Support system temp path!
- return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
-
- def expand_user(self, user_home_path):
- # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
- # not seem to work remotely, though by default we are always starting
- # in the user's home directory.
- if user_home_path == '~':
- script = 'Write-Host (Get-Location).Path'
- elif user_home_path.startswith('~\\'):
- script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:])
- else:
- script = 'Write-Host "%s"' % _escape(user_home_path)
- return _encode_script(script)
+ return self._encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
- def checksum(self, path, python_interp):
- path = _escape(path)
+ def md5(self, path):
+ path = self._escape(path)
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
+ $sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
$fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
[System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
@@ -117,15 +78,43 @@ class ShellModule(object):
Write-Host "1";
}
''' % dict(path=path)
- return _encode_script(script)
+ return self._encode_script(script)
    def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
        '''
        Build the final encoded PowerShell invocation for a transferred
        module. *env_string* and *shebang* are accepted for interface
        compatibility with the posix shell plugins but are not used here.
        When *rm_tmp* is given, removal of that temporary directory is
        chained after the module run.
        '''
        # NOTE(review): encoding to bytes before shlex.split works on
        # Python 2 but would raise on Python 3 — confirm which interpreters
        # this plugin must support.
        cmd = cmd.encode('utf-8')
        cmd_parts = shlex.split(cmd, posix=False)
        # remote modules are expected to be .ps1 scripts
        if not cmd_parts[0].lower().endswith('.ps1'):
            cmd_parts[0] = '%s.ps1' % cmd_parts[0]
        script = self._build_file_cmd(cmd_parts)
        if rm_tmp:
            rm_tmp = self._escape(rm_tmp)
            script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
        return self._encode_script(script)
+
+ def _escape(self, value, include_vars=False):
+ '''Return value escaped for use in PowerShell command.'''
+ # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
+ # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
+ subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
+ ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
+ ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
+ if include_vars:
+ subs.append(('$', '`$'))
+ pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
+ substs = [s for p, s in subs]
+ replace = lambda m: substs[m.lastindex - 1]
+ return re.sub(pattern, replace, value)
+
+ def _encode_script(self, script, as_list=False):
+ '''Convert a PowerShell script to a single base64-encoded command.'''
+ script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
+ encoded_script = base64.b64encode(script.encode('utf-16-le'))
+ cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
+ if as_list:
+ return cmd_parts
+ return ' '.join(cmd_parts)
+
+ def _build_file_cmd(self, cmd_parts):
+ '''Build command line to run a file, given list of file name plus args.'''
+ return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts])
+
diff --git a/lib/ansible/runner/shell_plugins/sh.py b/lib/ansible/plugins/shell/sh.py
index 5fb0dc3add..628df9bbfb 100644
--- a/lib/ansible/runner/shell_plugins/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -14,11 +14,15 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import os
import re
import pipes
import ansible.constants as C
+import time
+import random
# Matches "~" followed by a simple user name (letters, digits, '_', '.', '-');
# presumably used to validate candidates for user-home expansion — confirm
# against the expand_user caller.
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
diff --git a/lib/ansible/plugins/strategies/__init__.py b/lib/ansible/plugins/strategies/__init__.py
new file mode 100644
index 0000000000..f610334371
--- /dev/null
+++ b/lib/ansible/plugins/strategies/__init__.py
@@ -0,0 +1,432 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six.moves import queue as Queue
+import time
+
+from ansible.errors import *
+
+from ansible.inventory.host import Host
+from ansible.inventory.group import Group
+
+from ansible.playbook.handler import Handler
+from ansible.playbook.helpers import load_list_of_blocks
+from ansible.playbook.role import ROLE_CACHE, hash_params
+from ansible.plugins import filter_loader, lookup_loader, module_loader
+from ansible.utils.debug import debug
+
+
+__all__ = ['StrategyBase']
+
+# FIXME: this should probably be in the plugins/__init__.py, with
+# a smarter mechanism to set all of the attributes based on
+# the loaders created there
class SharedPluginLoaderObj:
    '''
    A simple object to make pass the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # snapshot the module-level loaders onto the instance so one
        # picklable object carries them all across the queue
        for attr_name, loader in (('filter_loader', filter_loader),
                                  ('lookup_loader', lookup_loader),
                                  ('module_loader', module_loader)):
            setattr(self, attr_name, loader)
+
class StrategyBase:

    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''

    def __init__(self, tqm):
        # keep references to the task queue manager's shared state so the
        # strategy can queue work and record results
        self._tqm = tqm
        self._inventory = tqm.get_inventory()
        self._workers = tqm.get_workers()
        self._notified_handlers = tqm.get_notified_handlers()
        #self._callback = tqm.get_callback()
        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q

        # internal counters
        self._pending_results = 0
        self._cur_worker = 0

        # this dictionary is used to keep track of hosts that have
        # outstanding tasks still in queue
        self._blocked_hosts = dict()

    def run(self, iterator, connection_info, result=True):
        '''
        Common end-of-play processing: run any notified handlers, emit the
        stats callback, and translate the play outcome into an exit code
        (0 = ok, 1 = failed, 2 = failed hosts, 3 = unreachable hosts).
        '''
        # save the counts on failed/unreachable hosts, as the cleanup/handler
        # methods will clear that information during their runs
        num_failed = len(self._tqm._failed_hosts)
        num_unreachable = len(self._tqm._unreachable_hosts)

        #debug("running the cleanup portion of the play")
        #result &= self.cleanup(iterator, connection_info)
        debug("running handlers")
        result &= self.run_handlers(iterator, connection_info)

        # send the stats callback
        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)

        if not result:
            if num_unreachable > 0:
                return 3
            elif num_failed > 0:
                return 2
            else:
                return 1
        else:
            return 0

    def get_hosts_remaining(self, play):
        '''Return the play's hosts which have neither failed nor become unreachable.'''
        return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]

    def get_failed_hosts(self, play):
        '''Return the play's hosts which have been marked failed.'''
        return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]

    def _queue_task(self, host, task, task_vars, connection_info):
        ''' handles queueing the task up to be sent to a worker '''

        debug("entering _queue_task() for %s/%s" % (host, task))

        # and then queue the new task
        debug("%s - putting task (%s) in queue" % (host, task))
        try:
            debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))

            # round-robin over the available worker processes
            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0

            self._pending_results += 1

            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            main_q.put((host, task, self._loader.get_basedir(), task_vars, connection_info, shared_loader_obj), block=False)
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            debug("got an error while queuing: %s" % e)
            return
        debug("exiting _queue_task() for %s/%s" % (host, task))

    def _process_pending_results(self, iterator):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''

        ret_results = []

        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get(block=False)
                debug("got result from result worker: %s" % (result,))

                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = task_result._host
                    task = task_result._task
                    if result[0] == 'host_task_failed':
                        if not task.ignore_errors:
                            debug("marking %s as failed" % host.name)
                            iterator.mark_host_failed(host)
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)

                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]

                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        # (hash the role params once -- it is invariant across the loop)
                        hashed_entry = hash_params(task_result._task._role._role_params)
                        # .items() rather than .iteritems() so this also runs on Python 3
                        for (entry, role_obj) in ROLE_CACHE[task_result._task._role._role_name].items():
                            if entry == hashed_entry:
                                role_obj._had_task_run = True

                    ret_results.append(task_result)

                elif result[0] == 'add_host':
                    task_result = result[1]
                    new_host_info = task_result.get('add_host', dict())

                    self._add_host(new_host_info)

                elif result[0] == 'add_group':
                    host = result[1]
                    task_result = result[2]
                    group_name = task_result.get('add_group')

                    self._add_group(host, group_name)

                elif result[0] == 'notify_handler':
                    host = result[1]
                    handler_name = result[2]

                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []

                    if host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(host)

                elif result[0] == 'set_host_var':
                    host = result[1]
                    var_name = result[2]
                    var_value = result[3]
                    self._variable_manager.set_host_variable(host, var_name, var_value)

                elif result[0] == 'set_host_facts':
                    host = result[1]
                    facts = result[2]
                    self._variable_manager.set_host_facts(host, facts)

                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])
            except Queue.Empty:
                # queue was drained between the empty() check and the get()
                pass

        return ret_results

    def _wait_on_pending_results(self, iterator):
        '''
        Wait for the shared counter to drop to zero, using a short sleep
        between checks to ensure we don't spin lock
        '''

        ret_results = []

        while self._pending_results > 0 and not self._tqm._terminated:
            debug("waiting for pending results (%d left)" % self._pending_results)
            results = self._process_pending_results(iterator)
            ret_results.extend(results)
            if self._tqm._terminated:
                break
            time.sleep(0.01)

        return ret_results

    def _add_host(self, host_info):
        '''
        Helper function to add a new host to inventory based on a task result.
        '''

        host_name = host_info.get('host_name')

        # Check if host in cache, add if not
        if host_name in self._inventory._hosts_cache:
            new_host = self._inventory._hosts_cache[host_name]
        else:
            new_host = Host(host_name)
            self._inventory._hosts_cache[host_name] = new_host

            # every host belongs to the 'all' group
            allgroup = self._inventory.get_group('all')
            allgroup.add_host(new_host)

        # Set/update the vars for this host
        # FIXME: probably should have a set vars method for the host?
        new_vars = host_info.get('host_vars', dict())
        new_host.vars.update(new_vars)

        new_groups = host_info.get('groups', [])
        for group_name in new_groups:
            if not self._inventory.get_group(group_name):
                new_group = Group(group_name)
                self._inventory.add_group(new_group)
                new_group.vars = self._inventory.get_group_variables(group_name)
            else:
                new_group = self._inventory.get_group(group_name)

            new_group.add_host(new_host)

            # add this host to the group cache
            if self._inventory._groups_list is not None:
                if group_name in self._inventory._groups_list:
                    if new_host.name not in self._inventory._groups_list[group_name]:
                        self._inventory._groups_list[group_name].append(new_host.name)

        # clear pattern caching completely since it's unpredictable what
        # patterns may have referenced the group
        # FIXME: is this still required?
        self._inventory.clear_pattern_cache()

    def _add_group(self, host, group_name):
        '''
        Helper function to add a group (if it does not exist), and to assign the
        specified host to that group.
        '''

        new_group = self._inventory.get_group(group_name)
        if not new_group:
            # create the new group and add it to inventory
            new_group = Group(group_name)
            self._inventory.add_group(new_group)

            # and add the group to the proper hierarchy
            allgroup = self._inventory.get_group('all')
            allgroup.add_child_group(new_group)

        # the host here is from the executor side, which means it was a
        # serialized/cloned copy and we'll need to look up the proper
        # host object from the master inventory
        actual_host = self._inventory.get_host(host.name)

        # and add the host to the group
        new_group.add_host(actual_host)

    def _load_included_file(self, included_file):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''

        data = self._loader.load_from_file(included_file._filename)
        if not isinstance(data, list):
            raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)

        # handlers get compiled into handler blocks, everything else into task blocks
        is_handler = isinstance(included_file._task, Handler)
        block_list = load_list_of_blocks(
            data,
            play=included_file._task._block._play,
            parent_block=included_file._task._block,
            task_include=included_file._task,
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader
        )

        # set the vars for this task from those specified as params to the include
        for b in block_list:
            b._vars = included_file._args.copy()

        return block_list

    def cleanup(self, iterator, connection_info):
        '''
        Iterates through failed hosts and runs any outstanding rescue/always blocks
        and handlers which may still need to be run after a failure.
        '''

        debug("in cleanup")
        result = True

        debug("getting failed hosts")
        failed_hosts = self.get_failed_hosts(iterator._play)
        if len(failed_hosts) == 0:
            debug("there are no failed hosts")
            return result

        debug("marking hosts failed in the iterator")
        # mark the host as failed in the iterator so it will take
        # any required rescue paths which may be outstanding
        for host in failed_hosts:
            iterator.mark_host_failed(host)

        debug("clearing the failed hosts list")
        # clear the failed hosts dictionary now; iterate over a copy of the
        # keys so the dict can be mutated safely (required on Python 3)
        for entry in list(self._tqm._failed_hosts.keys()):
            del self._tqm._failed_hosts[entry]

        work_to_do = True
        while work_to_do:
            work_to_do = False
            for host in failed_hosts:
                host_name = host.name

                if host_name in self._tqm._failed_hosts:
                    iterator.mark_host_failed(host)
                    del self._tqm._failed_hosts[host_name]

                if host_name in self._blocked_hosts:
                    work_to_do = True
                    continue
                elif iterator.get_next_task_for_host(host, peek=True) and host_name not in self._tqm._unreachable_hosts:
                    work_to_do = True

                    # pop the task, mark the host blocked, and queue it
                    self._blocked_hosts[host_name] = True
                    task = iterator.get_next_task_for_host(host)
                    task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                    self._tqm.send_callback('v2_playbook_on_cleanup_task_start', task)
                    self._queue_task(host, task, task_vars, connection_info)

            self._process_pending_results(iterator)
            time.sleep(0.01)

        # no more work, wait until the queue is drained
        self._wait_on_pending_results(iterator)

        return result

    def run_handlers(self, iterator, connection_info):
        '''
        Runs handlers on those hosts which have been notified.
        '''

        result = True

        # FIXME: getting the handlers from the iterators play should be
        #        a method on the iterator, which may also filter the list
        #        of handlers based on the notified list

        for handler_block in iterator._play.handlers:
            # FIXME: handlers need to support the rescue/always portions of blocks too,
            #        but this may take some work in the iterator and gets tricky when
            #        we consider the ability of meta tasks to flush handlers
            for handler in handler_block.block:
                handler_name = handler.get_name()
                if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
                    if not len(self.get_hosts_remaining(iterator._play)):
                        self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                        result = False
                        break
                    self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
                    for host in self._notified_handlers[handler_name]:
                        # only queue the handler once per host per notification round
                        if not handler.has_triggered(host):
                            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
                            self._queue_task(host, handler, task_vars, connection_info)
                            handler.flag_for_host(host)
                        self._process_pending_results(iterator)
                    self._wait_on_pending_results(iterator)
                    # wipe the notification list
                    self._notified_handlers[handler_name] = []
        debug("done running handlers, result is: %s" % result)
        return result
diff --git a/lib/ansible/plugins/strategies/free.py b/lib/ansible/plugins/strategies/free.py
new file mode 100644
index 0000000000..d0506d37dd
--- /dev/null
+++ b/lib/ansible/plugins/strategies/free.py
@@ -0,0 +1,151 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import time
+
+from ansible.plugins.strategies import StrategyBase
+from ansible.utils.debug import debug
+
class StrategyModule(StrategyBase):

    def run(self, iterator, connection_info):
        '''
        The "free" strategy is a bit more complex, in that it allows tasks to
        be sent to hosts as quickly as they can be processed. This means that
        some hosts may finish very quickly if run tasks result in little or no
        work being done versus other systems.

        The algorithm used here also tries to be more "fair" when iterating
        through hosts by remembering the last host in the list to be given a task
        and starting the search from there as opposed to the top of the hosts
        list again, which would end up favoring hosts near the beginning of the
        list.

        Returns the exit code produced by StrategyBase.run() (0 on success).
        '''

        # not imported at module level in this file; imported here so the
        # meta-action error path below does not raise NameError instead
        from ansible.errors import AnsibleError

        # the index of the last host to be given a task
        last_host = 0

        result = True

        work_to_do = True
        while work_to_do and not self._tqm._terminated:

            hosts_left = self.get_hosts_remaining(iterator._play)
            if len(hosts_left) == 0:
                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                result = False
                break

            work_to_do = False       # assume we have no more work to do
            starting_host = last_host  # save current position so we know when we've
                                       # looped back around and need to break

            # try and find an unblocked host with a task to run
            host_results = []
            while True:
                host = hosts_left[last_host]
                debug("next free host: %s" % host)
                host_name = host.get_name()

                # peek at the next task for the host, to see if there's
                # anything to do for this host
                (state, task) = iterator.get_next_task_for_host(host, peek=True)
                debug("free host state: %s" % state)
                debug("free host task: %s" % task)
                if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:

                    # set the flag so the outer loop knows we've still found
                    # some work which needs to be done
                    work_to_do = True

                    debug("this host has work to do")

                    # check to see if this host is blocked (still executing a previous task)
                    if host_name not in self._blocked_hosts:
                        # pop the task, mark the host blocked, and queue it
                        self._blocked_hosts[host_name] = True
                        (state, task) = iterator.get_next_task_for_host(host)

                        debug("getting variables")
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        debug("done getting variables")

                        # check to see if this task should be skipped, due to it being a member of a
                        # role which has already run (and whether that role allows duplicate execution)
                        # NOTE(review): these 'continue's leave the host marked blocked with the
                        # task already popped -- confirm the host is unblocked elsewhere
                        if task._role and task._role.has_run():
                            # If there is no metadata, the default behavior is to not allow duplicates,
                            # if there is metadata, check to see if the allow_duplicates flag was set to true
                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
                                debug("'%s' skipped because role has already run" % task)
                                continue

                        if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
                            debug("'%s' failed tag evaluation" % task)
                            continue

                        if task.action == 'meta':
                            # meta tasks store their args in the _raw_params field of args,
                            # since they do not use k=v pairs, so get that
                            meta_action = task.args.get('_raw_params')
                            if meta_action == 'noop':
                                # FIXME: issue a callback for the noop here?
                                continue
                            elif meta_action == 'flush_handlers':
                                # FIXME: in the 'free' mode, flushing handlers should result in
                                #        only those handlers notified for the host doing the flush
                                self.run_handlers(iterator, connection_info)
                            else:
                                raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

                            # meta tasks are handled inline, so the host is not blocked
                            self._blocked_hosts[host_name] = False
                        else:
                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                            self._queue_task(host, task, task_vars, connection_info)

                # move on to the next host and make sure we
                # haven't gone past the end of our hosts list
                last_host += 1
                if last_host > len(hosts_left) - 1:
                    last_host = 0

                # if we've looped around back to the start, break out
                if last_host == starting_host:
                    break

            results = self._process_pending_results(iterator)
            host_results.extend(results)

            # pause briefly so we don't spin lock
            time.sleep(0.05)

        try:
            results = self._wait_on_pending_results(iterator)
            host_results.extend(results)
        except Exception as e:
            # FIXME: ctrl+c can cause some failures here, so catch them
            #        with the appropriate error type
            print("wtf: %s" % e)
            pass

        # run the base class run() method, which executes the cleanup function
        # and runs any outstanding handlers which have been triggered.
        # Pass our accumulated result and return the exit code -- the original
        # dropped both, so the strategy always returned None to its caller.
        return super(StrategyModule, self).run(iterator, connection_info, result)
+
diff --git a/lib/ansible/plugins/strategies/linear.py b/lib/ansible/plugins/strategies/linear.py
new file mode 100644
index 0000000000..95ecac1451
--- /dev/null
+++ b/lib/ansible/plugins/strategies/linear.py
@@ -0,0 +1,307 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.errors import AnsibleError
+from ansible.executor.play_iterator import PlayIterator
+from ansible.playbook.block import Block
+from ansible.playbook.task import Task
+from ansible.plugins import action_loader
+from ansible.plugins.strategies import StrategyBase
+from ansible.utils.debug import debug
+
+class StrategyModule(StrategyBase):
+
+ def _get_next_task_lockstep(self, hosts, iterator):
+ '''
+ Returns a list of (host, task) tuples, where the task may
+ be a noop task to keep the iterator in lock step across
+ all hosts.
+ '''
+
+ noop_task = Task()
+ noop_task.action = 'meta'
+ noop_task.args['_raw_params'] = 'noop'
+ noop_task.set_loader(iterator._play._loader)
+
+ host_tasks = {}
+ for host in hosts:
+ host_tasks[host.name] = iterator.get_next_task_for_host(host, peek=True)
+
+ num_setups = 0
+ num_tasks = 0
+ num_rescue = 0
+ num_always = 0
+
+ lowest_cur_block = len(iterator._blocks)
+
+ for (k, v) in host_tasks.iteritems():
+ if v is None:
+ continue
+
+ (s, t) = v
+ if s.cur_block < lowest_cur_block and s.run_state != PlayIterator.ITERATING_COMPLETE:
+ lowest_cur_block = s.cur_block
+
+ if s.run_state == PlayIterator.ITERATING_SETUP:
+ num_setups += 1
+ elif s.run_state == PlayIterator.ITERATING_TASKS:
+ num_tasks += 1
+ elif s.run_state == PlayIterator.ITERATING_RESCUE:
+ num_rescue += 1
+ elif s.run_state == PlayIterator.ITERATING_ALWAYS:
+ num_always += 1
+
+ def _advance_selected_hosts(hosts, cur_block, cur_state):
+ '''
+ This helper returns the task for all hosts in the requested
+ state, otherwise they get a noop dummy task. This also advances
+ the state of the host, since the given states are determined
+ while using peek=True.
+ '''
+ # we return the values in the order they were originally
+ # specified in the given hosts array
+ rvals = []
+ for host in hosts:
+ (s, t) = host_tasks[host.name]
+ if s.run_state == cur_state and s.cur_block == cur_block:
+ new_t = iterator.get_next_task_for_host(host)
+ #if new_t != t:
+ # raise AnsibleError("iterator error, wtf?")
+ rvals.append((host, t))
+ else:
+ rvals.append((host, noop_task))
+ return rvals
+
+ # if any hosts are in ITERATING_SETUP, return the setup task
+ # while all other hosts get a noop
+ if num_setups:
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_SETUP)
+
+ # if any hosts are in ITERATING_TASKS, return the next normal
+ # task for these hosts, while all other hosts get a noop
+ if num_tasks:
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_TASKS)
+
+ # if any hosts are in ITERATING_RESCUE, return the next rescue
+ # task for these hosts, while all other hosts get a noop
+ if num_rescue:
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_RESCUE)
+
+ # if any hosts are in ITERATING_ALWAYS, return the next always
+ # task for these hosts, while all other hosts get a noop
+ if num_always:
+ return _advance_selected_hosts(hosts, lowest_cur_block, PlayIterator.ITERATING_ALWAYS)
+
+ # at this point, everything must be ITERATING_COMPLETE, so we
+ # return None for all hosts in the list
+ return [(host, None) for host in hosts]
+
+
+ def run(self, iterator, connection_info):
+ '''
+ The linear strategy is simple - get the next task and queue
+ it for all hosts, then wait for the queue to drain before
+ moving on to the next task
+ '''
+
+ result = True
+
+        # iterate over each task, while there is one left to run
+ work_to_do = True
+ while work_to_do and not self._tqm._terminated:
+
+ try:
+ debug("getting the remaining hosts for this loop")
+ self._tqm._failed_hosts = iterator.get_failed_hosts()
+ hosts_left = self.get_hosts_remaining(iterator._play)
+ debug("done getting the remaining hosts for this loop")
+ if len(hosts_left) == 0:
+ debug("out of hosts to run on")
+ self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
+ result = False
+ break
+
+ # queue up this task for each host in the inventory
+ callback_sent = False
+ work_to_do = False
+
+ host_results = []
+ host_tasks = self._get_next_task_lockstep(hosts_left, iterator)
+ for (host, task) in host_tasks:
+ if not task:
+ continue
+
+ run_once = False
+ work_to_do = True
+
+ # test to see if the task across all hosts points to an action plugin which
+ # sets BYPASS_HOST_LOOP to true, or if it has run_once enabled. If so, we
+ # will only send this task to the first host in the list.
+
+ try:
+ action = action_loader.get(task.action, class_only=True)
+ if task.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
+ run_once = True
+ except KeyError:
+ # we don't care here, because the action may simply not have a
+ # corresponding action plugin
+ pass
+
+ debug("getting variables")
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
+ debug("done getting variables")
+
+ # check to see if this task should be skipped, due to it being a member of a
+ # role which has already run (and whether that role allows duplicate execution)
+ if task._role and task._role.has_run():
+ # If there is no metadata, the default behavior is to not allow duplicates,
+ # if there is metadata, check to see if the allow_duplicates flag was set to true
+ if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+ debug("'%s' skipped because role has already run" % task)
+ continue
+
+ if task.action == 'meta':
+ # meta tasks store their args in the _raw_params field of args,
+ # since they do not use k=v pairs, so get that
+ meta_action = task.args.get('_raw_params')
+ if meta_action == 'noop':
+ # FIXME: issue a callback for the noop here?
+ continue
+ elif meta_action == 'flush_handlers':
+ self.run_handlers(iterator, connection_info)
+ else:
+ raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
+ else:
+ if not callback_sent:
+ self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+ callback_sent = True
+
+ self._blocked_hosts[host.get_name()] = True
+ self._queue_task(host, task, task_vars, connection_info)
+
+ results = self._process_pending_results(iterator)
+ host_results.extend(results)
+
+ # if we're bypassing the host loop, break out now
+ if run_once:
+ break
+
+ debug("done queuing things up, now waiting for results queue to drain")
+ results = self._wait_on_pending_results(iterator)
+ host_results.extend(results)
+
+ # FIXME: this needs to be somewhere else
+ class IncludedFile:
+ def __init__(self, filename, args, task):
+ self._filename = filename
+ self._args = args
+ self._task = task
+ self._hosts = []
+ def add_host(self, host):
+ if host not in self._hosts:
+ self._hosts.append(host)
+ def __eq__(self, other):
+ return other._filename == self._filename and other._args == self._args
+ def __repr__(self):
+ return "%s (%s): %s" % (self._filename, self._args, self._hosts)
+
+ # FIXME: this should also be moved to the base class in a method
+ included_files = []
+ for res in host_results:
+ if res._task.action == 'include':
+ if res._task.loop:
+ include_results = res._result['results']
+ else:
+ include_results = [ res._result ]
+
+ for include_result in include_results:
+ # if the task result was skipped or failed, continue
+ if 'skipped' in include_result and include_result['skipped'] or 'failed' in include_result:
+ continue
+
+ original_task = iterator.get_original_task(res._host, res._task)
+ if original_task and original_task._role:
+ include_file = self._loader.path_dwim_relative(original_task._role._role_path, 'tasks', include_result['include'])
+ else:
+ include_file = self._loader.path_dwim(res._task.args.get('_raw_params'))
+
+ include_variables = include_result.get('include_variables', dict())
+ if 'item' in include_result:
+ include_variables['item'] = include_result['item']
+
+ inc_file = IncludedFile(include_file, include_variables, original_task)
+
+ try:
+ pos = included_files.index(inc_file)
+ inc_file = included_files[pos]
+ except ValueError:
+ included_files.append(inc_file)
+
+ inc_file.add_host(res._host)
+
+ # FIXME: should this be moved into the iterator class? Main downside would be
+ # that accessing the TQM's callback member would be more difficult, if
+ # we do want to send callbacks from here
+ if len(included_files) > 0:
+ noop_task = Task()
+ noop_task.action = 'meta'
+ noop_task.args['_raw_params'] = 'noop'
+ noop_task.set_loader(iterator._play._loader)
+
+ all_blocks = dict((host, []) for host in hosts_left)
+ for included_file in included_files:
+ # included hosts get the task list while those excluded get an equal-length
+ # list of noop tasks, to make sure that they continue running in lock-step
+ try:
+ new_blocks = self._load_included_file(included_file)
+ except AnsibleError, e:
+ for host in included_file._hosts:
+ iterator.mark_host_failed(host)
+ # FIXME: callback here?
+ print(e)
+
+ for new_block in new_blocks:
+ noop_block = Block(parent_block=task._block)
+ noop_block.block = [noop_task for t in new_block.block]
+ noop_block.always = [noop_task for t in new_block.always]
+ noop_block.rescue = [noop_task for t in new_block.rescue]
+ for host in hosts_left:
+ if host in included_file._hosts:
+ task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=included_file._task)
+ final_block = new_block.filter_tagged_tasks(connection_info, task_vars)
+ all_blocks[host].append(final_block)
+ else:
+ all_blocks[host].append(noop_block)
+
+ for host in hosts_left:
+ iterator.add_tasks(host, all_blocks[host])
+
+ debug("results queue empty")
+ except (IOError, EOFError), e:
+ debug("got IOError/EOFError in task loop: %s" % e)
+ # most likely an abort, return failed
+ return 1
+
+ # run the base class run() method, which executes the cleanup function
+ # and runs any outstanding handlers which have been triggered
+
+ return super(StrategyModule, self).run(iterator, connection_info, result)
+
diff --git a/lib/ansible/plugins/vars/__init__.py b/lib/ansible/plugins/vars/__init__.py
new file mode 100644
index 0000000000..785fc45992
--- /dev/null
+++ b/lib/ansible/plugins/vars/__init__.py
@@ -0,0 +1,21 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
diff --git a/lib/ansible/runner/__init__.py b/lib/ansible/runner/__init__.py
deleted file mode 100644
index 8b46683c37..0000000000
--- a/lib/ansible/runner/__init__.py
+++ /dev/null
@@ -1,1517 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import multiprocessing
-import signal
-import os
-import pwd
-import Queue
-import random
-import traceback
-import tempfile
-import time
-import collections
-import socket
-import base64
-import sys
-import pipes
-import jinja2
-import subprocess
-import getpass
-
-import ansible.constants as C
-import ansible.inventory
-from ansible import utils
-from ansible.utils import template
-from ansible.utils import check_conditional
-from ansible.utils import string_functions
-from ansible import errors
-from ansible import module_common
-import poller
-import connection
-from return_data import ReturnData
-from ansible.callbacks import DefaultRunnerCallbacks, vv
-from ansible.module_common import ModuleReplacer
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.cache import FactCache
-from ansible.utils import update_hash
-
-module_replacer = ModuleReplacer(strip_comments=False)
-
-try:
- from hashlib import sha1
-except ImportError:
- from sha import sha as sha1
-
-HAS_ATFORK=True
-try:
- from Crypto.Random import atfork
-except ImportError:
- HAS_ATFORK=False
-
-multiprocessing_runner = None
-
-OUTPUT_LOCKFILE = tempfile.TemporaryFile()
-PROCESS_LOCKFILE = tempfile.TemporaryFile()
-
-################################################
-
-def _executor_hook(job_queue, result_queue, new_stdin):
-
- # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
- # this function also not present in CentOS 6
- if HAS_ATFORK:
- atfork()
-
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- while not job_queue.empty():
- try:
- host = job_queue.get(block=False)
- return_data = multiprocessing_runner._executor(host, new_stdin)
- result_queue.put(return_data)
- except Queue.Empty:
- pass
- except:
- traceback.print_exc()
-
-class HostVars(dict):
- ''' A special view of vars_cache that adds values from the inventory when needed. '''
-
- def __init__(self, vars_cache, inventory, vault_password=None):
- self.vars_cache = vars_cache
- self.inventory = inventory
- self.lookup = {}
- self.update(vars_cache)
- self.vault_password = vault_password
-
- def __getitem__(self, host):
- if host not in self.lookup:
- result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
- result.update(self.vars_cache.get(host, {}))
- self.lookup[host] = template.template('.', result, self.vars_cache)
- return self.lookup[host]
-
-
-class Runner(object):
- ''' core API interface to ansible '''
-
- # see bin/ansible for how this is used...
-
- def __init__(self,
- host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
- module_path=None, # ex: /usr/share/ansible
- module_name=C.DEFAULT_MODULE_NAME, # ex: copy
- module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
- forks=C.DEFAULT_FORKS, # parallelism level
- timeout=C.DEFAULT_TIMEOUT, # SSH timeout
- pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
- remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
- remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
- remote_port=None, # if SSH on different ports
- private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
- background=0, # async poll every X seconds, else 0 for non-async
- basedir=None, # directory of playbook, if applicable
- setup_cache=None, # used to share fact data w/ other tasks
- vars_cache=None, # used to store variables about hosts
- transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
- conditional='True', # run only if this fact expression evals to true
- callbacks=None, # used for output
- module_vars=None, # a playbooks internals thing
- play_vars=None, #
- play_file_vars=None, #
- role_vars=None, #
- role_params=None, #
- default_vars=None, #
- extra_vars=None, # extra vars specified with he playbook(s)
- is_playbook=False, # running from playbook or not?
- inventory=None, # reference to Inventory object
- subset=None, # subset pattern
- check=False, # don't make any changes, just try to probe for potential changes
- diff=False, # whether to show diffs for template files that change
- environment=None, # environment variables (as dict) to use inside the command
- complex_args=None, # structured data in addition to module_args, must be a dict
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
- accelerate=False, # use accelerated connection
- accelerate_ipv6=False, # accelerated connection w/ IPv6
- accelerate_port=None, # port to use with accelerated connection
- vault_pass=None,
- run_hosts=None, # an optional list of pre-calculated hosts to run on
- no_log=False, # option to enable/disable logging for a given task
- run_once=False, # option to enable/disable host bypass loop for a given task
- become=False, # whether to run privilege escalation or not
- become_method=C.DEFAULT_BECOME_METHOD,
- become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
- become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
- become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
- ):
-
- # used to lock multiprocess inputs and outputs at various levels
- self.output_lockfile = OUTPUT_LOCKFILE
- self.process_lockfile = PROCESS_LOCKFILE
-
- if not complex_args:
- complex_args = {}
-
- # storage & defaults
- self.check = check
- self.diff = diff
- self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
- self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
- self.basedir = utils.default(basedir, lambda: os.getcwd())
- self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
- self.generated_jid = str(random.randint(0, 999999999999))
- self.transport = transport
- self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
-
- self.module_vars = utils.default(module_vars, lambda: {})
- self.play_vars = utils.default(play_vars, lambda: {})
- self.play_file_vars = utils.default(play_file_vars, lambda: {})
- self.role_vars = utils.default(role_vars, lambda: {})
- self.role_params = utils.default(role_params, lambda: {})
- self.default_vars = utils.default(default_vars, lambda: {})
- self.extra_vars = utils.default(extra_vars, lambda: {})
-
- self.always_run = None
- self.connector = connection.Connector(self)
- self.conditional = conditional
- self.delegate_to = None
- self.module_name = module_name
- self.forks = int(forks)
- self.pattern = pattern
- self.module_args = module_args
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.private_key_file = private_key_file
- self.background = background
- self.become = become
- self.become_method = become_method
- self.become_user_var = become_user
- self.become_user = None
- self.become_pass = become_pass
- self.become_exe = become_exe
- self.is_playbook = is_playbook
- self.environment = environment
- self.complex_args = complex_args
- self.error_on_undefined_vars = error_on_undefined_vars
- self.accelerate = accelerate
- self.accelerate_port = accelerate_port
- self.accelerate_ipv6 = accelerate_ipv6
- self.callbacks.runner = self
- self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
- self.vault_pass = vault_pass
- self.no_log = no_log
- self.run_once = run_once
-
- if self.transport == 'smart':
- # If the transport is 'smart', check to see if certain conditions
- # would prevent us from using ssh, and fallback to paramiko.
- # 'smart' is the default since 1.2.1/1.3
- self.transport = "ssh"
- if sys.platform.startswith('darwin') and self.remote_pass:
- # due to a current bug in sshpass on OSX, which can trigger
- # a kernel panic even for non-privileged users, we revert to
- # paramiko on that OS when a SSH password is specified
- self.transport = "paramiko"
- else:
- # see if SSH can support ControlPersist if not use paramiko
- cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- if "Bad configuration option" in err:
- self.transport = "paramiko"
-
- # save the original transport, in case it gets
- # changed later via options like accelerate
- self.original_transport = self.transport
-
- # misc housekeeping
- if subset and self.inventory._subset is None:
- # don't override subset when passed from playbook
- self.inventory.subset(subset)
-
- # If we get a pre-built list of hosts to run on, from say a playbook, use them.
- # Also where we will store the hosts to run on once discovered
- self.run_hosts = run_hosts
-
- if self.transport == 'local':
- self.remote_user = pwd.getpwuid(os.geteuid())[0]
-
- if module_path is not None:
- for i in module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- utils.plugins.push_basedir(self.basedir)
-
- # ensure we are using unique tmp paths
- random.seed()
- # *****************************************************
-
- def _complex_args_hack(self, complex_args, module_args):
- """
- ansible-playbook both allows specifying key=value string arguments and complex arguments
- however not all modules use our python common module system and cannot
- access these. An example might be a Bash module. This hack allows users to still pass "args"
- as a hash of simple scalars to those arguments and is short term. We could technically
- just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented
- it does mean values in 'args' have LOWER priority than those on the key=value line, allowing
- args to provide yet another way to have pluggable defaults.
- """
- if complex_args is None:
- return module_args
- if not isinstance(complex_args, dict):
- raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
- for (k,v) in complex_args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- return module_args
-
- # *****************************************************
-
- def _transfer_str(self, conn, tmp, name, data):
- ''' transfer string to remote file '''
-
- if type(data) == dict:
- data = utils.jsonify(data)
-
- afd, afile = tempfile.mkstemp()
- afo = os.fdopen(afd, 'w')
- try:
- if not isinstance(data, unicode):
- #ensure the data is valid UTF-8
- data.decode('utf-8')
- else:
- data = data.encode('utf-8')
- afo.write(data)
- except:
- raise errors.AnsibleError("failure encoding into utf-8")
- afo.flush()
- afo.close()
-
- remote = conn.shell.join_path(tmp, name)
- try:
- conn.put_file(afile, remote)
- finally:
- os.unlink(afile)
- return remote
-
- # *****************************************************
-
- def _compute_environment_string(self, conn, inject=None):
- ''' what environment variables to use when running the command? '''
-
- enviro = {}
- if self.environment:
- enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
- enviro = utils.safe_eval(enviro)
- if type(enviro) != dict:
- raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
-
- return conn.shell.env_prefix(**enviro)
-
- # *****************************************************
-
- def _compute_delegate(self, password, remote_inject):
-
- """ Build a dictionary of all attributes for the delegate host """
-
- delegate = {}
-
- # allow delegated host to be templated
- delegate['inject'] = remote_inject.copy()
-
- # set any interpreters
- interpreters = []
- for i in delegate['inject']:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- interpreters.append(i)
- for i in interpreters:
- del delegate['inject'][i]
- port = C.DEFAULT_REMOTE_PORT
-
- # get the vars for the delegate by its name
- try:
- this_info = delegate['inject']['hostvars'][self.delegate_to]
- except:
- # make sure the inject is empty for non-inventory hosts
- this_info = {}
-
- # get the real ssh_address for the delegate
- # and allow ansible_ssh_host to be templated
- delegate['ssh_host'] = template.template(
- self.basedir,
- this_info.get('ansible_ssh_host', self.delegate_to),
- this_info,
- fail_on_undefined=True
- )
-
- delegate['port'] = this_info.get('ansible_ssh_port', port)
- delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
- delegate['pass'] = this_info.get('ansible_ssh_pass', password)
- delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
- delegate['transport'] = this_info.get('ansible_connection', self.transport)
- delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
-
- # Last chance to get private_key_file from global variables.
- # this is useful if delegated host is not defined in the inventory
- if delegate['private_key_file'] is None:
- delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
-
- if delegate['private_key_file'] is not None:
- delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
-
- for i in this_info:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- delegate['inject'][i] = this_info[i]
-
- return delegate
-
- def _compute_delegate_user(self, host, inject):
-
- """ Calculate the remote user based on an order of preference """
-
- # inventory > playbook > original_host
-
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- thisuser = None
-
- try:
- if host in inject['hostvars']:
- if inject['hostvars'][host].get('ansible_ssh_user'):
- # user for delegate host in inventory
- thisuser = inject['hostvars'][host].get('ansible_ssh_user')
- else:
- # look up the variables for the host directly from inventory
- host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- if 'ansible_ssh_user' in host_vars:
- thisuser = host_vars['ansible_ssh_user']
- except errors.AnsibleError, e:
- # the hostname was not found in the inventory, so
- # we just ignore this and try the next method
- pass
-
- if thisuser is None and self.remote_user:
- # user defined by play/runner
- thisuser = self.remote_user
-
- if thisuser is not None:
- actual_user = thisuser
- else:
- # fallback to the inventory user of the play host
- #actual_user = inject.get('ansible_ssh_user', actual_user)
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
-
- return actual_user
-
- def _count_module_args(self, args, allow_dupes=False):
- '''
- Count the number of k=v pairs in the supplied module args. This is
- basically a specialized version of parse_kv() from utils with a few
- minor changes.
- '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except Exception, e:
- if "unbalanced jinja2 block or quotes" in str(e):
- raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
- else:
- raise
- for x in vargs:
- quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
- if "=" in x and not quoted:
- k, v = x.split("=",1)
- is_shell_module = self.module_name in ('command', 'shell')
- is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
- if k in options and not allow_dupes:
- if not(is_shell_module and not is_shell_param):
- raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
- if is_shell_module and is_shell_param or not is_shell_module:
- options[k] = v
- return len(options)
-
-
- # *****************************************************
-
- def _execute_module(self, conn, tmp, module_name, args,
- async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
-
- ''' transfer and run a module along with its arguments on the remote side'''
-
- # hack to support fireball mode
- if module_name == 'fireball':
- args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
- if 'port' not in args:
- args += " port=%s" % C.ZEROMQ_PORT
-
- (
- module_style,
- shebang,
- module_data
- ) = self._configure_module(conn, module_name, args, inject, complex_args)
-
- # a remote tmp path may be necessary and not already created
- if self._late_needs_tmp_path(conn, tmp, module_style):
- tmp = self._make_tmp_path(conn)
-
- remote_module_path = conn.shell.join_path(tmp, module_name)
-
- if (module_style != 'new'
- or async_jid is not None
- or not conn.has_pipelining
- or not C.ANSIBLE_SSH_PIPELINING
- or C.DEFAULT_KEEP_REMOTE_FILES
- or self.become_method == 'su'):
- self._transfer_str(conn, tmp, module_name, module_data)
-
- environment_string = self._compute_environment_string(conn, inject)
-
- if "tmp" in tmp and (self.become and self.become_user != 'root'):
- # deal with possible umask issues once you become another user
- self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
-
- cmd = ""
- in_data = None
- if module_style != 'new':
- if 'CHECKMODE=True' in args:
- # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
- # do --check mode, so to be safe we will not run it.
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
- elif 'NO_LOG' in args:
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
-
- args = template.template(self.basedir, args, inject)
-
- # decide whether we need to transfer JSON or key=value
- argsfile = None
- if module_style == 'non_native_want_json':
- if complex_args:
- complex_args.update(utils.parse_kv(args))
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
-
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', args)
-
- if self.become and self.become_user != 'root':
- # deal with possible umask issues once become another user
- self._remote_chmod(conn, 'a+r', argsfile, tmp)
-
- if async_jid is None:
- cmd = "%s %s" % (remote_module_path, argsfile)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
- else:
- if async_jid is None:
- if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
- in_data = module_data
- else:
- cmd = "%s" % (remote_module_path)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
-
- if not shebang:
- raise errors.AnsibleError("module is missing interpreter line")
-
- rm_tmp = None
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self.become or self.become_user == 'root':
- # not sudoing or sudoing to root, so can cleanup files in the same step
- rm_tmp = tmp
-
- cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
- cmd = cmd.strip()
-
- sudoable = True
- if module_name == "accelerate":
- # always run the accelerate module as the user
- # specified in the play, not the become_user
- sudoable = False
-
- res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
-
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if self.become and self.become_user != 'root':
- # not becoming root, so maybe can't delete files as that other user
- # have to clean up temp files as original user in a second step
- cmd2 = conn.shell.remove(tmp, recurse=True)
- self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
-
- data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
- if 'parsed' in data and data['parsed'] == False:
- data['msg'] += res['stderr']
- return ReturnData(conn=conn, result=data)
-
- # *****************************************************
-
- def _executor(self, host, new_stdin):
- ''' handler for multiprocessing library '''
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- try:
- self._new_stdin = new_stdin
- if not new_stdin and fileno is not None:
- try:
- self._new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
-
- exec_rc = self._executor_internal(host, new_stdin)
- if type(exec_rc) != ReturnData:
- raise Exception("unexpected return type: %s" % type(exec_rc))
- # redundant, right?
- if not exec_rc.comm_ok:
- self.callbacks.on_unreachable(host, exec_rc.result)
- return exec_rc
- except errors.AnsibleError, ae:
- msg = str(ae)
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
- except Exception:
- msg = traceback.format_exc()
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
-
- # *****************************************************
-
- def get_combined_cache(self):
- # merge the VARS and SETUP caches for this host
- combined_cache = self.setup_cache.copy()
- return utils.merge_hash(combined_cache, self.vars_cache)
-
- def get_inject_vars(self, host):
- host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
- combined_cache = self.get_combined_cache()
-
- # use combined_cache and host_variables to template the module_vars
- # we update the inject variables with the data we're about to template
- # since some of the variables we'll be replacing may be contained there too
- module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
- module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
- module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
-
- # remove bad variables from the module vars, which may be in there due
- # the way role declarations are specified in playbooks
- if 'tags' in module_vars:
- del module_vars['tags']
- if 'when' in module_vars:
- del module_vars['when']
-
- # start building the dictionary of injected variables
- inject = {}
-
- # default vars are the lowest priority
- inject = utils.combine_vars(inject, self.default_vars)
- # next come inventory variables for the host
- inject = utils.combine_vars(inject, host_variables)
- # then the setup_cache which contains facts gathered
- inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
- # next come variables from vars and vars files
- inject = utils.combine_vars(inject, self.play_vars)
- inject = utils.combine_vars(inject, self.play_file_vars)
- # next come variables from role vars/main.yml files
- inject = utils.combine_vars(inject, self.role_vars)
- # then come the module variables
- inject = utils.combine_vars(inject, module_vars)
- # followed by vars_cache things (set_fact, include_vars, and
- # vars_files which had host-specific templating done)
- inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
- # role parameters next
- inject = utils.combine_vars(inject, self.role_params)
- # and finally -e vars are the highest priority
- inject = utils.combine_vars(inject, self.extra_vars)
- # and then special vars
- inject.setdefault('ansible_ssh_user', self.remote_user)
- inject['group_names'] = host_variables.get('group_names', [])
- inject['groups'] = self.inventory.groups_list()
- inject['vars'] = self.module_vars
- inject['defaults'] = self.default_vars
- inject['environment'] = self.environment
- inject['playbook_dir'] = os.path.abspath(self.basedir)
- inject['omit'] = self.omit_token
- inject['combined_cache'] = combined_cache
-
- return inject
-
- def _executor_internal(self, host, new_stdin):
- ''' executes any module one or more times '''
-
- # We build the proper injected dictionary for all future
- # templating operations in this run
- inject = self.get_inject_vars(host)
-
- # Then we selectively merge some variable dictionaries down to a
- # single dictionary, used to template the HostVars for this host
- temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
- temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
- temp_vars = utils.combine_vars(temp_vars, self.play_vars)
- temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
-
- hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
-
- # and we save the HostVars in the injected dictionary so they
- # may be referenced from playbooks/templates
- inject['hostvars'] = hostvars
-
- host_connection = inject.get('ansible_connection', self.transport)
- if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
- port = hostvars.get('ansible_ssh_port', self.remote_port)
- if port is None:
- port = C.DEFAULT_REMOTE_PORT
- else:
- # fireball, local, etc
- port = self.remote_port
-
- if self.inventory.basedir() is not None:
- inject['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- inject['inventory_file'] = self.inventory.src()
-
- # could be already set by playbook code
- inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
-
- # allow with_foo to work in playbooks...
- items = None
- items_plugin = self.module_vars.get('items_lookup_plugin', None)
-
- if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
-
- basedir = self.basedir
- if '_original_file' in inject:
- basedir = os.path.dirname(inject['_original_file'])
- filesdir = os.path.join(basedir, '..', 'files')
- if os.path.exists(filesdir):
- basedir = filesdir
-
- try:
- items_terms = self.module_vars.get('items_lookup_terms', '')
- items_terms = template.template(basedir, items_terms, inject)
- items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
- except errors.AnsibleUndefinedVariable, e:
- if 'has no attribute' in str(e):
- # the undefined variable was an attribute of a variable that does
- # exist, so try and run this through the conditional check to see
- # if the user wanted to skip something on being undefined
- if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
- # the conditional check passed, so we have to fail here
- raise
- else:
- # the conditional failed, so we skip this task
- result = utils.jsonify(dict(changed=False, skipped=True))
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, result=result)
- except errors.AnsibleError, e:
- raise
- except Exception, e:
- raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
-
- # strip out any jinja2 template syntax within
- # the data returned by the lookup plugin
- items = utils._clean_data_struct(items, from_remote=True)
- if items is None:
- items = []
- else:
- if type(items) != list:
- raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
-
- if len(items) and utils.is_list_of_strings(items) and self.module_name in [ 'apt', 'yum', 'pkgng', 'zypper' ]:
- # hack for apt, yum, and pkgng so that with_items maps back into a single module call
- use_these_items = []
- for x in items:
- inject['item'] = x
- if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- use_these_items.append(x)
- inject['item'] = ",".join(use_these_items)
- items = None
-
- def _safe_template_complex_args(args, inject):
- # Ensure the complex args here are a dictionary, but
- # first template them if they contain a variable
-
- returned_args = args
- if isinstance(args, basestring):
- # If the complex_args were evaluated to a dictionary and there are
- # more keys in the templated version than the evaled version, some
- # param inserted additional keys (the template() call also runs
- # safe_eval on the var if it looks like it's a datastructure). If the
- # evaled_args are not a dict, it's most likely a whole variable (ie.
- # args: {{var}}), in which case there's no way to detect the proper
- # count of params in the dictionary.
-
- templated_args = template.template(self.basedir, args, inject, convert_bare=True)
- evaled_args = utils.safe_eval(args)
-
- if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
- raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
-
- # set the returned_args to the templated_args
- returned_args = templated_args
-
- # and a final check to make sure the complex args are a dict
- if returned_args is not None and not isinstance(returned_args, dict):
- raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
-
- return returned_args
-
- # logic to decide how to run things depends on whether with_items is used
- if items is None:
- complex_args = _safe_template_complex_args(self.complex_args, inject)
- return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
- elif len(items) > 0:
-
- # executing using with_items, so make multiple calls
- # TODO: refactor
-
- if self.background > 0:
- raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
-
- all_comm_ok = True
- all_changed = False
- all_failed = False
- results = []
- for x in items:
- # use a fresh inject for each item
- this_inject = inject.copy()
- this_inject['item'] = x
-
- complex_args = _safe_template_complex_args(self.complex_args, this_inject)
-
- result = self._executor_internal_inner(
- host,
- self.module_name,
- self.module_args,
- this_inject,
- port,
- complex_args=complex_args
- )
-
- if 'stdout' in result.result and 'stdout_lines' not in result.result:
- result.result['stdout_lines'] = result.result['stdout'].splitlines()
-
- results.append(result.result)
- if result.comm_ok == False:
- all_comm_ok = False
- all_failed = True
- break
- for x in results:
- if x.get('changed') == True:
- all_changed = True
- if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
- all_failed = True
- break
- msg = 'All items completed'
- if all_failed:
- msg = "One or more items failed."
- rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
- if not all_failed:
- del rd_result['failed']
- return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
- else:
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
-
- # *****************************************************
-
- def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
- ''' decides how to invoke a module '''
-
- # late processing of parameterized become_user (with_items,..)
- if self.become_user_var is not None:
- self.become_user = template.template(self.basedir, self.become_user_var, inject)
-
- # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
- module_name = template.template(self.basedir, module_name, inject)
-
- if module_name in utils.plugins.action_loader:
- if self.background != 0:
- raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
- handler = utils.plugins.action_loader.get(module_name, self)
- elif self.background == 0:
- handler = utils.plugins.action_loader.get('normal', self)
- else:
- handler = utils.plugins.action_loader.get('async', self)
-
- if type(self.conditional) != list:
- self.conditional = [ self.conditional ]
-
- for cond in self.conditional:
-
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result = dict(changed=False, skipped=True)
- if self.no_log:
- result = utils.censor_unlogged_data(result)
- self.callbacks.on_skipped(host, result)
- else:
- self.callbacks.on_skipped(host, inject.get('item',None))
- return ReturnData(host=host, result=utils.jsonify(result))
-
- if getattr(handler, 'setup', None) is not None:
- handler.setup(module_name, inject)
- conn = None
- actual_host = inject.get('ansible_ssh_host', host)
- # allow ansible_ssh_host to be templated
- actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
- actual_port = port
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
- actual_transport = inject.get('ansible_connection', self.transport)
- actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
- actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
-
- self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
- self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
- self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
- self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
- self.become_method = inject.get('ansible_become_method', self.become_method)
-
- # select default root user in case self.become requested
- # but no user specified; happens e.g. in host vars when
- # just ansible_become=True is specified
- if self.become and self.become_user is None:
- self.become_user = 'root'
-
- if actual_private_key_file is not None:
- actual_private_key_file = os.path.expanduser(actual_private_key_file)
-
- if self.accelerate and actual_transport != 'local':
- #Fix to get the inventory name of the host to accelerate plugin
- if inject.get('ansible_ssh_host', None):
- self.accelerate_inventory_host = host
- else:
- self.accelerate_inventory_host = None
- # if we're using accelerated mode, force the
- # transport to accelerate
- actual_transport = "accelerate"
- if not self.accelerate_port:
- self.accelerate_port = C.ACCELERATE_PORT
-
- actual_port = inject.get('ansible_ssh_port', port)
-
- # the delegated host may have different SSH port configured, etc
- # and we need to transfer those, and only those, variables
- self.delegate_to = inject.get('delegate_to', None)
- if self.delegate_to:
- self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
-
- if self.delegate_to is not None:
- delegate = self._compute_delegate(actual_pass, inject)
- actual_transport = delegate['transport']
- actual_host = delegate['ssh_host']
- actual_port = delegate['port']
- actual_user = delegate['user']
- actual_pass = delegate['pass']
- actual_private_key_file = delegate['private_key_file']
- self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
- inject = delegate['inject']
- # set resolved delegate_to into inject so modules can call _remote_checksum
- inject['delegate_to'] = self.delegate_to
-
- # user/pass may still contain variables at this stage
- actual_user = template.template(self.basedir, actual_user, inject)
- try:
- actual_pass = template.template(self.basedir, actual_pass, inject)
- self.become_pass = template.template(self.basedir, self.become_pass, inject)
- except:
-            # ignore password template errors, could be triggered by password characters #10468
- pass
-
- # make actual_user available as __magic__ ansible_ssh_user variable
- inject['ansible_ssh_user'] = actual_user
-
- try:
- if actual_transport == 'accelerate':
- # for accelerate, we stuff both ports into a single
- # variable so that we don't have to mangle other function
- # calls just to accommodate this one case
- actual_port = [actual_port, self.accelerate_port]
- elif actual_port is not None:
- actual_port = int(template.template(self.basedir, actual_port, inject))
- except ValueError, e:
- result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
- return ReturnData(host=host, comm_ok=False, result=result)
-
- try:
- if self.delegate_to or host != actual_host:
- delegate_host = host
- else:
- delegate_host = None
- conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
-
- default_shell = getattr(conn, 'default_shell', '')
- shell_type = inject.get('ansible_shell_type')
- if not shell_type:
- if default_shell:
- shell_type = default_shell
- else:
- shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
-
- shell_plugin = utils.plugins.shell_loader.get(shell_type)
- if shell_plugin is None:
- shell_plugin = utils.plugins.shell_loader.get('sh')
- conn.shell = shell_plugin
-
- except errors.AnsibleConnectionFailed, e:
- result = dict(failed=True, msg="FAILED: %s" % str(e))
- return ReturnData(host=host, comm_ok=False, result=result)
-
- tmp = ''
- # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
-
- # allow module args to work as a dictionary
- # though it is usually a string
- if isinstance(module_args, dict):
- module_args = utils.serialize_args(module_args)
-
- # render module_args and complex_args templates
- try:
- # When templating module_args, we need to be careful to ensure
- # that no variables inadvertently (or maliciously) add params
- # to the list of args. We do this by counting the number of k=v
- # pairs before and after templating.
- num_args_pre = self._count_module_args(module_args, allow_dupes=True)
- module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- num_args_post = self._count_module_args(module_args)
- if num_args_pre != num_args_post:
- raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
- "Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
- # And we also make sure nothing added in special flags for things
- # like the command/shell module (ie. #USE_SHELL)
- if '#USE_SHELL' in module_args:
- raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
- complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
-
- # filter omitted arguments out from complex_args
- if complex_args:
- complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
-
- # Filter omitted arguments out from module_args.
- # We do this with split_args instead of parse_kv to ensure
- # that things are not unquoted/requoted incorrectly
- args = split_args(module_args)
- final_args = []
- for arg in args:
- if '=' in arg:
- k,v = arg.split('=', 1)
- if unquote(v) != self.omit_token:
- final_args.append(arg)
- else:
- # not a k=v param, append it
- final_args.append(arg)
- module_args = ' '.join(final_args)
-
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- # Code for do until feature
- until = self.module_vars.get('until', None)
- if until is not None and result.comm_ok:
- inject[self.module_vars.get('register')] = result.result
-
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
- delay = self.module_vars.get('delay')
- for x in range(1, int(retries) + 1):
- # template the delay, cast to float and sleep
- delay = template.template(self.basedir, delay, inject, expand_lists=False)
- delay = float(delay)
- time.sleep(delay)
- tmp = ''
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- result.result['attempts'] = x
- vv("Result from run %i is: %s" % (x, result.result))
- inject[self.module_vars.get('register')] = result.result
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- break
- if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result.result['failed'] = True
- result.result['msg'] = "Task failed as maximum retries was encountered"
- else:
- result.result['attempts'] = 0
- conn.close()
-
- if not result.comm_ok:
- # connection or parsing errors...
- self.callbacks.on_unreachable(host, result.result)
- else:
- data = result.result
-
- # https://github.com/ansible/ansible/issues/4958
- if hasattr(sys.stdout, "isatty"):
- if "stdout" in data and sys.stdout.isatty():
- if not string_functions.isprintable(data['stdout']):
- data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
-
- if 'item' in inject:
- result.result['item'] = inject['item']
-
- result.result['invocation'] = dict(
- module_args=module_args,
- module_name=module_name
- )
-
- changed_when = self.module_vars.get('changed_when')
- failed_when = self.module_vars.get('failed_when')
- if (changed_when is not None or failed_when is not None) and self.background == 0:
- register = self.module_vars.get('register')
- if register is not None:
- if 'stdout' in data:
- data['stdout_lines'] = data['stdout'].splitlines()
- inject[register] = data
- # only run the final checks if the async_status has finished,
- # or if we're not running an async_status check at all
- if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
- if changed_when is not None and 'skipped' not in data:
- data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
- if failed_when is not None and 'skipped' not in data:
- data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
-
-
- if is_chained:
- # no callbacks
- return result
- if 'skipped' in data:
- self.callbacks.on_skipped(host, inject.get('item',None))
-
- if self.no_log:
- data = utils.censor_unlogged_data(data)
-
- if not result.is_successful():
- ignore_errors = self.module_vars.get('ignore_errors', False)
- self.callbacks.on_failed(host, data, ignore_errors)
- else:
- if self.diff:
- self.callbacks.on_file_diff(conn.host, result.diff)
- self.callbacks.on_ok(host, data)
-
- return result
-
- def _early_needs_tmp_path(self, module_name, handler):
- ''' detect if a tmp path should be created before the handler is called '''
- if module_name in utils.plugins.action_loader:
- return getattr(handler, 'TRANSFERS_FILES', False)
- # other modules never need tmp path at early stage
- return False
-
- def _late_needs_tmp_path(self, conn, tmp, module_style):
- if "tmp" in tmp:
- # tmp has already been created
- return False
- if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
- # tmp is necessary to store module source code
- return True
- if not conn.has_pipelining:
- # tmp is necessary to store the module source code
- # or we want to keep the files on the target system
- return True
- if module_style != "new":
- # even when conn has pipelining, old style modules need tmp to store arguments
- return True
- return False
-
-
- # *****************************************************
-
- def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
- executable=None, become=False, in_data=None):
- ''' execute a command string over SSH, return the output '''
- # this can be skipped with powershell modules when there is no analog to a Windows command (like chmod)
- if cmd:
-
- if executable is None:
- executable = C.DEFAULT_EXECUTABLE
-
- become_user = self.become_user
-
- # compare connection user to (su|sudo)_user and disable if the same
- # assume connection type is local if no user attribute
- this_user = getattr(conn, 'user', getpass.getuser())
- if (not become and this_user == become_user):
- sudoable = False
- become = False
-
- rc, stdin, stdout, stderr = conn.exec_command(cmd,
- tmp,
- become_user=become_user,
- sudoable=sudoable,
- executable=executable,
- in_data=in_data)
-
- if type(stdout) not in [ str, unicode ]:
- out = ''.join(stdout.readlines())
- else:
- out = stdout
-
- if type(stderr) not in [ str, unicode ]:
- err = ''.join(stderr.readlines())
- else:
- err = stderr
-
- if rc is not None:
- return dict(rc=rc, stdout=out, stderr=err)
- else:
- return dict(stdout=out, stderr=err)
-
- return dict(rc=None, stdout='', stderr='')
-
-
- # *****************************************************
-
- def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
- ''' issue a remote chmod command '''
- cmd = conn.shell.chmod(mode, path)
- return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
-
- # *****************************************************
-
- def _remote_expand_user(self, conn, path, tmp):
- ''' takes a remote path and performs tilde expansion on the remote host '''
- if not path.startswith('~'):
- return path
-
- split_path = path.split(os.path.sep, 1)
- expand_path = split_path[0]
- if expand_path == '~':
- if self.become and self.become_user:
- expand_path = '~%s' % self.become_user
-
- cmd = conn.shell.expand_user(expand_path)
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
- initial_fragment = utils.last_non_blank_line(data['stdout'])
-
- if not initial_fragment:
- # Something went wrong trying to expand the path remotely. Return
- # the original string
- return path
-
- if len(split_path) > 1:
- return conn.shell.join_path(initial_fragment, *split_path[1:])
- else:
- return initial_fragment
-
- # *****************************************************
-
- def _remote_checksum(self, conn, tmp, path, inject):
-        ''' computes a checksum of a remote file, returning "INVALIDCHECKSUM" when it cannot be determined '''
-
- # Lookup the python interp from the host or delegate
-
- # host == inven_host when there is no delegate
- host = inject['inventory_hostname']
- if 'delegate_to' in inject:
- delegate = inject['delegate_to']
- if delegate:
- # host == None when the delegate is not in inventory
- host = None
- # delegate set, check whether the delegate has inventory vars
- delegate = template.template(self.basedir, delegate, inject)
- if delegate in inject['hostvars']:
- # host == delegate if we need to lookup the
- # python_interpreter from the delegate's inventory vars
- host = delegate
-
- if host:
- python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
- else:
- python_interp = 'python'
-
- cmd = conn.shell.checksum(path, python_interp)
-
- #TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
- if self.become_method == 'sudo':
- sudoable = True
- else:
- sudoable = False
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
- data2 = utils.last_non_blank_line(data['stdout'])
- try:
- if data2 == '':
- # this may happen if the connection to the remote server
- # failed, so just return "INVALIDCHECKSUM" to avoid errors
- return "INVALIDCHECKSUM"
- else:
- return data2.split()[0]
- except IndexError:
- sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
- sys.stderr.write("command: %s\n" % cmd)
- sys.stderr.write("----\n")
- sys.stderr.write("output: %s\n" % data)
- sys.stderr.write("----\n")
- # this will signal that it changed and allow things to keep going
- return "INVALIDCHECKSUM"
-
- # *****************************************************
-
- def _make_tmp_path(self, conn):
- ''' make and return a temporary path on a remote box '''
- basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
- use_system_tmp = False
- if self.become and self.become_user != 'root':
- use_system_tmp = True
-
- tmp_mode = None
- if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
- tmp_mode = 'a+rx'
-
- cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
- result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
-
- # error handling on this seems a little aggressive?
- if result['rc'] != 0:
- if result['rc'] == 5:
- output = 'Authentication failure.'
- elif result['rc'] == 255 and self.transport in ['ssh']:
- if utils.VERBOSITY > 3:
- output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
- else:
- output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
- elif 'No space left on device' in result['stderr']:
- output = result['stderr']
- else:
- output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
- if 'stdout' in result and result['stdout'] != '':
- output = output + ": %s" % result['stdout']
- raise errors.AnsibleError(output)
-
- rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
- # Catch failure conditions, files should never be
- # written to locations in /.
- if rc == '/':
- raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd))
- return rc
-
- # *****************************************************
-
- def _remove_tmp_path(self, conn, tmp_path):
- ''' Remove a tmp_path. '''
- if "-tmp-" in tmp_path:
- cmd = conn.shell.remove(tmp_path, recurse=True)
- self._low_level_exec_command(conn, cmd, None, sudoable=False)
- # If we have gotten here we have a working ssh configuration.
- # If ssh breaks we could leave tmp directories out on the remote system.
-
- # *****************************************************
-
- def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
- ''' transfer a module over SFTP, does not run it '''
- (
- module_style,
- module_shebang,
- module_data
- ) = self._configure_module(conn, module_name, module_args, inject, complex_args)
- module_remote_path = conn.shell.join_path(tmp, module_name)
-
- self._transfer_str(conn, tmp, module_name, module_data)
-
- return (module_remote_path, module_style, module_shebang)
-
- # *****************************************************
-
- def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
- ''' find module and configure it '''
-
- # Search module path(s) for named module.
- module_suffixes = getattr(conn, 'default_suffixes', None)
- module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
- if module_path is None:
- module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
- if module_path2 is not None:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
- else:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
-
-
- # insert shared code and arguments into the module
- (module_data, module_style, module_shebang) = module_replacer.modify_module(
- module_path, complex_args, module_args, inject
- )
-
- return (module_style, module_shebang, module_data)
-
-
- # *****************************************************
-
-
- def _parallel_exec(self, hosts):
- ''' handles mulitprocessing when more than 1 fork is required '''
-
- manager = multiprocessing.Manager()
- job_queue = manager.Queue()
- for host in hosts:
- job_queue.put(host)
- result_queue = manager.Queue()
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- workers = []
- for i in range(self.forks):
- new_stdin = None
- if fileno is not None:
- try:
- new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
- prc = multiprocessing.Process(target=_executor_hook,
- args=(job_queue, result_queue, new_stdin))
- prc.start()
- workers.append(prc)
-
- try:
- for worker in workers:
- worker.join()
- except KeyboardInterrupt:
- for worker in workers:
- worker.terminate()
- worker.join()
-
- results = []
- try:
- while not result_queue.empty():
- results.append(result_queue.get(block=False))
- except socket.error:
- raise errors.AnsibleError("<interrupted>")
- return results
-
- # *****************************************************
-
- def _partition_results(self, results):
- ''' separate results by ones we contacted & ones we didn't '''
-
- if results is None:
- return None
- results2 = dict(contacted={}, dark={})
-
- for result in results:
- host = result.host
- if host is None:
- raise Exception("internal error, host not set")
- if result.communicated_ok():
- results2["contacted"][host] = result.result
- else:
- results2["dark"][host] = result.result
-
- # hosts which were contacted but never got a chance to return
- for host in self.run_hosts:
- if not (host in results2['dark'] or host in results2['contacted']):
- results2["dark"][host] = {}
- return results2
-
- # *****************************************************
-
- def run(self):
- ''' xfer & run module on all matched hosts '''
-
- # find hosts that match the pattern
- if not self.run_hosts:
- self.run_hosts = self.inventory.list_hosts(self.pattern)
- hosts = self.run_hosts
- if len(hosts) == 0:
- self.callbacks.on_no_hosts()
- return dict(contacted={}, dark={})
-
- global multiprocessing_runner
- multiprocessing_runner = self
- results = None
-
- # Check if this is an action plugin. Some of them are designed
- # to be ran once per group of hosts. Example module: pause,
- # run once per hostgroup, rather than pausing once per each
- # host.
- p = utils.plugins.action_loader.get(self.module_name, self)
-
- if self.forks == 0 or self.forks > len(hosts):
- self.forks = len(hosts)
-
- if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
-
- # Expose the current hostgroup to the bypassing plugins
- self.host_set = hosts
- # We aren't iterating over all the hosts in this
- # group. So, just choose the "delegate_to" host if that is defined and is
- # one of the targeted hosts, otherwise pick the first host in our group to
- # construct the conn object with.
- if self.delegate_to is not None and self.delegate_to in hosts:
- host = self.delegate_to
- else:
- host = hosts[0]
-
- result_data = self._executor(host, None).result
- # Create a ResultData item for each host in this group
- # using the returned result. If we didn't do this we would
- # get false reports of dark hosts.
- results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
- for h in hosts ]
- del self.host_set
-
- elif self.forks > 1:
- try:
- results = self._parallel_exec(hosts)
- except IOError, ie:
- print ie.errno
- if ie.errno == 32:
- # broken pipe from Ctrl+C
- raise errors.AnsibleError("interrupted")
- raise
- else:
- results = [ self._executor(h, None) for h in hosts ]
-
- return self._partition_results(results)
-
- # *****************************************************
-
- def run_async(self, time_limit):
- ''' Run this module asynchronously and return a poller. '''
-
- self.background = time_limit
- results = self.run()
- return results, poller.AsyncPoller(results, self)
-
- # *****************************************************
-
- def noop_on_check(self, inject):
- ''' Should the runner run in check mode or not ? '''
-
- # initialize self.always_run on first call
- if self.always_run is None:
- self.always_run = self.module_vars.get('always_run', False)
- self.always_run = check_conditional(
- self.always_run, self.basedir, inject, fail_on_undefined=True)
-
- return (self.check and not self.always_run)
diff --git a/lib/ansible/runner/action_plugins/__init__.py b/lib/ansible/runner/action_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/runner/action_plugins/__init__.py
+++ /dev/null
diff --git a/lib/ansible/runner/action_plugins/add_host.py b/lib/ansible/runner/action_plugins/add_host.py
deleted file mode 100644
index 995b205b62..0000000000
--- a/lib/ansible/runner/action_plugins/add_host.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, combine_vars
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-
-class ActionModule(object):
- ''' Create inventory hosts and groups in the memory inventory'''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'hostname' in args and not 'name' in args:
- raise ae("'name' is a required argument.")
-
- result = {}
-
- # Parse out any hostname:port patterns
- new_name = args.get('name', args.get('hostname', None))
- vv("creating host via 'add_host': hostname=%s" % new_name)
-
- if ":" in new_name:
- new_name, new_port = new_name.split(":")
- args['ansible_ssh_port'] = new_port
-
- # redefine inventory and get group "all"
- inventory = self.runner.inventory
- allgroup = inventory.get_group('all')
-
- # check if host in cache, add if not
- if new_name in inventory._hosts_cache:
- new_host = inventory._hosts_cache[new_name]
- else:
- new_host = Host(new_name)
- # only groups can be added directly to inventory
- inventory._hosts_cache[new_name] = new_host
- allgroup.add_host(new_host)
-
- groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
- # add it to the group if that was specified
- if groupnames:
- for group_name in groupnames.split(","):
- group_name = group_name.strip()
- if not inventory.get_group(group_name):
- new_group = Group(group_name)
- inventory.add_group(new_group)
- new_group.vars = inventory.get_group_variables(group_name, vault_password=inventory._vault_password)
- grp = inventory.get_group(group_name)
- grp.add_host(new_host)
-
- # add this host to the group cache
- if inventory._groups_list is not None:
- if group_name in inventory._groups_list:
- if new_host.name not in inventory._groups_list[group_name]:
- inventory._groups_list[group_name].append(new_host.name)
-
- vv("added host to group via add_host module: %s" % group_name)
- result['new_groups'] = groupnames.split(",")
-
-
- # actually load host vars
- new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password))
-
- # Add any passed variables to the new_host
- for k in args.keys():
- if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
- new_host.set_variable(k, args[k])
-
- result['new_host'] = new_name
-
- # clear pattern caching completely since it's unpredictable what
- # patterns may have referenced the group
- inventory.clear_pattern_cache()
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
-
-
diff --git a/lib/ansible/runner/action_plugins/assemble.py b/lib/ansible/runner/action_plugins/assemble.py
deleted file mode 100644
index 33a4838e32..0000000000
--- a/lib/ansible/runner/action_plugins/assemble.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-# Stephen Fromm <sfromm@gmail.com>
-# Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-import os.path
-import pipes
-import shutil
-import tempfile
-import base64
-import re
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
- ''' assemble a file from a directory of fragments '''
- tmpfd, temp_path = tempfile.mkstemp()
- tmp = os.fdopen(tmpfd,'w')
- delimit_me = False
- add_newline = False
-
- for f in sorted(os.listdir(src_path)):
- if compiled_regexp and not compiled_regexp.search(f):
- continue
- fragment = "%s/%s" % (src_path, f)
- if not os.path.isfile(fragment):
- continue
- fragment_content = file(fragment).read()
-
- # always put a newline between fragments if the previous fragment didn't end with a newline.
- if add_newline:
- tmp.write('\n')
-
- # delimiters should only appear between fragments
- if delimit_me:
- if delimiter:
- # un-escape anything like newlines
- delimiter = delimiter.decode('unicode-escape')
- tmp.write(delimiter)
- # always make sure there's a newline after the
- # delimiter, so lines don't run together
- if delimiter[-1] != '\n':
- tmp.write('\n')
-
- tmp.write(fragment_content)
- delimit_me = True
- if fragment_content.endswith('\n'):
- add_newline = False
- else:
- add_newline = True
-
- tmp.close()
- return temp_path
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- delimiter = options.get('delimiter', None)
- remote_src = utils.boolean(options.get('remote_src', 'yes'))
- regexp = options.get('regexp', None)
-
-
- if src is None or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
- elif '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- # the source is local, so expand it here
- src = os.path.expanduser(src)
-
- _re = None
- if regexp is not None:
- _re = re.compile(regexp)
-
- # Does all work assembling the file
- path = self._assemble_from_fragments(src, delimiter, _re)
-
- path_checksum = utils.checksum_s(path)
- dest = self.runner._remote_expand_user(conn, dest, tmp)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if path_checksum != remote_checksum:
- resultant = file(path).read()
- if self.runner.diff:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
- res.diff = dict(after=resultant)
- return res
- else:
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
-
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject)
diff --git a/lib/ansible/runner/action_plugins/assert.py b/lib/ansible/runner/action_plugins/assert.py
deleted file mode 100644
index a0e02dedb0..0000000000
--- a/lib/ansible/runner/action_plugins/assert.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils, errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Fail with custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the fail module does not need to pay attention to check mode
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
-
- msg = None
- if 'msg' in args:
- msg = args['msg']
-
- if not 'that' in args:
- raise errors.AnsibleError('conditional required in "that" string')
-
- if not isinstance(args['that'], list):
- args['that'] = [ args['that'] ]
-
- for that in args['that']:
- test_result = utils.check_conditional(that, self.runner.basedir, inject, fail_on_undefined=True)
- if not test_result:
- result = dict(
- failed = True,
- evaluated_to = test_result,
- assertion = that,
- )
- if msg:
- result['msg'] = msg
- return ReturnData(conn=conn, result=result)
-
- return ReturnData(conn=conn, result=dict(msg='all assertions passed'))
-
diff --git a/lib/ansible/runner/action_plugins/async.py b/lib/ansible/runner/action_plugins/async.py
deleted file mode 100644
index dc53d6fa6c..0000000000
--- a/lib/ansible/runner/action_plugins/async.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer the given module name, plus the async module, then run it '''
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # shell and command module are the same
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- if "tmp" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
- self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)
-
- return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
- async_module=module_path,
- async_jid=self.runner.generated_jid,
- async_limit=self.runner.background,
- inject=inject
- )
-
diff --git a/lib/ansible/runner/action_plugins/copy.py b/lib/ansible/runner/action_plugins/copy.py
deleted file mode 100644
index a6a5cb5a27..0000000000
--- a/lib/ansible/runner/action_plugins/copy.py
+++ /dev/null
@@ -1,381 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile in case we set it after finding content populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Get the amount of spaces to remove to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's recursive copy, destination is always a dir,
- # explicitly mark it so (note - copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A register for if we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the file if there is one file.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is kind of optimization - if user told us destination is
- # dir, do path manipulation right away, otherwise we still check
- # for dest being a dir via remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '4':
- result = dict(msg="python isn't present on the system. Unable to compute checksum", failed=True)
- return ReturnData(conn=conn, result=result)
-
- if remote_checksum != '1' and not force:
- # remote_file exists so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after original and override them
- # we pass dest only to make sure it includes trailing slash in case of recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file, already correct hash, but still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed') == True:
- return module_return
- if module_result.get('changed') == True:
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
diff --git a/lib/ansible/runner/action_plugins/debug.py b/lib/ansible/runner/action_plugins/debug.py
deleted file mode 100644
index eaf1364c3f..0000000000
--- a/lib/ansible/runner/action_plugins/debug.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.utils import template
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Print statements during execution '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
- self.basedir = runner.basedir
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- args = {}
- if complex_args:
- args.update(complex_args)
-
- # attempt to prevent confusing messages when the variable didn't interpolate
- module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
-
- kv = utils.parse_kv(module_args)
- args.update(kv)
-
- if not 'msg' in args and not 'var' in args:
- args['msg'] = 'Hello world!'
-
- result = {}
- if 'msg' in args:
- if 'fail' in args and utils.boolean(args['fail']):
- result = dict(failed=True, msg=args['msg'])
- else:
- result = dict(msg=args['msg'])
- elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']):
- results = template.template(self.basedir, args['var'], inject, convert_bare=True)
- result['var'] = { args['var']: results }
-
- # force flag to make debug output module always verbose
- result['verbose_always'] = True
-
- return ReturnData(conn=conn, result=result)
diff --git a/lib/ansible/runner/action_plugins/fail.py b/lib/ansible/runner/action_plugins/fail.py
deleted file mode 100644
index 2bbaf40313..0000000000
--- a/lib/ansible/runner/action_plugins/fail.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Fail with custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the fail module does not need to pay attention to check mode
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
- if not 'msg' in args:
- args['msg'] = 'Failed as requested from task'
-
- result = dict(failed=True, msg=args['msg'])
- return ReturnData(conn=conn, result=result)
diff --git a/lib/ansible/runner/action_plugins/group_by.py b/lib/ansible/runner/action_plugins/group_by.py
deleted file mode 100644
index 25c2073fa0..0000000000
--- a/lib/ansible/runner/action_plugins/group_by.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, check_conditional
-import ansible.utils.template as template
-
-class ActionModule(object):
- ''' Create inventory groups based on variables '''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # the group_by module does not need to pay attention to check mode.
- # it always runs.
-
- # module_args and complex_args have already been templated for the first host.
- # Use them here only to check that a key argument is provided.
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'key' in args:
- raise ae("'key' is a required argument.")
-
- vv("created 'group_by' ActionModule: key=%s"%(args['key']))
-
- inventory = self.runner.inventory
-
- result = {'changed': False}
-
- ### find all groups
- groups = {}
-
- for host in self.runner.host_set:
- data = {}
- data.update(inject)
- data.update(inject['hostvars'][host])
- conds = self.runner.conditional
- if type(conds) != list:
- conds = [ conds ]
- next_host = False
- for cond in conds:
- if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
- next_host = True
- break
- if next_host:
- continue
-
- # Template original module_args and complex_args from runner for each host.
- host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
- host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
- host_args = {}
- if host_complex_args:
- host_args.update(host_complex_args)
- host_args.update(parse_kv(host_module_args))
-
- group_name = host_args['key']
- group_name = group_name.replace(' ','-')
- if group_name not in groups:
- groups[group_name] = []
- groups[group_name].append(host)
-
- result['groups'] = groups
-
- ### add to inventory
- for group, hosts in groups.items():
- inv_group = inventory.get_group(group)
- if not inv_group:
- inv_group = ansible.inventory.Group(name=group)
- inventory.add_group(inv_group)
- inventory.get_group('all').add_child_group(inv_group)
- inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
- for host in hosts:
- if host in self.runner.inventory._vars_per_host:
- del self.runner.inventory._vars_per_host[host]
- inv_host = inventory.get_host(host)
- if not inv_host:
- inv_host = ansible.inventory.Host(name=host)
- if inv_group not in inv_host.get_groups():
- result['changed'] = True
- inv_group.add_host(inv_host)
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
diff --git a/lib/ansible/runner/action_plugins/include_vars.py b/lib/ansible/runner/action_plugins/include_vars.py
deleted file mode 100644
index d6ce52cf00..0000000000
--- a/lib/ansible/runner/action_plugins/include_vars.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# (c) 2013-2014, Benno Joy <benno@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if not module_args:
- result = dict(failed=True, msg="No source file given")
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- source = module_args
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'vars', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if os.path.exists(source):
- data = utils.parse_yaml_from_file(source, vault_password=self.runner.vault_pass)
- if data and type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % source)
- elif data is None:
- data = {}
- result = dict(ansible_facts=data)
- return ReturnData(conn=conn, comm_ok=True, result=result)
- else:
- result = dict(failed=True, msg="Source file not found.", file=source)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
diff --git a/lib/ansible/runner/action_plugins/normal.py b/lib/ansible/runner/action_plugins/normal.py
deleted file mode 100644
index 8500c6641c..0000000000
--- a/lib/ansible/runner/action_plugins/normal.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import random
-import traceback
-import tempfile
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
-from ansible.callbacks import vv, vvv
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer & execute a module that is not 'copy' or 'template' '''
-
- module_args = self.runner._complex_args_hack(complex_args, module_args)
-
- if self.runner.noop_on_check(inject):
- if module_name in [ 'shell', 'command' ]:
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name))
- # else let the module parsing code decide, though this will only be allowed for AnsibleModuleCommon using
- # python modules for now
- module_args += " CHECKMODE=True"
-
- if self.runner.no_log:
- module_args += " NO_LOG=True"
-
- # shell and command are the same module
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
- return self.runner._execute_module(conn, tmp, module_name, module_args, inject=inject, complex_args=complex_args)
-
-
diff --git a/lib/ansible/runner/action_plugins/patch.py b/lib/ansible/runner/action_plugins/patch.py
deleted file mode 100644
index 29d4f7eca5..0000000000
--- a/lib/ansible/runner/action_plugins/patch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- remote_src = utils.boolean(options.get('remote_src', 'no'))
-
- if src is None:
- result = dict(failed=True, msg="src is required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
-
- # Source is local
- if '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- src = utils.path_dwim(self.runner.basedir, src)
-
- if tmp is None or "-tmp-" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- tmp_src = conn.shell.join_path(tmp, os.path.basename(src))
- conn.put_file(src, tmp_src)
-
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
-
- new_module_args = dict(
- src=tmp_src,
- )
-
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
diff --git a/lib/ansible/runner/action_plugins/pause.py b/lib/ansible/runner/action_plugins/pause.py
deleted file mode 100644
index d0c9b53db2..0000000000
--- a/lib/ansible/runner/action_plugins/pause.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import getch, parse_kv
-import ansible.utils.template as template
-from termios import tcflush, TCIFLUSH
-import datetime
-import sys
-import time
-
-
-class ActionModule(object):
- ''' pauses execution for a length or time, or until input is received '''
-
- PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
- BYPASS_HOST_LOOP = True
-
- def __init__(self, runner):
- self.runner = runner
- # Set defaults
- self.duration_unit = 'minutes'
- self.prompt = None
- self.seconds = None
- self.result = {'changed': False,
- 'rc': 0,
- 'stderr': '',
- 'stdout': '',
- 'start': None,
- 'stop': None,
- 'delta': None,
- }
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' run the pause action module '''
-
- # note: this module does not need to pay attention to the 'check'
- # flag, it always runs
-
- hosts = ', '.join(self.runner.host_set)
- args = {}
- if complex_args:
- args.update(complex_args)
- # extra template call unneeded?
- args.update(parse_kv(template.template(self.runner.basedir, module_args, inject)))
-
- # Are 'minutes' or 'seconds' keys that exist in 'args'?
- if 'minutes' in args or 'seconds' in args:
- try:
- if 'minutes' in args:
- self.pause_type = 'minutes'
- # The time() command operates in seconds so we need to
- # recalculate for minutes=X values.
- self.seconds = int(args['minutes']) * 60
- else:
- self.pause_type = 'seconds'
- self.seconds = int(args['seconds'])
- self.duration_unit = 'seconds'
- except ValueError, e:
- raise ae("non-integer value given for prompt duration:\n%s" % str(e))
- # Is 'prompt' a key in 'args'?
- elif 'prompt' in args:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\n%s:\n" % (hosts, args['prompt'])
- # Is 'args' empty, then this is the default prompted pause
- elif len(args.keys()) == 0:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\nPress enter to continue:\n" % hosts
- # I have no idea what you're trying to do. But it's so wrong.
- else:
- raise ae("invalid pause type given. must be one of: %s" % \
- ", ".join(self.PAUSE_TYPES))
-
- vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
- (self.pause_type, self.duration_unit, self.seconds, self.prompt))
-
- ########################################################################
- # Begin the hard work!
- try:
- self._start()
- if not self.pause_type == 'prompt':
- print "[%s]\nPausing for %s seconds" % (hosts, self.seconds)
- time.sleep(self.seconds)
- else:
- # Clear out any unflushed buffered input which would
- # otherwise be consumed by raw_input() prematurely.
- tcflush(sys.stdin, TCIFLUSH)
- self.result['user_input'] = raw_input(self.prompt.encode(sys.stdout.encoding))
- except KeyboardInterrupt:
- while True:
- print '\nAction? (a)bort/(c)ontinue: '
- c = getch()
- if c == 'c':
- # continue playbook evaluation
- break
- elif c == 'a':
- # abort further playbook evaluation
- raise ae('user requested abort!')
- finally:
- self._stop()
-
- return ReturnData(conn=conn, result=self.result)
-
- def _start(self):
- ''' mark the time of execution for duration calculations later '''
- self.start = time.time()
- self.result['start'] = str(datetime.datetime.now())
- if not self.pause_type == 'prompt':
- print "(^C-c = continue early, ^C-a = abort)"
-
- def _stop(self):
- ''' calculate the duration we actually paused for and then
- finish building the task result string '''
- duration = time.time() - self.start
- self.result['stop'] = str(datetime.datetime.now())
- self.result['delta'] = int(duration)
-
- if self.duration_unit == 'minutes':
- duration = round(duration / 60.0, 2)
- else:
- duration = round(duration, 2)
-
- self.result['stdout'] = "Paused for %s %s" % (duration, self.duration_unit)
diff --git a/lib/ansible/runner/action_plugins/raw.py b/lib/ansible/runner/action_plugins/raw.py
deleted file mode 100644
index e52296b2e7..0000000000
--- a/lib/ansible/runner/action_plugins/raw.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- # in --check mode, always skip this module execution
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))
-
- executable = ''
- # From library/command, keep in sync
- r = re.compile(r'(^|\s)(executable)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)\s|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "executable":
- executable = v
- module_args = r.sub("", module_args)
-
- result = self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable,
- become=self.runner.become)
- # for some modules (script, raw), the sudo success key
- # may leak into the stdout due to the way the sudo/su
- # command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
-
- return ReturnData(conn=conn, result=result)
diff --git a/lib/ansible/runner/action_plugins/script.py b/lib/ansible/runner/action_plugins/script.py
deleted file mode 100644
index 1b1aadc7aa..0000000000
--- a/lib/ansible/runner/action_plugins/script.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import shlex
-
-import ansible.constants as C
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-
-class ActionModule(object):
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- if self.runner.noop_on_check(inject):
- # in check mode, always skip this module
- return ReturnData(conn=conn, comm_ok=True,
- result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # extract ansible reserved parameters
- # From library/command keep in sync
- creates = None
- removes = None
- r = re.compile(r'(^|\s)(creates|removes)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "creates":
- creates = v
- elif m.group(2) == "removes":
- removes = v
- module_args = r.sub("", module_args)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % creates
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
- if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % removes
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and not stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s does not exist" % removes)
- )
- )
-
- # Decode the result of shlex.split() to UTF8 to get around a bug in that's been fixed in Python 2.7 but not Python 2.6.
- # See: http://bugs.python.org/issue6988
- tokens = shlex.split(module_args.encode('utf8'))
- tokens = [s.decode('utf8') for s in tokens]
- # extract source script
- source = tokens[0]
-
- # FIXME: error handling
- args = " ".join(tokens[1:])
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # transfer the file to a remote tmp location
- source = source.replace('\x00', '') # why does this happen here?
- args = args.replace('\x00', '') # why does this happen here?
- tmp_src = conn.shell.join_path(tmp, os.path.basename(source))
- tmp_src = tmp_src.replace('\x00', '')
-
- conn.put_file(source, tmp_src)
-
- sudoable = True
- # set file permissions, more permissive when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- chmod_mode = 'a+rx'
- sudoable = False
- else:
- chmod_mode = '+rx'
- self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, become=self.runner.become)
-
- # add preparation steps to one ssh roundtrip executing the script
- env_string = self.runner._compute_environment_string(conn, inject)
- module_args = ' '.join([env_string, tmp_src, args])
-
- handler = utils.plugins.action_loader.get('raw', self.runner)
- result = handler.run(conn, tmp, 'raw', module_args, inject)
-
- # clean up after
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
- self.runner._remove_tmp_path(conn, tmp)
-
- result.result['changed'] = True
-
- return result
diff --git a/lib/ansible/runner/action_plugins/set_fact.py b/lib/ansible/runner/action_plugins/set_fact.py
deleted file mode 100644
index 7ac972cac6..0000000000
--- a/lib/ansible/runner/action_plugins/set_fact.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2013 Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for running operations on master '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- # parse the k=v arguments and convert any special boolean
- # strings into proper booleans (issue #8629)
- parsed_args = utils.parse_kv(module_args)
- for k,v in parsed_args.iteritems():
- # convert certain strings to boolean values
- if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
- parsed_args[k] = utils.boolean(v)
-
- # and finally update the options with the parsed/modified args
- options.update(parsed_args)
-
- return ReturnData(conn=conn, result=dict(ansible_facts=options))
diff --git a/lib/ansible/runner/action_plugins/synchronize.py b/lib/ansible/runner/action_plugins/synchronize.py
deleted file mode 100644
index fb82194b00..0000000000
--- a/lib/ansible/runner/action_plugins/synchronize.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os.path
-
-from ansible import utils
-from ansible import constants
-from ansible.runner.return_data import ReturnData
-import ansible.utils.template as template
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
- self.inject = None
-
- def _get_absolute_path(self, path=None):
- if 'vars' in self.inject:
- if '_original_file' in self.inject['vars']:
- # roles
- original_path = path
- path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir)
- if original_path and original_path[-1] == '/' and path[-1] != '/':
- # make sure the dwim'd path ends in a trailing "/"
- # if the original path did
- path += '/'
-
- return path
-
- def _process_origin(self, host, path, user):
-
- if not host in ['127.0.0.1', 'localhost']:
- if user:
- return '%s@%s:%s' % (user, host, path)
- else:
- return '%s:%s' % (host, path)
- else:
- if not ':' in path:
- if not path.startswith('/'):
- path = self._get_absolute_path(path=path)
- return path
-
- def _process_remote(self, host, path, user):
- transport = self.runner.transport
- return_data = None
- if not host in ['127.0.0.1', 'localhost'] or transport != "local":
- if user:
- return_data = '%s@%s:%s' % (user, host, path)
- else:
- return_data = '%s:%s' % (host, path)
- else:
- return_data = path
-
- if not ':' in return_data:
- if not return_data.startswith('/'):
- return_data = self._get_absolute_path(path=return_data)
-
- return return_data
-
- def setup(self, module_name, inject):
- ''' Always default to localhost as delegate if None defined '''
-
- self.inject = inject
-
- # Store original transport and sudo values.
- self.original_transport = inject.get('ansible_connection', self.runner.transport)
- self.original_become = self.runner.become
- self.transport_overridden = False
-
- if inject.get('delegate_to') is None:
- inject['delegate_to'] = '127.0.0.1'
- # IF original transport is not local, override transport and disable sudo.
- if self.original_transport != 'local':
- inject['ansible_connection'] = 'local'
- self.transport_overridden = True
- self.runner.become = False
-
- def run(self, conn, tmp, module_name, module_args,
- inject, complex_args=None, **kwargs):
-
- ''' generates params and passes them on to the rsync module '''
-
- self.inject = inject
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- use_ssh_args = options.pop('use_ssh_args', None)
-
- src = template.template(self.runner.basedir, src, inject)
- dest = template.template(self.runner.basedir, dest, inject)
- use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
-
- try:
- options['local_rsync_path'] = inject['ansible_rsync_path']
- except KeyError:
- pass
-
- # from the perspective of the rsync call the delegate is the localhost
- src_host = '127.0.0.1'
- dest_host = inject.get('ansible_ssh_host', inject['inventory_hostname'])
-
- # allow ansible_ssh_host to be templated
- dest_host = template.template(self.runner.basedir, dest_host, inject, fail_on_undefined=True)
- dest_is_local = dest_host in ['127.0.0.1', 'localhost']
-
- # CHECK FOR NON-DEFAULT SSH PORT
- dest_port = options.get('dest_port')
- inv_port = inject.get('ansible_ssh_port', inject['inventory_hostname'])
- if inv_port != dest_port and inv_port != inject['inventory_hostname']:
- options['dest_port'] = inv_port
-
- # edge case: explicit delegate and dest_host are the same
- if dest_host == inject['delegate_to']:
- dest_host = '127.0.0.1'
-
- # SWITCH SRC AND DEST PER MODE
- if options.get('mode', 'push') == 'pull':
- (dest_host, src_host) = (src_host, dest_host)
-
- # CHECK DELEGATE HOST INFO
- use_delegate = False
- if conn.delegate != conn.host:
- if 'hostvars' in inject:
- if conn.delegate in inject['hostvars'] and self.original_transport != 'local':
- # use a delegate host instead of localhost
- use_delegate = True
-
- # COMPARE DELEGATE, HOST AND TRANSPORT
- process_args = False
- if not dest_host is src_host and self.original_transport != 'local':
- # interpret and inject remote host info into src or dest
- process_args = True
-
- # MUNGE SRC AND DEST PER REMOTE_HOST INFO
- if process_args or use_delegate:
-
- user = None
- if utils.boolean(options.get('set_remote_user', 'yes')):
- if use_delegate:
- user = inject['hostvars'][conn.delegate].get('ansible_ssh_user')
-
- if not use_delegate or not user:
- user = inject.get('ansible_ssh_user',
- self.runner.remote_user)
-
- if use_delegate:
- # FIXME
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
- else:
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
-
- private_key = template.template(self.runner.basedir, private_key, inject, fail_on_undefined=True)
-
- if not private_key is None:
- private_key = os.path.expanduser(private_key)
- options['private_key'] = private_key
-
- # use the mode to define src and dest's url
- if options.get('mode', 'push') == 'pull':
- # src is a remote path: <user>@<host>, dest is a local path
- src = self._process_remote(src_host, src, user)
- dest = self._process_origin(dest_host, dest, user)
- else:
- # src is a local path, dest is a remote path: <user>@<host>
- src = self._process_origin(src_host, src, user)
- dest = self._process_remote(dest_host, dest, user)
-
- options['src'] = src
- options['dest'] = dest
- if 'mode' in options:
- del options['mode']
- if use_ssh_args:
- options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
-
- # Allow custom rsync path argument.
- rsync_path = options.get('rsync_path', None)
-
- # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
- if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
- rsync_path = 'sudo rsync'
-
- # make sure rsync path is quoted.
- if rsync_path:
- options['rsync_path'] = '"' + rsync_path + '"'
-
- module_args = ""
- if self.runner.noop_on_check(inject):
- module_args = "CHECKMODE=True"
-
- # run the module and store the result
- result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
-
- # reset the sudo property
- self.runner.become = self.original_become
-
- return result
-
diff --git a/lib/ansible/runner/action_plugins/template.py b/lib/ansible/runner/action_plugins/template.py
deleted file mode 100644
index 5c9be9e079..0000000000
--- a/lib/ansible/runner/action_plugins/template.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def get_checksum(self, conn, tmp, dest, inject, try_directory=False, source=None):
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if remote_checksum in ('0', '2', '3', '4'):
- # Note: 1 means the file is not present which is fine; template
- # will create it. 3 means directory was specified instead of file
- # which requires special handling
- if try_directory and remote_checksum == '3' and source:
- # If the user specified a directory name as their dest then we
- # have to check the checksum of dest/basename(src). This is
- # the same behaviour as cp foo.txt /var/tmp/ so users expect
- # it to work.
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, try_directory=False)
- if remote_checksum not in ('0', '2', '3', '4'):
- return remote_checksum
-
- result = dict(failed=True, msg="failed to checksum remote file."
- " Checksum error code: %s" % remote_checksum)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- return remote_checksum
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # Expand any user home dir specification
- dest = self.runner._remote_expand_user(conn, dest, tmp)
-
- directory_prepended = False
- if dest.endswith("/"): # CCTODO: Fix path for Windows hosts.
- directory_prepended = True
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, not directory_prepended, source=source)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links. When doing that, we have to set
- # original_basename to the template just in case the dest is
- # a directory.
- module_args = ''
- new_module_args = dict(
- src=None,
- original_basename=os.path.basename(source),
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- options.update(new_module_args)
- return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=options)
-
diff --git a/lib/ansible/runner/action_plugins/unarchive.py b/lib/ansible/runner/action_plugins/unarchive.py
deleted file mode 100644
index 312a2265c0..0000000000
--- a/lib/ansible/runner/action_plugins/unarchive.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-import pipes
-
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- copy = utils.boolean(options.get('copy', 'yes'))
- creates = options.get('creates', None)
-
- if source is None or dest is None:
- result = dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
- module_args_tmp = ""
- complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False)
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args_tmp, delete_remote_tmp=False)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
-
- dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts.
- source = template.template(self.runner.basedir, os.path.expanduser(source), inject)
- if copy:
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
- if remote_checksum == '4':
- result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
- return ReturnData(conn=conn, result=result)
- if remote_checksum != '3':
- result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest)
- return ReturnData(conn=conn, result=result)
-
- if copy:
- # transfer the file to a remote tmp location
- tmp_src = tmp + 'source'
- conn.put_file(source, tmp_src)
-
- # handle diff mode client side
- # handle check mode client side
- # fix file permissions when the copy is done as a different user
- if copy:
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- original_basename=os.path.basename(source),
- )
-
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
- else:
- module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source)))
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- module_args += " CHECKMODE=True"
- return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args)
diff --git a/lib/ansible/runner/action_plugins/win_copy.py b/lib/ansible/runner/action_plugins/win_copy.py
deleted file mode 100644
index a62dfb9985..0000000000
--- a/lib/ansible/runner/action_plugins/win_copy.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile in case we set it after finding content populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Get the amount of spaces to remove to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's recursive copy, destination is always a dir,
- # explicitly mark it so (note - copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A register for if we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the file if there is one file.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is kind of optimization - if user told us destination is
- # dir, do path manipulation right away, otherwise we still check
- # for dest being a dir via remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum.
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum != '1' and not force:
- # remote_file does not exist so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after original and override them
- # we pass dest only to make sure it includes trailing slash in case of recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file, already correct md5, but still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed') == True:
- return module_return
- if module_result.get('changed') == True:
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
diff --git a/lib/ansible/runner/action_plugins/win_template.py b/lib/ansible/runner/action_plugins/win_template.py
deleted file mode 100644
index 7bde4bd510..0000000000
--- a/lib/ansible/runner/action_plugins/win_template.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if conn.shell.path_has_trailing_slash(dest):
- base = os.path.basename(source)
- dest = conn.shell.join_path(dest, base)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links
- new_module_args = dict(
- src=None,
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- module_args = utils.merge_module_args(module_args, new_module_args)
- return self.runner._execute_module(conn, tmp, 'win_file', module_args, inject=inject, complex_args=complex_args)
-
diff --git a/lib/ansible/runner/connection.py b/lib/ansible/runner/connection.py
deleted file mode 100644
index 2ea484f70b..0000000000
--- a/lib/ansible/runner/connection.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# (c) 2012-2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-################################################
-
-import os
-import stat
-import errno
-
-from ansible import utils
-from ansible.errors import AnsibleError
-
-class Connector(object):
- ''' Handles abstract connections to remote hosts '''
-
- def __init__(self, runner):
- self.runner = runner
-
- def connect(self, host, port, user, password, transport, private_key_file, delegate_host):
- conn = utils.plugins.connection_loader.get(transport, self.runner, host, port, user=user, password=password, private_key_file=private_key_file)
- if conn is None:
- raise AnsibleError("unsupported connection type: %s" % transport)
- conn.delegate = delegate_host
- if private_key_file:
- # If private key is readable by user other than owner, flag an error
- st = None
- try:
- st = os.stat(private_key_file)
- except (IOError, OSError), e:
- if e.errno != errno.ENOENT: # file is missing, might be agent
- raise(e)
-
- if st is not None and st.st_mode & (stat.S_IRGRP | stat.S_IROTH):
- raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - "
- "you will probably get an SSH failure"
- % (private_key_file,))
- self.active = conn.connect()
- return self.active
diff --git a/lib/ansible/runner/connection_plugins/__init__.py b/lib/ansible/runner/connection_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/runner/connection_plugins/__init__.py
+++ /dev/null
diff --git a/lib/ansible/runner/connection_plugins/fireball.py b/lib/ansible/runner/connection_plugins/fireball.py
deleted file mode 100644
index 562fc2eccf..0000000000
--- a/lib/ansible/runner/connection_plugins/fireball.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import base64
-from ansible.callbacks import vvv
-from ansible import utils
-from ansible import errors
-from ansible import constants
-
-HAVE_ZMQ=False
-
-try:
- import zmq
- HAVE_ZMQ=True
-except ImportError:
- pass
-
-class Connection(object):
- ''' ZeroMQ accelerated connection '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
-
- self.runner = runner
- self.has_pipelining = False
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- self.host = host
- self.key = utils.key_for_hostname(host)
- self.context = None
- self.socket = None
-
- if port is None:
- self.port = constants.ZEROMQ_PORT
- else:
- self.port = port
-
- self.become_methods_supported=[]
-
- def connect(self):
- ''' activates the connection object '''
-
- if not HAVE_ZMQ:
- raise errors.AnsibleError("zmq is not installed")
-
- # this is rough/temporary and will likely be optimized later ...
- self.context = zmq.Context()
- socket = self.context.socket(zmq.REQ)
- addr = "tcp://%s:%s" % (self.host, self.port)
- socket.connect(addr)
- self.socket = socket
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- vvv("EXEC COMMAND %s" % cmd)
-
- if self.runner.become and sudoable:
- raise errors.AnsibleError(
- "When using fireball, do not specify sudo or su to run your tasks. " +
- "Instead sudo the fireball action with sudo. " +
- "Task will communicate with the fireball already running in sudo mode."
- )
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- data = file(in_path).read()
- data = base64.b64encode(data)
-
- data = dict(mode='put', data=data, out_path=out_path)
- # TODO: support chunked file transfer
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- # no meaningful response needed for this
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- response = response['data']
- response = base64.b64decode(response)
-
- fh = open(out_path, "w")
- fh.write(response)
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.socket.close()
- self.context.term()
- except:
- pass
-
diff --git a/lib/ansible/runner/connection_plugins/local.py b/lib/ansible/runner/connection_plugins/local.py
deleted file mode 100644
index beaeb1ae50..0000000000
--- a/lib/ansible/runner/connection_plugins/local.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import traceback
-import os
-import pipes
-import shutil
-import subprocess
-import select
-import fcntl
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv
-
-
-class Connection(object):
- ''' Local based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
- self.has_pipelining = False
-
- # TODO: add su(needs tty), pbrun, pfexec
- self.become_methods_supported=['sudo']
-
- def connect(self, port=None):
- ''' connect to the local host; nothing to do here '''
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the local host '''
-
- # su requires to be run from a terminal, and therefore isn't supported here (yet?)
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- if self.runner.become and sudoable:
- local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe)
- else:
- if executable:
- local_cmd = executable.split() + ['-c', cmd]
- else:
- local_cmd = cmd
- executable = executable.split()[0] if executable else None
-
- vvv("EXEC %s" % (local_cmd), host=self.host)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir, executable=executable,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- if self.runner.become and sudoable and self.runner.become_pass:
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- while success_key not in become_output:
-
- if prompt and become_output.endswith(prompt):
- break
- if utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout, p.stderr], self.runner.timeout)
- if p.stdout in rfd:
- chunk = p.stdout.read()
- elif p.stderr in rfd:
- chunk = p.stderr.read()
- else:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output)
- if not chunk:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output)
- become_output += chunk
- if success_key not in become_output:
- p.stdin.write(self.runner.become_pass + '\n')
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to local '''
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- ''' fetch a file from local to local -- for copatibility '''
- self.put_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/lib/ansible/runner/connection_plugins/ssh.py b/lib/ansible/runner/connection_plugins/ssh.py
deleted file mode 100644
index 036175f6a9..0000000000
--- a/lib/ansible/runner/connection_plugins/ssh.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import re
-import subprocess
-import shlex
-import pipes
-import random
-import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
-from hashlib import sha1
-import ansible.constants as C
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-
-
-class Connection(object):
- ''' ssh based connections '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.ipv6 = ':' in self.host
- self.port = port
- self.user = str(user)
- self.password = password
- self.private_key_file = private_key_file
- self.HASHED_KEY_MAGIC = "|1|"
- self.has_pipelining = True
-
- # TODO: add pbrun, pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
- def connect(self):
- ''' connect to the remote host '''
-
- vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
-
- self.common_args = []
- extra_args = C.ANSIBLE_SSH_ARGS
- if extra_args is not None:
- # make sure there is no empty string added as this can produce weird errors
- self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
- else:
- self.common_args += ["-o", "ControlMaster=auto",
- "-o", "ControlPersist=60s",
- "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- cp_in_use = False
- cp_path_set = False
- for arg in self.common_args:
- if "ControlPersist" in arg:
- cp_in_use = True
- if "ControlPath" in arg:
- cp_path_set = True
-
- if cp_in_use and not cp_path_set:
- self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- if not C.HOST_KEY_CHECKING:
- self.common_args += ["-o", "StrictHostKeyChecking=no"]
-
- if self.port is not None:
- self.common_args += ["-o", "Port=%d" % (self.port)]
- if self.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
- elif self.runner.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
- if self.password:
- self.common_args += ["-o", "GSSAPIAuthentication=no",
- "-o", "PubkeyAuthentication=no"]
- else:
- self.common_args += ["-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no"]
- if self.user != pwd.getpwuid(os.geteuid())[0]:
- self.common_args += ["-o", "User="+self.user]
- self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
-
- return self
-
- def _run(self, cmd, indata):
- if indata:
- # do not use pseudo-pty
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
- else:
- # try to use upseudo-pty
- try:
- # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
- master, slave = pty.openpty()
- p = subprocess.Popen(cmd, stdin=slave,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'w', 0)
- os.close(slave)
- except:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- return (p, stdin)
-
- def _password_cmd(self):
- if self.password:
- try:
- p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- except OSError:
- raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
- (self.rfd, self.wfd) = os.pipe()
- return ["sshpass", "-d%d" % self.rfd]
- return []
-
- def _send_password(self):
- if self.password:
- os.close(self.rfd)
- os.write(self.wfd, "%s\n" % self.password)
- os.close(self.wfd)
-
- def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- # We can't use p.communicate here because the ControlMaster may have stdout open as well
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- if indata:
- try:
- stdin.write(indata)
- stdin.close()
- except:
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- # Read stdout/stderr from process
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- # fail early if the become password is wrong
- if self.runner.become and sudoable:
- incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
-
- if prompt:
- if self.runner.become_pass:
- if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if stdout.endswith(prompt):
- raise errors.AnsibleError('Missing become password')
- elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually p.poll() is always None here if rpipes is empty
- elif not rpipes and p.poll() == None:
- p.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
- return (p.returncode, stdout, stderr)
-
- def not_in_host_file(self, host):
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- line = line.strip()
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if not tokens:
- continue
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- if (hfiles_not_found == len(host_file_list)):
- vvv("EXEC previous known host file not found for %s" % host)
- return True
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- ssh_cmd = self._password_cmd()
- ssh_cmd += ["ssh", "-C"]
- if not in_data:
- # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
- # inside a tty automatically invokes the python interactive-mode but the modules are not
- # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
- ssh_cmd += ["-tt"]
- if utils.VERBOSITY > 3:
- ssh_cmd += ["-vvv"]
- else:
- if self.runner.module_name == 'raw':
- ssh_cmd += ["-q"]
- else:
- ssh_cmd += ["-v"]
- ssh_cmd += self.common_args
-
- if self.ipv6:
- ssh_cmd += ['-6']
- ssh_cmd += [self.host]
-
- if self.runner.become and sudoable:
- becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
- ssh_cmd.append(becomecmd)
- else:
- prompt = None
- if executable:
- ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- else:
- ssh_cmd.append(cmd)
-
- vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
-
- not_in_host_file = self.not_in_host_file(self.host)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- # create process
- (p, stdin) = self._run(ssh_cmd, in_data)
-
- self._send_password()
-
- no_prompt_out = ''
- no_prompt_err = ''
- if sudoable and self.runner.become and self.runner.become_pass:
- # several cases are handled for escalated privileges with password
- # * NOPASSWD (tty & no-tty): detect success_key on stdout
- # * without NOPASSWD:
- # * detect prompt on stdout (tty)
- # * detect prompt on stderr (no-tty)
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- become_errput = ''
-
- while True:
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout], self.runner.timeout)
- if p.stderr in rfd:
- chunk = p.stderr.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
- become_errput += chunk
- incorrect_password = gettext.dgettext(
- "become", "Sorry, try again.")
- if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- raise errors.AnsibleError('Incorrect become password')
- elif prompt and become_errput.endswith(prompt):
- stdin.write(self.runner.become_pass + '\n')
-
- if p.stdout in rfd:
- chunk = p.stdout.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
- become_output += chunk
-
- if not rfd:
- # timeout. wrap up process communication
- stdout = p.communicate()
- raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
-
- if success_key in become_output:
- no_prompt_out += become_output
- no_prompt_err += become_errput
- elif sudoable:
- stdin.write(self.runner.become_pass + '\n')
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
- controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
- 'unknown configuration option: ControlPersist' in stderr
-
- if C.HOST_KEY_CHECKING:
- if ssh_cmd[0] == "sshpass" and p.returncode == 6:
- raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- if p.returncode != 0 and controlpersisterror:
- raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
- if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- if p.returncode == 255:
- ip = None
- port = None
- for line in stderr.splitlines():
- match = re.search(
- 'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)',
- line)
- if match:
- ip = match.group(1)
- port = match.group(2)
- if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
- lines = [line for line in stderr.splitlines()
- if 'ignore key:' in line]
- else:
- lines = stderr.splitlines()[-1:]
- if ip and port:
- lines.append(' while connecting to %s:%s' % (ip, port))
- lines.append(
- 'It is sometimes useful to re-run the command using -vvvv, '
- 'which prints SSH debug output to help diagnose the issue.')
- raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))
-
- return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [in_path,host + ":" + pipes.quote(out_path)]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
-
- (p, stdin) = self._run(cmd, indata)
-
- self._send_password()
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
-
- if returncode != 0:
- raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [host + ":" + in_path, out_path]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "get %s %s\n" % (in_path, out_path)
-
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- self._send_password()
- stdout, stderr = p.communicate(indata)
-
- if p.returncode != 0:
- raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
-
- def close(self):
- ''' not applicable since we're executing openssh binaries '''
- pass
-
diff --git a/lib/ansible/runner/filter_plugins/__init__.py b/lib/ansible/runner/filter_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/runner/filter_plugins/__init__.py
+++ /dev/null
diff --git a/lib/ansible/runner/filter_plugins/core.py b/lib/ansible/runner/filter_plugins/core.py
deleted file mode 100644
index bdf45509c3..0000000000
--- a/lib/ansible/runner/filter_plugins/core.py
+++ /dev/null
@@ -1,351 +0,0 @@
-# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import sys
-import base64
-import json
-import os.path
-import types
-import pipes
-import glob
-import re
-import crypt
-import hashlib
-import string
-from functools import partial
-import operator as py_operator
-from random import SystemRandom, shuffle
-import uuid
-
-import yaml
-from jinja2.filters import environmentfilter
-from distutils.version import LooseVersion, StrictVersion
-
-from ansible import errors
-from ansible.utils.hashing import md5s, checksum_s
-from ansible.utils.unicode import unicode_wrap, to_unicode
-
-
-UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
-
-
-def to_nice_yaml(*a, **kw):
- '''Make verbose, human readable yaml'''
- transformed = yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)
- return to_unicode(transformed)
-
-def to_json(a, *args, **kw):
- ''' Convert the value to JSON '''
- return json.dumps(a, *args, **kw)
-
-def to_nice_json(a, *args, **kw):
- '''Make verbose, human readable JSON'''
- # python-2.6's json encoder is buggy (can't encode hostvars)
- if sys.version_info < (2, 7):
- try:
- import simplejson
- except ImportError:
- pass
- else:
- try:
- major = int(simplejson.__version__.split('.')[0])
- except:
- pass
- else:
- if major >= 2:
- return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
- # Fallback to the to_json filter
- return to_json(a, *args, **kw)
- return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
-
-def failed(*a, **kw):
- ''' Test if task result yields failed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|failed expects a dictionary")
- rc = item.get('rc',0)
- failed = item.get('failed',False)
- if rc != 0 or failed:
- return True
- else:
- return False
-
-def success(*a, **kw):
- ''' Test if task result yields success '''
- return not failed(*a, **kw)
-
-def changed(*a, **kw):
- ''' Test if task result yields changed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|changed expects a dictionary")
- if not 'changed' in item:
- changed = False
- if ('results' in item # some modules return a 'results' key
- and type(item['results']) == list
- and type(item['results'][0]) == dict):
- for result in item['results']:
- changed = changed or result.get('changed', False)
- else:
- changed = item.get('changed', False)
- return changed
-
-def skipped(*a, **kw):
- ''' Test if task result yields skipped '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|skipped expects a dictionary")
- skipped = item.get('skipped', False)
- return skipped
-
-def mandatory(a):
- ''' Make a variable mandatory '''
- try:
- a
- except NameError:
- raise errors.AnsibleFilterError('Mandatory variable not defined.')
- else:
- return a
-
-def bool(a):
- ''' return a bool for the arg '''
- if a is None or type(a) == bool:
- return a
- if type(a) in types.StringTypes:
- a = a.lower()
- if a in ['yes', 'on', '1', 'true', 1]:
- return True
- else:
- return False
-
-def quote(a):
- ''' return its argument quoted for shell usage '''
- return pipes.quote(a)
-
-def fileglob(pathname):
- ''' return list of matched files for glob '''
- return glob.glob(pathname)
-
-def regex(value='', pattern='', ignorecase=False, match_type='search'):
- ''' Expose `re` as a boolean filter using the `search` method by default.
- This is likely only useful for `search` and `match` which already
- have their own filters.
- '''
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- _bool = __builtins__.get('bool')
- return _bool(getattr(_re, match_type, 'search')(value))
-
-def match(value, pattern='', ignorecase=False):
- ''' Perform a `re.match` returning a boolean '''
- return regex(value, pattern, ignorecase, 'match')
-
-def search(value, pattern='', ignorecase=False):
- ''' Perform a `re.search` returning a boolean '''
- return regex(value, pattern, ignorecase, 'search')
-
-def regex_replace(value='', pattern='', replacement='', ignorecase=False):
- ''' Perform a `re.sub` returning a string '''
-
- if not isinstance(value, basestring):
- value = str(value)
-
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- return _re.sub(replacement, value)
-
-def ternary(value, true_val, false_val):
- ''' value ? true_val : false_val '''
- if value:
- return true_val
- else:
- return false_val
-
-
-def version_compare(value, version, operator='eq', strict=False):
- ''' Perform a version comparison on a value '''
- op_map = {
- '==': 'eq', '=': 'eq', 'eq': 'eq',
- '<': 'lt', 'lt': 'lt',
- '<=': 'le', 'le': 'le',
- '>': 'gt', 'gt': 'gt',
- '>=': 'ge', 'ge': 'ge',
- '!=': 'ne', '<>': 'ne', 'ne': 'ne'
- }
-
- if strict:
- Version = StrictVersion
- else:
- Version = LooseVersion
-
- if operator in op_map:
- operator = op_map[operator]
- else:
- raise errors.AnsibleFilterError('Invalid operator type')
-
- try:
- method = getattr(py_operator, operator)
- return method(Version(str(value)), Version(str(version)))
- except Exception, e:
- raise errors.AnsibleFilterError('Version comparison: %s' % e)
-
-@environmentfilter
-def rand(environment, end, start=None, step=None):
- r = SystemRandom()
- if isinstance(end, (int, long)):
- if not start:
- start = 0
- if not step:
- step = 1
- return r.randrange(start, end, step)
- elif hasattr(end, '__iter__'):
- if start or step:
- raise errors.AnsibleFilterError('start and step can only be used with integer values')
- return r.choice(end)
- else:
- raise errors.AnsibleFilterError('random can only be used on sequences and integers')
-
-def randomize_list(mylist):
- try:
- mylist = list(mylist)
- shuffle(mylist)
- except:
- pass
- return mylist
-
-def get_hash(data, hashtype='sha1'):
-
- try: # see if hash is supported
- h = hashlib.new(hashtype)
- except:
- return None
-
- h.update(data)
- return h.hexdigest()
-
-def get_encrypted_password(password, hashtype='sha512', salt=None):
-
- # TODO: find a way to construct dynamically from system
- cryptmethod= {
- 'md5': '1',
- 'blowfish': '2a',
- 'sha256': '5',
- 'sha512': '6',
- }
-
- hastype = hashtype.lower()
- if hashtype in cryptmethod:
- if salt is None:
- r = SystemRandom()
- salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
-
- saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
- encrypted = crypt.crypt(password,saltstring)
- return encrypted
-
- return None
-
-def to_uuid(string):
- return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
-
-class FilterModule(object):
- ''' Ansible core jinja2 filters '''
-
- def filters(self):
- return {
- # base 64
- 'b64decode': partial(unicode_wrap, base64.b64decode),
- 'b64encode': partial(unicode_wrap, base64.b64encode),
-
- # uuid
- 'to_uuid': to_uuid,
-
- # json
- 'to_json': to_json,
- 'to_nice_json': to_nice_json,
- 'from_json': json.loads,
-
- # yaml
- 'to_yaml': yaml.safe_dump,
- 'to_nice_yaml': to_nice_yaml,
- 'from_yaml': yaml.safe_load,
-
- # path
- 'basename': partial(unicode_wrap, os.path.basename),
- 'dirname': partial(unicode_wrap, os.path.dirname),
- 'expanduser': partial(unicode_wrap, os.path.expanduser),
- 'realpath': partial(unicode_wrap, os.path.realpath),
- 'relpath': partial(unicode_wrap, os.path.relpath),
-
- # failure testing
- 'failed' : failed,
- 'success' : success,
-
- # changed testing
- 'changed' : changed,
-
- # skip testing
- 'skipped' : skipped,
-
- # variable existence
- 'mandatory': mandatory,
-
- # value as boolean
- 'bool': bool,
-
- # quote string for shell usage
- 'quote': quote,
-
- # hash filters
- # md5 hex digest of string
- 'md5': md5s,
- # sha1 hex digeset of string
- 'sha1': checksum_s,
- # checksum of string as used by ansible for checksuming files
- 'checksum': checksum_s,
- # generic hashing
- 'password_hash': get_encrypted_password,
- 'hash': get_hash,
-
- # file glob
- 'fileglob': fileglob,
-
- # regex
- 'match': match,
- 'search': search,
- 'regex': regex,
- 'regex_replace': regex_replace,
-
- # ? : ;
- 'ternary': ternary,
-
- # list
- # version comparison
- 'version_compare': version_compare,
-
- # random stuff
- 'random': rand,
- 'shuffle': randomize_list,
- }
diff --git a/lib/ansible/runner/filter_plugins/ipaddr.py b/lib/ansible/runner/filter_plugins/ipaddr.py
deleted file mode 100644
index 5d9d6e3136..0000000000
--- a/lib/ansible/runner/filter_plugins/ipaddr.py
+++ /dev/null
@@ -1,659 +0,0 @@
-# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from functools import partial
-
-try:
- import netaddr
-except ImportError:
- # in this case, we'll make the filters return error messages (see bottom)
- netaddr = None
-else:
- class mac_linux(netaddr.mac_unix):
- pass
- mac_linux.word_fmt = '%.2x'
-
-from ansible import errors
-
-
-# ---- IP address and network query helpers ----
-
-def _empty_ipaddr_query(v, vtype):
- # We don't have any query to process, so just check what type the user
- # expects, and return the IP address in a correct format
- if v:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
-def _6to4_query(v, vtype, value):
- if v.version == 4:
-
- if v.size == 1:
- ipconv = str(v.ip)
- elif v.size > 1:
- if v.ip != v.network:
- ipconv = str(v.ip)
- else:
- ipconv = False
-
- if ipaddr(ipconv, 'public'):
- numbers = list(map(int, ipconv.split('.')))
-
- try:
- return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
- except:
- return False
-
- elif v.version == 6:
- if vtype == 'address':
- if ipaddr(str(v), '2002::/16'):
- return value
- elif vtype == 'network':
- if v.ip != v.network:
- if ipaddr(str(v.ip), '2002::/16'):
- return value
- else:
- return False
-
-def _ip_query(v):
- if v.size == 1:
- return str(v.ip)
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip)
-
-def _gateway_query(v):
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _bool_ipaddr_query(v):
- if v:
- return True
-
-def _broadcast_query(v):
- if v.size > 1:
- return str(v.broadcast)
-
-def _cidr_query(v):
- return str(v)
-
-def _cidr_lookup_query(v, iplist, value):
- try:
- if v in iplist:
- return value
- except:
- return False
-
-def _host_query(v):
- if v.size == 1:
- return str(v)
- elif v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _hostmask_query(v):
- return str(v.hostmask)
-
-def _int_query(v, vtype):
- if vtype == 'address':
- return int(v.ip)
- elif vtype == 'network':
- return str(int(v.ip)) + '/' + str(int(v.prefixlen))
-
-def _ipv4_query(v, value):
- if v.version == 6:
- try:
- return str(v.ipv4())
- except:
- return False
- else:
- return value
-
-def _ipv6_query(v, value):
- if v.version == 4:
- return str(v.ipv6())
- else:
- return value
-
-def _link_local_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v.version == 4:
- if ipaddr(str(v_ip), '169.254.0.0/24'):
- return value
-
- elif v.version == 6:
- if ipaddr(str(v_ip), 'fe80::/10'):
- return value
-
-def _loopback_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_loopback():
- return value
-
-def _multicast_query(v, value):
- if v.is_multicast():
- return value
-
-def _net_query(v):
- if v.size > 1:
- if v.ip == v.network:
- return str(v.network) + '/' + str(v.prefixlen)
-
-def _netmask_query(v):
- if v.size > 1:
- return str(v.netmask)
-
-def _network_query(v):
- if v.size > 1:
- return str(v.network)
-
-def _prefix_query(v):
- return int(v.prefixlen)
-
-def _private_query(v, value):
- if v.is_private():
- return value
-
-def _public_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_unicast() and not v_ip.is_private() and \
- not v_ip.is_loopback() and not v_ip.is_netmask() and \
- not v_ip.is_hostmask():
- return value
-
-def _revdns_query(v):
- v_ip = netaddr.IPAddress(str(v.ip))
- return v_ip.reverse_dns
-
-def _size_query(v):
- return v.size
-
-def _subnet_query(v):
- return str(v.cidr)
-
-def _type_query(v):
- if v.size == 1:
- return 'address'
- if v.size > 1:
- if v.ip != v.network:
- return 'address'
- else:
- return 'network'
-
-def _unicast_query(v, value):
- if v.is_unicast():
- return value
-
-def _version_query(v):
- return v.version
-
-def _wrap_query(v, vtype, value):
- if v.version == 6:
- if vtype == 'address':
- return '[' + str(v.ip) + ']'
- elif vtype == 'network':
- return '[' + str(v.ip) + ']/' + str(v.prefixlen)
- else:
- return value
-
-
-# ---- HWaddr query helpers ----
-def _bare_query(v):
- v.dialect = netaddr.mac_bare
- return str(v)
-
-def _bool_hwaddr_query(v):
- if v:
- return True
-
-def _cisco_query(v):
- v.dialect = netaddr.mac_cisco
- return str(v)
-
-def _empty_hwaddr_query(v, value):
- if v:
- return value
-
-def _linux_query(v):
- v.dialect = mac_linux
- return str(v)
-
-def _postgresql_query(v):
- v.dialect = netaddr.mac_pgsql
- return str(v)
-
-def _unix_query(v):
- v.dialect = netaddr.mac_unix
- return str(v)
-
-def _win_query(v):
- v.dialect = netaddr.mac_eui48
- return str(v)
-
-
-# ---- IP address and network filters ----
-
-def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
- ''' Check if string is an IP address or network and filter it '''
-
- query_func_extra_args = {
- '': ('vtype',),
- '6to4': ('vtype', 'value'),
- 'cidr_lookup': ('iplist', 'value'),
- 'int': ('vtype',),
- 'ipv4': ('value',),
- 'ipv6': ('value',),
- 'link-local': ('value',),
- 'loopback': ('value',),
- 'lo': ('value',),
- 'multicast': ('value',),
- 'private': ('value',),
- 'public': ('value',),
- 'unicast': ('value',),
- 'wrap': ('vtype', 'value'),
- }
- query_func_map = {
- '': _empty_ipaddr_query,
- '6to4': _6to4_query,
- 'address': _ip_query,
- 'address/prefix': _gateway_query,
- 'bool': _bool_ipaddr_query,
- 'broadcast': _broadcast_query,
- 'cidr': _cidr_query,
- 'cidr_lookup': _cidr_lookup_query,
- 'gateway': _gateway_query,
- 'gw': _gateway_query,
- 'host': _host_query,
- 'host/prefix': _gateway_query,
- 'hostmask': _hostmask_query,
- 'hostnet': _gateway_query,
- 'int': _int_query,
- 'ip': _ip_query,
- 'ipv4': _ipv4_query,
- 'ipv6': _ipv6_query,
- 'link-local': _link_local_query,
- 'lo': _loopback_query,
- 'loopback': _loopback_query,
- 'multicast': _multicast_query,
- 'net': _net_query,
- 'netmask': _netmask_query,
- 'network': _network_query,
- 'prefix': _prefix_query,
- 'private': _private_query,
- 'public': _public_query,
- 'revdns': _revdns_query,
- 'router': _gateway_query,
- 'size': _size_query,
- 'subnet': _subnet_query,
- 'type': _type_query,
- 'unicast': _unicast_query,
- 'v4': _ipv4_query,
- 'v6': _ipv6_query,
- 'version': _version_query,
- 'wrap': _wrap_query,
- }
-
- vtype = None
-
- if not value:
- return False
-
- elif value == True:
- return False
-
- # Check if value is a list and parse each element
- elif isinstance(value, (list, tuple)):
-
- _ret = []
- for element in value:
- if ipaddr(element, str(query), version):
- _ret.append(ipaddr(element, str(query), version))
-
- if _ret:
- return _ret
- else:
- return list()
-
- # Check if value is a number and convert it to an IP address
- elif str(value).isdigit():
-
- # We don't know what IP version to assume, so let's check IPv4 first,
- # then IPv6
- try:
- if ((not version) or (version and version == 4)):
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = int(value)
- v.prefixlen = 32
- elif version and version == 6:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # IPv4 didn't work the first time, so it definitely has to be IPv6
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # The value is too big for IPv6. Are you a nanobot?
- except:
- return False
-
- # We got an IP address, let's mark it as such
- value = str(v)
- vtype = 'address'
-
- # value has not been recognized, check if it's a valid IP string
- else:
- try:
- v = netaddr.IPNetwork(value)
-
- # value is a valid IP string, check if user specified
- # CIDR prefix or just an IP address, this will indicate default
- # output format
- try:
- address, prefix = value.split('/')
- vtype = 'network'
- except:
- vtype = 'address'
-
- # value hasn't been recognized, maybe it's a numerical CIDR?
- except:
- try:
- address, prefix = value.split('/')
- address.isdigit()
- address = int(address)
- prefix.isdigit()
- prefix = int(prefix)
-
- # It's not numerical CIDR, give up
- except:
- return False
-
- # It is something, so let's try and build a CIDR from the parts
- try:
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv4 CIDR
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv6 CIDR. Give up.
- except:
- return False
-
- # We have a valid CIDR, so let's write it in correct format
- value = str(v)
- vtype = 'network'
-
- # We have a query string but it's not in the known query types. Check if
- # that string is a valid subnet, if so, we can check later if given IP
- # address/network is inside that specific subnet
- try:
- ### ?? 6to4 and link-local were True here before. Should they still?
- if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
- iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
- query = 'cidr_lookup'
- except:
- pass
-
- # This code checks if value maches the IP version the user wants, ie. if
- # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
- # If version does not match, return False
- if version and v.version != version:
- return False
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- try:
- float(query)
- if v.size == 1:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
- elif v.size > 1:
- try:
- return str(v[query]) + '/' + str(v.prefixlen)
- except:
- return False
-
- else:
- return value
-
- except:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-
-def ipwrap(value, query = ''):
- try:
- if isinstance(value, (list, tuple)):
- _ret = []
- for element in value:
- if ipaddr(element, query, version = False, alias = 'ipwrap'):
- _ret.append(ipaddr(element, 'wrap'))
- else:
- _ret.append(element)
-
- return _ret
- else:
- _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
- if _ret:
- return ipaddr(_ret, 'wrap')
- else:
- return value
-
- except:
- return value
-
-
-def ipv4(value, query = ''):
- return ipaddr(value, query, version = 4, alias = 'ipv4')
-
-
-def ipv6(value, query = ''):
- return ipaddr(value, query, version = 6, alias = 'ipv6')
-
-
-# Split given subnet into smaller subnets or find out the biggest subnet of
-# a given IP address with given CIDR prefix
-# Usage:
-#
-# - address or address/prefix | ipsubnet
-# returns CIDR subnet of a given input
-#
-# - address/prefix | ipsubnet(cidr)
-# returns number of possible subnets for given CIDR prefix
-#
-# - address/prefix | ipsubnet(cidr, index)
-# returns new subnet with given CIDR prefix
-#
-# - address | ipsubnet(cidr)
-# returns biggest subnet with given CIDR prefix that address belongs to
-#
-# - address | ipsubnet(cidr, index)
-# returns next indexed subnet which contains given address
-def ipsubnet(value, query = '', index = 'x'):
- ''' Manipulate IPv4/IPv6 subnets '''
-
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return str(value)
-
- elif str(query).isdigit():
- vsize = ipaddr(v, 'size')
- query = int(query)
-
- try:
- float(index)
- index = int(index)
-
- if vsize > 1:
- try:
- return str(list(value.subnet(query))[index])
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[index])
- except:
- return False
-
- except:
- if vsize > 1:
- try:
- return str(len(list(value.subnet(query))))
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[0])
- except:
- return False
-
- return False
-
-# Returns the nth host within a network described by value.
-# Usage:
-#
-# - address or address/prefix | nthhost(nth)
-# returns the nth host within the given network
-def nthhost(value, query=''):
- ''' Get the nth host within a given network '''
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return False
-
- try:
- vsize = ipaddr(v, 'size')
- nth = int(query)
- if value.size > nth:
- return value[nth]
-
- except ValueError:
- return False
-
- return False
-
-
-# ---- HWaddr / MAC address filters ----
-
-def hwaddr(value, query = '', alias = 'hwaddr'):
- ''' Check if string is a HW/MAC address and filter it '''
-
- query_func_extra_args = {
- '': ('value',),
- }
- query_func_map = {
- '': _empty_hwaddr_query,
- 'bare': _bare_query,
- 'bool': _bool_hwaddr_query,
- 'cisco': _cisco_query,
- 'eui48': _win_query,
- 'linux': _linux_query,
- 'pgsql': _postgresql_query,
- 'postgresql': _postgresql_query,
- 'psql': _postgresql_query,
- 'unix': _unix_query,
- 'win': _win_query,
- }
-
- try:
- v = netaddr.EUI(value)
- except:
- if query and query != 'bool':
- raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-def macaddr(value, query = ''):
- return hwaddr(value, query, alias = 'macaddr')
-
-def _need_netaddr(f_name, *args, **kwargs):
- raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
- ' installed on the ansible controller'.format(f_name))
-
-# ---- Ansible filters ----
-
-class FilterModule(object):
- ''' IP address and network manipulation filters '''
- filter_map = {
- # IP addresses and networks
- 'ipaddr': ipaddr,
- 'ipwrap': ipwrap,
- 'ipv4': ipv4,
- 'ipv6': ipv6,
- 'ipsubnet': ipsubnet,
- 'nthhost': nthhost,
-
- # MAC / HW addresses
- 'hwaddr': hwaddr,
- 'macaddr': macaddr
- }
-
- def filters(self):
- if netaddr:
- return self.filter_map
- else:
- # Need to install python-netaddr for these filters to work
- return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
diff --git a/lib/ansible/runner/filter_plugins/mathstuff.py b/lib/ansible/runner/filter_plugins/mathstuff.py
deleted file mode 100644
index c6a49485a4..0000000000
--- a/lib/ansible/runner/filter_plugins/mathstuff.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# (c) 2014, Brian Coca <bcoca@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import math
-import collections
-from ansible import errors
-
-def unique(a):
- if isinstance(a,collections.Hashable):
- c = set(a)
- else:
- c = []
- for x in a:
- if x not in c:
- c.append(x)
- return c
-
-def intersect(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) & set(b)
- else:
- c = unique(filter(lambda x: x in b, a))
- return c
-
-def difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) - set(b)
- else:
- c = unique(filter(lambda x: x not in b, a))
- return c
-
-def symmetric_difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) ^ set(b)
- else:
- c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
- return c
-
-def union(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) | set(b)
- else:
- c = unique(a + b)
- return c
-
-def min(a):
- _min = __builtins__.get('min')
- return _min(a);
-
-def max(a):
- _max = __builtins__.get('max')
- return _max(a);
-
-def isnotanumber(x):
- try:
- return math.isnan(x)
- except TypeError:
- return False
-
-
-def logarithm(x, base=math.e):
- try:
- if base == 10:
- return math.log10(x)
- else:
- return math.log(x, base)
- except TypeError, e:
- raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
-
-
-def power(x, y):
- try:
- return math.pow(x, y)
- except TypeError, e:
- raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
-
-
-def inversepower(x, base=2):
- try:
- if base == 2:
- return math.sqrt(x)
- else:
- return math.pow(x, 1.0/float(base))
- except TypeError, e:
- raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
-
-
-class FilterModule(object):
- ''' Ansible math jinja2 filters '''
-
- def filters(self):
- return {
- # general math
- 'isnan': isnotanumber,
- 'min' : min,
- 'max' : max,
-
- # exponents and logarithms
- 'log': logarithm,
- 'pow': power,
- 'root': inversepower,
-
- # set theory
- 'unique' : unique,
- 'intersect': intersect,
- 'difference': difference,
- 'symmetric_difference': symmetric_difference,
- 'union': union,
-
- }
diff --git a/lib/ansible/runner/lookup_plugins/__init__.py b/lib/ansible/runner/lookup_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/runner/lookup_plugins/__init__.py
+++ /dev/null
diff --git a/lib/ansible/runner/lookup_plugins/consul_kv.py b/lib/ansible/runner/lookup_plugins/consul_kv.py
deleted file mode 100755
index 522fa8deb7..0000000000
--- a/lib/ansible/runner/lookup_plugins/consul_kv.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Lookup plugin to grab metadata from a consul key value store.
-============================================================
-
-Plugin will lookup metadata for a playbook from the key value store in a
-consul cluster. Values can be easily set in the kv store with simple rest
-commands e.g.
-
-curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
-
-this can then be looked up in a playbook as follows
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
- - 'key/to/retrieve'
-
-
-Parameters can be provided after the key be more specific about what to retrieve e.g.
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98')}}'
-
-recurse: if true, will retrieve all the values that have the given key as prefix
-index: if the key has a value with the specified index then this is returned
- allowing access to historical values.
-token: acl token to allow access to restricted values.
-
-By default this will lookup keys via the consul agent running on http://localhost:8500
-this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
-of the kv store you'd like to use.
-
-'''
-
-######################################################################
-
-import os
-import sys
-from urlparse import urlparse
-from ansible import utils, errors
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-try:
- import consul
-except ImportError, e:
- print "failed=True msg='python-consul required for this module. "\
- "see http://python-consul.readthedocs.org/en/latest/#installation'"
- sys.exit(1)
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
-
- self.basedir = basedir
- self.agent_url = 'http://localhost:8500'
- if os.getenv('ANSIBLE_CONSUL_URL') is not None:
- self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
-
- def run(self, terms, inject=None, **kwargs):
-
- u = urlparse(self.agent_url)
- consul_api = consul.Consul(host=u.hostname, port=u.port)
-
- values = []
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- try:
- for term in terms:
- params = self.parse_params(term)
- results = consul_api.kv.get(params['key'],
- token=params['token'],
- index=params['index'],
- recurse=params['recurse'])
- if results[1]:
- # responds with a single or list of result maps
- if isinstance(results[1], list):
- for r in results[1]:
- values.append(r['Value'])
- else:
- values.append(results[1]['Value'])
- except Exception, e:
- raise errors.AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
-
- return values
-
- def parse_params(self, term):
- params = term.split(' ')
-
- paramvals = {
- 'key': params[0],
- 'token': None,
- 'recurse': False,
- 'index': None
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
- if param and len(param) > 0:
- name, value = param.split('=')
- assert name in paramvals, "% not a valid consul lookup parameter" % name
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- return paramvals
diff --git a/lib/ansible/runner/lookup_plugins/dig.py b/lib/ansible/runner/lookup_plugins/dig.py
deleted file mode 100644
index a549a4a157..0000000000
--- a/lib/ansible/runner/lookup_plugins/dig.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import socket
-HAVE_DNS=False
-try:
- import dns.resolver
- import dns.reversename
- from dns.rdatatype import *
- from dns.exception import DNSException
- HAVE_DNS=True
-except ImportError:
- pass
-
-def make_rdata_dict(rdata):
- ''' While the 'dig' lookup plugin supports anything which dnspython supports
- out of the box, the following supported_types list describes which
- DNS query types we can convert to a dict.
-
- Note: adding support for RRSIG is hard work. :)
- '''
- supported_types = {
- A : ['address'],
- AAAA : ['address'],
- CNAME : ['target'],
- DNAME : ['target'],
- DLV : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- DNSKEY : ['flags', 'algorithm', 'protocol', 'key'],
- DS : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- HINFO : ['cpu', 'os'],
- LOC : ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
- MX : ['preference', 'exchange'],
- NAPTR : ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
- NS : ['target'],
- NSEC3PARAM : ['algorithm', 'flags', 'iterations', 'salt'],
- PTR : ['target'],
- RP : ['mbox', 'txt'],
- # RRSIG : ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
- SOA : ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
- SPF : ['strings'],
- SRV : ['priority', 'weight', 'port', 'target'],
- SSHFP : ['algorithm', 'fp_type', 'fingerprint'],
- TLSA : ['usage', 'selector', 'mtype', 'cert'],
- TXT : ['strings'],
- }
-
- rd = {}
-
- if rdata.rdtype in supported_types:
- fields = supported_types[rdata.rdtype]
- for f in fields:
- val = rdata.__getattribute__(f)
-
- if type(val) == dns.name.Name:
- val = dns.name.Name.to_text(val)
-
- if rdata.rdtype == DLV and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DS and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DNSKEY and f == 'key':
- val = dns.rdata._base64ify(rdata.key).replace(' ', '')
- if rdata.rdtype == NSEC3PARAM and f == 'salt':
- val = dns.rdata._hexify(rdata.salt).replace(' ', '')
- if rdata.rdtype == SSHFP and f == 'fingerprint':
- val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
- if rdata.rdtype == TLSA and f == 'cert':
- val = dns.rdata._hexify(rdata.cert).replace(' ', '')
-
-
- rd[f] = val
-
- return rd
-
-# ==============================================================
-# dig: Lookup DNS records
-#
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dig): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- '''
- terms contains a string with things to `dig' for. We support the
- following formats:
- example.com # A record
- example.com qtype=A # same
- example.com/TXT # specific qtype
- example.com qtype=txt # same
- 192.168.1.2/PTR # reverse PTR
- ^^ shortcut for 2.1.168.192.in-addr.arpa/PTR
- example.net/AAAA @nameserver # query specified server
- ^^^ can be comma-sep list of names/addresses
-
- ... flat=0 # returns a dict; default is 1 == string
- '''
- terms = terms.split()
-
- # Create Resolver object so that we can set NS if necessary
- myres = dns.resolver.Resolver()
- edns_size = 4096
- myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
-
- domain = None
- qtype = 'A'
- flat = True
-
- for t in terms:
- if t.startswith('@'): # e.g. "@10.0.1.2,192.168.1.1" is ok.
- nsset = t[1:].split(',')
- nameservers = []
- for ns in nsset:
- # Check if we have a valid IP address. If so, use that, otherwise
- # try to resolve name to address using system's resolver. If that
- # fails we bail out.
- try:
- socket.inet_aton(ns)
- nameservers.append(ns)
- except:
- try:
- nsaddr = dns.resolver.query(ns)[0].address
- nameservers.append(nsaddr)
- except Exception, e:
- raise errors.AnsibleError("dns lookup NS: ", str(e))
- myres.nameservers = nameservers
- continue
- if '=' in t:
- try:
- opt, arg = t.split('=')
- except:
- pass
-
- if opt == 'qtype':
- qtype = arg.upper()
- elif opt == 'flat':
- flat = int(arg)
-
- continue
-
- if '/' in t:
- try:
- domain, qtype = t.split('/')
- except:
- domain = t
- else:
- domain = t
-
- # print "--- domain = {0} qtype={1}".format(domain, qtype)
-
- ret = []
-
- if qtype.upper() == 'PTR':
- try:
- n = dns.reversename.from_address(domain)
- domain = n.to_text()
- except dns.exception.SyntaxError:
- pass
- except Exception, e:
- raise errors.AnsibleError("dns.reversename unhandled exception", str(e))
-
- try:
- answers = myres.query(domain, qtype)
- for rdata in answers:
- s = rdata.to_text()
- if qtype.upper() == 'TXT':
- s = s[1:-1] # Strip outside quotes on TXT rdata
-
- if flat:
- ret.append(s)
- else:
- try:
- rd = make_rdata_dict(rdata)
- rd['owner'] = answers.canonical_name.to_text()
- rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
- rd['ttl'] = answers.rrset.ttl
-
- ret.append(rd)
- except Exception, e:
- ret.append(str(e))
-
- except dns.resolver.NXDOMAIN:
- ret.append('NXDOMAIN')
- except dns.resolver.NoAnswer:
- ret.append("")
- except dns.resolver.Timeout:
- ret.append('')
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
-
- return ret
diff --git a/lib/ansible/runner/lookup_plugins/inventory_hostnames.py b/lib/ansible/runner/lookup_plugins/inventory_hostnames.py
deleted file mode 100644
index 98523e1398..0000000000
--- a/lib/ansible/runner/lookup_plugins/inventory_hostnames.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Steven Dossett <sdossett@panath.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-import ansible.inventory as inventory
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- if 'runner' in kwargs:
- self.host_list = kwargs['runner'].inventory.host_list
- else:
- raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_inventory_hostnames expects a list")
- return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
-
diff --git a/lib/ansible/runner/lookup_plugins/nested.py b/lib/ansible/runner/lookup_plugins/nested.py
deleted file mode 100644
index 29c4a7d21c..0000000000
--- a/lib/ansible/runner/lookup_plugins/nested.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-def combine(a,b):
- results = []
- for x in a:
- for y in b:
- results.append(flatten([x,y]))
- return results
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
- # this code is common with 'items.py' consider moving to utils if we need it again
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- my_list.reverse()
- result = []
- if len(my_list) == 0:
- raise errors.AnsibleError("with_nested requires at least one element in the nested list")
- result = my_list.pop()
- while len(my_list) > 0:
- result2 = combine(result, my_list.pop())
- result = result2
- new_result = []
- for x in result:
- new_result.append(flatten(x))
- return new_result
-
-
diff --git a/lib/ansible/runner/poller.py b/lib/ansible/runner/poller.py
deleted file mode 100644
index 0218481415..0000000000
--- a/lib/ansible/runner/poller.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import time
-
-from ansible import errors
-
-class AsyncPoller(object):
- """ Manage asynchronous jobs. """
-
- def __init__(self, results, runner):
- self.runner = runner
-
- self.results = { 'contacted': {}, 'dark': {}}
- self.hosts_to_poll = []
- self.completed = False
-
- # flag to determine if at least one host was contacted
- self.active = False
- # True to work with the `and` below
- skipped = True
- jid = None
- for (host, res) in results['contacted'].iteritems():
- if res.get('started', False):
- self.hosts_to_poll.append(host)
- jid = res.get('ansible_job_id', None)
- self.runner.vars_cache[host]['ansible_job_id'] = jid
- self.active = True
- else:
- skipped = skipped and res.get('skipped', False)
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['contacted'][host] = res
- for (host, res) in results['dark'].iteritems():
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['dark'][host] = res
-
- if not skipped:
- if jid is None:
- raise errors.AnsibleError("unexpected error: unable to determine jid")
- if len(self.hosts_to_poll)==0:
- raise errors.AnsibleError("unexpected error: no hosts to poll")
-
- def poll(self):
- """ Poll the job status.
-
- Returns the changes in this iteration."""
- self.runner.module_name = 'async_status'
- self.runner.module_args = "jid={{ansible_job_id}}"
- self.runner.pattern = "*"
- self.runner.background = 0
- self.runner.complex_args = None
-
- self.runner.inventory.restrict_to(self.hosts_to_poll)
- results = self.runner.run()
- self.runner.inventory.lift_restriction()
-
- hosts = []
- poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
- for (host, res) in results['contacted'].iteritems():
- if res.get('started',False):
- hosts.append(host)
- poll_results['polled'][host] = res
- else:
- self.results['contacted'][host] = res
- poll_results['contacted'][host] = res
- if res.get('failed', False) or res.get('rc', 0) != 0:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- else:
- self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- for (host, res) in results['dark'].iteritems():
- self.results['dark'][host] = res
- poll_results['dark'][host] = res
- if host in self.hosts_to_poll:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
-
- self.hosts_to_poll = hosts
- if len(hosts)==0:
- self.completed = True
-
- return poll_results
-
- def wait(self, seconds, poll_interval):
- """ Wait a certain time for job completion, check status every poll_interval. """
- # jid is None when all hosts were skipped
- if not self.active:
- return self.results
-
- clock = seconds - poll_interval
- while (clock >= 0 and not self.completed):
- time.sleep(poll_interval)
-
- poll_results = self.poll()
-
- for (host, res) in poll_results['polled'].iteritems():
- if res.get('started'):
- self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
-
- clock = clock - poll_interval
-
- return self.results
diff --git a/lib/ansible/runner/return_data.py b/lib/ansible/runner/return_data.py
deleted file mode 100644
index 8cee506fde..0000000000
--- a/lib/ansible/runner/return_data.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-
-class ReturnData(object):
- ''' internal return class for runner execute methods, not part of public API signature '''
-
- __slots__ = [ 'result', 'comm_ok', 'host', 'diff' ]
-
- def __init__(self, conn=None, host=None, result=None,
- comm_ok=True, diff=dict()):
-
- # which host is this ReturnData about?
- if conn is not None:
- self.host = conn.host
- delegate = getattr(conn, 'delegate', None)
- if delegate is not None:
- self.host = delegate
-
- else:
- self.host = host
-
- self.result = result
- self.comm_ok = comm_ok
-
- # if these values are set and used with --diff we can show
- # changes made to particular files
- self.diff = diff
-
- if type(self.result) in [ str, unicode ]:
- self.result = utils.parse_json(self.result, from_remote=True, no_exceptions=True)
-
- if self.host is None:
- raise Exception("host not set")
- if type(self.result) != dict:
- raise Exception("dictionary result expected")
-
- def communicated_ok(self):
- return self.comm_ok
-
- def is_successful(self):
- return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
-
diff --git a/lib/ansible/runner/shell_plugins/__init__.py b/lib/ansible/runner/shell_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/runner/shell_plugins/__init__.py
+++ /dev/null
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
new file mode 100644
index 0000000000..19e091b9b2
--- /dev/null
+++ b/lib/ansible/template/__init__.py
@@ -0,0 +1,295 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+from jinja2 import Environment
+from jinja2.exceptions import TemplateSyntaxError, UndefinedError
+from jinja2.utils import concat as j2_concat
+from jinja2.runtime import StrictUndefined
+
+from ansible import constants as C
+from ansible.errors import AnsibleError, AnsibleFilterError, AnsibleUndefinedVariable
+from ansible.plugins import filter_loader, lookup_loader
+from ansible.template.safe_eval import safe_eval
+from ansible.template.template import AnsibleJ2Template
+from ansible.template.vars import AnsibleJ2Vars
+from ansible.utils.debug import debug
+
+from numbers import Number
+
+__all__ = ['Templar']
+
+# A regex for checking to see if a variable we're trying to
+# expand is just a single variable name.
+SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
+
+# Primitive Types which we don't want Jinja to convert to strings.
+NON_TEMPLATED_TYPES = ( bool, Number )
+
+JINJA2_OVERRIDE = '#jinja2:'
+JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
+
+class Templar:
+ '''
+ The main class for templating, with the main entry-point of template().
+ '''
+
+ def __init__(self, loader, shared_loader_obj=None, variables=dict(), fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR):
+ self._loader = loader
+ self._basedir = loader.get_basedir()
+ self._filters = None
+ self._available_variables = variables
+
+ if shared_loader_obj:
+ self._filter_loader = getattr(shared_loader_obj, 'filter_loader')
+ self._lookup_loader = getattr(shared_loader_obj, 'lookup_loader')
+ else:
+ self._filter_loader = filter_loader
+ self._lookup_loader = lookup_loader
+
+ # flags to determine whether certain failures during templating
+ # should result in fatal errors being raised
+ self._fail_on_lookup_errors = True
+ self._fail_on_filter_errors = True
+ self._fail_on_undefined_errors = fail_on_undefined
+
+ def _count_newlines_from_end(self, in_str):
+ '''
+ Counts the number of newlines at the end of a string. This is used during
+ the jinja2 templating to ensure the count matches the input, since some newlines
+ may be thrown away during the templating.
+ '''
+
+ i = len(in_str)
+ while i > 0:
+ if in_str[i-1] != '\n':
+ break
+ i -= 1
+
+ return len(in_str) - i
+
+ def _get_filters(self):
+ '''
+ Returns filter plugins, after loading and caching them if need be
+ '''
+
+ if self._filters is not None:
+ return self._filters.copy()
+
+ plugins = [x for x in self._filter_loader.all()]
+
+ self._filters = dict()
+ for fp in plugins:
+ self._filters.update(fp.filters())
+
+ return self._filters.copy()
+
+ def _get_extensions(self):
+ '''
+ Return jinja2 extensions to load.
+
+ If some extensions are set via jinja_extensions in ansible.cfg, we try
+ to load them with the jinja environment.
+ '''
+
+ jinja_exts = []
+ if C.DEFAULT_JINJA2_EXTENSIONS:
+ # make sure the configuration directive doesn't contain spaces
+ # and split extensions in an array
+ jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
+
+ return jinja_exts
+
+ def set_available_variables(self, variables):
+ '''
+ Sets the list of template variables this Templar instance will use
+ to template things, so we don't have to pass them around between
+ internal methods.
+ '''
+
+ assert isinstance(variables, dict)
+ self._available_variables = variables.copy()
+
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=False):
+ '''
+ Templates (possibly recursively) any given data as input. If convert_bare is
+ set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
+ before being sent through the template engine.
+ '''
+
+ try:
+ if convert_bare:
+ variable = self._convert_bare_variable(variable)
+
+ if isinstance(variable, basestring):
+ result = variable
+ if self._contains_vars(variable):
+
+ # Check to see if the string we are trying to render is just referencing a single
+ # var. In this case we don't want to accidentally change the type of the variable
+ # to a string by using the jinja template renderer. We just want to pass it.
+ only_one = SINGLE_VAR.match(variable)
+ if only_one:
+ var_name = only_one.group(1)
+ if var_name in self._available_variables:
+ resolved_val = self._available_variables[var_name]
+ if isinstance(resolved_val, NON_TEMPLATED_TYPES):
+ return resolved_val
+
+ result = self._do_template(variable, preserve_trailing_newlines=preserve_trailing_newlines)
+
+ # if this looks like a dictionary or list, convert it to such using the safe_eval method
+ if (result.startswith("{") and not result.startswith("{{")) or result.startswith("["):
+ eval_results = safe_eval(result, locals=self._available_variables, include_exceptions=True)
+ if eval_results[1] is None:
+ result = eval_results[0]
+ else:
+ # FIXME: if the safe_eval raised an error, should we do something with it?
+ pass
+
+ return result
+
+ elif isinstance(variable, (list, tuple)):
+ return [self.template(v, convert_bare=convert_bare) for v in variable]
+ elif isinstance(variable, dict):
+ d = {}
+ for (k, v) in variable.iteritems():
+ d[k] = self.template(v, convert_bare=convert_bare)
+ return d
+ else:
+ return variable
+
+ except AnsibleFilterError:
+ if self._fail_on_filter_errors:
+ raise
+ else:
+ return variable
+
+ def _contains_vars(self, data):
+ '''
+ returns True if the data contains a variable pattern
+ '''
+ return "$" in data or "{{" in data or '{%' in data
+
+ def _convert_bare_variable(self, variable):
+ '''
+        Wraps a bare string, which may have an attribute portion (i.e. foo.bar)
+ in jinja2 variable braces so that it is evaluated properly.
+ '''
+
+ if isinstance(variable, basestring):
+ first_part = variable.split(".")[0].split("[")[0]
+ if first_part in self._available_variables and '{{' not in variable and '$' not in variable:
+ return "{{%s}}" % variable
+
+ # the variable didn't meet the conditions to be converted,
+ # so just return it as-is
+ return variable
+
+ def _finalize(self, thing):
+ '''
+ A custom finalize method for jinja2, which prevents None from being returned
+ '''
+ return thing if thing is not None else ''
+
+ def _lookup(self, name, *args, **kwargs):
+ instance = self._lookup_loader.get(name.lower(), loader=self._loader)
+
+ if instance is not None:
+ # safely catch run failures per #5059
+ try:
+ ran = instance.run(*args, variables=self._available_variables, **kwargs)
+ except AnsibleUndefinedVariable:
+ raise
+ except Exception, e:
+ if self._fail_on_lookup_errors:
+ raise
+ ran = None
+ if ran:
+ ran = ",".join(ran)
+ return ran
+ else:
+ raise AnsibleError("lookup plugin (%s) not found" % name)
+
+ def _do_template(self, data, preserve_trailing_newlines=False):
+
+ try:
+
+ environment = Environment(trim_blocks=True, undefined=StrictUndefined, extensions=self._get_extensions(), finalize=self._finalize)
+ environment.filters.update(self._get_filters())
+ environment.template_class = AnsibleJ2Template
+
+ # FIXME: may not be required anymore, as the basedir stuff will
+ # be handled by the loader?
+ #if '_original_file' in vars:
+ # basedir = os.path.dirname(vars['_original_file'])
+ # filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
+ # if os.path.exists(filesdir):
+ # basedir = filesdir
+
+ try:
+ t = environment.from_string(data)
+ except TemplateSyntaxError, e:
+ raise AnsibleError("template error while templating string: %s" % str(e))
+ except Exception, e:
+ if 'recursion' in str(e):
+ raise AnsibleError("recursive loop detected in template string: %s" % data)
+ else:
+ return data
+
+ t.globals['lookup'] = self._lookup
+ t.globals['finalize'] = self._finalize
+
+ jvars = AnsibleJ2Vars(self, t.globals)
+
+ new_context = t.new_context(jvars, shared=True)
+ rf = t.root_render_func(new_context)
+
+ try:
+ res = j2_concat(rf)
+ except TypeError, te:
+ if 'StrictUndefined' in str(te):
+ raise AnsibleUndefinedVariable(
+ "Unable to look up a name or access an attribute in template string. " + \
+ "Make sure your variable name does not contain invalid characters like '-'."
+ )
+ else:
+ debug("failing because of a type error, template data is: %s" % data)
+ raise AnsibleError("an unexpected type error occurred. Error was %s" % te)
+
+ if preserve_trailing_newlines:
+ # The low level calls above do not preserve the newline
+            # characters at the end of the input data, so we
+            # calculate the difference in newlines and append them
+ # to the resulting output for parity
+ res_newlines = self._count_newlines_from_end(res)
+ data_newlines = self._count_newlines_from_end(data)
+ if data_newlines > res_newlines:
+ res += '\n' * (data_newlines - res_newlines)
+
+ return res
+ except (UndefinedError, AnsibleUndefinedVariable), e:
+ if self._fail_on_undefined_errors:
+ raise
+ else:
+ return data
+
diff --git a/lib/ansible/template/safe_eval.py b/lib/ansible/template/safe_eval.py
new file mode 100644
index 0000000000..2689949504
--- /dev/null
+++ b/lib/ansible/template/safe_eval.py
@@ -0,0 +1,122 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import ast
+import sys
+
+from six.moves import builtins
+
+from ansible import constants as C
+from ansible.plugins import filter_loader
+
+def safe_eval(expr, locals={}, include_exceptions=False):
+ '''
+ This is intended for allowing things like:
+ with_items: a_list_variable
+
+ Where Jinja2 would return a string but we do not want to allow it to
+ call functions (outside of Jinja2, where the env is constrained). If
+ the input data to this function came from an untrusted (remote) source,
+ it should first be run through _clean_data_struct() to ensure the data
+ is further sanitized prior to evaluation.
+
+ Based on:
+ http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
+ '''
+
+ # this is the whitelist of AST nodes we are going to
+ # allow in the evaluation. Any node type other than
+ # those listed here will raise an exception in our custom
+ # visitor class defined below.
+ SAFE_NODES = set(
+ (
+ ast.Add,
+ ast.BinOp,
+ ast.Call,
+ ast.Compare,
+ ast.Dict,
+ ast.Div,
+ ast.Expression,
+ ast.List,
+ ast.Load,
+ ast.Mult,
+ ast.Num,
+ ast.Name,
+ ast.Str,
+ ast.Sub,
+ ast.Tuple,
+ ast.UnaryOp,
+ )
+ )
+
+ # AST node types were expanded after 2.6
+ if not sys.version.startswith('2.6'):
+ SAFE_NODES.union(
+ set(
+ (ast.Set,)
+ )
+ )
+
+ filter_list = []
+ for filter in filter_loader.all():
+ filter_list.extend(filter.filters().keys())
+
+ CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
+
+ class CleansingNodeVisitor(ast.NodeVisitor):
+ def generic_visit(self, node, inside_call=False):
+ if type(node) not in SAFE_NODES:
+ raise Exception("invalid expression (%s)" % expr)
+ elif isinstance(node, ast.Call):
+ inside_call = True
+ elif isinstance(node, ast.Name) and inside_call:
+ if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
+ raise Exception("invalid function: %s" % node.id)
+ # iterate over all child nodes
+ for child_node in ast.iter_child_nodes(node):
+ self.generic_visit(child_node, inside_call)
+
+ if not isinstance(expr, basestring):
+ # already templated to a datastructure, perhaps?
+ if include_exceptions:
+ return (expr, None)
+ return expr
+
+ cnv = CleansingNodeVisitor()
+ try:
+ parsed_tree = ast.parse(expr, mode='eval')
+ cnv.visit(parsed_tree)
+ compiled = compile(parsed_tree, expr, 'eval')
+ result = eval(compiled, {}, locals)
+
+ if include_exceptions:
+ return (result, None)
+ else:
+ return result
+ except SyntaxError as e:
+ # special handling for syntax errors, we just return
+ # the expression string back as-is
+ if include_exceptions:
+ return (expr, None)
+ return expr
+ except Exception as e:
+ if include_exceptions:
+ return (expr, e)
+ return expr
+
diff --git a/lib/ansible/runner/lookup_plugins/items.py b/lib/ansible/template/template.py
index 85e77d5380..a111bec0a5 100644
--- a/lib/ansible/runner/lookup_plugins/items.py
+++ b/lib/ansible/template/template.py
@@ -15,30 +15,23 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
+import jinja2
-class LookupModule(object):
+__all__ = ['AnsibleJ2Template']
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list) and not isinstance(terms,set):
- raise errors.AnsibleError("with_items expects a list or a set")
-
- return flatten(terms)
+class AnsibleJ2Template(jinja2.environment.Template):
+ '''
+ A helper class, which prevents Jinja2 from running _jinja2_vars through dict().
+ Without this, {% include %} and similar will create new contexts unlike the special
+ one created in template_from_file. This ensures they are all alike, except for
+ potential locals.
+ '''
+ def new_context(self, vars=None, shared=False, locals=None):
+ return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
diff --git a/lib/ansible/template/vars.py b/lib/ansible/template/vars.py
new file mode 100644
index 0000000000..3c0bb61ecb
--- /dev/null
+++ b/lib/ansible/template/vars.py
@@ -0,0 +1,88 @@
+# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+__all__ = ['AnsibleJ2Vars']
+
+
+class AnsibleJ2Vars:
+    '''
+    Helper class to template all variable content before jinja2 sees it. This is
+    done by hijacking the variable storage that jinja2 uses, and overriding __contains__
+    and __getitem__ to look like a dict, which avoids duplicating the (potentially
+    large) variable hashes up front.
+
+    To facilitate using builtin jinja2 things like range, globals are also handled here.
+    '''
+
+    def __init__(self, templar, globals, *extras):
+        '''
+        Initializes this object with a valid Templar() object, as
+        well as several dictionaries of variables representing
+        different scopes (in jinja2 terminology).
+        '''
+
+        self._templar = templar    # Templar used to lazily render values in __getitem__
+        self._globals = globals    # jinja2 globals (builtins such as range)
+        self._extras = extras      # extra variable dicts, consulted before globals
+
+    def __contains__(self, k):  # dict-style membership: templar vars, then extras, then globals
+        if k in self._templar._available_variables:
+            return True
+        for i in self._extras:
+            if k in i:
+                return True
+        if k in self._globals:
+            return True
+        return False
+
+    def __getitem__(self, varname):  # dict-style lookup; lookup order mirrors __contains__
+        # FIXME: are we still going to need HostVars?
+        #from ansible.runner import HostVars
+
+        if varname not in self._templar._available_variables:
+            for i in self._extras:
+                if varname in i:
+                    return i[varname]
+            if varname in self._globals:
+                return self._globals[varname]
+            else:
+                raise KeyError("undefined variable: %s" % varname)
+
+        variable = self._templar._available_variables[varname]
+
+        # HostVars is special, return it as-is, as is the special variable
+        # 'vars', which contains the vars structure
+        from ansible.vars.hostvars import HostVars  # imported here, presumably to avoid a circular import -- confirm
+        if isinstance(variable, dict) and varname == "vars" or isinstance(variable, HostVars):  # parses as (dict AND "vars") OR HostVars
+            return variable
+        else:
+            return self._templar.template(variable)  # everything else is templated lazily, on access
+
+    def add_locals(self, locals):
+        '''
+        If locals are provided, create a copy of self containing those
+        locals in addition to what is already in this variable proxy.
+        '''
+        if locals is None:
+            return self
+        return AnsibleJ2Vars(self._templar, self._globals, locals, *self._extras)
+
diff --git a/lib/ansible/test-requirements.txt b/lib/ansible/test-requirements.txt
new file mode 100644
index 0000000000..e4822ada64
--- /dev/null
+++ b/lib/ansible/test-requirements.txt
@@ -0,0 +1,16 @@
+# Ansible requirements
+paramiko
+PyYAML
+jinja2
+httplib2
+passlib
+six
+
+# These are needed for various optional features
+#python-memcached
+#redis
+
+# Test requirements
+unittest2
+mock
+nose
diff --git a/lib/ansible/utils/__init__.py b/lib/ansible/utils/__init__.py
index 7ed07a54c8..ae8ccff595 100644
--- a/lib/ansible/utils/__init__.py
+++ b/lib/ansible/utils/__init__.py
@@ -15,1646 +15,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import errno
-import sys
-import re
-import os
-import shlex
-import yaml
-import copy
-import optparse
-import operator
-from ansible import errors
-from ansible import __version__
-from ansible.utils.display_functions import *
-from ansible.utils.plugins import *
-from ansible.utils.su_prompts import *
-from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
-from ansible.callbacks import display
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils.unicode import to_bytes, to_unicode
-import ansible.constants as C
-import ast
-import time
-import StringIO
-import stat
-import termios
-import tty
-import pipes
-import random
-import difflib
-import warnings
-import traceback
-import getpass
-import sys
-import subprocess
-import contextlib
-
-from vault import VaultLib
-
-VERBOSITY=0
-
-MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
-
-# caching the compilation of the regex used
-# to check for lookup calls within data
-LOOKUP_REGEX = re.compile(r'lookup\s*\(')
-PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
-CODE_REGEX = re.compile(r'(?:{%|%})')
-
-
-try:
- # simplejson can be much faster if it's available
- import simplejson as json
-except ImportError:
- import json
-
-try:
- from yaml import CSafeLoader as Loader
-except ImportError:
- from yaml import SafeLoader as Loader
-
-PASSLIB_AVAILABLE = False
-try:
- import passlib.hash
- PASSLIB_AVAILABLE = True
-except:
- pass
-
-try:
- import builtin
-except ImportError:
- import __builtin__ as builtin
-
-KEYCZAR_AVAILABLE=False
-try:
- try:
- # some versions of pycrypto may not have this?
- from Crypto.pct_warnings import PowmInsecureWarning
- except ImportError:
- PowmInsecureWarning = RuntimeWarning
-
- with warnings.catch_warnings(record=True) as warning_handler:
- warnings.simplefilter("error", PowmInsecureWarning)
- try:
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- except PowmInsecureWarning:
- system_warning(
- "The version of gmp you have installed has a known issue regarding " + \
- "timing vulnerabilities when used with pycrypto. " + \
- "If possible, you should update it (i.e. yum update gmp)."
- )
- warnings.resetwarnings()
- warnings.simplefilter("ignore")
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- KEYCZAR_AVAILABLE=True
-except ImportError:
- pass
-
-
-###############################################################
-# Abstractions around keyczar
-###############################################################
-
-def key_for_hostname(hostname):
- # fireball mode is an implementation of ansible firing up zeromq via SSH
- # to use no persistent daemons or key management
-
- if not KEYCZAR_AVAILABLE:
- raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
-
- key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
- if not os.path.exists(key_path):
- os.makedirs(key_path, mode=0700)
- os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
- elif not os.path.isdir(key_path):
- raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
-
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
-
- key_path = os.path.join(key_path, hostname)
-
- # use new AES keys every 2 hours, which means fireball must not allow running for longer either
- if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
- key = AesKey.Generate()
- fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
- fh = os.fdopen(fd, 'w')
- fh.write(str(key))
- fh.close()
- return key
- else:
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
- fh = open(key_path)
- key = AesKey.Read(fh.read())
- fh.close()
- return key
-
-def encrypt(key, msg):
- return key.Encrypt(msg)
-
-def decrypt(key, msg):
- try:
- return key.Decrypt(msg)
- except key_errors.InvalidSignatureError:
- raise errors.AnsibleError("decryption failed")
-
-###############################################################
-# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
-###############################################################
-
-def read_vault_file(vault_password_file):
- """Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
- if vault_password_file:
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError), e:
- raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
-
- return vault_pass
- else:
- return None
-
-def err(msg):
- ''' print an error message to stderr '''
-
- print >> sys.stderr, msg
-
-def exit(msg, rc=1):
- ''' quit with an error to stdout and a failure code '''
-
- err(msg)
- sys.exit(rc)
-
-def jsonify(result, format=False):
- ''' format JSON output (uncompressed or uncompressed) '''
-
- if result is None:
- return "{}"
- result2 = result.copy()
- for key, value in result2.items():
- if type(value) is str:
- result2[key] = value.decode('utf-8', 'ignore')
-
- indent = None
- if format:
- indent = 4
-
- try:
- return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
- except UnicodeDecodeError:
- return json.dumps(result2, sort_keys=True, indent=indent)
-
-def write_tree_file(tree, hostname, buf):
- ''' write something into treedir/hostname '''
-
- # TODO: might be nice to append playbook runs per host in a similar way
- # in which case, we'd want append mode.
- path = os.path.join(tree, hostname)
- fd = open(path, "w+")
- fd.write(buf)
- fd.close()
-
-def is_failed(result):
- ''' is a given JSON result a failed result? '''
-
- return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
-
-def is_changed(result):
- ''' is a given JSON result a changed result? '''
-
- return (result.get('changed', False) in [ True, 'True', 'true'])
-
-def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
- from ansible.utils import template
-
- if conditional is None or conditional == '':
- return True
-
- if isinstance(conditional, list):
- for x in conditional:
- if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
- return False
- return True
-
- if not isinstance(conditional, basestring):
- return conditional
-
- conditional = conditional.replace("jinja2_compare ","")
- # allow variable names
- if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
- conditional = to_unicode(inject[conditional], nonstring='simplerepr')
- conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = template.template(basedir, presented, inject)
- val = conditional.strip()
- if val == presented:
- # the templating failed, meaning most likely a
- # variable was undefined. If we happened to be
- # looking for an undefined variable, return True,
- # otherwise fail
- if "is undefined" in conditional:
- return True
- elif "is defined" in conditional:
- return False
- else:
- raise errors.AnsibleError("error while evaluating conditional: %s" % original)
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
-def unfrackpath(path):
- '''
- returns a path that is free of symlinks, environment
- variables, relative path traversals and symbols (~)
- example:
- '$HOME/../../var/mail' becomes '/var/spool/mail'
- '''
- return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
-
-def prepare_writeable_dir(tree,mode=0777):
- ''' make sure a directory exists and is writeable '''
-
- # modify the mode to ensure the owner at least
- # has read/write access to this directory
- mode |= 0700
-
- # make sure the tree path is always expanded
- # and normalized and free of symlinks
- tree = unfrackpath(tree)
-
- if not os.path.exists(tree):
- try:
- os.makedirs(tree, mode)
- except (IOError, OSError), e:
- raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
- if not os.access(tree, os.W_OK):
- raise errors.AnsibleError("Cannot write to path %s" % tree)
- return tree
-
-def path_dwim(basedir, given):
- '''
- make relative paths work like folks expect.
- '''
-
- if given.startswith("'"):
- given = given[1:-1]
-
- if given.startswith("/"):
- return os.path.abspath(given)
- elif given.startswith("~"):
- return os.path.abspath(os.path.expanduser(given))
- else:
- if basedir is None:
- basedir = "."
- return os.path.abspath(os.path.join(basedir, given))
-
-def path_dwim_relative(original, dirname, source, playbook_base, check=True):
- ''' find one file in a directory one level up in a dir named dirname relative to current '''
- # (used by roles code)
-
- from ansible.utils import template
-
-
- basedir = os.path.dirname(original)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
- else:
- template2 = os.path.join(basedir, '..', dirname, source)
- source2 = path_dwim(basedir, template2)
- if os.path.exists(source2):
- return source2
- obvious_local_path = path_dwim(playbook_base, source)
- if os.path.exists(obvious_local_path):
- return obvious_local_path
- if check:
- raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
- return source2 # which does not exist
-
-def repo_url_to_role_name(repo_url):
- # gets the role name out of a repo like
- # http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
-
-def role_spec_parse(role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
- # and returns a list of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- role_spec = role_spec.strip()
- role_version = ''
- default_role_versions = dict(git='master', hg='tip')
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
- if len(tokens) >= 2:
- role_version = tokens[1]
- if len(tokens) == 3:
- role_name = tokens[2]
- else:
- role_name = repo_url_to_role_name(tokens[0])
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
- return dict(scm=scm, src=role_url, version=role_version, name=role_name)
-
-
-def role_yaml_parse(role):
- if 'role' in role:
- # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = role_spec_parse(role['role'])
- if isinstance(role_info, dict):
- # Warning: Slight change in behaviour here. name may be being
- # overloaded. Previously, name was only a parameter to the role.
- # Now it is both a parameter to the role and the name that
- # ansible-galaxy will install under on the local system.
- if 'name' in role and 'name' in role_info:
- del role_info['name']
- role.update(role_info)
- else:
- # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
- if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
- role["src"] = "git+" + role["src"]
-
- if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
-
- if 'name' not in role:
- role["name"] = repo_url_to_role_name(role["src"])
-
- if 'version' not in role:
- role['version'] = ''
-
- if 'scm' not in role:
- role['scm'] = None
-
- return role
-
-
-def json_loads(data):
- ''' parse a JSON string and return a data structure '''
- try:
- loaded = json.loads(data)
- except ValueError,e:
- raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
-
- return loaded
-
-def _clean_data(orig_data, from_remote=False, from_inventory=False):
- ''' remove jinja2 template tags from a string '''
-
- if not isinstance(orig_data, basestring):
- return orig_data
-
- # when the data is marked as having come from a remote, we always
- # replace any print blocks (ie. {{var}}), however when marked as coming
- # from inventory we only replace print blocks that contain a call to
- # a lookup plugin (ie. {{lookup('foo','bar'))}})
- replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
-
- regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
-
- with contextlib.closing(StringIO.StringIO(orig_data)) as data:
- # these variables keep track of opening block locations, as we only
- # want to replace matched pairs of print/block tags
- print_openings = []
- block_openings = []
- for mo in regex.finditer(orig_data):
- token = mo.group(0)
- token_start = mo.start(0)
-
- if token[0] == '{':
- if token == '{%':
- block_openings.append(token_start)
- elif token == '{{':
- print_openings.append(token_start)
-
- elif token[1] == '}':
- prev_idx = None
- if token == '%}' and block_openings:
- prev_idx = block_openings.pop()
- elif token == '}}' and print_openings:
- prev_idx = print_openings.pop()
-
- if prev_idx is not None:
- # replace the opening
- data.seek(prev_idx, os.SEEK_SET)
- data.write('{#')
- # replace the closing
- data.seek(token_start, os.SEEK_SET)
- data.write('#}')
-
- else:
- assert False, 'Unhandled regex match'
-
- return data.getvalue()
-
-def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
- '''
- walk a complex data structure, and use _clean_data() to
- remove any template tags that may exist
- '''
- if not from_remote and not from_inventory:
- raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
- if isinstance(orig_data, dict):
- data = orig_data.copy()
- for key in data:
- new_key = _clean_data_struct(key, from_remote, from_inventory)
- new_val = _clean_data_struct(data[key], from_remote, from_inventory)
- if key != new_key:
- del data[key]
- data[new_key] = new_val
- elif isinstance(orig_data, list):
- data = orig_data[:]
- for i in range(0, len(data)):
- data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
- elif isinstance(orig_data, basestring):
- data = _clean_data(orig_data, from_remote, from_inventory)
- else:
- data = orig_data
- return data
-
-def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
- ''' this version for module return data only '''
-
- orig_data = raw_data
-
- # ignore stuff like tcgetattr spewage or other warnings
- data = filter_leading_non_json_lines(raw_data)
-
- try:
- results = json.loads(data)
- except:
- if no_exceptions:
- return dict(failed=True, parsed=False, msg=raw_data)
- else:
- raise
-
- if from_remote:
- results = _clean_data_struct(results, from_remote, from_inventory)
-
- return results
-
-def serialize_args(args):
- '''
- Flattens a dictionary args to a k=v string
- '''
- module_args = ""
- for (k,v) in args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- elif isinstance(v, bool):
- module_args = "%s=%s %s" % (k, str(v), module_args)
- return module_args.strip()
-
-def merge_module_args(current_args, new_args):
- '''
- merges either a dictionary or string of k=v pairs with another string of k=v pairs,
- and returns a new k=v string without duplicates.
- '''
- if not isinstance(current_args, basestring):
- raise errors.AnsibleError("expected current_args to be a basestring")
- # we use parse_kv to split up the current args into a dictionary
- final_args = parse_kv(current_args)
- if isinstance(new_args, dict):
- final_args.update(new_args)
- elif isinstance(new_args, basestring):
- new_args_kv = parse_kv(new_args)
- final_args.update(new_args_kv)
- return serialize_args(final_args)
-
-def parse_yaml(data, path_hint=None):
- ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
-
- stripped_data = data.lstrip()
- loaded = None
- if stripped_data.startswith("{") or stripped_data.startswith("["):
- # since the line starts with { or [ we can infer this is a JSON document.
- try:
- loaded = json.loads(data)
- except ValueError, ve:
- if path_hint:
- raise errors.AnsibleError(path_hint + ": " + str(ve))
- else:
- raise errors.AnsibleError(str(ve))
- else:
- # else this is pretty sure to be a YAML document
- loaded = yaml.load(data, Loader=Loader)
-
- return loaded
-
-def process_common_errors(msg, probline, column):
- replaced = probline.replace(" ","")
-
- if ":{{" in replaced and "}}" in replaced:
- msg = msg + """
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
- return msg
-
- elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
- msg = msg + """
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is just add some quotes around the colon, or quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-
-
-"""
- return msg
- else:
- parts = probline.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
- if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
- unbalanced = True
- if match:
- msg = msg + """
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-or equivalently:
-
- when: "'ok' in result.stdout"
-
-"""
- return msg
-
- if unbalanced:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-
-"""
- return msg
-
- return msg
-
-def process_yaml_error(exc, data, path=None, show_content=True):
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- if show_content:
- if mark.line -1 >= 0:
- before_probline = data.split("\n")[mark.line-1]
- else:
- before_probline = ''
- probline = data.split("\n")[mark.line]
- arrow = " " * mark.column + "^"
- msg = """Syntax Error while loading YAML script, %s
-Note: The error may actually appear before this position: line %s, column %s
-
-%s
-%s
-%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
-
- unquoted_var = None
- if '{{' in probline and '}}' in probline:
- if '"{{' not in probline or "'{{" not in probline:
- unquoted_var = True
-
- if not unquoted_var:
- msg = process_common_errors(msg, probline, mark.column)
- else:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-
-"""
- else:
- # most likely displaying a file with sensitive content,
- # so don't show any of the actual lines of yaml just the
- # line number itself
- msg = """Syntax error while loading YAML script, %s
-The error appears to have been on line %s, column %s, but may actually
-be before there depending on the exact syntax problem.
-""" % (path, mark.line + 1, mark.column + 1)
-
- else:
- # No problem markers means we have to throw a generic
- # "stuff messed up" type message. Sry bud.
- if path:
- msg = "Could not parse YAML. Check over %s again." % path
- else:
- msg = "Could not parse YAML."
- raise errors.AnsibleYAMLValidationFailed(msg)
-
-
-def parse_yaml_from_file(path, vault_password=None):
- ''' convert a yaml file to a data structure '''
-
- data = None
- show_content = True
-
- try:
- data = open(path).read()
- except IOError:
- raise errors.AnsibleError("file could not read: %s" % path)
-
- vault = VaultLib(password=vault_password)
- if vault.is_encrypted(data):
- # if the file is encrypted and no password was specified,
- # the decrypt call would throw an error, but we check first
- # since the decrypt function doesn't know the file name
- if vault_password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
- data = vault.decrypt(data)
- show_content = False
-
- try:
- return parse_yaml(data, path_hint=path)
- except yaml.YAMLError, exc:
- process_yaml_error(exc, data, path, show_content)
-
-def parse_kv(args):
- ''' convert a string of key/value items to a dict '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except ValueError, ve:
- if 'no closing quotation' in str(ve).lower():
- raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
- else:
- raise
- for x in vargs:
- if "=" in x:
- k, v = x.split("=",1)
- options[k.strip()] = unquote(v.strip())
- return options
-
-def _validate_both_dicts(a, b):
-
- if not (isinstance(a, dict) and isinstance(b, dict)):
- raise errors.AnsibleError(
- "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
- )
-
-def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- # we check here as well as in combine_vars() since this
- # function can work recursively with nested dicts
- _validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
-def default(value, function):
- ''' syntactic sugar around lazy evaluation of defaults '''
- if value is None:
- return function()
- return value
-
-
-def _git_repo_info(repo_path):
- ''' returns a string containing git branch, commit id and commit date '''
- result = None
- if os.path.exists(repo_path):
- # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
- if os.path.isfile(repo_path):
- try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
- # There is a possibility the .git file to have an absolute path.
- if os.path.isabs(gitdir):
- repo_path = gitdir
- else:
- repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
- return ''
- f = open(os.path.join(repo_path, "HEAD"))
- branch = f.readline().split('/')[-1].rstrip("\n")
- f.close()
- branch_path = os.path.join(repo_path, "refs", "heads", branch)
- if os.path.exists(branch_path):
- f = open(branch_path)
- commit = f.readline()[:10]
- f.close()
- else:
- # detached HEAD
- commit = branch[:10]
- branch = 'detached HEAD'
- branch_path = os.path.join(repo_path, "HEAD")
-
- date = time.localtime(os.stat(branch_path).st_mtime)
- if time.daylight == 0:
- offset = time.timezone
- else:
- offset = time.altzone
- result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
- time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
- else:
- result = ''
- return result
-
-
-def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
- repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
- if not os.path.exists(submodules):
- return result
- f = open(submodules)
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
- submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- f.close()
- return result
-
-
-def version(prog):
- result = "{0} {1}".format(prog, __version__)
- gitinfo = _gitinfo()
- if gitinfo:
- result = result + " {0}".format(gitinfo)
- result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
- return result
-
-def version_info(gitinfo=False):
- if gitinfo:
- # expensive call, user with care
- ansible_version_string = version('')
- else:
- ansible_version_string = __version__
- ansible_version = ansible_version_string.split()[0]
- ansible_versions = ansible_version.split('.')
- for counter in range(len(ansible_versions)):
- if ansible_versions[counter] == "":
- ansible_versions[counter] = 0
- try:
- ansible_versions[counter] = int(ansible_versions[counter])
- except:
- pass
- if len(ansible_versions) < 3:
- for counter in range(len(ansible_versions), 3):
- ansible_versions.append(0)
- return {'string': ansible_version_string.strip(),
- 'full': ansible_version,
- 'major': ansible_versions[0],
- 'minor': ansible_versions[1],
- 'revision': ansible_versions[2]}
-
-def getch():
- ''' read in a single character '''
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- ch = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- return ch
-
def sanitize_output(arg_string):
    ''' strips private info out of a string

    Splits *arg_string* on whitespace; any key=value token whose key is a
    known credential name has its value replaced with VALUE_HIDDEN, every
    other token (or value) is run through heuristic_log_sanitize().
    '''

    private_keys = ('password', 'login_password')

    cleaned = []
    for token in arg_string.split():
        if '=' not in token:
            # not a key=value pair; sanitize the whole token
            cleaned.append(heuristic_log_sanitize(token))
            continue

        key, value = token.split('=', 1)
        if key in private_keys:
            value = 'VALUE_HIDDEN'
        else:
            value = heuristic_log_sanitize(value)
        cleaned.append('%s=%s' % (key, value))

    return ' '.join(cleaned)
-
-
-####################################################################
-# option handling code for /usr/bin/ansible and ansible-playbook
-# below this line
-
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    def format_help(self, formatter=None):
        # sort in place by each option's canonical opt string (e.g. "--foo")
        # so the generated --help output is alphabetized
        self.option_list.sort(key=lambda opt: opt.get_opt_string())
        return optparse.OptionParser.format_help(self, formatter=None)
-
def increment_debug(option, opt, value, parser):
    ''' optparse callback for -v: bump the module-level VERBOSITY counter.

    Repeated -v flags invoke this once each, so -vvv yields VERBOSITY == 3.
    The option/opt/value/parser parameters are required by the optparse
    callback signature but unused here.
    '''
    global VERBOSITY
    VERBOSITY += 1
-
def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
        async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
    ''' create an options parser for any ansible script

    Always installs the shared options (verbosity, forks, inventory,
    extra vars, remote user/auth, vault, --list-hosts, module path and
    -T/--timeout).  Each boolean flag gates an optional option group:

    output_opts  -- -o/--one-line and -t/--tree (adhoc output control)
    runas_opts   -- legacy sudo/su flags plus the consolidated become family
    async_opts   -- -B/--background and -P/--poll
    connect_opts -- -c/--connection transport selection
    subset_opts  -- -l/--limit host subsetting
    check_opts   -- -C/--check dry-run mode
    diff_opts    -- -D/--diff file diff output

    Returns a SortedOptParser so --help output is alphabetized.
    '''

    parser = SortedOptParser(usage, version=version("%prog"))
    parser.add_option('-v','--verbose', default=False, action="callback",
        callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")

    parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
        help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
    parser.add_option('-i', '--inventory-file', dest='inventory',
        help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
        default=constants.DEFAULT_HOST_LIST)
    parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
        help="set additional variables as key=value or YAML/JSON", default=[])
    parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
        help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
    parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
        help='ask for SSH password')
    parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
        help='use this file to authenticate the connection')
    parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
        help='ask for vault password')
    parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
        dest='vault_password_file', help="vault password file")
    parser.add_option('--list-hosts', dest='listhosts', action='store_true',
        help='outputs a list of matching hosts; does not execute anything else')
    parser.add_option('-M', '--module-path', dest='module_path',
        help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
        default=None)

    if subset_opts:
        parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
            help='further limit selected hosts to an additional pattern')

    # note: -T/--timeout is added unconditionally, outside the subset_opts guard
    parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
        dest='timeout',
        help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)

    if output_opts:
        parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
            help='condense output')
        parser.add_option('-t', '--tree', dest='tree', default=None,
            help='log output to this directory')

    if runas_opts:
        # priv user defaults to root later on to enable detecting when this option was given here
        parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
            help='ask for sudo password (deprecated, use become)')
        parser.add_option('--ask-su-pass', default=False, dest='ask_su_pass', action='store_true',
            help='ask for su password (deprecated, use become)')
        parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
            help="run operations with sudo (nopasswd) (deprecated, use become)")
        parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
            help='desired sudo user (default=root) (deprecated, use become)')
        parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
            help='run operations with su (deprecated, use become)')
        parser.add_option('-R', '--su-user', default=None,
            help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)

        # consolidated privilege escalation (become)
        parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
            help="run operations with become (nopasswd implied)")
        parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
            help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
        parser.add_option('--become-user', default=None, dest='become_user', type='string',
            help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
        parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
            help='ask for privilege escalation password')


    if connect_opts:
        parser.add_option('-c', '--connection', dest='connection',
            default=constants.DEFAULT_TRANSPORT,
            help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)

    if async_opts:
        parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
            dest='poll_interval',
            help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
        parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
            help='run asynchronously, failing after X seconds (default=N/A)')

    if check_opts:
        parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
            help="don't make any changes; instead, try to predict some of the changes that may occur"
        )

    if diff_opts:
        parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
            help="when changing (small) files and templates, show the differences in those files; works great with --check"
        )

    return parser
-
def parse_extra_vars(extra_vars_opts, vault_pass):
    ''' Merge a list of -e/--extra-vars option values into a single dict.

    Each entry may be "@file.yml" (load YAML/JSON from a file, possibly
    vault-encrypted), an inline YAML/JSON literal starting with '[' or
    '{', or a key=value string.  Later entries win according to
    combine_vars() semantics.
    '''
    merged = {}
    for raw_opt in extra_vars_opts:
        raw_opt = to_unicode(raw_opt)
        if raw_opt.startswith(u"@"):
            # argument is a YAML file (JSON is a subset of YAML)
            parsed = parse_yaml_from_file(raw_opt[1:], vault_password=vault_pass)
        elif raw_opt and raw_opt[0] in u'[{':
            # argument is an inline YAML/JSON literal
            parsed = parse_yaml(raw_opt)
        else:
            # argument is a key=value string
            parsed = parse_kv(raw_opt)
        merged = combine_vars(merged, parsed)
    return merged
-
def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
    ''' Interactively prompt for vault password(s) via getpass.

    :param ask_vault_pass: prompt for the current vault password
    :param ask_new_vault_pass: prompt for a new vault password (rekeying)
    :param confirm_vault: re-prompt and require the current password to match
    :param confirm_new: re-prompt and require the new password to match
    :returns: (vault_pass, new_vault_pass); each entry is None when not
        requested, otherwise a stripped byte string
    :raises errors.AnsibleError: when a confirmation prompt does not match
    '''

    vault_pass = None
    new_vault_pass = None

    if ask_vault_pass:
        vault_pass = getpass.getpass(prompt="Vault password: ")

    if ask_vault_pass and confirm_vault:
        vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
        if vault_pass != vault_pass2:
            raise errors.AnsibleError("Passwords do not match")

    if ask_new_vault_pass:
        new_vault_pass = getpass.getpass(prompt="New Vault password: ")

    if ask_new_vault_pass and confirm_new:
        new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
        if new_vault_pass != new_vault_pass2:
            raise errors.AnsibleError("Passwords do not match")

    # enforce no newline chars at the end of passwords
    if vault_pass:
        vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
    if new_vault_pass:
        new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()

    return vault_pass, new_vault_pass
-
def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
    ''' Interactively prompt for SSH, privilege-escalation and vault passwords.

    :returns: (sshpass, becomepass, vaultpass); each entry is None when not
        requested, otherwise a byte string.  An empty become password falls
        back to the SSH password when both were asked for.
    '''
    sshpass = None
    becomepass = None
    vaultpass = None
    become_prompt = ''

    if ask_pass:
        sshpass = getpass.getpass(prompt="SSH password: ")
        # hint in the prompt that Enter reuses the SSH password
        become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
        if sshpass:
            sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
    else:
        become_prompt = "%s password: " % become_method.upper()

    if become_ask_pass:
        becomepass = getpass.getpass(prompt=become_prompt)
        if ask_pass and becomepass == '':
            becomepass = sshpass
        if becomepass:
            becomepass = to_bytes(becomepass)

    if ask_vault_pass:
        vaultpass = getpass.getpass(prompt="Vault password: ")
        if vaultpass:
            # vault passwords must not carry trailing whitespace/newlines
            vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()

    return (sshpass, becomepass, vaultpass)
-
-
def choose_pass_prompt(options):
    ''' Pick the privilege escalation method name used in password prompts.

    The deprecated --ask-su-pass / --ask-sudo-pass flags take precedence
    (in that order) over the consolidated become method.
    '''
    if options.ask_su_pass:
        return 'su'
    if options.ask_sudo_pass:
        return 'sudo'
    return options.become_method
-
def normalize_become_options(options):
    ''' Fold the deprecated sudo/su options into the consolidated become
    options, mutating *options* in place.

    become_ask_pass and become_user fall back first to the legacy
    sudo/su values and finally to the configured defaults.  When
    --become itself was not given, --sudo or --su still enables become
    with the matching method; --sudo wins if both were supplied.
    '''

    options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
    options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER

    if options.become:
        # explicit --become wins; keep whatever method was chosen
        pass
    elif options.sudo:
        options.become = True
        options.become_method = 'sudo'
    elif options.su:
        options.become = True
        options.become_method = 'su'
-
-
def do_encrypt(result, encrypt, salt_size=None, salt=None):
    ''' Hash *result* with the passlib algorithm named by *encrypt*.

    :param result: plaintext string to hash
    :param encrypt: passlib hash scheme name (e.g. 'sha512_crypt')
    :param salt_size: optional salt size forwarded to passlib
    :param salt: optional explicit salt (only used when salt_size is not given)
    :returns: the hashed string
    :raises errors.AnsibleError: if passlib is unavailable or the scheme
        name is unknown
    '''
    if PASSLIB_AVAILABLE:
        try:
            crypt = getattr(passlib.hash, encrypt)
        except:
            raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)

        if salt_size:
            result = crypt.encrypt(result, salt_size=salt_size)
        elif salt:
            result = crypt.encrypt(result, salt=salt)
        else:
            result = crypt.encrypt(result)
    else:
        raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")

    return result
-
def last_non_blank_line(buf):
    ''' Return the last non-empty line of *buf*, or "" when every line is
    empty (note: a line of only whitespace still counts as non-empty). '''
    for line in reversed(buf.splitlines()):
        if len(line) > 0:
            return line
    # shouldn't occur unless there's no output
    return ""
-
def filter_leading_non_json_lines(buf):
    '''
    used to avoid random output from SSH at the top of JSON output, like messages from
    tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

    Drops every line up to (but not including) the first one that starts
    with '{' or '['; everything from that line on is kept verbatim, since
    multiline JSON is valid.  Each kept line gets a trailing newline.
    '''
    kept = []
    json_seen = False
    for line in buf.splitlines():
        if not json_seen and (line.startswith('{') or line.startswith('[')):
            json_seen = True
        if json_seen:
            kept.append(line + '\n')
    return ''.join(kept)
-
def boolean(value):
    ''' Coerce *value* to a bool: its string form (case-insensitive) must
    be one of "true", "t", "y", "1" or "yes" to count as True; anything
    else is False. '''
    return str(value).lower() in ("true", "t", "y", "1", "yes")
-
def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
    """
    helper function for connection plugins to create privilege escalation commands

    Wraps *cmd* so it executes as *user* through *method* (one of 'sudo',
    'su', 'pbrun', 'pfexec').  The wrapped command echoes a random success
    marker first, so callers can detect in the output stream when
    escalation succeeded.

    :returns: (command, prompt, success_key) where *prompt* is the password
        prompt text to watch for (None when the method sets none here) and
        *success_key* is the echoed marker
    :raises errors.AnsibleError: for an unknown *method*
    """

    # random marker, unique per invocation, echoed before the real command
    randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
    success_key = 'BECOME-SUCCESS-%s' % randbits
    prompt = None
    becomecmd = None

    shell = shell or '$SHELL'

    if method == 'sudo':
        # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
        # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
        # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
        # string to the user's shell. We loop reading output until we see the randomly-generated
        # sudo prompt set with the -p option.
        prompt = '[sudo via ansible, key=%s] password: ' % randbits
        exe = exe or C.DEFAULT_SUDO_EXE
        becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
            (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))

    elif method == 'su':
        exe = exe or C.DEFAULT_SU_EXE
        flags = flags or C.DEFAULT_SU_FLAGS
        becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))

    elif method == 'pbrun':
        # pbrun prompts with "Password:"/"password:"; match the common tail
        prompt = 'assword:'
        exe = exe or 'pbrun'
        flags = flags or ''
        becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))

    elif method == 'pfexec':
        exe = exe or 'pfexec'
        flags = flags or ''
        # No user as it uses it's own exec_attr to figure it out
        becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))

    if becomecmd is None:
        raise errors.AnsibleError("Privilege escalation method not found: %s" % method)

    return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
-
-
def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
    """
    helper function for connection plugins to create sudo commands

    Legacy wrapper: delegates to make_become_cmd() with method 'sudo' and
    the configured default sudo flags; returns the same
    (command, prompt, success_key) tuple.
    """
    return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
-
-
def make_su_cmd(su_user, executable, cmd):
    """
    Helper function for connection plugins to create direct su commands

    Legacy wrapper: delegates to make_become_cmd() with method 'su' and the
    configured default su executable/flags; returns the same
    (command, prompt, success_key) tuple.
    """
    return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
-
def get_diff(diff):
    # called by --diff usage in playbook and runner via callbacks
    # include names in diffs 'before' and 'after' and do diff -U 10
    #
    # *diff* is a dict: each present skip-marker key (dst_binary,
    # src_binary, dst_larger, src_larger) adds an explanatory line, and
    # when both 'before' and 'after' are present a unified diff with 10
    # context lines is appended, headed by the optional
    # before_header/after_header names.  Returns a unicode string, or a
    # short notice when the inputs cannot be decoded for comparison.

    try:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            ret = []
            if 'dst_binary' in diff:
                ret.append("diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append("diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                if 'before_header' in diff:
                    before_header = "before: %s" % diff['before_header']
                else:
                    before_header = 'before'
                if 'after_header' in diff:
                    after_header = "after: %s" % diff['after_header']
                else:
                    after_header = 'after'
                differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
                for line in list(differ):
                    ret.append(line)
            return u"".join(ret)
    except UnicodeDecodeError:
        return ">> the files are different, but the diff library cannot compare unicode strings"
-
def is_list_of_strings(items):
    ''' True when every element of *items* is a string (str/unicode);
    vacuously True for an empty iterable. '''
    return all(isinstance(item, basestring) for item in items)
-
def list_union(a, b):
    ''' Order-preserving union of two lists: elements of *a* followed by
    new elements of *b*, keeping only the first occurrence of each. '''
    union = []
    for seq in (a, b):
        for item in seq:
            if item not in union:
                union.append(item)
    return union
-
def list_intersection(a, b):
    ''' Order-preserving intersection: elements of *a* that also appear in
    *b*, deduplicated, in order of first appearance in *a*. '''
    common = []
    for item in a:
        if item not in b:
            continue
        if item in common:
            continue
        common.append(item)
    return common
-
def list_difference(a, b):
    ''' Order-preserving symmetric difference: items appearing in exactly
    one of the two lists, deduplicated, with *a*'s unique items first. '''
    diff = []
    for item in a:
        if item in b or item in diff:
            continue
        diff.append(item)
    for item in b:
        if item in a or item in diff:
            continue
        diff.append(item)
    return diff
-
def contains_vars(data):
    '''
    returns True if the data contains a variable pattern
    (either a '$' legacy marker or a '{{' Jinja2 opener)
    '''
    return any(marker in data for marker in ("$", "{{"))
-
-def safe_eval(expr, locals={}, include_exceptions=False):
- '''
- This is intended for allowing things like:
- with_items: a_list_variable
-
- Where Jinja2 would return a string but we do not want to allow it to
- call functions (outside of Jinja2, where the env is constrained). If
- the input data to this function came from an untrusted (remote) source,
- it should first be run through _clean_data_struct() to ensure the data
- is further sanitized prior to evaluation.
-
- Based on:
- http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
- '''
-
- # this is the whitelist of AST nodes we are going to
- # allow in the evaluation. Any node type other than
- # those listed here will raise an exception in our custom
- # visitor class defined below.
- SAFE_NODES = set(
- (
- ast.Add,
- ast.BinOp,
- ast.Call,
- ast.Compare,
- ast.Dict,
- ast.Div,
- ast.Expression,
- ast.List,
- ast.Load,
- ast.Mult,
- ast.Num,
- ast.Name,
- ast.Str,
- ast.Sub,
- ast.Tuple,
- ast.UnaryOp,
- )
- )
-
- # AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
- SAFE_NODES.union(
- set(
- (ast.Set,)
- )
- )
-
- filter_list = []
- for filter in filter_loader.all():
- filter_list.extend(filter.filters().keys())
-
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
-
- class CleansingNodeVisitor(ast.NodeVisitor):
- def generic_visit(self, node, inside_call=False):
- if type(node) not in SAFE_NODES:
- raise Exception("invalid expression (%s)" % expr)
- elif isinstance(node, ast.Call):
- inside_call = True
- elif isinstance(node, ast.Name) and inside_call:
- if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
- raise Exception("invalid function: %s" % node.id)
- # iterate over all child nodes
- for child_node in ast.iter_child_nodes(node):
- self.generic_visit(child_node, inside_call)
-
- if not isinstance(expr, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (expr, None)
- return expr
-
- cnv = CleansingNodeVisitor()
- try:
- parsed_tree = ast.parse(expr, mode='eval')
- cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
-
- if include_exceptions:
- return (result, None)
- else:
- return result
- except SyntaxError, e:
- # special handling for syntax errors, we just return
- # the expression string back as-is
- if include_exceptions:
- return (expr, None)
- return expr
- except Exception, e:
- if include_exceptions:
- return (expr, e)
- return expr
-
-
def listify_lookup_plugin_terms(terms, basedir, inject):
    ''' Normalize a lookup/with_* argument into a list of terms.

    A bare string may name a variable ("with_items: alist") or carry a
    Jinja2 expression; it is templated against *inject*, and if the result
    is a stringified list/dict it is converted back to a real structure
    via safe_eval().  A plain string becomes a one-element list; lists and
    other structures pass through unchanged.
    '''

    from ansible.utils import template

    if isinstance(terms, basestring):
        # someone did:
        # with_items: alist
        # OR
        # with_items: {{ alist }}

        stripped = terms.strip()
        if not (stripped.startswith('{') or stripped.startswith('[')) and \
            not stripped.startswith("/") and \
            not stripped.startswith('set([') and \
            not LOOKUP_REGEX.search(terms):
            # if not already a list, get ready to evaluate with Jinja2
            # not sure why the "/" is in above code :)
            try:
                new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
                if isinstance(new_terms, basestring) and "{{" in new_terms:
                    # templating did not resolve it; keep the original
                    pass
                else:
                    terms = new_terms
            except:
                pass

        if '{' in terms or '[' in terms:
            # Jinja2 already evaluated a variable to a list.
            # Jinja2-ified list needs to be converted back to a real type
            # TODO: something a bit less heavy than eval
            return safe_eval(terms)

    if isinstance(terms, basestring):
        terms = [ terms ]

    return terms
-
def combine_vars(a, b):
    ''' Merge variable dicts *a* and *b*, with *b* winning on conflicts.

    When DEFAULT_HASH_BEHAVIOUR == "merge" a recursive merge_hash() is
    used; otherwise a shallow overlay replaces colliding keys wholesale.
    _validate_both_dicts() raises if either argument is not a dict.
    '''

    _validate_both_dicts(a, b)

    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    else:
        # NOTE(review): relies on Python 2 list-returning dict.items();
        # '+' on Python 3 dict views would raise -- confirm before porting
        return dict(a.items() + b.items())
-
def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
    '''Return a random password string of length containing only chars.'''

    # Rejection sampling: draw one byte at a time from the OS CSPRNG and
    # keep it only if it is in the allowed alphabet, so the distribution
    # over *chars* stays uniform.
    # NOTE(review): assumes Python 2 semantics where os.urandom() yields a
    # one-character str comparable against *chars* -- confirm before porting.
    password = []
    while len(password) < length:
        new_char = os.urandom(1)
        if new_char in chars:
            password.append(new_char)

    return ''.join(password)
-
def before_comment(msg):
    ''' what's the part of a string before a comment? '''
    # Temporarily mask escaped hashes ("\#") so they survive the split,
    # then restore them afterwards as literal '#' characters.
    placeholder = "**NOT_A_COMMENT**"
    masked = msg.replace("\#", placeholder)
    head = masked.split("#")[0]
    return head.replace(placeholder, "#")
-
def load_vars(basepath, results, vault_password=None):
    """
    Load variables from any potential yaml filename combinations of basepath,
    returning result.

    Tries basepath + each extension in C.YAML_FILENAME_EXTENSIONS and
    merges whatever is found into *results* via _load_vars_from_path().
    Exactly zero or one candidate may exist on disk.

    :raises errors.AnsibleError: when more than one candidate file exists
        (e.g. both group_vars/all.yml and group_vars/all.yaml)
    """

    paths_to_check = [ "".join([basepath, ext])
                       for ext in C.YAML_FILENAME_EXTENSIONS ]

    found_paths = []

    for path in paths_to_check:
        found, results = _load_vars_from_path(path, results, vault_password=vault_password)
        if found:
            found_paths.append(path)


    # disallow the potentially confusing situation that there are multiple
    # variable files for the same name. For example if both group_vars/all.yml
    # and group_vars/all.yaml
    if len(found_paths) > 1:
        raise errors.AnsibleError("Multiple variable files found. "
            "There should only be one. %s" % ( found_paths, ))

    return results
-
-## load variables from yaml files/dirs
-# e.g. host/group_vars
-#
def _load_vars_from_path(path, results, vault_password=None):
    """
    Robustly access the file at path and load variables, carefully reporting
    errors in a friendly/informative way.

    Symlinks are followed (recursively, with permission checks at each
    hop), directories are loaded via _load_vars_from_folder(), and regular
    files are parsed as YAML dicts and merged into *results* with
    combine_vars().

    Return the tuple (found, new_results, )
    """

    try:
        # in the case of a symbolic link, we want the stat of the link itself,
        # not its target
        pathstat = os.lstat(path)
    except os.error, err:
        # most common case is that nothing exists at that path.
        if err.errno == errno.ENOENT:
            return False, results
        # otherwise this is a condition we should report to the user
        raise errors.AnsibleError(
            "%s is not accessible: %s."
            " Please check its permissions." % ( path, err.strerror))

    # symbolic link
    if stat.S_ISLNK(pathstat.st_mode):
        try:
            target = os.path.realpath(path)
        except os.error, err2:
            raise errors.AnsibleError("The symbolic link at %s "
                "is not readable: %s. Please check its permissions."
                % (path, err2.strerror, ))
        # follow symbolic link chains by recursing, so we repeat the same
        # permissions checks above and provide useful errors.
        return _load_vars_from_path(target, results, vault_password)

    # directory
    if stat.S_ISDIR(pathstat.st_mode):

        # support organizing variables across multiple files in a directory
        return True, _load_vars_from_folder(path, results, vault_password=vault_password)

    # regular file
    elif stat.S_ISREG(pathstat.st_mode):
        data = parse_yaml_from_file(path, vault_password=vault_password)
        if data and type(data) != dict:
            raise errors.AnsibleError(
                "%s must be stored as a dictionary/hash" % path)
        elif data is None:
            # an empty vars file contributes nothing
            data = {}

        # combine vars overrides by default but can be configured to do a
        # hash merge in settings
        results = combine_vars(results, data)
        return True, results

    # something else? could be a fifo, socket, device, etc.
    else:
        raise errors.AnsibleError("Expected a variable file or directory "
            "but found a non-file object at path %s" % (path, ))
-
def _load_vars_from_folder(folder_path, results, vault_password=None):
    """
    Load all variables within a folder recursively.

    Entries are processed in sorted (deterministic) order, hidden names
    are skipped, and each entry is merged into *results* via
    _load_vars_from_path() (which recurses back here for subfolders).
    Returns the merged *results* dict.
    """

    # this function and _load_vars_from_path are mutually recursive

    try:
        names = os.listdir(folder_path)
    except os.error, err:
        raise errors.AnsibleError(
            "This folder cannot be listed: %s: %s."
            % ( folder_path, err.strerror))

    # evaluate files in a stable order rather than whatever order the
    # filesystem lists them.
    names.sort()

    # do not parse hidden files or dirs, e.g. .svn/
    paths = [os.path.join(folder_path, name) for name in names if not name.startswith('.')]
    for path in paths:
        _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
    return results
-
def update_hash(hash, key, new_value):
    ''' used to avoid nested .update calls on the parent '''
    # setdefault creates the nested dict on first use; the new values are
    # then merged into it in place
    hash.setdefault(key, {}).update(new_value)
-
def censor_unlogged_data(data):
    '''
    used when the no_log: True attribute is passed to a task to keep data from a callback.
    NOT intended to prevent variable registration, but only things from showing up on
    screen
    '''
    keep = ('skipped', 'changed', 'failed', 'rc')
    censored = dict((key, data[key]) for key in data if key in keep)
    censored['censored'] = 'results hidden due to no_log parameter'
    return censored
-
def check_mutually_exclusive_privilege(options, parser):
    ''' Abort via parser.error() when more than one of the sudo, su and
    become privilege escalation option families was supplied at once. '''

    sudo_given = options.sudo or options.sudo_user or options.ask_sudo_pass
    su_given = options.su or options.su_user or options.ask_su_pass
    become_given = options.become or options.become_user or options.become_ask_pass

    # any two (or all three) families together are an error
    families_in_use = len([flag for flag in (sudo_given, su_given, become_given) if flag])
    if families_in_use > 1:
        parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
                "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
                "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
                " are exclusive of each other")
-
-
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
diff --git a/lib/ansible/utils/boolean.py b/lib/ansible/utils/boolean.py
new file mode 100644
index 0000000000..bf15be346d
--- /dev/null
+++ b/lib/ansible/utils/boolean.py
@@ -0,0 +1,29 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
def boolean(value):
    ''' Coerce *value* to a bool: its string form, lowercased, must be one
    of the recognized truthy spellings to count as True. '''
    truthy = ("true", "t", "y", "1", "yes")
    return str(value).lower() in truthy
+
diff --git a/lib/ansible/utils/cmd_functions.py b/lib/ansible/utils/cmd_functions.py
deleted file mode 100644
index 6525260f10..0000000000
--- a/lib/ansible/utils/cmd_functions.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import shlex
-import subprocess
-import select
-
def run_cmd(cmd, live=False, readsize=10):
    ''' Run *cmd* (a shell-style string, split with shlex) and capture its
    output without using a shell.

    :param live: also echo stdout data to this process's stdout as it arrives
    :param readsize: bytes to read per pipe per select() wakeup
    :returns: (returncode, stdout, stderr)
    '''

    #readsize = 10

    cmdargs = shlex.split(cmd)
    p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

    stdout = ''
    stderr = ''
    # multiplex both pipes with select() so neither can fill up and block
    # the child; a pipe is dropped from the set once it returns EOF ('')
    rpipes = [p.stdout, p.stderr]
    while True:
        rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)

        if p.stdout in rfd:
            dat = os.read(p.stdout.fileno(), readsize)
            if live:
                sys.stdout.write(dat)
            stdout += dat
            if dat == '':
                rpipes.remove(p.stdout)
        if p.stderr in rfd:
            dat = os.read(p.stderr.fileno(), readsize)
            stderr += dat
            if live:
                sys.stdout.write(dat)
            if dat == '':
                rpipes.remove(p.stderr)
        # only break out if we've emptied the pipes, or there is nothing to
        # read from and the process has finished.
        if (not rpipes or not rfd) and p.poll() is not None:
            break
        # Calling wait while there are still pipes to read can cause a lock
        elif not rpipes and p.poll() == None:
            p.wait()

    return p.returncode, stdout, stderr
diff --git a/lib/ansible/color.py b/lib/ansible/utils/color.py
index b3127d85fe..37d0466d2d 100644
--- a/lib/ansible/color.py
+++ b/lib/ansible/utils/color.py
@@ -14,12 +14,15 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
import sys
-import constants
+
+from ansible import constants as C
ANSIBLE_COLOR=True
-if constants.ANSIBLE_NOCOLOR:
+if C.ANSIBLE_NOCOLOR:
ANSIBLE_COLOR=False
elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
ANSIBLE_COLOR=False
@@ -36,7 +39,7 @@ else:
# curses returns an error (e.g. could not find terminal)
ANSIBLE_COLOR=False
-if constants.ANSIBLE_FORCE_COLOR:
+if C.ANSIBLE_FORCE_COLOR:
ANSIBLE_COLOR=True
# --- begin "pretty"
@@ -72,3 +75,20 @@ def stringc(text, color):
# --- end "pretty"
def colorize(lead, num, color):
    """ Print 'lead' = 'num' in 'color' """
    # colorize only non-zero counts, and only when color output is enabled
    use_color = num != 0 and ANSIBLE_COLOR and color is not None
    if not use_color:
        return "%s=%-4s" % (lead, str(num))
    return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
+
def hostcolor(host, stats, color=True):
    ''' Format *host* for a recap line: red when it had failures or was
    unreachable, yellow when changed, green otherwise; uncolored (and
    narrower) when color output is disabled. '''
    if not (ANSIBLE_COLOR and color):
        return "%-26s" % host
    if stats['failures'] != 0 or stats['unreachable'] != 0:
        return "%-37s" % stringc(host, 'red')
    if stats['changed'] != 0:
        return "%-37s" % stringc(host, 'yellow')
    return "%-37s" % stringc(host, 'green')
+
diff --git a/lib/ansible/utils/debug.py b/lib/ansible/utils/debug.py
new file mode 100644
index 0000000000..5b04ac0572
--- /dev/null
+++ b/lib/ansible/utils/debug.py
@@ -0,0 +1,18 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import time
+import sys
+
+from multiprocessing import Lock
+
+from ansible import constants as C
+
+global_debug_lock = Lock()
def debug(msg):
    ''' Print a PID-tagged, timestamped debug line when debugging is
    enabled via C.DEFAULT_DEBUG.

    The module-level global_debug_lock serializes output across worker
    processes so concurrent lines do not interleave.
    '''
    if C.DEFAULT_DEBUG:
        # hold the lock in a with-block so it is released even if the
        # print/flush raises; the previous manual acquire/release could
        # leave the lock held forever and deadlock other workers
        with global_debug_lock:
            print("%6d %0.5f: %s" % (os.getpid(), time.time(), msg))
            sys.stdout.flush()
diff --git a/lib/ansible/utils/display.py b/lib/ansible/utils/display.py
new file mode 100644
index 0000000000..d5b6ad71a9
--- /dev/null
+++ b/lib/ansible/utils/display.py
@@ -0,0 +1,142 @@
+# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# FIXME: copied mostly from old code, needs py3 improvements
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import textwrap
+import sys
+
+from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.color import stringc
+
+class Display:
+
+ def __init__(self, verbosity=0):
+
+ self.verbosity = verbosity
+
+ # list of all deprecation messages to prevent duplicate display
+ self._deprecations = {}
+ self._warns = {}
+ self._errors = {}
+
+ def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
+ msg2 = msg
+ if color:
+ msg2 = stringc(msg, color)
+ if not log_only:
+ if not stderr:
+ try:
+ print(msg2)
+ except UnicodeEncodeError:
+ print(msg2.encode('utf-8'))
+ else:
+ try:
+ print(msg2, file=sys.stderr)
+ except UnicodeEncodeError:
+ print(msg2.encode('utf-8'), file=sys.stderr)
+ if C.DEFAULT_LOG_PATH != '':
+ while msg.startswith("\n"):
+ msg = msg.replace("\n","")
+ # FIXME: logger stuff needs to be implemented
+ #if not screen_only:
+ # if color == 'red':
+ # logger.error(msg)
+ # else:
+ # logger.info(msg)
+
+ def vv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=1)
+
+ def vvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=2)
+
+ def vvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=3)
+
+ def vvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=4)
+
+ def vvvvvv(self, msg, host=None):
+ return self.verbose(msg, host=host, caplevel=5)
+
+ def verbose(self, msg, host=None, caplevel=2):
+ # FIXME: this needs to be implemented
+ #msg = utils.sanitize_output(msg)
+ if self.verbosity > caplevel:
+ if host is None:
+ self.display(msg, color='blue')
+ else:
+ self.display("<%s> %s" % (host, msg), color='blue', screen_only=True)
+
+ def deprecated(self, msg, version, removed=False):
+ ''' used to print out a deprecation message.'''
+
+ if not removed and not C.DEPRECATION_WARNINGS:
+ return
+
+ if not removed:
+ if version:
+ new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
+ else:
+ new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
+ new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
+ else:
+ raise AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
+
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+
+ if new_msg not in self._deprecations:
+ self.display(new_msg, color='purple', stderr=True)
+ self._deprecations[new_msg] = 1
+
+ def warning(self, msg):
+ new_msg = "\n[WARNING]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+ if new_msg not in self._warns:
+ self.display(new_msg, color='bright purple', stderr=True)
+ self._warns[new_msg] = 1
+
+ def system_warning(self, msg):
+ if C.SYSTEM_WARNINGS:
+ self.warning(msg)
+
+ def banner(self, msg, color=None):
+        '''
+        Prints a header-looking line, padded out with stars to 80
+        columns of total width (3 stars minimum)
+        '''
+ msg = msg.strip()
+ star_len = (80 - len(msg))
+ if star_len < 0:
+ star_len = 3
+ stars = "*" * star_len
+ self.display("\n%s %s" % (msg, stars), color=color)
+
+ def error(self, msg):
+ new_msg = "\n[ERROR]: %s" % msg
+ wrapped = textwrap.wrap(new_msg, 79)
+ new_msg = "\n".join(wrapped) + "\n"
+ if new_msg not in self._errors:
+ self.display(new_msg, color='red', stderr=True)
+ self._errors[new_msg] = 1
+
diff --git a/lib/ansible/utils/display_functions.py b/lib/ansible/utils/display_functions.py
deleted file mode 100644
index 2233c81657..0000000000
--- a/lib/ansible/utils/display_functions.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import textwrap
-
-from ansible import constants as C
-from ansible import errors
-from ansible.callbacks import display
-
-__all__ = ['deprecated', 'warning', 'system_warning']
-
-# list of all deprecation messages to prevent duplicate display
-deprecations = {}
-warns = {}
-
-def deprecated(msg, version, removed=False):
- ''' used to print out a deprecation message.'''
-
- if not removed and not C.DEPRECATION_WARNINGS:
- return
-
- if not removed:
- if version:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
- else:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
- new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
- else:
- raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
-
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
-
- if new_msg not in deprecations:
- display(new_msg, color='purple', stderr=True)
- deprecations[new_msg] = 1
-
-def warning(msg):
- new_msg = "\n[WARNING]: %s" % msg
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
- if new_msg not in warns:
- display(new_msg, color='bright purple', stderr=True)
- warns[new_msg] = 1
-
-def system_warning(msg):
- if C.SYSTEM_WARNINGS:
- warning(msg)
-
diff --git a/lib/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
new file mode 100644
index 0000000000..5138dbef70
--- /dev/null
+++ b/lib/ansible/utils/encrypt.py
@@ -0,0 +1,49 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+PASSLIB_AVAILABLE = False
+try:
+ import passlib.hash
+ PASSLIB_AVAILABLE = True
+except:
+ pass
+
+from ansible.errors import AnsibleError
+
+__all__ = ['do_encrypt']
+
+def do_encrypt(result, encrypt, salt_size=None, salt=None):
+ if PASSLIB_AVAILABLE:
+ try:
+ crypt = getattr(passlib.hash, encrypt)
+ except:
+ raise AnsibleError("passlib does not support '%s' algorithm" % encrypt)
+
+ if salt_size:
+ result = crypt.encrypt(result, salt_size=salt_size)
+ elif salt:
+ result = crypt.encrypt(result, salt=salt)
+ else:
+ result = crypt.encrypt(result)
+ else:
+ raise AnsibleError("passlib must be installed to encrypt vars_prompt values")
+
+ return result
+
diff --git a/lib/ansible/utils/hashing.py b/lib/ansible/utils/hashing.py
index a7d142e5bd..5e378db79f 100644
--- a/lib/ansible/utils/hashing.py
+++ b/lib/ansible/utils/hashing.py
@@ -20,6 +20,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
+from ansible.errors import AnsibleError
# Note, sha1 is the only hash algorithm compatible with python2.4 and with
# FIPS-140 mode (as of 11-2014)
@@ -43,6 +44,8 @@ def secure_hash_s(data, hash_func=sha1):
digest = hash_func()
try:
+ if not isinstance(data, basestring):
+ data = "%s" % data
digest.update(data)
except UnicodeEncodeError:
digest.update(data.encode('utf-8'))
@@ -62,8 +65,8 @@ def secure_hash(filename, hash_func=sha1):
digest.update(block)
block = infile.read(blocksize)
infile.close()
- except IOError, e:
- raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
+ except IOError as e:
+ raise AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
return digest.hexdigest()
# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
diff --git a/lib/ansible/utils/listify.py b/lib/ansible/utils/listify.py
new file mode 100644
index 0000000000..a26b4b9829
--- /dev/null
+++ b/lib/ansible/utils/listify.py
@@ -0,0 +1,66 @@
+# (c) 2014 Michael DeHaan, <michael@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from six import iteritems, string_types
+
+import re
+
+from ansible.template import Templar
+from ansible.template.safe_eval import safe_eval
+
+__all__ = ['listify_lookup_plugin_terms']
+
+LOOKUP_REGEX = re.compile(r'lookup\s*\(')
+
+def listify_lookup_plugin_terms(terms, variables, loader):
+
+ if isinstance(terms, basestring):
+ # someone did:
+ # with_items: alist
+ # OR
+ # with_items: {{ alist }}
+
+ stripped = terms.strip()
+ templar = Templar(loader=loader, variables=variables)
+ if not (stripped.startswith('{') or stripped.startswith('[')) and not stripped.startswith("/") and not stripped.startswith('set([') and not LOOKUP_REGEX.search(terms):
+ # if not already a list, get ready to evaluate with Jinja2
+ # not sure why the "/" is in above code :)
+ try:
+ new_terms = templar.template("{{ %s }}" % terms)
+ if isinstance(new_terms, basestring) and "{{" in new_terms:
+ pass
+ else:
+ terms = new_terms
+ except:
+ pass
+ else:
+ terms = templar.template(terms)
+
+ if '{' in terms or '[' in terms:
+ # Jinja2 already evaluated a variable to a list.
+ # Jinja2-ified list needs to be converted back to a real type
+ return safe_eval(terms)
+
+ if isinstance(terms, basestring):
+ terms = [ terms ]
+
+ return terms
+
diff --git a/lib/ansible/utils/module_docs.py b/lib/ansible/utils/module_docs.py
index ee99af2cb5..632b4a00c2 100644
--- a/lib/ansible/utils/module_docs.py
+++ b/lib/ansible/utils/module_docs.py
@@ -23,7 +23,7 @@ import ast
import yaml
import traceback
-from ansible import utils
+from ansible.plugins import fragment_loader
# modules that are ok that they do not have documentation strings
BLACKLIST_MODULES = [
@@ -66,7 +66,7 @@ def get_docstring(filename, verbose=False):
if fragment_slug != 'doesnotexist':
- fragment_class = utils.plugins.fragment_loader.get(fragment_name)
+ fragment_class = fragment_loader.get(fragment_name)
assert fragment_class is not None
fragment_yaml = getattr(fragment_class, fragment_var, '{}')
diff --git a/lib/ansible/utils/module_docs_fragments b/lib/ansible/utils/module_docs_fragments
new file mode 120000
index 0000000000..83aef9ec19
--- /dev/null
+++ b/lib/ansible/utils/module_docs_fragments
@@ -0,0 +1 @@
+../../../lib/ansible/utils/module_docs_fragments \ No newline at end of file
diff --git a/lib/ansible/utils/module_docs_fragments/__init__.py b/lib/ansible/utils/module_docs_fragments/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/lib/ansible/utils/module_docs_fragments/__init__.py
+++ /dev/null
diff --git a/lib/ansible/utils/module_docs_fragments/aws.py b/lib/ansible/utils/module_docs_fragments/aws.py
deleted file mode 100644
index 981eb8e105..0000000000
--- a/lib/ansible/utils/module_docs_fragments/aws.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2014, Will Thames <will@thames.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ModuleDocFragment(object):
-
- # AWS only documentation fragment
- DOCUMENTATION = """
-options:
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
- required: false
- default: null
- aliases: []
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_access_key', 'access_key' ]
- security_token:
- description:
- - AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
- required: false
- default: null
- aliases: [ 'access_token' ]
- version_added: "1.6"
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
- profile:
- description:
- - uses a boto profile. Only works with boto >= 2.24.0
- required: false
- default: null
- aliases: []
- version_added: "1.6"
-requirements:
- - boto
-notes:
- - If parameters are not set within the module, the following
- environment variables can be used in decreasing order of precedence
- C(AWS_URL) or C(EC2_URL),
- C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
- C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
- C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
- C(AWS_REGION) or C(EC2_REGION)
- - Ansible uses the boto configuration file (typically ~/.boto) if no
- credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
- - C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
- AWS region, when required, but this can also be configured in the boto config file
-"""
diff --git a/lib/ansible/utils/module_docs_fragments/cloudstack.py b/lib/ansible/utils/module_docs_fragments/cloudstack.py
deleted file mode 100644
index 8d173ea756..0000000000
--- a/lib/ansible/utils/module_docs_fragments/cloudstack.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2015 René Moser <mail@renemoser.net>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ModuleDocFragment(object):
-
- # Standard cloudstack documentation fragment
- DOCUMENTATION = '''
-options:
- api_key:
- description:
- - API key of the CloudStack API.
- required: false
- default: null
- aliases: []
- api_secret:
- description:
- - Secret key of the CloudStack API.
- required: false
- default: null
- aliases: []
- api_url:
- description:
- - URL of the CloudStack API e.g. https://cloud.example.com/client/api.
- required: false
- default: null
- aliases: []
- api_http_method:
- description:
- - HTTP method used.
- required: false
- default: 'get'
- aliases: []
-requirements:
- - cs
-notes:
- - Ansible uses the C(cs) library's configuration method if credentials are not
- provided by the options C(api_url), C(api_key), C(api_secret).
- Configuration is read from several locations, in the following order:
- - The C(CLOUDSTACK_ENDPOINT), C(CLOUDSTACK_KEY), C(CLOUDSTACK_SECRET) and
- C(CLOUDSTACK_METHOD) environment variables.
- - A C(CLOUDSTACK_CONFIG) environment variable pointing to an C(.ini) file,
- - A C(cloudstack.ini) file in the current working directory.
- - A C(.cloudstack.ini) file in the users home directory.
- See https://github.com/exoscale/cs for more information.
- - This module supports check mode.
-'''
diff --git a/lib/ansible/utils/module_docs_fragments/files.py b/lib/ansible/utils/module_docs_fragments/files.py
deleted file mode 100644
index adff1f2f1b..0000000000
--- a/lib/ansible/utils/module_docs_fragments/files.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2014, Matt Martz <matt@sivel.net>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = """
-options:
- mode:
- required: false
- default: null
- choices: []
- description:
- - mode the file or directory should be, such as 0644 as would be fed to I(chmod). As of version 1.8, the mode may be specified as a symbolic mode (for example, C(u+rwx) or C(u=rw,g=r,o=r)).
- owner:
- required: false
- default: null
- choices: []
- description:
- - name of the user that should own the file/directory, as would be fed to I(chown)
- group:
- required: false
- default: null
- choices: []
- description:
- - name of the group that should own the file/directory, as would be fed to I(chown)
- seuser:
- required: false
- default: null
- choices: []
- description:
- - user part of SELinux file context. Will default to system policy, if
- applicable. If set to C(_default), it will use the C(user) portion of the
- policy if available
- serole:
- required: false
- default: null
- choices: []
- description:
- - role part of SELinux file context, C(_default) feature works as for I(seuser).
- setype:
- required: false
- default: null
- choices: []
- description:
- - type part of SELinux file context, C(_default) feature works as for I(seuser).
- selevel:
- required: false
- default: "s0"
- choices: []
- description:
- - level part of the SELinux file context. This is the MLS/MCS attribute,
- sometimes known as the C(range). C(_default) feature works as for
- I(seuser).
- follow:
- required: false
- default: "no"
- choices: [ "yes", "no" ]
- version_added: "1.8"
- description:
- - 'This flag indicates that filesystem links, if they exist, should be followed.'
-
-"""
diff --git a/lib/ansible/utils/module_docs_fragments/openstack.py b/lib/ansible/utils/module_docs_fragments/openstack.py
deleted file mode 100644
index f989b3dcb8..0000000000
--- a/lib/ansible/utils/module_docs_fragments/openstack.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ModuleDocFragment(object):
-
- # Standard openstack documentation fragment
- DOCUMENTATION = '''
-options:
- cloud:
- description:
- - Named cloud to operate against. Provides default values for I(auth) and I(auth_plugin)
- required: false
- auth:
- description:
- - Dictionary containing auth information as needed by the cloud's auth
- plugin strategy. For the default I{password) plugin, this would contain
- I(auth_url), I(username), I(password), I(project_name) and any
- information about domains if the cloud supports them. For other plugins,
- this param will need to contain whatever parameters that auth plugin
- requires. This parameter is not needed if a named cloud is provided.
- required: false
- auth_type:
- description:
- - Name of the auth plugin to use. If the cloud uses something other than
- password authentication, the name of the plugin should be indicated here
- and the contents of the I(auth) parameter should be updated accordingly.
- required: false
- default: password
- region_name:
- description:
- - Name of the region.
- required: false
- availability_zone:
- description:
- - Name of the availability zone.
- required: false
- wait:
- description:
- - Should ansible wait until the requested resource is complete.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- timeout:
- description:
- - How long should ansible wait for the requested resource.
- required: false
- default: 180
- api_timeout:
- description:
- - How long should the socket layer wait before timing out for API calls.
- If this is omitted, nothing will be passed to the requests library.
- required: false
- default: None
- validate_certs:
- description:
- - Whether or not SSL API requests should be verified.
- required: false
- default: True
- aliases: ['verify']
- cacert:
- description:
- - A path to a CA Cert bundle that can be used as part of verifying
- SSL API requests.
- required: false
- cert:
- description:
- - A path to a client certificate to use as part of the SSL transaction
- required: false
- key:
- description:
- - A path to a client key to use as part of the SSL transaction
- required: false
- endpoint_type:
- description:
- - Endpoint URL type to fetch from the service catalog.
- choices: [public, internal, admin]
- required: false
- default: public
-requirements: [shade]
-notes:
- - The standard OpenStack environment variables, such as C(OS_USERNAME)
- may be user instead of providing explicit values.
- - Auth information is driven by os-client-config, which means that values
- can come from a yaml config file in /etc/ansible/openstack.yaml,
- /etc/openstack/clouds.yaml or ~/.config/openstack/clouds.yaml, then from
- standard environment variables, then finally by explicit parameters in
- plays.
-'''
diff --git a/lib/ansible/utils/module_docs_fragments/rackspace.py b/lib/ansible/utils/module_docs_fragments/rackspace.py
deleted file mode 100644
index a49202c500..0000000000
--- a/lib/ansible/utils/module_docs_fragments/rackspace.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# (c) 2014, Matt Martz <matt@sivel.net>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class ModuleDocFragment(object):
-
- # Standard Rackspace only documentation fragment
- DOCUMENTATION = """
-options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- aliases:
- - password
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases:
- - creds_file
- env:
- description:
- - Environment as configured in ~/.pyrax.cfg,
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
- version_added: 1.5
- region:
- description:
- - Region to create an instance in
- default: DFW
- username:
- description:
- - Rackspace username (overrides I(credentials))
- verify_ssl:
- description:
- - Whether or not to require SSL validation of API endpoints
- version_added: 1.5
-requirements:
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-"""
-
- # Documentation fragment including attributes to enable communication
- # of other OpenStack clouds. Not all rax modules support this.
- OPENSTACK = """
-options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- aliases:
- - password
- auth_endpoint:
- description:
- - The URI of the authentication service
- default: https://identity.api.rackspacecloud.com/v2.0/
- version_added: 1.5
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases:
- - creds_file
- env:
- description:
- - Environment as configured in ~/.pyrax.cfg,
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
- version_added: 1.5
- identity_type:
- description:
- - Authentication machanism to use, such as rackspace or keystone
- default: rackspace
- version_added: 1.5
- region:
- description:
- - Region to create an instance in
- default: DFW
- tenant_id:
- description:
- - The tenant ID used for authentication
- version_added: 1.5
- tenant_name:
- description:
- - The tenant name used for authentication
- version_added: 1.5
- username:
- description:
- - Rackspace username (overrides I(credentials))
- verify_ssl:
- description:
- - Whether or not to require SSL validation of API endpoints
- version_added: 1.5
-requirements:
- - pyrax
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
-"""
diff --git a/lib/ansible/utils/path.py b/lib/ansible/utils/path.py
new file mode 100644
index 0000000000..e49a2f7d55
--- /dev/null
+++ b/lib/ansible/utils/path.py
@@ -0,0 +1,37 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import stat
+
+__all__ = ['is_executable', 'unfrackpath']
+
+def is_executable(path):
+ '''is the given path executable?'''
+ return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
+
+def unfrackpath(path):
+ '''
+ returns a path that is free of symlinks, environment
+ variables, relative path traversals and symbols (~)
+ example:
+ '$HOME/../../var/mail' becomes '/var/spool/mail'
+ '''
+ return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
+
diff --git a/lib/ansible/utils/string_functions.py b/lib/ansible/utils/string_functions.py
deleted file mode 100644
index 3b452718f7..0000000000
--- a/lib/ansible/utils/string_functions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-def isprintable(instring):
- if isinstance(instring, str):
- #http://stackoverflow.com/a/3637294
- import string
- printset = set(string.printable)
- isprintable = set(instring).issubset(printset)
- return isprintable
- else:
- return True
-
-def count_newlines_from_end(str):
- i = len(str)
- while i > 0:
- if str[i-1] != '\n':
- break
- i -= 1
- return len(str) - i
-
diff --git a/lib/ansible/utils/su_prompts.py b/lib/ansible/utils/su_prompts.py
deleted file mode 100644
index 04e98e1c45..0000000000
--- a/lib/ansible/utils/su_prompts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-SU_PROMPT_LOCALIZATIONS = [
- 'Password',
- '암호',
- 'パスワード',
- 'Adgangskode',
- 'Contraseña',
- 'Contrasenya',
- 'Hasło',
- 'Heslo',
- 'Jelszó',
- 'Lösenord',
- 'Mật khẩu',
- 'Mot de passe',
- 'Parola',
- 'Parool',
- 'Pasahitza',
- 'Passord',
- 'Passwort',
- 'Salasana',
- 'Sandi',
- 'Senha',
- 'Wachtwoord',
- 'ססמה',
- 'Лозинка',
- 'Парола',
- 'Пароль',
- 'गुप्तशब्द',
- 'शब्दकूट',
- 'సంకేతపదము',
- 'හස්පදය',
- '密码',
- '密碼',
-]
-
-SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
-
-def check_su_prompt(data):
- return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
-
diff --git a/lib/ansible/utils/template.py b/lib/ansible/utils/template.py
deleted file mode 100644
index fb35924ce1..0000000000
--- a/lib/ansible/utils/template.py
+++ /dev/null
@@ -1,404 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import codecs
-import jinja2
-from jinja2.runtime import StrictUndefined
-from jinja2.exceptions import TemplateSyntaxError
-import yaml
-import json
-from ansible import errors
-import ansible.constants as C
-import time
-import subprocess
-import datetime
-import pwd
-import ast
-import traceback
-from numbers import Number
-
-from ansible.utils.string_functions import count_newlines_from_end
-from ansible.utils import to_bytes, to_unicode
-
-class Globals(object):
-
- FILTERS = None
-
- def __init__(self):
- pass
-
-def _get_filters():
- ''' return filter plugin instances '''
-
- if Globals.FILTERS is not None:
- return Globals.FILTERS
-
- from ansible import utils
- plugins = [ x for x in utils.plugins.filter_loader.all()]
- filters = {}
- for fp in plugins:
- filters.update(fp.filters())
- Globals.FILTERS = filters
-
- return Globals.FILTERS
-
-def _get_extensions():
- ''' return jinja2 extensions to load '''
-
- '''
- if some extensions are set via jinja_extensions in ansible.cfg, we try
- to load them with the jinja environment
- '''
- jinja_exts = []
- if C.DEFAULT_JINJA2_EXTENSIONS:
- '''
- Let's make sure the configuration directive doesn't contain spaces
- and split extensions in an array
- '''
- jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
-
- return jinja_exts
-
-class Flags:
- LEGACY_TEMPLATE_WARNING = False
-
-# TODO: refactor this file
-
-FILTER_PLUGINS = None
-_LISTRE = re.compile(r"(\w+)\[(\d+)\]")
-
-# A regex for checking to see if a variable we're trying to
-# expand is just a single variable name.
-SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
-
-JINJA2_OVERRIDE = '#jinja2:'
-JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
-
-def lookup(name, *args, **kwargs):
- from ansible import utils
- instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None))
- tvars = kwargs.get('vars', None)
-
- wantlist = kwargs.pop('wantlist', False)
-
- if instance is not None:
- try:
- ran = instance.run(*args, inject=tvars, **kwargs)
- except errors.AnsibleError:
- raise
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except Exception, e:
- raise errors.AnsibleError('Unexpected error in during lookup: %s' % e)
- if ran and not wantlist:
- ran = ",".join(ran)
- return ran
- else:
- raise errors.AnsibleError("lookup plugin (%s) not found" % name)
-
-def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
- ''' templates a data structure by traversing it and substituting for other data structures '''
- from ansible import utils
- try:
- if convert_bare and isinstance(varname, basestring):
- first_part = varname.split(".")[0].split("[")[0]
- if first_part in templatevars and '{{' not in varname and '$' not in varname:
- varname = "{{%s}}" % varname
-
- if isinstance(varname, basestring):
- if '{{' in varname or '{%' in varname:
- try:
- varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)
- except errors.AnsibleError, e:
- raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e)))
-
- # template_from_string may return non strings for the case where the var is just
- # a reference to a single variable, so we should re_check before we do further evals
- if isinstance(varname, basestring):
- if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
- eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
- if eval_results[1] is None:
- varname = eval_results[0]
-
- return varname
-
- elif isinstance(varname, (list, tuple)):
- return [template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) for v in varname]
- elif isinstance(varname, dict):
- d = {}
- for (k, v) in varname.iteritems():
- d[k] = template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal)
- return d
- else:
- return varname
- except errors.AnsibleFilterError:
- if filter_fatal:
- raise
- else:
- return varname
-
-
-class _jinja2_vars(object):
- '''
- Helper class to template all variable content before jinja2 sees it.
- This is done by hijacking the variable storage that jinja2 uses, and
- overriding __contains__ and __getitem__ to look like a dict. Added bonus
- is avoiding duplicating the large hashes that inject tends to be.
- To facilitate using builtin jinja2 things like range, globals are handled
- here.
- extras is a list of locals to also search for variables.
- '''
-
- def __init__(self, basedir, vars, globals, fail_on_undefined, *extras):
- self.basedir = basedir
- self.vars = vars
- self.globals = globals
- self.fail_on_undefined = fail_on_undefined
- self.extras = extras
-
- def __contains__(self, k):
- if k in self.vars:
- return True
- for i in self.extras:
- if k in i:
- return True
- if k in self.globals:
- return True
- return False
-
- def __getitem__(self, varname):
- from ansible.runner import HostVars
- if varname not in self.vars:
- for i in self.extras:
- if varname in i:
- return i[varname]
- if varname in self.globals:
- return self.globals[varname]
- else:
- raise KeyError("undefined variable: %s" % varname)
- var = self.vars[varname]
- # HostVars is special, return it as-is, as is the special variable
- # 'vars', which contains the vars structure
- var = to_unicode(var, nonstring="passthru")
- if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
- return var
- else:
- return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
-
- def add_locals(self, locals):
- '''
- If locals are provided, create a copy of self containing those
- locals in addition to what is already in this variable proxy.
- '''
- if locals is None:
- return self
- return _jinja2_vars(self.basedir, self.vars, self.globals, self.fail_on_undefined, locals, *self.extras)
-
-class J2Template(jinja2.environment.Template):
- '''
- This class prevents Jinja2 from running _jinja2_vars through dict()
- Without this, {% include %} and similar will create new contexts unlike
- the special one created in template_from_file. This ensures they are all
- alike, except for potential locals.
- '''
- def new_context(self, vars=None, shared=False, locals=None):
- return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
-
-def template_from_file(basedir, path, vars, vault_password=None):
- ''' run a file through the templating engine '''
-
- fail_on_undefined = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
-
- from ansible import utils
- realpath = utils.path_dwim(basedir, path)
- loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)])
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions())
- environment.filters.update(_get_filters())
- environment.globals['lookup'] = my_lookup
- environment.globals['finalize'] = my_finalize
- if fail_on_undefined:
- environment.undefined = StrictUndefined
-
- try:
- data = codecs.open(realpath, encoding="utf8").read()
- except UnicodeDecodeError:
- raise errors.AnsibleError("unable to process as utf-8: %s" % realpath)
- except:
- raise errors.AnsibleError("unable to read %s" % realpath)
-
- # Get jinja env overrides from template
- if data.startswith(JINJA2_OVERRIDE):
- eol = data.find('\n')
- line = data[len(JINJA2_OVERRIDE):eol]
- data = data[eol+1:]
- for pair in line.split(','):
- (key,val) = pair.split(':')
- key = key.strip()
- if key in JINJA2_ALLOWED_OVERRIDES:
- setattr(environment, key, ast.literal_eval(val.strip()))
-
-
- environment.template_class = J2Template
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- # Throw an exception which includes a more user friendly error message
- values = {'name': realpath, 'lineno': e.lineno, 'error': str(e)}
- msg = 'file: %(name)s, line number: %(lineno)s, error: %(error)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
- vars = vars.copy()
- try:
- template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name
- except:
- template_uid = os.stat(realpath).st_uid
- vars['template_host'] = os.uname()[1]
- vars['template_path'] = realpath
- vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath))
- vars['template_uid'] = template_uid
- vars['template_fullpath'] = os.path.abspath(realpath)
- vars['template_run_date'] = datetime.datetime.now()
-
- managed_default = C.DEFAULT_MANAGED_STR
- managed_str = managed_default.format(
- host = vars['template_host'],
- uid = vars['template_uid'],
- file = to_bytes(vars['template_path'])
- )
- vars['ansible_managed'] = time.strftime(
- managed_str,
- time.localtime(os.path.getmtime(realpath))
- )
-
- # This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars
- # Ideally, this could use some API where setting shared=True and the object won't get
- # passed through dict(o), but I have not found that yet.
- try:
- res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals, fail_on_undefined), shared=True)))
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except jinja2.exceptions.TemplateNotFound, e:
- # Throw an exception which includes a more user friendly error message
- # This likely will happen for included sub-template. Not that besides
- # pure "file not found" it may happen due to Jinja2's "security"
- # checks on path.
- values = {'name': realpath, 'subname': str(e)}
- msg = 'file: %(name)s, error: Cannot find/not allowed to load (include) template %(subname)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
-
- # The low level calls above do not preserve the newline
- # characters at the end of the input data, so we use the
- # calculate the difference in newlines and append them
- # to the resulting output for parity
- res_newlines = count_newlines_from_end(res)
- data_newlines = count_newlines_from_end(data)
- if data_newlines > res_newlines:
- res += '\n' * (data_newlines - res_newlines)
-
- if isinstance(res, unicode):
- # do not try to re-template a unicode string
- result = res
- else:
- result = template(basedir, res, vars)
-
- return result
-
-def template_from_string(basedir, data, vars, fail_on_undefined=False):
- ''' run a string through the (Jinja2) templating engine '''
- try:
- if type(data) == str:
- data = unicode(data, 'utf-8')
-
- # Check to see if the string we are trying to render is just referencing a single
- # var. In this case we don't want to accidentally change the type of the variable
- # to a string by using the jinja template renderer. We just want to pass it.
- only_one = SINGLE_VAR.match(data)
- if only_one:
- var_name = only_one.group(1)
- if var_name in vars:
- resolved_val = vars[var_name]
- if isinstance(resolved_val, (bool, Number)):
- return resolved_val
-
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(trim_blocks=True, undefined=StrictUndefined, extensions=_get_extensions(), finalize=my_finalize)
- environment.filters.update(_get_filters())
- environment.template_class = J2Template
-
- if '_original_file' in vars:
- basedir = os.path.dirname(vars['_original_file'])
- filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
- if os.path.exists(filesdir):
- basedir = filesdir
-
- # 6227
- if isinstance(data, unicode):
- try:
- data = data.decode('utf-8')
- except UnicodeEncodeError, e:
- pass
-
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- raise errors.AnsibleError("template error while templating string: %s" % str(e))
- except Exception, e:
- if 'recursion' in str(e):
- raise errors.AnsibleError("recursive loop detected in template string: %s" % data)
- else:
- return data
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
-
- t.globals['lookup'] = my_lookup
- t.globals['finalize'] = my_finalize
- jvars =_jinja2_vars(basedir, vars, t.globals, fail_on_undefined)
- new_context = t.new_context(jvars, shared=True)
- rf = t.root_render_func(new_context)
- try:
- res = jinja2.utils.concat(rf)
- except TypeError, te:
- if 'StrictUndefined' in str(te):
- raise errors.AnsibleUndefinedVariable(
- "Unable to look up a name or access an attribute in template string. " + \
- "Make sure your variable name does not contain invalid characters like '-'."
- )
- else:
- raise errors.AnsibleError("an unexpected type error occurred. Error was %s" % te)
- return res
- except (jinja2.exceptions.UndefinedError, errors.AnsibleUndefinedVariable):
- if fail_on_undefined:
- raise
- else:
- return data
-
diff --git a/lib/ansible/utils/unicode.py b/lib/ansible/utils/unicode.py
index 7bd035c007..2cff2e5e45 100644
--- a/lib/ansible/utils/unicode.py
+++ b/lib/ansible/utils/unicode.py
@@ -19,6 +19,8 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
+from six import string_types, text_type, binary_type, PY3
+
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
@@ -35,6 +37,9 @@ _LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
+if PY3:
+ basestring = (str, bytes)
+
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
@@ -89,12 +94,12 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
- if isinstance(obj, unicode):
+ if isinstance(obj, text_type):
return obj
if encoding in _UTF8_ALIASES:
- return unicode(obj, 'utf-8', errors)
+ return text_type(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
- return unicode(obj, 'latin-1', errors)
+ return text_type(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
@@ -110,19 +115,19 @@ def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
simple = None
if not simple:
try:
- simple = str(obj)
+ simple = text_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
- if isinstance(simple, str):
- return unicode(simple, encoding, errors)
+ if isinstance(simple, binary_type):
+ return text_type(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
- if isinstance(obj_repr, str):
- obj_repr = unicode(obj_repr, encoding, errors)
+ if isinstance(obj_repr, binary_type):
+ obj_repr = text_type(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
@@ -198,19 +203,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
- if isinstance(obj, str):
+ if isinstance(obj, binary_type):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
- return ''
+ return b''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
- simple = str(obj)
+ simple = binary_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
@@ -220,19 +225,19 @@ def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
- simple = ''
- if isinstance(simple, unicode):
+ simple = b''
+ if isinstance(simple, text_type):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
- obj_repr = ''
- if isinstance(obj_repr, unicode):
+ obj_repr = b''
+ if isinstance(obj_repr, text_type):
obj_repr = obj_repr.encode(encoding, errors)
else:
- obj_repr = str(obj_repr)
+ obj_repr = binary_type(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
diff --git a/lib/ansible/utils/vars.py b/lib/ansible/utils/vars.py
new file mode 100644
index 0000000000..c033c0c258
--- /dev/null
+++ b/lib/ansible/utils/vars.py
@@ -0,0 +1,51 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible import constants as C
+
def combine_vars(a, b):
    '''Return a new dict combining a and b, with b winning on key conflicts.

    Honors the configured hash behaviour: when DEFAULT_HASH_BEHAVIOUR is
    'merge', nested dicts are merged recursively via merge_hash; otherwise
    b's top-level keys simply replace a's.
    '''
    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)
    else:
        # dict(a.items() + b.items()) only works on Python 2 (py3 dict
        # views do not support '+'); copy+update is equivalent on both.
        result = a.copy()
        result.update(b)
        return result
+
def merge_hash(a, b):
    '''Recursively merge hash b into hash a and return a new dict.

    Keys from b take precedence over keys from a.  When both sides hold a
    dict for the same key, the two dicts are merged recursively; otherwise
    b's value replaces a's.
    '''
    result = {}

    # Layer a first, then b, so b's values win on conflict.
    for source in (a, b):
        for k, v in source.items():  # items(), not py2-only iteritems()
            # Recurse only when BOTH the accumulated value and the incoming
            # value are dicts; without the isinstance(v, dict) guard a
            # scalar in b overriding a dict in a would crash the recursion.
            # Use result[k] (the accumulated value), not a[k].
            if k in result and isinstance(result[k], dict) and isinstance(v, dict):
                result[k] = merge_hash(result[k], v)
            else:
                result[k] = v

    return result
+
diff --git a/lib/ansible/utils/vault.py b/lib/ansible/utils/vault.py
index 842688a2c1..5c704afac5 100644
--- a/lib/ansible/utils/vault.py
+++ b/lib/ansible/utils/vault.py
@@ -1,4 +1,6 @@
-# (c) 2014, James Tanner <tanner.jc@gmail.com>
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -12,574 +14,43 @@
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-import os
-import shlex
-import shutil
-import tempfile
-from io import BytesIO
-from subprocess import call
-from ansible import errors
-from hashlib import sha256
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
-# Note: Only used for loading obsolete VaultAES files. All files are written
-# using the newer VaultAES256 which does not require md5
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- md5 = None
+import os
+import subprocess
-from binascii import hexlify
-from binascii import unhexlify
from ansible import constants as C
+from ansible.errors import AnsibleError
+from ansible.utils.path import is_executable
-try:
- from Crypto.Hash import SHA256, HMAC
- HAS_HASH = True
-except ImportError:
- HAS_HASH = False
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
-
-HEADER='$ANSIBLE_VAULT'
-CIPHER_WHITELIST=['AES', 'AES256']
-
-class VaultLib(object):
-
- def __init__(self, password):
- self.password = password
- self.cipher_name = None
- self.version = '1.1'
-
- def is_encrypted(self, data):
- if data.startswith(HEADER):
- return True
- else:
- return False
-
- def encrypt(self, data):
-
- if self.is_encrypted(data):
- raise errors.AnsibleError("data is already encrypted")
-
- if not self.cipher_name:
- self.cipher_name = "AES256"
- #raise errors.AnsibleError("the cipher must be set before encrypting data")
-
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- """
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
- """
-
- # encrypt sha + data
- enc_data = this_cipher.encrypt(data, self.password)
-
- # add header
- tmp_data = self._add_header(enc_data)
- return tmp_data
-
- def decrypt(self, data):
- if self.password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt data")
-
- if not self.is_encrypted(data):
- raise errors.AnsibleError("data is not encrypted")
-
- # clean out header
- data = self._split_header(data)
-
- # create the cipher object
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- # try to unencrypt data
- data = this_cipher.decrypt(data, self.password)
- if data is None:
- raise errors.AnsibleError("Decryption failed")
-
- return data
-
- def _add_header(self, data):
- # combine header and encrypted data in 80 char columns
-
- #tmpdata = hexlify(data)
- tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]
-
- if not self.cipher_name:
- raise errors.AnsibleError("the cipher must be set before adding a header")
-
- dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"
-
- for l in tmpdata:
- dirty_data += l + '\n'
-
- return dirty_data
-
-
- def _split_header(self, data):
- # used by decrypt
-
- tmpdata = data.split('\n')
- tmpheader = tmpdata[0].strip().split(';')
-
- self.version = str(tmpheader[1].strip())
- self.cipher_name = str(tmpheader[2].strip())
- clean_data = '\n'.join(tmpdata[1:])
-
- """
- # strip out newline, join, unhex
- clean_data = [ x.strip() for x in clean_data ]
- clean_data = unhexlify(''.join(clean_data))
- """
-
- return clean_data
-
- def __enter__(self):
- return self
-
- def __exit__(self, *err):
- pass
-
-class VaultEditor(object):
- # uses helper methods for write_file(self, filename, data)
- # to write a file so that code isn't duplicated for simple
- # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
- # ... "Don't Repeat Yourself", etc.
-
- def __init__(self, cipher_name, password, filename):
- # instantiates a member variable for VaultLib
- self.cipher_name = cipher_name
- self.password = password
- self.filename = filename
-
- def _edit_file_helper(self, existing_data=None, cipher=None):
- # make sure the umask is set to a sane value
- old_umask = os.umask(0o077)
-
- # Create a tempfile
- _, tmp_path = tempfile.mkstemp()
-
- if existing_data:
- self.write_data(existing_data, tmp_path)
-
- # drop the user into an editor on the tmp file
- try:
- call(self._editor_shell_command(tmp_path))
- except OSError, e:
- raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e)))
- tmpdata = self.read_data(tmp_path)
-
- # create new vault
- this_vault = VaultLib(self.password)
- if cipher:
- this_vault.cipher_name = cipher
-
- # encrypt new data and write out to tmp
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, tmp_path)
-
- # shuffle tmp file into place
- self.shuffle_files(tmp_path, self.filename)
-
- # and restore umask
- os.umask(old_umask)
-
- def create_file(self):
- """ create a new encrypted file """
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if os.path.isfile(self.filename):
- raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
-
- # Let the user specify contents and save file
- self._edit_file_helper(cipher=self.cipher_name)
-
- def decrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- if this_vault.is_encrypted(tmpdata):
- dec_data = this_vault.decrypt(tmpdata)
- if dec_data is None:
- raise errors.AnsibleError("Decryption failed")
- else:
- self.write_data(dec_data, self.filename)
- else:
- raise errors.AnsibleError("%s is not encrypted" % self.filename)
-
- def edit_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # let the user edit the data and save
- self._edit_file_helper(existing_data=dec_data)
- ###we want the cipher to default to AES256 (get rid of files
- # encrypted with the AES cipher)
- #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
-
-
- def view_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
- old_umask = os.umask(0o077)
- _, tmp_path = tempfile.mkstemp()
- self.write_data(dec_data, tmp_path)
- os.umask(old_umask)
-
- # drop the user into pager on the tmp file
- call(self._pager_shell_command(tmp_path))
- os.remove(tmp_path)
-
- def encrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- this_vault.cipher_name = self.cipher_name
- if not this_vault.is_encrypted(tmpdata):
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, self.filename)
- else:
- raise errors.AnsibleError("%s is already encrypted" % self.filename)
-
- def rekey_file(self, new_password):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # create new vault
- new_vault = VaultLib(new_password)
-
- # we want to force cipher to the default
- #new_vault.cipher_name = this_vault.cipher_name
-
- # re-encrypt data and re-write file
- enc_data = new_vault.encrypt(dec_data)
- self.write_data(enc_data, self.filename)
-
- def read_data(self, filename):
- f = open(filename, "rb")
- tmpdata = f.read()
- f.close()
- return tmpdata
-
- def write_data(self, data, filename):
- if os.path.isfile(filename):
- os.remove(filename)
- f = open(filename, "wb")
- f.write(data)
- f.close()
-
- def shuffle_files(self, src, dest):
- # overwrite dest with src
- if os.path.isfile(dest):
- os.remove(dest)
- shutil.move(src, dest)
-
- def _editor_shell_command(self, filename):
- EDITOR = os.environ.get('EDITOR','vim')
- editor = shlex.split(EDITOR)
- editor.append(filename)
-
- return editor
-
- def _pager_shell_command(self, filename):
- PAGER = os.environ.get('PAGER','less')
- pager = shlex.split(PAGER)
- pager.append(filename)
-
- return pager
-
-########################################
-# CIPHERS #
-########################################
-
-class VaultAES(object):
-
- # this version has been obsoleted by the VaultAES256 class
- # which uses encrypt-then-mac (fixing order) and also improving the KDF used
- # code remains for upgrade purposes only
- # http://stackoverflow.com/a/16761459
-
- def __init__(self):
- if not md5:
- raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.')
- if not HAS_AES:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
-
- """ Create a key and an initialization vector """
-
- d = d_i = ''
- while len(d) < key_length + iv_length:
- d_i = md5(d_i + password + salt).digest()
- d += d_i
-
- key = d[:key_length]
- iv = d[key_length:key_length+iv_length]
-
- return key, iv
-
- def encrypt(self, data, password, key_length=32):
-
- """ Read plaintext data from in_file and write encrypted to out_file """
-
-
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
-
- in_file = BytesIO(tmp_data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
-
- # Get a block of random data. EL does not have Crypto.Random.new()
- # so os.urandom is used for cross platform purposes
- salt = os.urandom(bs - len('Salted__'))
-
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- out_file.write('Salted__' + salt)
- finished = False
- while not finished:
- chunk = in_file.read(1024 * bs)
- if len(chunk) == 0 or len(chunk) % bs != 0:
- padding_length = (bs - len(chunk) % bs) or bs
- chunk += padding_length * chr(padding_length)
- finished = True
- out_file.write(cipher.encrypt(chunk))
-
- out_file.seek(0)
- enc_data = out_file.read()
- tmp_data = hexlify(enc_data)
-
- return tmp_data
-
-
- def decrypt(self, data, password, key_length=32):
-
- """ Read encrypted data from in_file and write decrypted to out_file """
-
- # http://stackoverflow.com/a/14989032
-
- data = ''.join(data.split('\n'))
- data = unhexlify(data)
-
- in_file = BytesIO(data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
- salt = in_file.read(bs)[len('Salted__'):]
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- next_chunk = ''
- finished = False
-
- while not finished:
- chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
- if len(next_chunk) == 0:
- padding_length = ord(chunk[-1])
- chunk = chunk[:-padding_length]
- finished = True
- out_file.write(chunk)
-
- # reset the stream pointer to the beginning
- out_file.seek(0)
- new_data = out_file.read()
-
- # split out sha and verify decryption
- split_data = new_data.split("\n")
- this_sha = split_data[0]
- this_data = '\n'.join(split_data[1:])
- test_sha = sha256(this_data).hexdigest()
-
- if this_sha != test_sha:
- raise errors.AnsibleError("Decryption failed")
-
- #return out_file.read()
- return this_data
-
-
-class VaultAES256(object):
-
+def read_vault_file(vault_password_file):
"""
- Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
- Keys are derived using PBKDF2
+ Read a vault password from a file or if executable, execute the script and
+ retrieve password from STDOUT
"""
- # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
-
- def __init__(self):
-
- if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def gen_key_initctr(self, password, salt):
- # 16 for AES 128, 32 for AES256
- keylength = 32
-
- # match the size used for counter.new to avoid extra work
- ivlength = 16
-
- hash_function = SHA256
-
- # make two keys and one iv
- pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
-
-
- derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
- count=10000, prf=pbkdf2_prf)
-
- key1 = derivedkey[:keylength]
- key2 = derivedkey[keylength:(keylength * 2)]
- iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
-
- return key1, key2, hexlify(iv)
-
-
- def encrypt(self, data, password):
+ this_path = os.path.realpath(os.path.expanduser(vault_password_file))
+ if not os.path.exists(this_path):
+ raise AnsibleError("The vault password file %s was not found" % this_path)
- salt = os.urandom(32)
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
- bs = AES.block_size
- padding_length = (bs - len(data) % bs) or bs
- data += padding_length * chr(padding_length)
-
- # COUNTER.new PARAMETERS
- # 1) nbits (integer) - Length of the counter, in bits.
- # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
-
- ctr = Counter.new(128, initial_value=long(iv, 16))
-
- # AES.new PARAMETERS
- # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
- # 2) MODE_CTR, is the recommended mode
- # 3) counter=<CounterObject>
-
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # ENCRYPT PADDED DATA
- cryptedData = cipher.encrypt(data)
-
- # COMBINE SALT, DIGEST AND DATA
- hmac = HMAC.new(key2, cryptedData, SHA256)
- message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
- message = hexlify(message)
- return message
-
- def decrypt(self, data, password):
-
- # SPLIT SALT, DIGEST, AND DATA
- data = ''.join(data.split("\n"))
- data = unhexlify(data)
- salt, cryptedHmac, cryptedData = data.split("\n", 2)
- salt = unhexlify(salt)
- cryptedData = unhexlify(cryptedData)
-
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # EXIT EARLY IF DIGEST DOESN'T MATCH
- hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
- if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
- return None
-
- # SET THE COUNTER AND THE CIPHER
- ctr = Counter.new(128, initial_value=long(iv, 16))
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # DECRYPT PADDED DATA
- decryptedData = cipher.decrypt(cryptedData)
-
- # UNPAD DATA
- padding_length = ord(decryptedData[-1])
- decryptedData = decryptedData[:-padding_length]
-
- return decryptedData
-
- def is_equal(self, a, b):
- # http://codahale.com/a-lesson-in-timing-attacks/
- if len(a) != len(b):
- return False
-
- result = 0
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
+ if is_executable(this_path):
+ try:
+ # STDERR not captured to make it easier for users to prompt for input in their scripts
+ p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
+ except OSError as e:
+            raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (this_path, e))
+ stdout, stderr = p.communicate()
+ vault_pass = stdout.strip('\r\n')
+ else:
+ try:
+ f = open(this_path, "rb")
+ vault_pass=f.read().strip()
+ f.close()
+ except (OSError, IOError) as e:
+ raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
+ return vault_pass
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
new file mode 100644
index 0000000000..f30d52b7a3
--- /dev/null
+++ b/lib/ansible/vars/__init__.py
@@ -0,0 +1,317 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+
+from collections import defaultdict
+
+try:
+ from hashlib import sha1
+except ImportError:
+ from sha import sha as sha1
+
+from ansible import constants as C
+from ansible.errors import *
+from ansible.parsing import DataLoader
+from ansible.plugins.cache import FactCache
+from ansible.template import Templar
+from ansible.utils.debug import debug
+from ansible.vars.hostvars import HostVars
+
+CACHED_VARS = dict()
+
+class VariableManager:
+
+ def __init__(self):
+
+ self._fact_cache = FactCache()
+ self._vars_cache = defaultdict(dict)
+ self._extra_vars = defaultdict(dict)
+ self._host_vars_files = defaultdict(dict)
+ self._group_vars_files = defaultdict(dict)
+ self._inventory = None
+
+ self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
+
+ def _get_cache_entry(self, play=None, host=None, task=None):
+ play_id = "NONE"
+ if play:
+ play_id = play._uuid
+
+ host_id = "NONE"
+ if host:
+ host_id = host.get_name()
+
+ task_id = "NONE"
+ if task:
+ task_id = task._uuid
+
+ return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)
+
+ @property
+ def extra_vars(self):
+        ''' ensures a clean copy of the extra_vars is made '''
+ return self._extra_vars.copy()
+
+ def set_extra_vars(self, value):
+        ''' ensures a clean copy of the extra_vars is used to set the value '''
+ assert isinstance(value, dict)
+ self._extra_vars = value.copy()
+
+ def set_inventory(self, inventory):
+ self._inventory = inventory
+
+ def _validate_both_dicts(self, a, b):
+ '''
+ Validates that both arguments are dictionaries, or an error is raised.
+ '''
+ if not (isinstance(a, dict) and isinstance(b, dict)):
+ raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__))
+
+ def _combine_vars(self, a, b):
+ '''
+ Combines dictionaries of variables, based on the hash behavior
+ '''
+
+ self._validate_both_dicts(a, b)
+
+ if C.DEFAULT_HASH_BEHAVIOUR == "merge":
+ return self._merge_dicts(a, b)
+ else:
+ return dict(a.items() + b.items())
+
+ def _merge_dicts(self, a, b):
+ '''
+ Recursively merges dict b into a, so that keys
+ from b take precedence over keys from a.
+ '''
+
+ result = dict()
+
+ self._validate_both_dicts(a, b)
+
+ for dicts in a, b:
+ # next, iterate over b keys and values
+ for k, v in dicts.iteritems():
+ # if there's already such key in a
+ # and that key contains dict
+ if k in result and isinstance(result[k], dict):
+ # merge those dicts recursively
+ result[k] = self._merge_dicts(a[k], v)
+ else:
+ # otherwise, just copy a value from b to a
+ result[k] = v
+
+ return result
+
+ def get_vars(self, loader, play=None, host=None, task=None):
+ '''
+ Returns the variables, with optional "context" given via the parameters
+ for the play, host, and task (which could possibly result in different
+ sets of variables being returned due to the additional context).
+
+ The order of precedence is:
+ - play->roles->get_default_vars (if there is a play context)
+ - group_vars_files[host] (if there is a host context)
+ - host_vars_files[host] (if there is a host context)
+ - host->get_vars (if there is a host context)
+ - fact_cache[host] (if there is a host context)
+ - vars_cache[host] (if there is a host context)
+ - play vars (if there is a play context)
+ - play vars_files (if there's no host context, ignore
+ file names that cannot be templated)
+ - task->get_vars (if there is a task context)
+ - extra vars
+ '''
+
+ debug("in VariableManager get_vars()")
+ cache_entry = self._get_cache_entry(play=play, host=host, task=task)
+ if cache_entry in CACHED_VARS:
+ debug("vars are cached, returning them now")
+ return CACHED_VARS[cache_entry]
+
+ all_vars = defaultdict(dict)
+
+ if play:
+ # first we compile any vars specified in defaults/main.yml
+ # for all roles within the specified play
+ for role in play.get_roles():
+ all_vars = self._combine_vars(all_vars, role.get_default_vars())
+
+ if host:
+ # next, if a host is specified, we load any vars from group_vars
+ # files and then any vars from host_vars files which may apply to
+ # this host or the groups it belongs to
+
+ # we merge in the special 'all' group_vars first, if they exist
+ if 'all' in self._group_vars_files:
+ all_vars = self._combine_vars(all_vars, self._group_vars_files['all'])
+
+ for group in host.get_groups():
+ all_vars = self._combine_vars(all_vars, group.get_vars())
+ if group.name in self._group_vars_files and group.name != 'all':
+ all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name])
+
+ host_name = host.get_name()
+ if host_name in self._host_vars_files:
+ all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name])
+
+ # then we merge in vars specified for this host
+ all_vars = self._combine_vars(all_vars, host.get_vars())
+
+ # next comes the facts cache and the vars cache, respectively
+ all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.get_name(), dict()))
+
+ if play:
+ all_vars = self._combine_vars(all_vars, play.get_vars())
+ templar = Templar(loader=loader, variables=all_vars)
+ for vars_file in play.get_vars_files():
+ try:
+ vars_file = templar.template(vars_file)
+ data = loader.load_from_file(vars_file)
+ if data is None:
+ data = dict()
+ all_vars = self._combine_vars(all_vars, data)
+ except:
+ # FIXME: get_vars should probably be taking a flag to determine
+ # whether or not vars files errors should be fatal at this
+ # stage, or just base it on whether a host was specified?
+ pass
+ for role in play.get_roles():
+ all_vars = self._combine_vars(all_vars, role.get_vars())
+
+ if host:
+ all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))
+
+ if task:
+ if task._role:
+ all_vars = self._combine_vars(all_vars, task._role.get_vars())
+ all_vars = self._combine_vars(all_vars, task.get_vars())
+
+ all_vars = self._combine_vars(all_vars, self._extra_vars)
+
+ # FIXME: make sure all special vars are here
+ # Finally, we create special vars
+
+ if host and self._inventory is not None:
+ hostvars = HostVars(vars_manager=self, inventory=self._inventory, loader=loader)
+ all_vars['hostvars'] = hostvars
+
+ if self._inventory is not None:
+ all_vars['inventory_dir'] = self._inventory.basedir()
+
+        # the 'omit' value allows params to be left out if the variable they are based on is undefined
+ all_vars['omit'] = self._omit_token
+
+ CACHED_VARS[cache_entry] = all_vars
+
+ debug("done with get_vars()")
+ return all_vars
+
+ def _get_inventory_basename(self, path):
+ '''
+        Returns the basename minus the extension of the given path, so the
+ bare filename can be matched against host/group names later
+ '''
+
+ (name, ext) = os.path.splitext(os.path.basename(path))
+ if ext not in ('.yml', '.yaml'):
+ return os.path.basename(path)
+ else:
+ return name
+
+ def _load_inventory_file(self, path, loader):
+ '''
+ helper function, which loads the file and gets the
+ basename of the file without the extension
+ '''
+
+ if loader.is_directory(path):
+ data = dict()
+
+ try:
+ names = loader.list_directory(path)
+ except os.error as err:
+ raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))
+
+ # evaluate files in a stable order rather than whatever
+ # order the filesystem lists them.
+ names.sort()
+
+ # do not parse hidden files or dirs, e.g. .svn/
+ paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
+ for p in paths:
+ _found, results = self._load_inventory_file(path=p, loader=loader)
+ data = self._combine_vars(data, results)
+
+ else:
+ data = loader.load_from_file(path)
+ if data is None:
+ data = dict()
+
+ name = self._get_inventory_basename(path)
+ return (name, data)
+
+ def add_host_vars_file(self, path, loader):
+ '''
+ Loads and caches a host_vars file in the _host_vars_files dict,
+ where the key to that dictionary is the basename of the file, minus
+ the extension, for matching against a given inventory host name
+ '''
+
+ if loader.path_exists(path):
+ (name, data) = self._load_inventory_file(path, loader)
+ self._host_vars_files[name] = data
+
+ def add_group_vars_file(self, path, loader):
+        '''
+        Loads and caches a group_vars file in the _group_vars_files dict,
+        where the key to that dictionary is the basename of the file, minus
+        the extension, for matching against a given inventory group name
+        '''
+
+ if loader.path_exists(path):
+ (name, data) = self._load_inventory_file(path, loader)
+ self._group_vars_files[name] = data
+
+ def set_host_facts(self, host, facts):
+ '''
+ Sets or updates the given facts for a host in the fact cache.
+ '''
+
+ assert isinstance(facts, dict)
+
+ host_name = host.get_name()
+ if host_name not in self._fact_cache:
+ self._fact_cache[host_name] = facts
+ else:
+ self._fact_cache[host_name].update(facts)
+
+ def set_host_variable(self, host, varname, value):
+ '''
+ Sets a value in the vars_cache for a host.
+ '''
+
+ host_name = host.get_name()
+ if host_name not in self._vars_cache:
+ self._vars_cache[host_name] = dict()
+ self._vars_cache[host_name][varname] = value
+
diff --git a/lib/ansible/vars/hostvars.py b/lib/ansible/vars/hostvars.py
new file mode 100644
index 0000000000..45b3340229
--- /dev/null
+++ b/lib/ansible/vars/hostvars.py
@@ -0,0 +1,47 @@
+# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.template import Templar
+
+__all__ = ['HostVars']
+
+class HostVars(dict):
+ ''' A special view of vars_cache that adds values from the inventory when needed. '''
+
+ def __init__(self, vars_manager, inventory, loader):
+ self._vars_manager = vars_manager
+ self._inventory = inventory
+ self._loader = loader
+ self._lookup = {}
+
+ #self.update(vars_cache)
+
+ def __getitem__(self, host_name):
+
+ if host_name not in self._lookup:
+ host = self._inventory.get_host(host_name)
+ result = self._vars_manager.get_vars(loader=self._loader, host=host)
+ #result.update(self._vars_cache.get(host, {}))
+ #templar = Templar(variables=self._vars_cache, loader=self._loader)
+ #self._lookup[host] = templar.template(result)
+ self._lookup[host_name] = result
+ return self._lookup[host_name]
+