summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrian Coca <brian.coca+git@gmail.com>2015-08-27 12:27:38 -0400
committerBrian Coca <brian.coca+git@gmail.com>2015-08-27 12:27:38 -0400
commitef594f708c6eca9ad45d4926b942cf6ce0f15ee1 (patch)
tree982ac7443692116a7908e511b0e25681af061861
parent5ba3452b7ee0e2abdf61d528e385057faff9e584 (diff)
downloadansible-ef594f708c6eca9ad45d4926b942cf6ce0f15ee1.tar.gz
remove old dead code
-rw-r--r--.gitmodules6
-rw-r--r--v1/README.md11
-rw-r--r--v1/ansible/__init__.py18
-rw-r--r--v1/ansible/cache/__init__.py61
-rw-r--r--v1/ansible/cache/base.py41
-rw-r--r--v1/ansible/cache/jsonfile.py143
-rw-r--r--v1/ansible/cache/memcached.py191
-rw-r--r--v1/ansible/cache/memory.py44
-rw-r--r--v1/ansible/cache/redis.py107
-rw-r--r--v1/ansible/callback_plugins/__init__.py0
-rw-r--r--v1/ansible/callback_plugins/noop.py94
-rw-r--r--v1/ansible/callbacks.py729
-rw-r--r--v1/ansible/color.py74
-rw-r--r--v1/ansible/constants.py212
-rw-r--r--v1/ansible/errors.py35
-rw-r--r--v1/ansible/inventory/__init__.py654
-rw-r--r--v1/ansible/inventory/dir.py229
-rw-r--r--v1/ansible/inventory/expand_hosts.py116
-rw-r--r--v1/ansible/inventory/group.py117
-rw-r--r--v1/ansible/inventory/host.py67
-rw-r--r--v1/ansible/inventory/ini.py208
-rw-r--r--v1/ansible/inventory/script.py154
-rw-r--r--v1/ansible/inventory/vars_plugins/__init__.py0
-rw-r--r--v1/ansible/inventory/vars_plugins/noop.py48
-rw-r--r--v1/ansible/module_common.py196
-rw-r--r--v1/ansible/module_utils/__init__.py17
-rw-r--r--v1/ansible/module_utils/a10.py103
-rw-r--r--v1/ansible/module_utils/basic.py1631
-rw-r--r--v1/ansible/module_utils/cloudstack.py368
-rw-r--r--v1/ansible/module_utils/database.py128
-rw-r--r--v1/ansible/module_utils/ec2.py188
-rw-r--r--v1/ansible/module_utils/facts.py2786
-rw-r--r--v1/ansible/module_utils/gce.py93
-rw-r--r--v1/ansible/module_utils/known_hosts.py176
-rw-r--r--v1/ansible/module_utils/openstack.py104
-rw-r--r--v1/ansible/module_utils/powershell.ps1166
-rw-r--r--v1/ansible/module_utils/rax.py328
-rw-r--r--v1/ansible/module_utils/redhat.py280
-rw-r--r--v1/ansible/module_utils/splitter.py201
-rw-r--r--v1/ansible/module_utils/urls.py496
-rw-r--r--v1/ansible/modules/__init__.py0
m---------v1/ansible/modules/core6
m---------v1/ansible/modules/extras9
-rw-r--r--v1/ansible/playbook/__init__.py874
-rw-r--r--v1/ansible/playbook/play.py949
-rw-r--r--v1/ansible/playbook/task.py346
-rw-r--r--v1/ansible/runner/__init__.py1517
-rw-r--r--v1/ansible/runner/action_plugins/__init__.py0
-rw-r--r--v1/ansible/runner/action_plugins/add_host.py111
-rw-r--r--v1/ansible/runner/action_plugins/assemble.py158
-rw-r--r--v1/ansible/runner/action_plugins/assert.py64
-rw-r--r--v1/ansible/runner/action_plugins/async.py48
-rw-r--r--v1/ansible/runner/action_plugins/copy.py381
-rw-r--r--v1/ansible/runner/action_plugins/debug.py60
-rw-r--r--v1/ansible/runner/action_plugins/fail.py44
-rw-r--r--v1/ansible/runner/action_plugins/fetch.py173
-rw-r--r--v1/ansible/runner/action_plugins/group_by.py108
-rw-r--r--v1/ansible/runner/action_plugins/include_vars.py56
-rw-r--r--v1/ansible/runner/action_plugins/normal.py59
-rw-r--r--v1/ansible/runner/action_plugins/patch.py69
-rw-r--r--v1/ansible/runner/action_plugins/pause.py139
-rw-r--r--v1/ansible/runner/action_plugins/raw.py54
-rw-r--r--v1/ansible/runner/action_plugins/script.py136
-rw-r--r--v1/ansible/runner/action_plugins/set_fact.py47
-rw-r--r--v1/ansible/runner/action_plugins/synchronize.py218
-rw-r--r--v1/ansible/runner/action_plugins/template.py179
-rw-r--r--v1/ansible/runner/action_plugins/unarchive.py121
-rw-r--r--v1/ansible/runner/action_plugins/win_copy.py377
-rw-r--r--v1/ansible/runner/action_plugins/win_template.py146
-rw-r--r--v1/ansible/runner/connection.py53
-rw-r--r--v1/ansible/runner/connection_plugins/__init__.py0
-rw-r--r--v1/ansible/runner/connection_plugins/accelerate.py372
-rw-r--r--v1/ansible/runner/connection_plugins/chroot.py132
-rw-r--r--v1/ansible/runner/connection_plugins/fireball.py153
-rw-r--r--v1/ansible/runner/connection_plugins/funcd.py97
-rw-r--r--v1/ansible/runner/connection_plugins/jail.py153
-rw-r--r--v1/ansible/runner/connection_plugins/libvirt_lxc.py129
-rw-r--r--v1/ansible/runner/connection_plugins/local.py129
-rw-r--r--v1/ansible/runner/connection_plugins/paramiko_ssh.py419
-rw-r--r--v1/ansible/runner/connection_plugins/ssh.py460
-rw-r--r--v1/ansible/runner/connection_plugins/winrm.py270
-rw-r--r--v1/ansible/runner/connection_plugins/zone.py162
-rw-r--r--v1/ansible/runner/filter_plugins/__init__.py0
-rw-r--r--v1/ansible/runner/filter_plugins/core.py431
-rw-r--r--v1/ansible/runner/filter_plugins/ipaddr.py659
-rw-r--r--v1/ansible/runner/filter_plugins/mathstuff.py126
-rw-r--r--v1/ansible/runner/lookup_plugins/__init__.py0
-rw-r--r--v1/ansible/runner/lookup_plugins/cartesian.py59
-rwxr-xr-xv1/ansible/runner/lookup_plugins/consul_kv.py128
-rw-r--r--v1/ansible/runner/lookup_plugins/csvfile.py85
-rw-r--r--v1/ansible/runner/lookup_plugins/dict.py39
-rw-r--r--v1/ansible/runner/lookup_plugins/dig.py212
-rw-r--r--v1/ansible/runner/lookup_plugins/dnstxt.py68
-rw-r--r--v1/ansible/runner/lookup_plugins/env.py41
-rw-r--r--v1/ansible/runner/lookup_plugins/etcd.py78
-rw-r--r--v1/ansible/runner/lookup_plugins/file.py59
-rw-r--r--v1/ansible/runner/lookup_plugins/fileglob.py39
-rw-r--r--v1/ansible/runner/lookup_plugins/first_found.py194
-rw-r--r--v1/ansible/runner/lookup_plugins/flattened.py78
-rw-r--r--v1/ansible/runner/lookup_plugins/indexed_items.py44
-rw-r--r--v1/ansible/runner/lookup_plugins/inventory_hostnames.py48
-rw-r--r--v1/ansible/runner/lookup_plugins/items.py44
-rw-r--r--v1/ansible/runner/lookup_plugins/lines.py38
-rw-r--r--v1/ansible/runner/lookup_plugins/nested.py73
-rw-r--r--v1/ansible/runner/lookup_plugins/password.py129
-rw-r--r--v1/ansible/runner/lookup_plugins/pipe.py52
-rw-r--r--v1/ansible/runner/lookup_plugins/random_choice.py41
-rw-r--r--v1/ansible/runner/lookup_plugins/redis_kv.py72
-rw-r--r--v1/ansible/runner/lookup_plugins/sequence.py216
-rw-r--r--v1/ansible/runner/lookup_plugins/subelements.py67
-rw-r--r--v1/ansible/runner/lookup_plugins/template.py33
-rw-r--r--v1/ansible/runner/lookup_plugins/together.py64
-rw-r--r--v1/ansible/runner/lookup_plugins/url.py48
-rw-r--r--v1/ansible/runner/poller.py115
-rw-r--r--v1/ansible/runner/return_data.py58
-rw-r--r--v1/ansible/runner/shell_plugins/__init__.py0
-rw-r--r--v1/ansible/runner/shell_plugins/csh.py26
-rw-r--r--v1/ansible/runner/shell_plugins/fish.py23
-rw-r--r--v1/ansible/runner/shell_plugins/powershell.py131
-rw-r--r--v1/ansible/runner/shell_plugins/sh.py130
-rw-r--r--v1/ansible/utils/__init__.py1662
-rw-r--r--v1/ansible/utils/cmd_functions.py59
-rw-r--r--v1/ansible/utils/display_functions.py63
-rw-r--r--v1/ansible/utils/hashing.py91
-rw-r--r--v1/ansible/utils/module_docs.py111
l---------v1/ansible/utils/module_docs_fragments1
-rw-r--r--v1/ansible/utils/plugins.py304
-rw-r--r--v1/ansible/utils/string_functions.py18
-rw-r--r--v1/ansible/utils/su_prompts.py60
-rw-r--r--v1/ansible/utils/template.py405
-rw-r--r--v1/ansible/utils/unicode.py248
-rw-r--r--v1/ansible/utils/vault.py585
-rwxr-xr-xv1/bin/ansible207
-rwxr-xr-xv1/bin/ansible-doc337
-rwxr-xr-xv1/bin/ansible-galaxy957
-rwxr-xr-xv1/bin/ansible-playbook330
-rwxr-xr-xv1/bin/ansible-pull257
-rwxr-xr-xv1/bin/ansible-vault241
-rw-r--r--v1/hacking/README.md48
-rwxr-xr-xv1/hacking/authors.sh14
-rw-r--r--v1/hacking/env-setup78
-rw-r--r--v1/hacking/env-setup.fish67
-rwxr-xr-xv1/hacking/get_library.py29
-rwxr-xr-xv1/hacking/module_formatter.py447
-rw-r--r--v1/hacking/templates/rst.j2211
-rwxr-xr-xv1/hacking/test-module193
-rwxr-xr-xv1/hacking/update.sh3
-rw-r--r--v1/tests/README.md5
-rw-r--r--v1/tests/TestConstants.py64
-rw-r--r--v1/tests/TestFilters.py191
-rw-r--r--v1/tests/TestInventory.py510
-rw-r--r--v1/tests/TestModuleUtilsBasic.py334
-rw-r--r--v1/tests/TestModuleUtilsDatabase.py118
-rw-r--r--v1/tests/TestModules.py32
-rw-r--r--v1/tests/TestPlayVarsFiles.py390
-rw-r--r--v1/tests/TestSynchronize.py176
-rw-r--r--v1/tests/TestUtils.py945
-rw-r--r--v1/tests/TestUtilsStringFunctions.py33
-rw-r--r--v1/tests/TestVault.py147
-rw-r--r--v1/tests/TestVaultEditor.py180
-rw-r--r--v1/tests/ansible.cfg3
-rw-r--r--v1/tests/inventory_test_data/ansible_hosts2
-rw-r--r--v1/tests/inventory_test_data/broken.yml2
-rw-r--r--v1/tests/inventory_test_data/common_vars.yml4
-rw-r--r--v1/tests/inventory_test_data/complex_hosts96
-rw-r--r--v1/tests/inventory_test_data/encrypted.yml6
-rw-r--r--v1/tests/inventory_test_data/hosts_list.yml6
-rw-r--r--v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg2
-rw-r--r--v1/tests/inventory_test_data/inventory/test_combined_range2
-rw-r--r--v1/tests/inventory_test_data/inventory/test_incorrect_format2
-rw-r--r--v1/tests/inventory_test_data/inventory/test_incorrect_range2
-rw-r--r--v1/tests/inventory_test_data/inventory/test_leading_range6
-rw-r--r--v1/tests/inventory_test_data/inventory/test_missing_end2
-rw-r--r--v1/tests/inventory_test_data/inventory_api.py44
-rw-r--r--v1/tests/inventory_test_data/inventory_dir/0hosts3
-rw-r--r--v1/tests/inventory_test_data/inventory_dir/1mythology6
-rw-r--r--v1/tests/inventory_test_data/inventory_dir/2levels6
-rw-r--r--v1/tests/inventory_test_data/inventory_dir/3comments8
-rw-r--r--v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini2
-rw-r--r--v1/tests/inventory_test_data/large_range1
-rw-r--r--v1/tests/inventory_test_data/restrict_pattern2
-rw-r--r--v1/tests/inventory_test_data/simple_hosts28
-rw-r--r--v1/tests/module_tests/TestApt.py42
-rw-r--r--v1/tests/module_tests/TestDocker.py19
-rw-r--r--v1/tests/vault_test_data/foo-ansible-1.0.yml4
-rw-r--r--v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml6
-rw-r--r--v1/tests/vault_test_data/foo-ansible-1.1.yml6
187 files changed, 0 insertions, 33809 deletions
diff --git a/.gitmodules b/.gitmodules
index 793522a29c..a0e903430a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,9 +4,3 @@
[submodule "lib/ansible/modules/extras"]
path = lib/ansible/modules/extras
url = https://github.com/ansible/ansible-modules-extras
-[submodule "v1/ansible/modules/core"]
- path = v1/ansible/modules/core
- url = https://github.com/ansible/ansible-modules-core
-[submodule "v1/ansible/modules/extras"]
- path = v1/ansible/modules/extras
- url = https://github.com/ansible/ansible-modules-extras
diff --git a/v1/README.md b/v1/README.md
deleted file mode 100644
index 011851da06..0000000000
--- a/v1/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-This is dead code, it is here for convenience for those testing current devel so as to ascertain if a bug was introduced in the v2 rewrite or was preexisting in the 1.x codebase.
-Using this code should be equivalent of checking out the v1_last tag, which was devel at a point between 1.9.1 and 1.9.2 releases.
-The stable-1.9 is the maintenance branch for the 1.9.x code, which might continue to diverge from the v1/ tree as bugs get fixed.
-
-DO NOT:
-
- * use this code as reference
- * make PRs against this code
- * expect this code to be shipped with the 2.0 version of ansible
-
-
diff --git a/v1/ansible/__init__.py b/v1/ansible/__init__.py
deleted file mode 100644
index ba5ca83b72..0000000000
--- a/v1/ansible/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-__version__ = '2.0.0'
-__author__ = 'Michael DeHaan'
diff --git a/v1/ansible/cache/__init__.py b/v1/ansible/cache/__init__.py
deleted file mode 100644
index 4100861c14..0000000000
--- a/v1/ansible/cache/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from collections import MutableMapping
-
-from ansible import utils
-from ansible import constants as C
-from ansible import errors
-
-
-class FactCache(MutableMapping):
-
- def __init__(self, *args, **kwargs):
- self._plugin = utils.plugins.cache_loader.get(C.CACHE_PLUGIN)
- if self._plugin is None:
- return
-
- def __getitem__(self, key):
- if key not in self:
- raise KeyError
- return self._plugin.get(key)
-
- def __setitem__(self, key, value):
- self._plugin.set(key, value)
-
- def __delitem__(self, key):
- self._plugin.delete(key)
-
- def __contains__(self, key):
- return self._plugin.contains(key)
-
- def __iter__(self):
- return iter(self._plugin.keys())
-
- def __len__(self):
- return len(self._plugin.keys())
-
- def copy(self):
- """ Return a primitive copy of the keys and values from the cache. """
- return dict([(k, v) for (k, v) in self.iteritems()])
-
- def keys(self):
- return self._plugin.keys()
-
- def flush(self):
- """ Flush the fact cache of all keys. """
- self._plugin.flush()
diff --git a/v1/ansible/cache/base.py b/v1/ansible/cache/base.py
deleted file mode 100644
index b6254cdfd4..0000000000
--- a/v1/ansible/cache/base.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import exceptions
-
-class BaseCacheModule(object):
-
- def get(self, key):
- raise exceptions.NotImplementedError
-
- def set(self, key, value):
- raise exceptions.NotImplementedError
-
- def keys(self):
- raise exceptions.NotImplementedError
-
- def contains(self, key):
- raise exceptions.NotImplementedError
-
- def delete(self, key):
- raise exceptions.NotImplementedError
-
- def flush(self):
- raise exceptions.NotImplementedError
-
- def copy(self):
- raise exceptions.NotImplementedError
diff --git a/v1/ansible/cache/jsonfile.py b/v1/ansible/cache/jsonfile.py
deleted file mode 100644
index 0bade893a8..0000000000
--- a/v1/ansible/cache/jsonfile.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import time
-import errno
-import codecs
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-from ansible import constants as C
-from ansible import utils
-from ansible.cache.base import BaseCacheModule
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by json files.
- """
- def __init__(self, *args, **kwargs):
-
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._cache = {}
- self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
- if not self._cache_dir:
- utils.exit("error, fact_caching_connection is not set, cannot use fact cache")
-
- if not os.path.exists(self._cache_dir):
- try:
- os.makedirs(self._cache_dir)
- except (OSError,IOError), e:
- utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
- return None
-
- def get(self, key):
-
- if key in self._cache:
- return self._cache.get(key)
-
- if self.has_expired(key):
- raise KeyError
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'r', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
- else:
- value = json.load(f)
- self._cache[key] = value
- return value
- finally:
- f.close()
-
- def set(self, key, value):
-
- self._cache[key] = value
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- f = codecs.open(cachefile, 'w', encoding='utf-8')
- except (OSError,IOError), e:
- utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
- else:
- f.write(utils.jsonify(value))
- finally:
- f.close()
-
- def has_expired(self, key):
-
- cachefile = "%s/%s" % (self._cache_dir, key)
- try:
- st = os.stat(cachefile)
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- if time.time() - st.st_mtime <= self._timeout:
- return False
-
- if key in self._cache:
- del self._cache[key]
- return True
-
- def keys(self):
- keys = []
- for k in os.listdir(self._cache_dir):
- if not (k.startswith('.') or self.has_expired(k)):
- keys.append(k)
- return keys
-
- def contains(self, key):
- cachefile = "%s/%s" % (self._cache_dir, key)
-
- if key in self._cache:
- return True
-
- if self.has_expired(key):
- return False
- try:
- st = os.stat(cachefile)
- return True
- except (OSError,IOError), e:
- if e.errno == errno.ENOENT:
- return False
- else:
- utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
-
- def delete(self, key):
- del self._cache[key]
- try:
- os.remove("%s/%s" % (self._cache_dir, key))
- except (OSError,IOError), e:
- pass #TODO: only pass on non existing?
-
- def flush(self):
- self._cache = {}
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
diff --git a/v1/ansible/cache/memcached.py b/v1/ansible/cache/memcached.py
deleted file mode 100644
index ea922434b5..0000000000
--- a/v1/ansible/cache/memcached.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import collections
-import os
-import sys
-import time
-import threading
-from itertools import chain
-
-from ansible import constants as C
-from ansible.cache.base import BaseCacheModule
-
-try:
- import memcache
-except ImportError:
- print 'python-memcached is required for the memcached fact cache'
- sys.exit(1)
-
-
-class ProxyClientPool(object):
- """
- Memcached connection pooling for thread/fork safety. Inspired by py-redis
- connection pool.
-
- Available connections are maintained in a deque and released in a FIFO manner.
- """
-
- def __init__(self, *args, **kwargs):
- self.max_connections = kwargs.pop('max_connections', 1024)
- self.connection_args = args
- self.connection_kwargs = kwargs
- self.reset()
-
- def reset(self):
- self.pid = os.getpid()
- self._num_connections = 0
- self._available_connections = collections.deque(maxlen=self.max_connections)
- self._locked_connections = set()
- self._lock = threading.Lock()
-
- def _check_safe(self):
- if self.pid != os.getpid():
- with self._lock:
- if self.pid == os.getpid():
- # bail out - another thread already acquired the lock
- return
- self.disconnect_all()
- self.reset()
-
- def get_connection(self):
- self._check_safe()
- try:
- connection = self._available_connections.popleft()
- except IndexError:
- connection = self.create_connection()
- self._locked_connections.add(connection)
- return connection
-
- def create_connection(self):
- if self._num_connections >= self.max_connections:
- raise RuntimeError("Too many memcached connections")
- self._num_connections += 1
- return memcache.Client(*self.connection_args, **self.connection_kwargs)
-
- def release_connection(self, connection):
- self._check_safe()
- self._locked_connections.remove(connection)
- self._available_connections.append(connection)
-
- def disconnect_all(self):
- for conn in chain(self._available_connections, self._locked_connections):
- conn.disconnect_all()
-
- def __getattr__(self, name):
- def wrapped(*args, **kwargs):
- return self._proxy_client(name, *args, **kwargs)
- return wrapped
-
- def _proxy_client(self, name, *args, **kwargs):
- conn = self.get_connection()
-
- try:
- return getattr(conn, name)(*args, **kwargs)
- finally:
- self.release_connection(conn)
-
-
-class CacheModuleKeys(collections.MutableSet):
- """
- A set subclass that keeps track of insertion time and persists
- the set in memcached.
- """
- PREFIX = 'ansible_cache_keys'
-
- def __init__(self, cache, *args, **kwargs):
- self._cache = cache
- self._keyset = dict(*args, **kwargs)
-
- def __contains__(self, key):
- return key in self._keyset
-
- def __iter__(self):
- return iter(self._keyset)
-
- def __len__(self):
- return len(self._keyset)
-
- def add(self, key):
- self._keyset[key] = time.time()
- self._cache.set(self.PREFIX, self._keyset)
-
- def discard(self, key):
- del self._keyset[key]
- self._cache.set(self.PREFIX, self._keyset)
-
- def remove_by_timerange(self, s_min, s_max):
- for k in self._keyset.keys():
- t = self._keyset[k]
- if s_min < t < s_max:
- del self._keyset[k]
- self._cache.set(self.PREFIX, self._keyset)
-
-
-class CacheModule(BaseCacheModule):
-
- def __init__(self, *args, **kwargs):
- if C.CACHE_PLUGIN_CONNECTION:
- connection = C.CACHE_PLUGIN_CONNECTION.split(',')
- else:
- connection = ['127.0.0.1:11211']
-
- self._timeout = C.CACHE_PLUGIN_TIMEOUT
- self._prefix = C.CACHE_PLUGIN_PREFIX
- self._cache = ProxyClientPool(connection, debug=0)
- self._keys = CacheModuleKeys(self._cache, self._cache.get(CacheModuleKeys.PREFIX) or [])
-
- def _make_key(self, key):
- return "{0}{1}".format(self._prefix, key)
-
- def _expire_keys(self):
- if self._timeout > 0:
- expiry_age = time.time() - self._timeout
- self._keys.remove_by_timerange(0, expiry_age)
-
- def get(self, key):
- value = self._cache.get(self._make_key(key))
- # guard against the key not being removed from the keyset;
- # this could happen in cases where the timeout value is changed
- # between invocations
- if value is None:
- self.delete(key)
- raise KeyError
- return value
-
- def set(self, key, value):
- self._cache.set(self._make_key(key), value, time=self._timeout, min_compress_len=1)
- self._keys.add(key)
-
- def keys(self):
- self._expire_keys()
- return list(iter(self._keys))
-
- def contains(self, key):
- self._expire_keys()
- return key in self._keys
-
- def delete(self, key):
- self._cache.delete(self._make_key(key))
- self._keys.discard(key)
-
- def flush(self):
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- return self._keys.copy()
diff --git a/v1/ansible/cache/memory.py b/v1/ansible/cache/memory.py
deleted file mode 100644
index 735ed32893..0000000000
--- a/v1/ansible/cache/memory.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.cache.base import BaseCacheModule
-
-class CacheModule(BaseCacheModule):
-
- def __init__(self, *args, **kwargs):
- self._cache = {}
-
- def get(self, key):
- return self._cache.get(key)
-
- def set(self, key, value):
- self._cache[key] = value
-
- def keys(self):
- return self._cache.keys()
-
- def contains(self, key):
- return key in self._cache
-
- def delete(self, key):
- del self._cache[key]
-
- def flush(self):
- self._cache = {}
-
- def copy(self):
- return self._cache.copy()
diff --git a/v1/ansible/cache/redis.py b/v1/ansible/cache/redis.py
deleted file mode 100644
index 7ae5ef74c1..0000000000
--- a/v1/ansible/cache/redis.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# (c) 2014, Brian Coca, Josh Drake, et al
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-import collections
-# FIXME: can we store these as something else before we ship it?
-import sys
-import time
-
-try:
- import simplejson as json
-except ImportError:
- import json
-
-from ansible import constants as C
-from ansible.utils import jsonify
-from ansible.cache.base import BaseCacheModule
-
-try:
- from redis import StrictRedis
-except ImportError:
- print "The 'redis' python module is required, 'pip install redis'"
- sys.exit(1)
-
-class CacheModule(BaseCacheModule):
- """
- A caching module backed by redis.
-
- Keys are maintained in a zset with their score being the timestamp
- when they are inserted. This allows for the usage of 'zremrangebyscore'
- to expire keys. This mechanism is used or a pattern matched 'scan' for
- performance.
- """
- def __init__(self, *args, **kwargs):
- if C.CACHE_PLUGIN_CONNECTION:
- connection = C.CACHE_PLUGIN_CONNECTION.split(':')
- else:
- connection = []
-
- self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
- self._prefix = C.CACHE_PLUGIN_PREFIX
- self._cache = StrictRedis(*connection)
- self._keys_set = 'ansible_cache_keys'
-
- def _make_key(self, key):
- return self._prefix + key
-
- def get(self, key):
- value = self._cache.get(self._make_key(key))
- # guard against the key not being removed from the zset;
- # this could happen in cases where the timeout value is changed
- # between invocations
- if value is None:
- self.delete(key)
- raise KeyError
- return json.loads(value)
-
- def set(self, key, value):
- value2 = jsonify(value)
- if self._timeout > 0: # a timeout of 0 is handled as meaning 'never expire'
- self._cache.setex(self._make_key(key), int(self._timeout), value2)
- else:
- self._cache.set(self._make_key(key), value2)
-
- self._cache.zadd(self._keys_set, time.time(), key)
-
- def _expire_keys(self):
- if self._timeout > 0:
- expiry_age = time.time() - self._timeout
- self._cache.zremrangebyscore(self._keys_set, 0, expiry_age)
-
- def keys(self):
- self._expire_keys()
- return self._cache.zrange(self._keys_set, 0, -1)
-
- def contains(self, key):
- self._expire_keys()
- return (self._cache.zrank(self._keys_set, key) >= 0)
-
- def delete(self, key):
- self._cache.delete(self._make_key(key))
- self._cache.zrem(self._keys_set, key)
-
- def flush(self):
- for key in self.keys():
- self.delete(key)
-
- def copy(self):
- # FIXME: there is probably a better way to do this in redis
- ret = dict()
- for key in self.keys():
- ret[key] = self.get(key)
- return ret
diff --git a/v1/ansible/callback_plugins/__init__.py b/v1/ansible/callback_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/callback_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/callback_plugins/noop.py b/v1/ansible/callback_plugins/noop.py
deleted file mode 100644
index b5d5886874..0000000000
--- a/v1/ansible/callback_plugins/noop.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-class CallbackModule(object):
-
- """
- this is an example ansible callback file that does nothing. You can drop
- other classes in the same directory to define your own handlers. Methods
- you do not use can be omitted. If self.disabled is set to True, the plugin
- methods will not be called.
-
- example uses include: logging, emailing, storing info, etc
- """
-
- def __init__(self):
- #if foo:
- # self.disabled = True
- pass
-
- def on_any(self, *args, **kwargs):
- pass
-
- def runner_on_failed(self, host, res, ignore_errors=False):
- pass
-
- def runner_on_ok(self, host, res):
- pass
-
- def runner_on_skipped(self, host, item=None):
- pass
-
- def runner_on_unreachable(self, host, res):
- pass
-
- def runner_on_no_hosts(self):
- pass
-
- def runner_on_async_poll(self, host, res, jid, clock):
- pass
-
- def runner_on_async_ok(self, host, res, jid):
- pass
-
- def runner_on_async_failed(self, host, res, jid):
- pass
-
- def playbook_on_start(self):
- pass
-
- def playbook_on_notify(self, host, handler):
- pass
-
- def playbook_on_no_hosts_matched(self):
- pass
-
- def playbook_on_no_hosts_remaining(self):
- pass
-
- def playbook_on_task_start(self, name, is_conditional):
- pass
-
- def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
- pass
-
- def playbook_on_setup(self):
- pass
-
- def playbook_on_import_for_host(self, host, imported_file):
- pass
-
- def playbook_on_not_import_for_host(self, host, missing_file):
- pass
-
- def playbook_on_play_start(self, name):
- pass
-
- def playbook_on_stats(self, stats):
- pass
-
diff --git a/v1/ansible/callbacks.py b/v1/ansible/callbacks.py
deleted file mode 100644
index a7d2283cf0..0000000000
--- a/v1/ansible/callbacks.py
+++ /dev/null
@@ -1,729 +0,0 @@
-# (C) 2012-2014, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import utils
-import sys
-import getpass
-import os
-import subprocess
-import random
-import fnmatch
-import tempfile
-import fcntl
-import constants
-import locale
-from ansible.color import stringc
-from ansible.module_utils import basic
-from ansible.utils.unicode import to_unicode, to_bytes
-
-import logging
-if constants.DEFAULT_LOG_PATH != '':
- path = constants.DEFAULT_LOG_PATH
-
- if (os.path.exists(path) and not os.access(path, os.W_OK)) and not os.access(os.path.dirname(path), os.W_OK):
- sys.stderr.write("log file at %s is not writeable, aborting\n" % path)
- sys.exit(1)
-
-
- logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
- mypid = str(os.getpid())
- user = getpass.getuser()
- logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
-
-callback_plugins = []
-
-def load_callback_plugins():
- global callback_plugins
- callback_plugins = [x for x in utils.plugins.callback_loader.all()]
-
-def get_cowsay_info():
- if constants.ANSIBLE_NOCOWS:
- return (None, None)
- cowsay = None
- if os.path.exists("/usr/bin/cowsay"):
- cowsay = "/usr/bin/cowsay"
- elif os.path.exists("/usr/games/cowsay"):
- cowsay = "/usr/games/cowsay"
- elif os.path.exists("/usr/local/bin/cowsay"):
- # BSD path for cowsay
- cowsay = "/usr/local/bin/cowsay"
- elif os.path.exists("/opt/local/bin/cowsay"):
- # MacPorts path for cowsay
- cowsay = "/opt/local/bin/cowsay"
-
- noncow = os.getenv("ANSIBLE_COW_SELECTION",None)
- if cowsay and noncow == 'random':
- cmd = subprocess.Popen([cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- cows = out.split()
- cows.append(False)
- noncow = random.choice(cows)
- return (cowsay, noncow)
-
-cowsay, noncow = get_cowsay_info()
-
-def log_lockfile():
- # create the path for the lockfile and open it
- tempdir = tempfile.gettempdir()
- uid = os.getuid()
- path = os.path.join(tempdir, ".ansible-lock.%s" % uid)
- lockfile = open(path, 'w')
- # use fcntl to set FD_CLOEXEC on the file descriptor,
- # so that we don't leak the file descriptor later
- lockfile_fd = lockfile.fileno()
- old_flags = fcntl.fcntl(lockfile_fd, fcntl.F_GETFD)
- fcntl.fcntl(lockfile_fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
- return lockfile
-
-LOG_LOCK = log_lockfile()
-
-def log_flock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_EX)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_EX)
- except OSError:
- pass
-
-
-def log_unflock(runner):
- if runner is not None:
- try:
- fcntl.lockf(runner.output_lockfile, fcntl.LOCK_UN)
- except OSError:
- # already got closed?
- pass
- else:
- try:
- fcntl.lockf(LOG_LOCK, fcntl.LOCK_UN)
- except OSError:
- pass
-
-def set_playbook(callback, playbook):
- ''' used to notify callback plugins of playbook context '''
- callback.playbook = playbook
- for callback_plugin in callback_plugins:
- callback_plugin.playbook = playbook
-
-def set_play(callback, play):
- ''' used to notify callback plugins of context '''
- callback.play = play
- for callback_plugin in callback_plugins:
- callback_plugin.play = play
-
-def set_task(callback, task):
- ''' used to notify callback plugins of context '''
- callback.task = task
- for callback_plugin in callback_plugins:
- callback_plugin.task = task
-
-def display(msg, color=None, stderr=False, screen_only=False, log_only=False, runner=None):
- # prevent a very rare case of interlaced multiprocess I/O
- log_flock(runner)
- msg2 = msg
- if color:
- msg2 = stringc(msg, color)
- if not log_only:
- if not stderr:
- try:
- print msg2
- except UnicodeEncodeError:
- print msg2.encode('utf-8')
- else:
- try:
- print >>sys.stderr, msg2
- except UnicodeEncodeError:
- print >>sys.stderr, msg2.encode('utf-8')
- if constants.DEFAULT_LOG_PATH != '':
- while msg.startswith("\n"):
- msg = msg.replace("\n","")
- if not screen_only:
- if color == 'red':
- logger.error(msg)
- else:
- logger.info(msg)
- log_unflock(runner)
-
-def call_callback_module(method_name, *args, **kwargs):
-
- for callback_plugin in callback_plugins:
- # a plugin that set self.disabled to True will not be called
- # see osx_say.py example for such a plugin
- if getattr(callback_plugin, 'disabled', False):
- continue
- methods = [
- getattr(callback_plugin, method_name, None),
- getattr(callback_plugin, 'on_any', None)
- ]
- for method in methods:
- if method is not None:
- method(*args, **kwargs)
-
-def vv(msg, host=None):
- return verbose(msg, host=host, caplevel=1)
-
-def vvv(msg, host=None):
- return verbose(msg, host=host, caplevel=2)
-
-def vvvv(msg, host=None):
- return verbose(msg, host=host, caplevel=3)
-
-def verbose(msg, host=None, caplevel=2):
- msg = utils.sanitize_output(msg)
- if utils.VERBOSITY > caplevel:
- if host is None:
- display(msg, color='blue')
- else:
- display("<%s> %s" % (host, msg), color='blue')
-
-class AggregateStats(object):
- ''' holds stats about per-host activity during playbook runs '''
-
- def __init__(self):
-
- self.processed = {}
- self.failures = {}
- self.ok = {}
- self.dark = {}
- self.changed = {}
- self.skipped = {}
-
- def _increment(self, what, host):
- ''' helper function to bump a statistic '''
-
- self.processed[host] = 1
- prev = (getattr(self, what)).get(host, 0)
- getattr(self, what)[host] = prev+1
-
- def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
- ''' walk through all results and increment stats '''
-
- for (host, value) in runner_results.get('contacted', {}).iteritems():
- if not ignore_errors and (('failed' in value and bool(value['failed'])) or
- ('failed_when_result' in value and [value['failed_when_result']] or ['rc' in value and value['rc'] != 0])[0]):
- self._increment('failures', host)
- elif 'skipped' in value and bool(value['skipped']):
- self._increment('skipped', host)
- elif 'changed' in value and bool(value['changed']):
- if not setup and not poll:
- self._increment('changed', host)
- self._increment('ok', host)
- else:
- if not poll or ('finished' in value and bool(value['finished'])):
- self._increment('ok', host)
-
- for (host, value) in runner_results.get('dark', {}).iteritems():
- self._increment('dark', host)
-
-
- def summarize(self, host):
- ''' return information about a particular host '''
-
- return dict(
- ok = self.ok.get(host, 0),
- failures = self.failures.get(host, 0),
- unreachable = self.dark.get(host,0),
- changed = self.changed.get(host, 0),
- skipped = self.skipped.get(host, 0)
- )
-
-########################################################################
-
-def regular_generic_msg(hostname, result, oneline, caption):
- ''' output on the result of a module run that is not command '''
-
- if not oneline:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result,format=True))
- else:
- return "%s | %s >> %s\n" % (hostname, caption, utils.jsonify(result))
-
-
-def banner_cowsay(msg):
-
- if ": [" in msg:
- msg = msg.replace("[","")
- if msg.endswith("]"):
- msg = msg[:-1]
- runcmd = [cowsay,"-W", "60"]
- if noncow:
- runcmd.append('-f')
- runcmd.append(noncow)
- runcmd.append(msg)
- cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- return "%s\n" % out
-
-def banner_normal(msg):
-
- width = 78 - len(msg)
- if width < 3:
- width = 3
- filler = "*" * width
- return "\n%s %s " % (msg, filler)
-
-def banner(msg):
- if cowsay:
- try:
- return banner_cowsay(msg)
- except OSError:
- # somebody cleverly deleted cowsay or something during the PB run. heh.
- return banner_normal(msg)
- return banner_normal(msg)
-
-def command_generic_msg(hostname, result, oneline, caption):
- ''' output the result of a command run '''
-
- rc = result.get('rc', '0')
- stdout = result.get('stdout','')
- stderr = result.get('stderr', '')
- msg = result.get('msg', '')
-
- hostname = hostname.encode('utf-8')
- caption = caption.encode('utf-8')
-
- if not oneline:
- buf = "%s | %s | rc=%s >>\n" % (hostname, caption, result.get('rc',0))
- if stdout:
- buf += stdout
- if stderr:
- buf += stderr
- if msg:
- buf += msg
- return buf + "\n"
- else:
- if stderr:
- return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, rc, stdout, stderr)
- else:
- return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, rc, stdout)
-
-def host_report_msg(hostname, module_name, result, oneline):
- ''' summarize the JSON results for a particular host '''
-
- failed = utils.is_failed(result)
- msg = ('', None)
- if module_name in [ 'command', 'shell', 'raw' ] and 'ansible_job_id' not in result and result.get('parsed',True) != False:
- if not failed:
- msg = (command_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (command_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- else:
- if not failed:
- msg = (regular_generic_msg(hostname, result, oneline, 'success'), 'green')
- else:
- msg = (regular_generic_msg(hostname, result, oneline, 'FAILED'), 'red')
- return msg
-
-###############################################
-
-class DefaultRunnerCallbacks(object):
- ''' no-op callbacks for API usage of Runner() if no callbacks are specified '''
-
- def __init__(self):
- pass
-
- def on_failed(self, host, res, ignore_errors=False):
- call_callback_module('runner_on_failed', host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- call_callback_module('runner_on_ok', host, res)
-
- def on_skipped(self, host, item=None):
- call_callback_module('runner_on_skipped', host, item=item)
-
- def on_unreachable(self, host, res):
- call_callback_module('runner_on_unreachable', host, res)
-
- def on_no_hosts(self):
- call_callback_module('runner_on_no_hosts')
-
- def on_async_poll(self, host, res, jid, clock):
- call_callback_module('runner_on_async_poll', host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- call_callback_module('runner_on_async_ok', host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- call_callback_module('runner_on_async_failed', host, res, jid)
-
- def on_file_diff(self, host, diff):
- call_callback_module('runner_on_file_diff', host, diff)
-
-########################################################################
-
-class CliRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks for use by /usr/bin/ansible '''
-
- def __init__(self):
- # set by /usr/bin/ansible later
- self.options = None
- self._async_notified = {}
-
- def on_failed(self, host, res, ignore_errors=False):
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_failed(host, res, ignore_errors=ignore_errors)
-
- def on_ok(self, host, res):
- # hide magic variables used for ansible-playbook
- res.pop('verbose_override', None)
- res.pop('verbose_always', None)
-
- self._on_any(host,res)
- super(CliRunnerCallbacks, self).on_ok(host, res)
-
- def on_unreachable(self, host, res):
- if type(res) == dict:
- res = res.get('msg','')
- display("%s | FAILED => %s" % (host, res), stderr=True, color='red', runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(
- self.options.tree, host,
- utils.jsonify(dict(failed=True, msg=res),format=True)
- )
- super(CliRunnerCallbacks, self).on_unreachable(host, res)
-
- def on_skipped(self, host, item=None):
- display("%s | skipped" % (host), runner=self.runner)
- super(CliRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("no hosts matched\n", stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- display("<job %s> polling on %s, %ss remaining" % (jid, host, clock), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_poll(host, res, jid, clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- display("<job %s> finished on %s => %s"%(jid, host, utils.jsonify(res,format=True)), runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- display("<job %s> FAILED on %s => %s"%(jid, host, utils.jsonify(res,format=True)), color='red', stderr=True, runner=self.runner)
- super(CliRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def _on_any(self, host, result):
- result2 = result.copy()
- result2.pop('invocation', None)
- (msg, color) = host_report_msg(host, self.options.module_name, result2, self.options.one_line)
- display(msg, color=color, runner=self.runner)
- if self.options.tree:
- utils.write_tree_file(self.options.tree, host, utils.jsonify(result2,format=True))
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(CliRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookRunnerCallbacks(DefaultRunnerCallbacks):
- ''' callbacks used for Runner() from /usr/bin/ansible-playbook '''
-
- def __init__(self, stats, verbose=None):
-
- if verbose is None:
- verbose = utils.VERBOSITY
-
- self.verbose = verbose
- self.stats = stats
- self._async_notified = {}
-
- def on_unreachable(self, host, results):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = None
- if type(results) == dict:
- item = results.get('item', None)
- if isinstance(item, unicode):
- item = utils.unicode.to_bytes(item)
- results = basic.json_dict_unicode_to_bytes(results)
- else:
- results = utils.unicode.to_bytes(results)
- host = utils.unicode.to_bytes(host)
- if item:
- msg = "fatal: [%s] => (item=%s) => %s" % (host, item, results)
- else:
- msg = "fatal: [%s] => %s" % (host, results)
- display(msg, color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_unreachable(host, results)
-
- def on_failed(self, host, results, ignore_errors=False):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- results2 = results.copy()
- results2.pop('invocation', None)
-
- item = results2.get('item', None)
- parsed = results2.get('parsed', True)
- module_msg = ''
- if not parsed:
- module_msg = results2.pop('msg', None)
- stderr = results2.pop('stderr', None)
- stdout = results2.pop('stdout', None)
- returned_msg = results2.pop('msg', None)
-
- results2['task'] = self.task.name
- results2['role'] = self.task.role_name
- results2['playbook'] = self.playbook.filename
-
- if item:
- msg = "failed: [%s] => (item=%s) => %s" % (host, item, utils.jsonify(results2))
- else:
- msg = "failed: [%s] => %s" % (host, utils.jsonify(results2))
- display(msg, color='red', runner=self.runner)
-
- if stderr:
- display("stderr: %s" % stderr, color='red', runner=self.runner)
- if stdout:
- display("stdout: %s" % stdout, color='red', runner=self.runner)
- if returned_msg:
- display("msg: %s" % returned_msg, color='red', runner=self.runner)
- if not parsed and module_msg:
- display(module_msg, color='red', runner=self.runner)
- if ignore_errors:
- display("...ignoring", color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_failed(host, results, ignore_errors=ignore_errors)
-
- def on_ok(self, host, host_result):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- item = host_result.get('item', None)
-
- host_result2 = host_result.copy()
- host_result2.pop('invocation', None)
- verbose_always = host_result2.pop('verbose_always', False)
- changed = host_result.get('changed', False)
- ok_or_changed = 'ok'
- if changed:
- ok_or_changed = 'changed'
-
- # show verbose output for non-setup module results if --verbose is used
- msg = ''
- if (not self.verbose or host_result2.get("verbose_override",None) is not
- None) and not verbose_always:
- if item:
- msg = "%s: [%s] => (item=%s)" % (ok_or_changed, host, item)
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result:
- msg = "%s: [%s]" % (ok_or_changed, host)
- else:
- # verbose ...
- if item:
- msg = "%s: [%s] => (item=%s) => %s" % (ok_or_changed, host, item, utils.jsonify(host_result2, format=verbose_always))
- else:
- if 'ansible_job_id' not in host_result or 'finished' in host_result2:
- msg = "%s: [%s] => %s" % (ok_or_changed, host, utils.jsonify(host_result2, format=verbose_always))
-
- if msg != '':
- if not changed:
- display(msg, color='green', runner=self.runner)
- else:
- display(msg, color='yellow', runner=self.runner)
- if constants.COMMAND_WARNINGS and 'warnings' in host_result2 and host_result2['warnings']:
- for warning in host_result2['warnings']:
- display("warning: %s" % warning, color='purple', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_ok(host, host_result)
-
- def on_skipped(self, host, item=None):
- if self.runner.delegate_to:
- host = '%s -> %s' % (host, self.runner.delegate_to)
-
- if constants.DISPLAY_SKIPPED_HOSTS:
- msg = ''
- if item:
- msg = "skipping: [%s] => (item=%s)" % (host, item)
- else:
- msg = "skipping: [%s]" % host
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_skipped(host, item)
-
- def on_no_hosts(self):
- display("FATAL: no hosts matched or all hosts have already failed -- aborting\n", color='red', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_no_hosts()
-
- def on_async_poll(self, host, res, jid, clock):
- if jid not in self._async_notified:
- self._async_notified[jid] = clock + 1
- if self._async_notified[jid] > clock:
- self._async_notified[jid] = clock
- msg = "<job %s> polling, %ss remaining"%(jid, clock)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_poll(host,res,jid,clock)
-
- def on_async_ok(self, host, res, jid):
- if jid:
- msg = "<job %s> finished on %s"%(jid, host)
- display(msg, color='cyan', runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_ok(host, res, jid)
-
- def on_async_failed(self, host, res, jid):
- msg = "<job %s> FAILED on %s" % (jid, host)
- display(msg, color='red', stderr=True, runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_async_failed(host,res,jid)
-
- def on_file_diff(self, host, diff):
- display(utils.get_diff(diff), runner=self.runner)
- super(PlaybookRunnerCallbacks, self).on_file_diff(host, diff)
-
-########################################################################
-
-class PlaybookCallbacks(object):
- ''' playbook.py callbacks used by /usr/bin/ansible-playbook '''
-
- def __init__(self, verbose=False):
-
- self.verbose = verbose
-
- def on_start(self):
- call_callback_module('playbook_on_start')
-
- def on_notify(self, host, handler):
- call_callback_module('playbook_on_notify', host, handler)
-
- def on_no_hosts_matched(self):
- display("skipping: no hosts matched", color='cyan')
- call_callback_module('playbook_on_no_hosts_matched')
-
- def on_no_hosts_remaining(self):
- display("\nFATAL: all hosts have already failed -- aborting", color='red')
- call_callback_module('playbook_on_no_hosts_remaining')
-
- def on_task_start(self, name, is_conditional):
- name = utils.unicode.to_bytes(name)
- msg = "TASK: [%s]" % name
- if is_conditional:
- msg = "NOTIFIED: [%s]" % name
-
- if hasattr(self, 'start_at'):
- self.start_at = utils.unicode.to_bytes(self.start_at)
- if name == self.start_at or fnmatch.fnmatch(name, self.start_at):
- # we found out match, we can get rid of this now
- del self.start_at
- elif self.task.role_name:
- # handle tasks prefixed with rolenames
- actual_name = name.split('|', 1)[1].lstrip()
- if actual_name == self.start_at or fnmatch.fnmatch(actual_name, self.start_at):
- del self.start_at
-
- if hasattr(self, 'start_at'): # we still have start_at so skip the task
- self.skip_task = True
- elif hasattr(self, 'step') and self.step:
- if isinstance(name, str):
- name = utils.unicode.to_unicode(name)
- msg = u'Perform task: %s (y/n/c): ' % name
- if sys.stdout.encoding:
- msg = to_bytes(msg, sys.stdout.encoding)
- else:
- msg = to_bytes(msg)
- resp = raw_input(msg)
- if resp.lower() in ['y','yes']:
- self.skip_task = False
- display(banner(msg))
- elif resp.lower() in ['c', 'continue']:
- self.skip_task = False
- self.step = False
- display(banner(msg))
- else:
- self.skip_task = True
- else:
- self.skip_task = False
- display(banner(msg))
-
- call_callback_module('playbook_on_task_start', name, is_conditional)
-
- def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
-
- if prompt and default is not None:
- msg = "%s [%s]: " % (prompt, default)
- elif prompt:
- msg = "%s: " % prompt
- else:
- msg = 'input for %s: ' % varname
-
- def do_prompt(prompt, private):
- if sys.stdout.encoding:
- msg = prompt.encode(sys.stdout.encoding)
- else:
- # when piping the output, or at other times when stdout
- # may not be the standard file descriptor, the stdout
- # encoding may not be set, so default to something sane
- msg = prompt.encode(locale.getpreferredencoding())
- if private:
- return getpass.getpass(msg)
- return raw_input(msg)
-
-
- if confirm:
- while True:
- result = do_prompt(msg, private)
- second = do_prompt("confirm " + msg, private)
- if result == second:
- break
- display("***** VALUES ENTERED DO NOT MATCH ****")
- else:
- result = do_prompt(msg, private)
-
- # if result is false and default is not None
- if not result and default is not None:
- result = default
-
-
- if encrypt:
- result = utils.do_encrypt(result, encrypt, salt_size, salt)
-
- # handle utf-8 chars
- result = to_unicode(result, errors='strict')
- call_callback_module( 'playbook_on_vars_prompt', varname, private=private, prompt=prompt,
- encrypt=encrypt, confirm=confirm, salt_size=salt_size, salt=None, default=default
- )
-
- return result
-
- def on_setup(self):
- display(banner("GATHERING FACTS"))
- call_callback_module('playbook_on_setup')
-
- def on_import_for_host(self, host, imported_file):
- msg = "%s: importing %s" % (host, imported_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_import_for_host', host, imported_file)
-
- def on_not_import_for_host(self, host, missing_file):
- msg = "%s: not importing file: %s" % (host, missing_file)
- display(msg, color='cyan')
- call_callback_module('playbook_on_not_import_for_host', host, missing_file)
-
- def on_play_start(self, name):
- display(banner("PLAY [%s]" % name))
- call_callback_module('playbook_on_play_start', name)
-
- def on_stats(self, stats):
- call_callback_module('playbook_on_stats', stats)
-
-
diff --git a/v1/ansible/color.py b/v1/ansible/color.py
deleted file mode 100644
index b3127d85fe..0000000000
--- a/v1/ansible/color.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import constants
-
-ANSIBLE_COLOR=True
-if constants.ANSIBLE_NOCOLOR:
- ANSIBLE_COLOR=False
-elif not hasattr(sys.stdout, 'isatty') or not sys.stdout.isatty():
- ANSIBLE_COLOR=False
-else:
- try:
- import curses
- curses.setupterm()
- if curses.tigetnum('colors') < 0:
- ANSIBLE_COLOR=False
- except ImportError:
- # curses library was not found
- pass
- except curses.error:
- # curses returns an error (e.g. could not find terminal)
- ANSIBLE_COLOR=False
-
-if constants.ANSIBLE_FORCE_COLOR:
- ANSIBLE_COLOR=True
-
-# --- begin "pretty"
-#
-# pretty - A miniature library that provides a Python print and stdout
-# wrapper that makes colored terminal text easier to use (e.g. without
-# having to mess around with ANSI escape sequences). This code is public
-# domain - there is no license except that you must leave this header.
-#
-# Copyright (C) 2008 Brian Nez <thedude at bri1 dot com>
-#
-# http://nezzen.net/2008/06/23/colored-text-in-python-using-ansi-escape-sequences/
-
-codeCodes = {
- 'black': '0;30', 'bright gray': '0;37',
- 'blue': '0;34', 'white': '1;37',
- 'green': '0;32', 'bright blue': '1;34',
- 'cyan': '0;36', 'bright green': '1;32',
- 'red': '0;31', 'bright cyan': '1;36',
- 'purple': '0;35', 'bright red': '1;31',
- 'yellow': '0;33', 'bright purple': '1;35',
- 'dark gray': '1;30', 'bright yellow': '1;33',
- 'normal': '0'
-}
-
-def stringc(text, color):
- """String in color."""
-
- if ANSIBLE_COLOR:
- return "\033["+codeCodes[color]+"m"+text+"\033[0m"
- else:
- return text
-
-# --- end "pretty"
-
diff --git a/v1/ansible/constants.py b/v1/ansible/constants.py
deleted file mode 100644
index 2cdc08d8ce..0000000000
--- a/v1/ansible/constants.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import sys
-import ConfigParser
-from string import ascii_letters, digits
-
-# copied from utils, avoid circular reference fun :)
-def mk_boolean(value):
- if value is None:
- return False
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def get_config(p, section, key, env_var, default, boolean=False, integer=False, floating=False, islist=False):
- ''' return a configuration variable with casting '''
- value = _get_config(p, section, key, env_var, default)
- if boolean:
- return mk_boolean(value)
- if value and integer:
- return int(value)
- if value and floating:
- return float(value)
- if value and islist:
- return [x.strip() for x in value.split(',')]
- return value
-
-def _get_config(p, section, key, env_var, default):
- ''' helper function for get_config '''
- if env_var is not None:
- value = os.environ.get(env_var, None)
- if value is not None:
- return value
- if p is not None:
- try:
- return p.get(section, key, raw=True)
- except:
- return default
- return default
-
-def load_config_file():
- ''' Load Config File order(first found is used): ENV, CWD, HOME, /etc/ansible '''
-
- p = ConfigParser.ConfigParser()
-
- path0 = os.getenv("ANSIBLE_CONFIG", None)
- if path0 is not None:
- path0 = os.path.expanduser(path0)
- path1 = os.getcwd() + "/ansible.cfg"
- path2 = os.path.expanduser("~/.ansible.cfg")
- path3 = "/etc/ansible/ansible.cfg"
-
- for path in [path0, path1, path2, path3]:
- if path is not None and os.path.exists(path):
- try:
- p.read(path)
- except ConfigParser.Error as e:
- print "Error reading config file: \n%s" % e
- sys.exit(1)
- return p
- return None
-
-def shell_expand_path(path):
- ''' shell_expand_path is needed as os.path.expanduser does not work
- when path is None, which is the default for ANSIBLE_PRIVATE_KEY_FILE '''
- if path:
- path = os.path.expanduser(os.path.expandvars(path))
- return path
-
-p = load_config_file()
-
-active_user = pwd.getpwuid(os.geteuid())[0]
-
-# check all of these extensions when looking for yaml files for things like
-# group variables -- really anything we can load
-YAML_FILENAME_EXTENSIONS = [ "", ".yml", ".yaml", ".json" ]
-
-# sections in config file
-DEFAULTS='defaults'
-
-# configurable things
-DEFAULT_HOST_LIST = shell_expand_path(get_config(p, DEFAULTS, 'inventory', 'ANSIBLE_INVENTORY', get_config(p, DEFAULTS,'hostfile','ANSIBLE_HOSTS', '/etc/ansible/hosts')))
-DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None)
-DEFAULT_ROLES_PATH = shell_expand_path(get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles'))
-DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '$HOME/.ansible/tmp')
-DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
-DEFAULT_PATTERN = get_config(p, DEFAULTS, 'pattern', None, '*')
-DEFAULT_FORKS = get_config(p, DEFAULTS, 'forks', 'ANSIBLE_FORKS', 5, integer=True)
-DEFAULT_MODULE_ARGS = get_config(p, DEFAULTS, 'module_args', 'ANSIBLE_MODULE_ARGS', '')
-DEFAULT_MODULE_LANG = get_config(p, DEFAULTS, 'module_lang', 'ANSIBLE_MODULE_LANG', 'en_US.UTF-8')
-DEFAULT_TIMEOUT = get_config(p, DEFAULTS, 'timeout', 'ANSIBLE_TIMEOUT', 10, integer=True)
-DEFAULT_POLL_INTERVAL = get_config(p, DEFAULTS, 'poll_interval', 'ANSIBLE_POLL_INTERVAL', 15, integer=True)
-DEFAULT_REMOTE_USER = get_config(p, DEFAULTS, 'remote_user', 'ANSIBLE_REMOTE_USER', active_user)
-DEFAULT_ASK_PASS = get_config(p, DEFAULTS, 'ask_pass', 'ANSIBLE_ASK_PASS', False, boolean=True)
-DEFAULT_PRIVATE_KEY_FILE = shell_expand_path(get_config(p, DEFAULTS, 'private_key_file', 'ANSIBLE_PRIVATE_KEY_FILE', None))
-DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, boolean=True)
-DEFAULT_REMOTE_PORT = get_config(p, DEFAULTS, 'remote_port', 'ANSIBLE_REMOTE_PORT', None, integer=True)
-DEFAULT_ASK_VAULT_PASS = get_config(p, DEFAULTS, 'ask_vault_pass', 'ANSIBLE_ASK_VAULT_PASS', False, boolean=True)
-DEFAULT_VAULT_PASSWORD_FILE = shell_expand_path(get_config(p, DEFAULTS, 'vault_password_file', 'ANSIBLE_VAULT_PASSWORD_FILE', None))
-DEFAULT_TRANSPORT = get_config(p, DEFAULTS, 'transport', 'ANSIBLE_TRANSPORT', 'smart')
-DEFAULT_SCP_IF_SSH = get_config(p, 'ssh_connection', 'scp_if_ssh', 'ANSIBLE_SCP_IF_SSH', False, boolean=True)
-DEFAULT_MANAGED_STR = get_config(p, DEFAULTS, 'ansible_managed', None, 'Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}')
-DEFAULT_SYSLOG_FACILITY = get_config(p, DEFAULTS, 'syslog_facility', 'ANSIBLE_SYSLOG_FACILITY', 'LOG_USER')
-DEFAULT_KEEP_REMOTE_FILES = get_config(p, DEFAULTS, 'keep_remote_files', 'ANSIBLE_KEEP_REMOTE_FILES', False, boolean=True)
-DEFAULT_SUDO = get_config(p, DEFAULTS, 'sudo', 'ANSIBLE_SUDO', False, boolean=True)
-DEFAULT_SUDO_USER = get_config(p, DEFAULTS, 'sudo_user', 'ANSIBLE_SUDO_USER', 'root')
-DEFAULT_SUDO_EXE = get_config(p, DEFAULTS, 'sudo_exe', 'ANSIBLE_SUDO_EXE', 'sudo')
-DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_FLAGS', '-H')
-DEFAULT_HASH_BEHAVIOUR = get_config(p, DEFAULTS, 'hash_behaviour', 'ANSIBLE_HASH_BEHAVIOUR', 'replace')
-DEFAULT_JINJA2_EXTENSIONS = get_config(p, DEFAULTS, 'jinja2_extensions', 'ANSIBLE_JINJA2_EXTENSIONS', None)
-DEFAULT_EXECUTABLE = get_config(p, DEFAULTS, 'executable', 'ANSIBLE_EXECUTABLE', '/bin/sh')
-DEFAULT_SU_EXE = get_config(p, DEFAULTS, 'su_exe', 'ANSIBLE_SU_EXE', 'su')
-DEFAULT_SU = get_config(p, DEFAULTS, 'su', 'ANSIBLE_SU', False, boolean=True)
-DEFAULT_SU_FLAGS = get_config(p, DEFAULTS, 'su_flags', 'ANSIBLE_SU_FLAGS', '')
-DEFAULT_SU_USER = get_config(p, DEFAULTS, 'su_user', 'ANSIBLE_SU_USER', 'root')
-DEFAULT_ASK_SU_PASS = get_config(p, DEFAULTS, 'ask_su_pass', 'ANSIBLE_ASK_SU_PASS', False, boolean=True)
-DEFAULT_GATHERING = get_config(p, DEFAULTS, 'gathering', 'ANSIBLE_GATHERING', 'implicit').lower()
-DEFAULT_LOG_PATH = shell_expand_path(get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', ''))
-
-# selinux
-DEFAULT_SELINUX_SPECIAL_FS = get_config(p, 'selinux', 'special_context_filesystems', None, 'fuse, nfs, vboxsf', islist=True)
-
-#TODO: get rid of ternary chain mess
-BECOME_METHODS = ['sudo','su','pbrun','pfexec','runas']
-BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'runas': ''}
-DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, boolean=True)
-DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
-DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER',default=None)
-DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pass', 'ANSIBLE_BECOME_ASK_PASS', False, boolean=True)
-# need to rethink impementing these 2
-DEFAULT_BECOME_EXE = None
-#DEFAULT_BECOME_EXE = get_config(p, DEFAULTS, 'become_exe', 'ANSIBLE_BECOME_EXE','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo')
-#DEFAULT_BECOME_FLAGS = get_config(p, DEFAULTS, 'become_flags', 'ANSIBLE_BECOME_FLAGS',DEFAULT_SUDO_FLAGS if DEFAULT_SUDO else DEFAULT_SU_FLAGS if DEFAULT_SU else '-H')
-
-
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action_plugins:/usr/share/ansible_plugins/action_plugins')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache_plugins:/usr/share/ansible_plugins/cache_plugins')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback_plugins:/usr/share/ansible_plugins/callback_plugins')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection_plugins:/usr/share/ansible_plugins/connection_plugins')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup_plugins:/usr/share/ansible_plugins/lookup_plugins')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars_plugins:/usr/share/ansible_plugins/vars_plugins')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter_plugins:/usr/share/ansible_plugins/filter_plugins')
-
-CACHE_PLUGIN = get_config(p, DEFAULTS, 'fact_caching', 'ANSIBLE_CACHE_PLUGIN', 'memory')
-CACHE_PLUGIN_CONNECTION = get_config(p, DEFAULTS, 'fact_caching_connection', 'ANSIBLE_CACHE_PLUGIN_CONNECTION', None)
-CACHE_PLUGIN_PREFIX = get_config(p, DEFAULTS, 'fact_caching_prefix', 'ANSIBLE_CACHE_PLUGIN_PREFIX', 'ansible_facts')
-CACHE_PLUGIN_TIMEOUT = get_config(p, DEFAULTS, 'fact_caching_timeout', 'ANSIBLE_CACHE_PLUGIN_TIMEOUT', 24 * 60 * 60, integer=True)
-
-ANSIBLE_FORCE_COLOR = get_config(p, DEFAULTS, 'force_color', 'ANSIBLE_FORCE_COLOR', None, boolean=True)
-ANSIBLE_NOCOLOR = get_config(p, DEFAULTS, 'nocolor', 'ANSIBLE_NOCOLOR', None, boolean=True)
-ANSIBLE_NOCOWS = get_config(p, DEFAULTS, 'nocows', 'ANSIBLE_NOCOWS', None, boolean=True)
-DISPLAY_SKIPPED_HOSTS = get_config(p, DEFAULTS, 'display_skipped_hosts', 'DISPLAY_SKIPPED_HOSTS', True, boolean=True)
-DEFAULT_UNDEFINED_VAR_BEHAVIOR = get_config(p, DEFAULTS, 'error_on_undefined_vars', 'ANSIBLE_ERROR_ON_UNDEFINED_VARS', True, boolean=True)
-HOST_KEY_CHECKING = get_config(p, DEFAULTS, 'host_key_checking', 'ANSIBLE_HOST_KEY_CHECKING', True, boolean=True)
-SYSTEM_WARNINGS = get_config(p, DEFAULTS, 'system_warnings', 'ANSIBLE_SYSTEM_WARNINGS', True, boolean=True)
-DEPRECATION_WARNINGS = get_config(p, DEFAULTS, 'deprecation_warnings', 'ANSIBLE_DEPRECATION_WARNINGS', True, boolean=True)
-DEFAULT_CALLABLE_WHITELIST = get_config(p, DEFAULTS, 'callable_whitelist', 'ANSIBLE_CALLABLE_WHITELIST', [], islist=True)
-COMMAND_WARNINGS = get_config(p, DEFAULTS, 'command_warnings', 'ANSIBLE_COMMAND_WARNINGS', False, boolean=True)
-DEFAULT_LOAD_CALLBACK_PLUGINS = get_config(p, DEFAULTS, 'bin_ansible_callbacks', 'ANSIBLE_LOAD_CALLBACK_PLUGINS', False, boolean=True)
-DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, boolean=True)
-
-
-RETRY_FILES_ENABLED = get_config(p, DEFAULTS, 'retry_files_enabled', 'ANSIBLE_RETRY_FILES_ENABLED', True, boolean=True)
-RETRY_FILES_SAVE_PATH = get_config(p, DEFAULTS, 'retry_files_save_path', 'ANSIBLE_RETRY_FILES_SAVE_PATH', '~/')
-
-# CONNECTION RELATED
-ANSIBLE_SSH_ARGS = get_config(p, 'ssh_connection', 'ssh_args', 'ANSIBLE_SSH_ARGS', None)
-ANSIBLE_SSH_CONTROL_PATH = get_config(p, 'ssh_connection', 'control_path', 'ANSIBLE_SSH_CONTROL_PATH', "%(directory)s/ansible-ssh-%%h-%%p-%%r")
-ANSIBLE_SSH_PIPELINING = get_config(p, 'ssh_connection', 'pipelining', 'ANSIBLE_SSH_PIPELINING', False, boolean=True)
-PARAMIKO_RECORD_HOST_KEYS = get_config(p, 'paramiko_connection', 'record_host_keys', 'ANSIBLE_PARAMIKO_RECORD_HOST_KEYS', True, boolean=True)
-# obsolete -- will be formally removed
-ZEROMQ_PORT = get_config(p, 'fireball_connection', 'zeromq_port', 'ANSIBLE_ZEROMQ_PORT', 5099, integer=True)
-ACCELERATE_PORT = get_config(p, 'accelerate', 'accelerate_port', 'ACCELERATE_PORT', 5099, integer=True)
-ACCELERATE_TIMEOUT = get_config(p, 'accelerate', 'accelerate_timeout', 'ACCELERATE_TIMEOUT', 30, integer=True)
-ACCELERATE_CONNECT_TIMEOUT = get_config(p, 'accelerate', 'accelerate_connect_timeout', 'ACCELERATE_CONNECT_TIMEOUT', 1.0, floating=True)
-ACCELERATE_DAEMON_TIMEOUT = get_config(p, 'accelerate', 'accelerate_daemon_timeout', 'ACCELERATE_DAEMON_TIMEOUT', 30, integer=True)
-ACCELERATE_KEYS_DIR = get_config(p, 'accelerate', 'accelerate_keys_dir', 'ACCELERATE_KEYS_DIR', '~/.fireball.keys')
-ACCELERATE_KEYS_DIR_PERMS = get_config(p, 'accelerate', 'accelerate_keys_dir_perms', 'ACCELERATE_KEYS_DIR_PERMS', '700')
-ACCELERATE_KEYS_FILE_PERMS = get_config(p, 'accelerate', 'accelerate_keys_file_perms', 'ACCELERATE_KEYS_FILE_PERMS', '600')
-ACCELERATE_MULTI_KEY = get_config(p, 'accelerate', 'accelerate_multi_key', 'ACCELERATE_MULTI_KEY', False, boolean=True)
-PARAMIKO_PTY = get_config(p, 'paramiko_connection', 'pty', 'ANSIBLE_PARAMIKO_PTY', True, boolean=True)
-
-# characters included in auto-generated passwords
-DEFAULT_PASSWORD_CHARS = ascii_letters + digits + ".,:-_"
-
-# non-configurable things
-DEFAULT_BECOME_PASS = None
-DEFAULT_SUDO_PASS = None
-DEFAULT_REMOTE_PASS = None
-DEFAULT_SUBSET = None
-DEFAULT_SU_PASS = None
-VAULT_VERSION_MIN = 1.0
-VAULT_VERSION_MAX = 1.0
diff --git a/v1/ansible/errors.py b/v1/ansible/errors.py
deleted file mode 100644
index 65edbc294a..0000000000
--- a/v1/ansible/errors.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class AnsibleError(Exception):
- ''' The base Ansible exception from which all others should subclass '''
- pass
-
-class AnsibleFileNotFound(AnsibleError):
- pass
-
-class AnsibleConnectionFailed(AnsibleError):
- pass
-
-class AnsibleYAMLValidationFailed(AnsibleError):
- pass
-
-class AnsibleUndefinedVariable(AnsibleError):
- pass
-
-class AnsibleFilterError(AnsibleError):
- pass
diff --git a/v1/ansible/inventory/__init__.py b/v1/ansible/inventory/__init__.py
deleted file mode 100644
index f012246e22..0000000000
--- a/v1/ansible/inventory/__init__.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-import fnmatch
-import os
-import sys
-import re
-import subprocess
-
-import ansible.constants as C
-from ansible.inventory.ini import InventoryParser
-from ansible.inventory.script import InventoryScript
-from ansible.inventory.dir import InventoryDirectory
-from ansible.inventory.group import Group
-from ansible.inventory.host import Host
-from ansible import errors
-from ansible import utils
-
-class Inventory(object):
- """
- Host inventory for ansible.
- """
-
- __slots__ = [ 'host_list', 'groups', '_restriction', '_also_restriction', '_subset',
- 'parser', '_vars_per_host', '_vars_per_group', '_hosts_cache', '_groups_list',
- '_pattern_cache', '_vault_password', '_vars_plugins', '_playbook_basedir']
-
- def __init__(self, host_list=C.DEFAULT_HOST_LIST, vault_password=None):
-
- # the host file file, or script path, or list of hosts
- # if a list, inventory data will NOT be loaded
- self.host_list = host_list
- self._vault_password=vault_password
-
- # caching to avoid repeated calculations, particularly with
- # external inventory scripts.
-
- self._vars_per_host = {}
- self._vars_per_group = {}
- self._hosts_cache = {}
- self._groups_list = {}
- self._pattern_cache = {}
-
- # to be set by calling set_playbook_basedir by playbook code
- self._playbook_basedir = None
-
- # the inventory object holds a list of groups
- self.groups = []
-
- # a list of host(names) to contain current inquiries to
- self._restriction = None
- self._also_restriction = None
- self._subset = None
-
- if isinstance(host_list, basestring):
- if "," in host_list:
- host_list = host_list.split(",")
- host_list = [ h for h in host_list if h and h.strip() ]
-
- if host_list is None:
- self.parser = None
- elif isinstance(host_list, list):
- self.parser = None
- all = Group('all')
- self.groups = [ all ]
- ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
- for x in host_list:
- m = ipv6_re.match(x)
- if m:
- all.add_host(Host(m.groups()[0], m.groups()[1]))
- else:
- if ":" in x:
- tokens = x.rsplit(":", 1)
- # if there is ':' in the address, then this is an ipv6
- if ':' in tokens[0]:
- all.add_host(Host(x))
- else:
- all.add_host(Host(tokens[0], tokens[1]))
- else:
- all.add_host(Host(x))
- elif os.path.exists(host_list):
- if os.path.isdir(host_list):
- # Ensure basedir is inside the directory
- self.host_list = os.path.join(self.host_list, "")
- self.parser = InventoryDirectory(filename=host_list)
- self.groups = self.parser.groups.values()
- else:
- # check to see if the specified file starts with a
- # shebang (#!/), so if an error is raised by the parser
- # class we can show a more apropos error
- shebang_present = False
- try:
- inv_file = open(host_list)
- first_line = inv_file.readlines()[0]
- inv_file.close()
- if first_line.startswith('#!'):
- shebang_present = True
- except:
- pass
-
- if utils.is_executable(host_list):
- try:
- self.parser = InventoryScript(filename=host_list)
- self.groups = self.parser.groups.values()
- except:
- if not shebang_present:
- raise errors.AnsibleError("The file %s is marked as executable, but failed to execute correctly. " % host_list + \
- "If this is not supposed to be an executable script, correct this with `chmod -x %s`." % host_list)
- else:
- raise
- else:
- try:
- self.parser = InventoryParser(filename=host_list)
- self.groups = self.parser.groups.values()
- except:
- if shebang_present:
- raise errors.AnsibleError("The file %s looks like it should be an executable inventory script, but is not marked executable. " % host_list + \
- "Perhaps you want to correct this with `chmod +x %s`?" % host_list)
- else:
- raise
-
- utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
- else:
- raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
-
- self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
-
- # get group vars from group_vars/ files and vars plugins
- for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_variables(group.name, vault_password=self._vault_password))
-
- # get host vars from host_vars/ files and vars plugins
- for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_variables(host.name, vault_password=self._vault_password))
-
-
- def _match(self, str, pattern_str):
- try:
- if pattern_str.startswith('~'):
- return re.search(pattern_str[1:], str)
- else:
- return fnmatch.fnmatch(str, pattern_str)
- except Exception, e:
- raise errors.AnsibleError('invalid host pattern: %s' % pattern_str)
-
- def _match_list(self, items, item_attr, pattern_str):
- results = []
- try:
- if not pattern_str.startswith('~'):
- pattern = re.compile(fnmatch.translate(pattern_str))
- else:
- pattern = re.compile(pattern_str[1:])
- except Exception, e:
- raise errors.AnsibleError('invalid host pattern: %s' % pattern_str)
-
- for item in items:
- if pattern.match(getattr(item, item_attr)):
- results.append(item)
- return results
-
- def get_hosts(self, pattern="all"):
- """
- find all host names matching a pattern string, taking into account any inventory restrictions or
- applied subsets.
- """
-
- # process patterns
- if isinstance(pattern, list):
- pattern = ';'.join(pattern)
- patterns = pattern.replace(";",":").split(":")
- hosts = self._get_hosts(patterns)
-
- # exclude hosts not in a subset, if defined
- if self._subset:
- subset = self._get_hosts(self._subset)
- hosts = [ h for h in hosts if h in subset ]
-
- # exclude hosts mentioned in any restriction (ex: failed hosts)
- if self._restriction is not None:
- hosts = [ h for h in hosts if h.name in self._restriction ]
- if self._also_restriction is not None:
- hosts = [ h for h in hosts if h.name in self._also_restriction ]
-
- return hosts
-
- def _get_hosts(self, patterns):
- """
- finds hosts that match a list of patterns. Handles negative
- matches as well as intersection matches.
- """
-
- # Host specifiers should be sorted to ensure consistent behavior
- pattern_regular = []
- pattern_intersection = []
- pattern_exclude = []
- for p in patterns:
- if p.startswith("!"):
- pattern_exclude.append(p)
- elif p.startswith("&"):
- pattern_intersection.append(p)
- elif p:
- pattern_regular.append(p)
-
- # if no regular pattern was given, hence only exclude and/or intersection
- # make that magically work
- if pattern_regular == []:
- pattern_regular = ['all']
-
- # when applying the host selectors, run those without the "&" or "!"
- # first, then the &s, then the !s.
- patterns = pattern_regular + pattern_intersection + pattern_exclude
-
- hosts = []
-
- for p in patterns:
- # avoid resolving a pattern that is a plain host
- if p in self._hosts_cache:
- hosts.append(self.get_host(p))
- else:
- that = self.__get_hosts(p)
- if p.startswith("!"):
- hosts = [ h for h in hosts if h not in that ]
- elif p.startswith("&"):
- hosts = [ h for h in hosts if h in that ]
- else:
- to_append = [ h for h in that if h.name not in [ y.name for y in hosts ] ]
- hosts.extend(to_append)
- return hosts
-
- def __get_hosts(self, pattern):
- """
- finds hosts that positively match a particular pattern. Does not
- take into account negative matches.
- """
-
- if pattern in self._pattern_cache:
- return self._pattern_cache[pattern]
-
- (name, enumeration_details) = self._enumeration_info(pattern)
- hpat = self._hosts_in_unenumerated_pattern(name)
- result = self._apply_ranges(pattern, hpat)
- self._pattern_cache[pattern] = result
- return result
-
- def _enumeration_info(self, pattern):
- """
- returns (pattern, limits) taking a regular pattern and finding out
- which parts of it correspond to start/stop offsets. limits is
- a tuple of (start, stop) or None
- """
-
- # Do not parse regexes for enumeration info
- if pattern.startswith('~'):
- return (pattern, None)
-
- # The regex used to match on the range, which can be [x] or [x-y].
- pattern_re = re.compile("^(.*)\[([-]?[0-9]+)(?:(?:-)([0-9]+))?\](.*)$")
- m = pattern_re.match(pattern)
- if m:
- (target, first, last, rest) = m.groups()
- first = int(first)
- if last:
- if first < 0:
- raise errors.AnsibleError("invalid range: negative indices cannot be used as the first item in a range")
- last = int(last)
- else:
- last = first
- return (target, (first, last))
- else:
- return (pattern, None)
-
- def _apply_ranges(self, pat, hosts):
- """
- given a pattern like foo, that matches hosts, return all of hosts
- given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
- """
-
- # If there are no hosts to select from, just return the
- # empty set. This prevents trying to do selections on an empty set.
- # issue#6258
- if not hosts:
- return hosts
-
- (loose_pattern, limits) = self._enumeration_info(pat)
- if not limits:
- return hosts
-
- (left, right) = limits
-
- if left == '':
- left = 0
- if right == '':
- right = 0
- left=int(left)
- right=int(right)
- try:
- if left != right:
- return hosts[left:right]
- else:
- return [ hosts[left] ]
- except IndexError:
- raise errors.AnsibleError("no hosts matching the pattern '%s' were found" % pat)
-
- def _create_implicit_localhost(self, pattern):
- new_host = Host(pattern)
- new_host.set_variable("ansible_python_interpreter", sys.executable)
- new_host.set_variable("ansible_connection", "local")
- ungrouped = self.get_group("ungrouped")
- if ungrouped is None:
- self.add_group(Group('ungrouped'))
- ungrouped = self.get_group('ungrouped')
- self.get_group('all').add_child_group(ungrouped)
- ungrouped.add_host(new_host)
- return new_host
-
- def _hosts_in_unenumerated_pattern(self, pattern):
- """ Get all host names matching the pattern """
-
- results = []
- hosts = []
- hostnames = set()
-
- # ignore any negative checks here, this is handled elsewhere
- pattern = pattern.replace("!","").replace("&", "")
-
- def __append_host_to_results(host):
- if host not in results and host.name not in hostnames:
- hostnames.add(host.name)
- results.append(host)
-
- groups = self.get_groups()
- for group in groups:
- if pattern == 'all':
- for host in group.get_hosts():
- __append_host_to_results(host)
- else:
- if self._match(group.name, pattern):
- for host in group.get_hosts():
- __append_host_to_results(host)
- else:
- matching_hosts = self._match_list(group.get_hosts(), 'name', pattern)
- for host in matching_hosts:
- __append_host_to_results(host)
-
- if pattern in ["localhost", "127.0.0.1"] and len(results) == 0:
- new_host = self._create_implicit_localhost(pattern)
- results.append(new_host)
- return results
-
- def clear_pattern_cache(self):
- ''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
- self._pattern_cache = {}
-
- def groups_for_host(self, host):
- if host in self._hosts_cache:
- return self._hosts_cache[host].get_groups()
- else:
- return []
-
- def groups_list(self):
- if not self._groups_list:
- groups = {}
- for g in self.groups:
- groups[g.name] = [h.name for h in g.get_hosts()]
- ancestors = g.get_ancestors()
- for a in ancestors:
- if a.name not in groups:
- groups[a.name] = [h.name for h in a.get_hosts()]
- self._groups_list = groups
- return self._groups_list
-
- def get_groups(self):
- return self.groups
-
- def get_host(self, hostname):
- if hostname not in self._hosts_cache:
- self._hosts_cache[hostname] = self._get_host(hostname)
- return self._hosts_cache[hostname]
-
- def _get_host(self, hostname):
- if hostname in ['localhost','127.0.0.1']:
- for host in self.get_group('all').get_hosts():
- if host.name in ['localhost', '127.0.0.1']:
- return host
- return self._create_implicit_localhost(hostname)
- else:
- for group in self.groups:
- for host in group.get_hosts():
- if hostname == host.name:
- return host
- return None
-
- def get_group(self, groupname):
- for group in self.groups:
- if group.name == groupname:
- return group
- return None
-
- def get_group_variables(self, groupname, update_cached=False, vault_password=None):
- if groupname not in self._vars_per_group or update_cached:
- self._vars_per_group[groupname] = self._get_group_variables(groupname, vault_password=vault_password)
- return self._vars_per_group[groupname]
-
- def _get_group_variables(self, groupname, vault_password=None):
-
- group = self.get_group(groupname)
- if group is None:
- raise errors.AnsibleError("group not found: %s" % groupname)
-
- vars = {}
-
- # plugin.get_group_vars retrieves just vars for specific group
- vars_results = [ plugin.get_group_vars(group, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_group_vars')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # Read group_vars/ files
- vars = utils.combine_vars(vars, self.get_group_vars(group))
-
- return vars
-
- def get_variables(self, hostname, update_cached=False, vault_password=None):
-
- host = self.get_host(hostname)
- if not host:
- raise errors.AnsibleError("host not found: %s" % hostname)
- return host.get_variables()
-
- def get_host_variables(self, hostname, update_cached=False, vault_password=None):
-
- if hostname not in self._vars_per_host or update_cached:
- self._vars_per_host[hostname] = self._get_host_variables(hostname, vault_password=vault_password)
- return self._vars_per_host[hostname]
-
- def _get_host_variables(self, hostname, vault_password=None):
-
- host = self.get_host(hostname)
- if host is None:
- raise errors.AnsibleError("host not found: %s" % hostname)
-
- vars = {}
-
- # plugin.run retrieves all vars (also from groups) for host
- vars_results = [ plugin.run(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'run')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # plugin.get_host_vars retrieves just vars for specific host
- vars_results = [ plugin.get_host_vars(host, vault_password=vault_password) for plugin in self._vars_plugins if hasattr(plugin, 'get_host_vars')]
- for updated in vars_results:
- if updated is not None:
- vars = utils.combine_vars(vars, updated)
-
- # still need to check InventoryParser per host vars
- # which actually means InventoryScript per host,
- # which is not performant
- if self.parser is not None:
- vars = utils.combine_vars(vars, self.parser.get_host_variables(host))
-
- # Read host_vars/ files
- vars = utils.combine_vars(vars, self.get_host_vars(host))
-
- return vars
-
- def add_group(self, group):
- if group.name not in self.groups_list():
- self.groups.append(group)
- self._groups_list = None # invalidate internal cache
- else:
- raise errors.AnsibleError("group already in inventory: %s" % group.name)
-
- def list_hosts(self, pattern="all"):
-
- """ return a list of hostnames for a pattern """
-
- result = [ h.name for h in self.get_hosts(pattern) ]
- if len(result) == 0 and pattern in ["localhost", "127.0.0.1"]:
- result = [pattern]
- return result
-
- def list_groups(self):
- return sorted([ g.name for g in self.groups ], key=lambda x: x)
-
- # TODO: remove this function
- def get_restriction(self):
- return self._restriction
-
- def restrict_to(self, restriction):
- """
- Restrict list operations to the hosts given in restriction. This is used
- to exclude failed hosts in main playbook code, don't use this for other
- reasons.
- """
- if not isinstance(restriction, list):
- restriction = [ restriction ]
- self._restriction = restriction
-
- def also_restrict_to(self, restriction):
- """
- Works like restict_to but offers an additional restriction. Playbooks use this
- to implement serial behavior.
- """
- if not isinstance(restriction, list):
- restriction = [ restriction ]
- self._also_restriction = restriction
-
- def subset(self, subset_pattern):
- """
- Limits inventory results to a subset of inventory that matches a given
- pattern, such as to select a given geographic of numeric slice amongst
- a previous 'hosts' selection that only select roles, or vice versa.
- Corresponds to --limit parameter to ansible-playbook
- """
- if subset_pattern is None:
- self._subset = None
- else:
- subset_pattern = subset_pattern.replace(',',':')
- subset_pattern = subset_pattern.replace(";",":").split(":")
- results = []
- # allow Unix style @filename data
- for x in subset_pattern:
- if x.startswith("@"):
- fd = open(x[1:])
- results.extend(fd.read().split("\n"))
- fd.close()
- else:
- results.append(x)
- self._subset = results
-
- def lift_restriction(self):
- """ Do not restrict list operations """
- self._restriction = None
-
- def lift_also_restriction(self):
- """ Clears the also restriction """
- self._also_restriction = None
-
- def is_file(self):
- """ did inventory come from a file? """
- if not isinstance(self.host_list, basestring):
- return False
- return os.path.exists(self.host_list)
-
- def basedir(self):
- """ if inventory came from a file, what's the directory? """
- if not self.is_file():
- return None
- dname = os.path.dirname(self.host_list)
- if dname is None or dname == '' or dname == '.':
- cwd = os.getcwd()
- return os.path.abspath(cwd)
- return os.path.abspath(dname)
-
- def src(self):
- """ if inventory came from a file, what's the directory and file name? """
- if not self.is_file():
- return None
- return self.host_list
-
- def playbook_basedir(self):
- """ returns the directory of the current playbook """
- return self._playbook_basedir
-
- def set_playbook_basedir(self, dir):
- """
- sets the base directory of the playbook so inventory can use it as a
- basedir for host_ and group_vars, and other things.
- """
- # Only update things if dir is a different playbook basedir
- if dir != self._playbook_basedir:
- self._playbook_basedir = dir
- # get group vars from group_vars/ files
- for group in self.groups:
- group.vars = utils.combine_vars(group.vars, self.get_group_vars(group, new_pb_basedir=True))
- # get host vars from host_vars/ files
- for host in self.get_hosts():
- host.vars = utils.combine_vars(host.vars, self.get_host_vars(host, new_pb_basedir=True))
- # invalidate cache
- self._vars_per_host = {}
- self._vars_per_group = {}
-
- def get_host_vars(self, host, new_pb_basedir=False):
- """ Read host_vars/ files """
- return self._get_hostgroup_vars(host=host, group=None, new_pb_basedir=new_pb_basedir)
-
- def get_group_vars(self, group, new_pb_basedir=False):
- """ Read group_vars/ files """
- return self._get_hostgroup_vars(host=None, group=group, new_pb_basedir=new_pb_basedir)
-
- def _get_hostgroup_vars(self, host=None, group=None, new_pb_basedir=False):
- """
- Loads variables from group_vars/<groupname> and host_vars/<hostname> in directories parallel
- to the inventory base directory or in the same directory as the playbook. Variables in the playbook
- dir will win over the inventory dir if files are in both.
- """
-
- results = {}
- scan_pass = 0
- _basedir = self.basedir()
-
- # look in both the inventory base directory and the playbook base directory
- # unless we do an update for a new playbook base dir
- if not new_pb_basedir:
- basedirs = [_basedir, self._playbook_basedir]
- else:
- basedirs = [self._playbook_basedir]
-
- for basedir in basedirs:
-
- # this can happen from particular API usages, particularly if not run
- # from /usr/bin/ansible-playbook
- if basedir is None:
- continue
-
- scan_pass = scan_pass + 1
-
- # it's not an eror if the directory does not exist, keep moving
- if not os.path.exists(basedir):
- continue
-
- # save work of second scan if the directories are the same
- if _basedir == self._playbook_basedir and scan_pass != 1:
- continue
-
- if group and host is None:
- # load vars in dir/group_vars/name_of_group
- base_path = os.path.join(basedir, "group_vars/%s" % group.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
- elif host and group is None:
- # same for hostvars in dir/host_vars/name_of_host
- base_path = os.path.join(basedir, "host_vars/%s" % host.name)
- results = utils.load_vars(base_path, results, vault_password=self._vault_password)
-
- # all done, results is a dictionary of variables for this particular host.
- return results
-
diff --git a/v1/ansible/inventory/dir.py b/v1/ansible/inventory/dir.py
deleted file mode 100644
index 9ac23fff89..0000000000
--- a/v1/ansible/inventory/dir.py
+++ /dev/null
@@ -1,229 +0,0 @@
-# (c) 2013, Daniel Hokka Zakrisson <daniel@hozac.com>
-# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import os
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.inventory.ini import InventoryParser
-from ansible.inventory.script import InventoryScript
-from ansible import utils
-from ansible import errors
-
-class InventoryDirectory(object):
- ''' Host inventory parser for ansible using a directory of inventories. '''
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
- self.names = os.listdir(filename)
- self.names.sort()
- self.directory = filename
- self.parsers = []
- self.hosts = {}
- self.groups = {}
-
- for i in self.names:
-
- # Skip files that end with certain extensions or characters
- if any(i.endswith(ext) for ext in ("~", ".orig", ".bak", ".ini", ".retry", ".pyc", ".pyo")):
- continue
- # Skip hidden files
- if i.startswith('.') and not i.startswith('./'):
- continue
- # These are things inside of an inventory basedir
- if i in ("host_vars", "group_vars", "vars_plugins"):
- continue
- fullpath = os.path.join(self.directory, i)
- if os.path.isdir(fullpath):
- parser = InventoryDirectory(filename=fullpath)
- elif utils.is_executable(fullpath):
- parser = InventoryScript(filename=fullpath)
- else:
- parser = InventoryParser(filename=fullpath)
- self.parsers.append(parser)
-
- # retrieve all groups and hosts form the parser and add them to
- # self, don't look at group lists yet, to avoid
- # recursion trouble, but just make sure all objects exist in self
- newgroups = parser.groups.values()
- for group in newgroups:
- for host in group.hosts:
- self._add_host(host)
- for group in newgroups:
- self._add_group(group)
-
- # now check the objects lists so they contain only objects from
- # self; membership data in groups is already fine (except all &
- # ungrouped, see later), but might still reference objects not in self
- for group in self.groups.values():
- # iterate on a copy of the lists, as those lists get changed in
- # the loop
- # list with group's child group objects:
- for child in group.child_groups[:]:
- if child != self.groups[child.name]:
- group.child_groups.remove(child)
- group.child_groups.append(self.groups[child.name])
- # list with group's parent group objects:
- for parent in group.parent_groups[:]:
- if parent != self.groups[parent.name]:
- group.parent_groups.remove(parent)
- group.parent_groups.append(self.groups[parent.name])
- # list with group's host objects:
- for host in group.hosts[:]:
- if host != self.hosts[host.name]:
- group.hosts.remove(host)
- group.hosts.append(self.hosts[host.name])
- # also check here that the group that contains host, is
- # also contained in the host's group list
- if group not in self.hosts[host.name].groups:
- self.hosts[host.name].groups.append(group)
-
- # extra checks on special groups all and ungrouped
- # remove hosts from 'ungrouped' if they became member of other groups
- if 'ungrouped' in self.groups:
- ungrouped = self.groups['ungrouped']
- # loop on a copy of ungrouped hosts, as we want to change that list
- for host in ungrouped.hosts[:]:
- if len(host.groups) > 1:
- host.groups.remove(ungrouped)
- ungrouped.hosts.remove(host)
-
- # remove hosts from 'all' if they became member of other groups
- # all should only contain direct children, not grandchildren
- # direct children should have dept == 1
- if 'all' in self.groups:
- allgroup = self.groups['all' ]
- # loop on a copy of all's child groups, as we want to change that list
- for group in allgroup.child_groups[:]:
- # groups might once have beeen added to all, and later be added
- # to another group: we need to remove the link wit all then
- if len(group.parent_groups) > 1 and allgroup in group.parent_groups:
- # real children of all have just 1 parent, all
- # this one has more, so not a direct child of all anymore
- group.parent_groups.remove(allgroup)
- allgroup.child_groups.remove(group)
- elif allgroup not in group.parent_groups:
- # this group was once added to all, but doesn't list it as
- # a parent any more; the info in the group is the correct
- # info
- allgroup.child_groups.remove(group)
-
-
- def _add_group(self, group):
- """ Merge an existing group or add a new one;
- Track parent and child groups, and hosts of the new one """
-
- if group.name not in self.groups:
- # it's brand new, add him!
- self.groups[group.name] = group
- if self.groups[group.name] != group:
- # different object, merge
- self._merge_groups(self.groups[group.name], group)
-
- def _add_host(self, host):
- if host.name not in self.hosts:
- # Papa's got a brand new host
- self.hosts[host.name] = host
- if self.hosts[host.name] != host:
- # different object, merge
- self._merge_hosts(self.hosts[host.name], host)
-
- def _merge_groups(self, group, newgroup):
- """ Merge all of instance newgroup into group,
- update parent/child relationships
- group lists may still contain group objects that exist in self with
- same name, but was instanciated as a different object in some other
- inventory parser; these are handled later """
-
- # name
- if group.name != newgroup.name:
- raise errors.AnsibleError("Cannot merge group %s with %s" % (group.name, newgroup.name))
-
- # depth
- group.depth = max([group.depth, newgroup.depth])
-
- # hosts list (host objects are by now already added to self.hosts)
- for host in newgroup.hosts:
- grouphosts = dict([(h.name, h) for h in group.hosts])
- if host.name in grouphosts:
- # same host name but different object, merge
- self._merge_hosts(grouphosts[host.name], host)
- else:
- # new membership, add host to group from self
- # group from self will also be added again to host.groups, but
- # as different object
- group.add_host(self.hosts[host.name])
- # now remove this the old object for group in host.groups
- for hostgroup in [g for g in host.groups]:
- if hostgroup.name == group.name and hostgroup != self.groups[group.name]:
- self.hosts[host.name].groups.remove(hostgroup)
-
-
- # group child membership relation
- for newchild in newgroup.child_groups:
- # dict with existing child groups:
- childgroups = dict([(g.name, g) for g in group.child_groups])
- # check if child of new group is already known as a child
- if newchild.name not in childgroups:
- self.groups[group.name].add_child_group(newchild)
-
- # group parent membership relation
- for newparent in newgroup.parent_groups:
- # dict with existing parent groups:
- parentgroups = dict([(g.name, g) for g in group.parent_groups])
- # check if parent of new group is already known as a parent
- if newparent.name not in parentgroups:
- if newparent.name not in self.groups:
- # group does not exist yet in self, import him
- self.groups[newparent.name] = newparent
- # group now exists but not yet as a parent here
- self.groups[newparent.name].add_child_group(group)
-
- # variables
- group.vars = utils.combine_vars(group.vars, newgroup.vars)
-
- def _merge_hosts(self,host, newhost):
- """ Merge all of instance newhost into host """
-
- # name
- if host.name != newhost.name:
- raise errors.AnsibleError("Cannot merge host %s with %s" % (host.name, newhost.name))
-
- # group membership relation
- for newgroup in newhost.groups:
- # dict with existing groups:
- hostgroups = dict([(g.name, g) for g in host.groups])
- # check if new group is already known as a group
- if newgroup.name not in hostgroups:
- if newgroup.name not in self.groups:
- # group does not exist yet in self, import him
- self.groups[newgroup.name] = newgroup
- # group now exists but doesn't have host yet
- self.groups[newgroup.name].add_host(host)
-
- # variables
- host.vars = utils.combine_vars(host.vars, newhost.vars)
-
- def get_host_variables(self, host):
- """ Gets additional host variables from all inventories """
- vars = {}
- for i in self.parsers:
- vars.update(i.get_host_variables(host))
- return vars
-
diff --git a/v1/ansible/inventory/expand_hosts.py b/v1/ansible/inventory/expand_hosts.py
deleted file mode 100644
index f129740935..0000000000
--- a/v1/ansible/inventory/expand_hosts.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# (c) 2012, Zettar Inc.
-# Written by Chin Fang <fangchin@zettar.com>
-#
-# This file is part of Ansible
-#
-# This module is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This software is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-#
-
-'''
-This module is for enhancing ansible's inventory parsing capability such
-that it can deal with hostnames specified using a simple pattern in the
-form of [beg:end], example: [1:5], [a:c], [D:G]. If beg is not specified,
-it defaults to 0.
-
-If beg is given and is left-zero-padded, e.g. '001', it is taken as a
-formatting hint when the range is expanded. e.g. [001:010] is to be
-expanded into 001, 002 ...009, 010.
-
-Note that when beg is specified with left zero padding, then the length of
-end must be the same as that of beg, else an exception is raised.
-'''
-import string
-
-from ansible import errors
-
-def detect_range(line = None):
- '''
- A helper function that checks a given host line to see if it contains
- a range pattern described in the docstring above.
-
- Returnes True if the given line contains a pattern, else False.
- '''
- if 0 <= line.find("[") < line.find(":") < line.find("]"):
- return True
- else:
- return False
-
-def expand_hostname_range(line = None):
- '''
- A helper function that expands a given line that contains a pattern
- specified in top docstring, and returns a list that consists of the
- expanded version.
-
- The '[' and ']' characters are used to maintain the pseudo-code
- appearance. They are replaced in this function with '|' to ease
- string splitting.
-
- References: http://ansible.github.com/patterns.html#hosts-and-groups
- '''
- all_hosts = []
- if line:
- # A hostname such as db[1:6]-node is considered to consists
- # three parts:
- # head: 'db'
- # nrange: [1:6]; range() is a built-in. Can't use the name
- # tail: '-node'
-
- # Add support for multiple ranges in a host so:
- # db[01:10:3]node-[01:10]
- # - to do this we split off at the first [...] set, getting the list
- # of hosts and then repeat until none left.
- # - also add an optional third parameter which contains the step. (Default: 1)
- # so range can be [01:10:2] -> 01 03 05 07 09
- # FIXME: make this work for alphabetic sequences too.
-
- (head, nrange, tail) = line.replace('[','|',1).replace(']','|',1).split('|')
- bounds = nrange.split(":")
- if len(bounds) != 2 and len(bounds) != 3:
- raise errors.AnsibleError("host range incorrectly specified")
- beg = bounds[0]
- end = bounds[1]
- if len(bounds) == 2:
- step = 1
- else:
- step = bounds[2]
- if not beg:
- beg = "0"
- if not end:
- raise errors.AnsibleError("host range end value missing")
- if beg[0] == '0' and len(beg) > 1:
- rlen = len(beg) # range length formatting hint
- if rlen != len(end):
- raise errors.AnsibleError("host range format incorrectly specified!")
- fill = lambda _: str(_).zfill(rlen) # range sequence
- else:
- fill = str
-
- try:
- i_beg = string.ascii_letters.index(beg)
- i_end = string.ascii_letters.index(end)
- if i_beg > i_end:
- raise errors.AnsibleError("host range format incorrectly specified!")
- seq = string.ascii_letters[i_beg:i_end+1]
- except ValueError: # not an alpha range
- seq = range(int(beg), int(end)+1, int(step))
-
- for rseq in seq:
- hname = ''.join((head, fill(rseq), tail))
-
- if detect_range(hname):
- all_hosts.extend( expand_hostname_range( hname ) )
- else:
- all_hosts.append(hname)
-
- return all_hosts
diff --git a/v1/ansible/inventory/group.py b/v1/ansible/inventory/group.py
deleted file mode 100644
index 262558e69c..0000000000
--- a/v1/ansible/inventory/group.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class Group(object):
- ''' a group of ansible hosts '''
-
- __slots__ = [ 'name', 'hosts', 'vars', 'child_groups', 'parent_groups', 'depth', '_hosts_cache' ]
-
- def __init__(self, name=None):
-
- self.depth = 0
- self.name = name
- self.hosts = []
- self.vars = {}
- self.child_groups = []
- self.parent_groups = []
- self._hosts_cache = None
- #self.clear_hosts_cache()
- if self.name is None:
- raise Exception("group name is required")
-
- def add_child_group(self, group):
-
- if self == group:
- raise Exception("can't add group to itself")
-
- # don't add if it's already there
- if not group in self.child_groups:
- self.child_groups.append(group)
-
- # update the depth of the child
- group.depth = max([self.depth+1, group.depth])
-
- # update the depth of the grandchildren
- group._check_children_depth()
-
- # now add self to child's parent_groups list, but only if there
- # isn't already a group with the same name
- if not self.name in [g.name for g in group.parent_groups]:
- group.parent_groups.append(self)
-
- self.clear_hosts_cache()
-
- def _check_children_depth(self):
-
- for group in self.child_groups:
- group.depth = max([self.depth+1, group.depth])
- group._check_children_depth()
-
- def add_host(self, host):
-
- self.hosts.append(host)
- host.add_group(self)
- self.clear_hosts_cache()
-
- def set_variable(self, key, value):
-
- self.vars[key] = value
-
- def clear_hosts_cache(self):
-
- self._hosts_cache = None
- for g in self.parent_groups:
- g.clear_hosts_cache()
-
- def get_hosts(self):
-
- if self._hosts_cache is None:
- self._hosts_cache = self._get_hosts()
-
- return self._hosts_cache
-
- def _get_hosts(self):
-
- hosts = []
- seen = {}
- for kid in self.child_groups:
- kid_hosts = kid.get_hosts()
- for kk in kid_hosts:
- if kk not in seen:
- seen[kk] = 1
- hosts.append(kk)
- for mine in self.hosts:
- if mine not in seen:
- seen[mine] = 1
- hosts.append(mine)
- return hosts
-
- def get_variables(self):
- return self.vars.copy()
-
- def _get_ancestors(self):
-
- results = {}
- for g in self.parent_groups:
- results[g.name] = g
- results.update(g._get_ancestors())
- return results
-
- def get_ancestors(self):
-
- return self._get_ancestors().values()
-
diff --git a/v1/ansible/inventory/host.py b/v1/ansible/inventory/host.py
deleted file mode 100644
index d4dc20fa46..0000000000
--- a/v1/ansible/inventory/host.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.constants as C
-from ansible import utils
-
-class Host(object):
- ''' a single ansible host '''
-
- __slots__ = [ 'name', 'vars', 'groups' ]
-
- def __init__(self, name=None, port=None):
-
- self.name = name
- self.vars = {}
- self.groups = []
- if port and port != C.DEFAULT_REMOTE_PORT:
- self.set_variable('ansible_ssh_port', int(port))
-
- if self.name is None:
- raise Exception("host name is required")
-
- def add_group(self, group):
-
- self.groups.append(group)
-
- def set_variable(self, key, value):
-
- self.vars[key]=value
-
- def get_groups(self):
-
- groups = {}
- for g in self.groups:
- groups[g.name] = g
- ancestors = g.get_ancestors()
- for a in ancestors:
- groups[a.name] = a
- return groups.values()
-
- def get_variables(self):
-
- results = {}
- groups = self.get_groups()
- for group in sorted(groups, key=lambda g: g.depth):
- results = utils.combine_vars(results, group.get_variables())
- results = utils.combine_vars(results, self.vars)
- results['inventory_hostname'] = self.name
- results['inventory_hostname_short'] = self.name.split('.')[0]
- results['group_names'] = sorted([ g.name for g in groups if g.name != 'all'])
- return results
-
-
diff --git a/v1/ansible/inventory/ini.py b/v1/ansible/inventory/ini.py
deleted file mode 100644
index bd9a98e7f8..0000000000
--- a/v1/ansible/inventory/ini.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.inventory.expand_hosts import detect_range
-from ansible.inventory.expand_hosts import expand_hostname_range
-from ansible import errors
-from ansible import utils
-import shlex
-import re
-import ast
-
-class InventoryParser(object):
- """
- Host inventory for ansible.
- """
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
- with open(filename) as fh:
- self.filename = filename
- self.lines = fh.readlines()
- self.groups = {}
- self.hosts = {}
- self._parse()
-
- def _parse(self):
-
- self._parse_base_groups()
- self._parse_group_children()
- self._add_allgroup_children()
- self._parse_group_variables()
- return self.groups
-
- @staticmethod
- def _parse_value(v):
- if "#" not in v:
- try:
- ret = ast.literal_eval(v)
- if not isinstance(ret, float):
- # Do not trim floats. Eg: "1.20" to 1.2
- return ret
- # Using explicit exceptions.
- # Likely a string that literal_eval does not like. We wil then just set it.
- except ValueError:
- # For some reason this was thought to be malformed.
- pass
- except SyntaxError:
- # Is this a hash with an equals at the end?
- pass
- return v
-
- # [webservers]
- # alpha
- # beta:2345
- # gamma sudo=True user=root
- # delta asdf=jkl favcolor=red
-
- def _add_allgroup_children(self):
-
- for group in self.groups.values():
- if group.depth == 0 and group.name != 'all':
- self.groups['all'].add_child_group(group)
-
-
- def _parse_base_groups(self):
- # FIXME: refactor
-
- ungrouped = Group(name='ungrouped')
- all = Group(name='all')
- all.add_child_group(ungrouped)
-
- self.groups = dict(all=all, ungrouped=ungrouped)
- active_group_name = 'ungrouped'
-
- for lineno in range(len(self.lines)):
- line = utils.before_comment(self.lines[lineno]).strip()
- if line.startswith("[") and line.endswith("]"):
- active_group_name = line.replace("[","").replace("]","")
- if ":vars" in line or ":children" in line:
- active_group_name = active_group_name.rsplit(":", 1)[0]
- if active_group_name not in self.groups:
- new_group = self.groups[active_group_name] = Group(name=active_group_name)
- active_group_name = None
- elif active_group_name not in self.groups:
- new_group = self.groups[active_group_name] = Group(name=active_group_name)
- elif line.startswith(";") or line == '':
- pass
- elif active_group_name:
- tokens = shlex.split(line)
- if len(tokens) == 0:
- continue
- hostname = tokens[0]
- port = C.DEFAULT_REMOTE_PORT
- # Three cases to check:
- # 0. A hostname that contains a range pesudo-code and a port
- # 1. A hostname that contains just a port
- if hostname.count(":") > 1:
- # Possible an IPv6 address, or maybe a host line with multiple ranges
- # IPv6 with Port XXX:XXX::XXX.port
- # FQDN foo.example.com
- if hostname.count(".") == 1:
- (hostname, port) = hostname.rsplit(".", 1)
- elif ("[" in hostname and
- "]" in hostname and
- ":" in hostname and
- (hostname.rindex("]") < hostname.rindex(":")) or
- ("]" not in hostname and ":" in hostname)):
- (hostname, port) = hostname.rsplit(":", 1)
-
- hostnames = []
- if detect_range(hostname):
- hostnames = expand_hostname_range(hostname)
- else:
- hostnames = [hostname]
-
- for hn in hostnames:
- host = None
- if hn in self.hosts:
- host = self.hosts[hn]
- else:
- host = Host(name=hn, port=port)
- self.hosts[hn] = host
- if len(tokens) > 1:
- for t in tokens[1:]:
- if t.startswith('#'):
- break
- try:
- (k,v) = t.split("=", 1)
- except ValueError, e:
- raise errors.AnsibleError("%s:%s: Invalid ini entry: %s - %s" % (self.filename, lineno + 1, t, str(e)))
- host.set_variable(k, self._parse_value(v))
- self.groups[active_group_name].add_host(host)
-
- # [southeast:children]
- # atlanta
- # raleigh
-
- def _parse_group_children(self):
- group = None
-
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
- if line is None or line == '':
- continue
- if line.startswith("[") and ":children]" in line:
- line = line.replace("[","").replace(":children]","")
- group = self.groups.get(line, None)
- if group is None:
- group = self.groups[line] = Group(name=line)
- elif line.startswith("#") or line.startswith(";"):
- pass
- elif line.startswith("["):
- group = None
- elif group:
- kid_group = self.groups.get(line, None)
- if kid_group is None:
- raise errors.AnsibleError("%s:%d: child group is not defined: (%s)" % (self.filename, lineno + 1, line))
- else:
- group.add_child_group(kid_group)
-
-
- # [webservers:vars]
- # http_port=1234
- # maxRequestsPerChild=200
-
- def _parse_group_variables(self):
- group = None
- for lineno in range(len(self.lines)):
- line = self.lines[lineno].strip()
- if line.startswith("[") and ":vars]" in line:
- line = line.replace("[","").replace(":vars]","")
- group = self.groups.get(line, None)
- if group is None:
- raise errors.AnsibleError("%s:%d: can't add vars to undefined group: %s" % (self.filename, lineno + 1, line))
- elif line.startswith("#") or line.startswith(";"):
- pass
- elif line.startswith("["):
- group = None
- elif line == '':
- pass
- elif group:
- if "=" not in line:
- raise errors.AnsibleError("%s:%d: variables assigned to group must be in key=value form" % (self.filename, lineno + 1))
- else:
- (k, v) = [e.strip() for e in line.split("=", 1)]
- group.set_variable(k, self._parse_value(v))
-
- def get_host_variables(self, host):
- return {}
diff --git a/v1/ansible/inventory/script.py b/v1/ansible/inventory/script.py
deleted file mode 100644
index b83cb9bcc7..0000000000
--- a/v1/ansible/inventory/script.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-import os
-import subprocess
-import ansible.constants as C
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-from ansible.module_utils.basic import json_dict_bytes_to_unicode
-from ansible import utils
-from ansible import errors
-import sys
-
-
-class InventoryScript(object):
- ''' Host inventory parser for ansible using external inventory scripts. '''
-
- def __init__(self, filename=C.DEFAULT_HOST_LIST):
-
- # Support inventory scripts that are not prefixed with some
- # path information but happen to be in the current working
- # directory when '.' is not in PATH.
- self.filename = os.path.abspath(filename)
- cmd = [ self.filename, "--list" ]
- try:
- sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
- (stdout, stderr) = sp.communicate()
-
- if sp.returncode != 0:
- raise errors.AnsibleError("Inventory script (%s) had an execution error: %s " % (filename,stderr))
-
- self.data = stdout
- # see comment about _meta below
- self.host_vars_from_top = None
- self.groups = self._parse(stderr)
-
-
- def _parse(self, err):
-
- all_hosts = {}
-
- # not passing from_remote because data from CMDB is trusted
- self.raw = utils.parse_json(self.data)
- self.raw = json_dict_bytes_to_unicode(self.raw)
-
- all = Group('all')
- groups = dict(all=all)
- group = None
-
-
- if 'failed' in self.raw:
- sys.stderr.write(err + "\n")
- raise errors.AnsibleError("failed to parse executable inventory script results: %s" % self.raw)
-
- for (group_name, data) in self.raw.items():
-
- # in Ansible 1.3 and later, a "_meta" subelement may contain
- # a variable "hostvars" which contains a hash for each host
- # if this "hostvars" exists at all then do not call --host for each
- # host. This is for efficiency and scripts should still return data
- # if called with --host for backwards compat with 1.2 and earlier.
-
- if group_name == '_meta':
- if 'hostvars' in data:
- self.host_vars_from_top = data['hostvars']
- continue
-
- if group_name != all.name:
- group = groups[group_name] = Group(group_name)
- else:
- group = all
- host = None
-
- if not isinstance(data, dict):
- data = {'hosts': data}
- # is not those subkeys, then simplified syntax, host with vars
- elif not any(k in data for k in ('hosts','vars','children')):
- data = {'hosts': [group_name], 'vars': data}
-
- if 'hosts' in data:
- if not isinstance(data['hosts'], list):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
- "data for the host list:\n %s" % (group_name, data))
-
- for hostname in data['hosts']:
- if not hostname in all_hosts:
- all_hosts[hostname] = Host(hostname)
- host = all_hosts[hostname]
- group.add_host(host)
-
- if 'vars' in data:
- if not isinstance(data['vars'], dict):
- raise errors.AnsibleError("You defined a group \"%s\" with bad "
- "data for variables:\n %s" % (group_name, data))
-
- for k, v in data['vars'].iteritems():
- if group.name == all.name:
- all.set_variable(k, v)
- else:
- group.set_variable(k, v)
-
- # Separate loop to ensure all groups are defined
- for (group_name, data) in self.raw.items():
- if group_name == '_meta':
- continue
- if isinstance(data, dict) and 'children' in data:
- for child_name in data['children']:
- if child_name in groups:
- groups[group_name].add_child_group(groups[child_name])
-
- for group in groups.values():
- if group.depth == 0 and group.name != 'all':
- all.add_child_group(group)
-
- return groups
-
- def get_host_variables(self, host):
- """ Runs <script> --host <hostname> to determine additional host variables """
- if self.host_vars_from_top is not None:
- got = self.host_vars_from_top.get(host.name, {})
- return got
-
-
- cmd = [self.filename, "--host", host.name]
- try:
- sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
- (out, err) = sp.communicate()
- if out.strip() == '':
- return dict()
- try:
- return json_dict_bytes_to_unicode(utils.parse_json(out))
- except ValueError:
- raise errors.AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
-
diff --git a/v1/ansible/inventory/vars_plugins/__init__.py b/v1/ansible/inventory/vars_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/inventory/vars_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/inventory/vars_plugins/noop.py b/v1/ansible/inventory/vars_plugins/noop.py
deleted file mode 100644
index 5d4b4b6658..0000000000
--- a/v1/ansible/inventory/vars_plugins/noop.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2014, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-class VarsModule(object):
-
- """
- Loads variables for groups and/or hosts
- """
-
- def __init__(self, inventory):
-
- """ constructor """
-
- self.inventory = inventory
- self.inventory_basedir = inventory.basedir()
-
-
- def run(self, host, vault_password=None):
- """ For backwards compatibility, when only vars per host were retrieved
- This method should return both host specific vars as well as vars
- calculated from groups it is a member of """
- return {}
-
-
- def get_host_vars(self, host, vault_password=None):
- """ Get host specific variables. """
- return {}
-
-
- def get_group_vars(self, group, vault_password=None):
- """ Get group specific variables. """
- return {}
-
diff --git a/v1/ansible/module_common.py b/v1/ansible/module_common.py
deleted file mode 100644
index fba5b9137d..0000000000
--- a/v1/ansible/module_common.py
+++ /dev/null
@@ -1,196 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# from python and deps
-from cStringIO import StringIO
-import inspect
-import os
-import shlex
-
-# from Ansible
-from ansible import errors
-from ansible import utils
-from ansible import constants as C
-from ansible import __version__
-from ansible.utils.unicode import to_bytes
-
-REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
-REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
-REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
-REPLACER_WINDOWS = "# POWERSHELL_COMMON"
-REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
-REPLACER_SELINUX = "<<SELINUX_SPECIAL_FILESYSTEMS>>"
-
-
-class ModuleReplacer(object):
-
- """
- The Replacer is used to insert chunks of code into modules before
- transfer. Rather than doing classical python imports, this allows for more
- efficient transfer in a no-bootstrapping scenario by not moving extra files
- over the wire, and also takes care of embedding arguments in the transferred
- modules.
-
- This version is done in such a way that local imports can still be
- used in the module code, so IDEs don't have to be aware of what is going on.
-
- Example:
-
- from ansible.module_utils.basic import *
-
- ... will result in the insertion basic.py into the module
-
- from the module_utils/ directory in the source tree.
-
- All modules are required to import at least basic, though there will also
- be other snippets.
-
- # POWERSHELL_COMMON
-
- Also results in the inclusion of the common code in powershell.ps1
-
- """
-
- # ******************************************************************************
-
- def __init__(self, strip_comments=False):
- this_file = inspect.getfile(inspect.currentframe())
- self.snippet_path = os.path.join(os.path.dirname(this_file), 'module_utils')
- self.strip_comments = strip_comments # TODO: implement
-
- # ******************************************************************************
-
-
- def slurp(self, path):
- if not os.path.exists(path):
- raise errors.AnsibleError("imported module support code does not exist at %s" % path)
- fd = open(path)
- data = fd.read()
- fd.close()
- return data
-
- def _find_snippet_imports(self, module_data, module_path):
- """
- Given the source of the module, convert it to a Jinja2 template to insert
- module code and return whether it's a new or old style module.
- """
-
- module_style = 'old'
- if REPLACER in module_data:
- module_style = 'new'
- elif 'from ansible.module_utils.' in module_data:
- module_style = 'new'
- elif 'WANT_JSON' in module_data:
- module_style = 'non_native_want_json'
-
- output = StringIO()
- lines = module_data.split('\n')
- snippet_names = []
-
- for line in lines:
-
- if REPLACER in line:
- output.write(self.slurp(os.path.join(self.snippet_path, "basic.py")))
- snippet_names.append('basic')
- if REPLACER_WINDOWS in line:
- ps_data = self.slurp(os.path.join(self.snippet_path, "powershell.ps1"))
- output.write(ps_data)
- snippet_names.append('powershell')
- elif line.startswith('from ansible.module_utils.'):
- tokens=line.split(".")
- import_error = False
- if len(tokens) != 3:
- import_error = True
- if " import *" not in line:
- import_error = True
- if import_error:
- raise errors.AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
- snippet_name = tokens[2].split()[0]
- snippet_names.append(snippet_name)
- output.write(self.slurp(os.path.join(self.snippet_path, snippet_name + ".py")))
-
- else:
- if self.strip_comments and line.startswith("#") or line == '':
- pass
- output.write(line)
- output.write("\n")
-
- if not module_path.endswith(".ps1"):
- # Unixy modules
- if len(snippet_names) > 0 and not 'basic' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
- else:
- # Windows modules
- if len(snippet_names) > 0 and not 'powershell' in snippet_names:
- raise errors.AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)
-
- return (output.getvalue(), module_style)
-
- # ******************************************************************************
-
- def modify_module(self, module_path, complex_args, module_args, inject):
-
- with open(module_path) as f:
-
- # read in the module source
- module_data = f.read()
-
- (module_data, module_style) = self._find_snippet_imports(module_data, module_path)
-
- complex_args_json = utils.jsonify(complex_args)
- # We force conversion of module_args to str because module_common calls shlex.split,
- # a standard library function that incorrectly handles Unicode input before Python 2.7.3.
- # Note: it would be better to do all this conversion at the border
- # (when the data is originally parsed into data structures) but
- # it's currently coming from too many sources to make that
- # effective.
- try:
- encoded_args = repr(module_args.encode('utf-8'))
- except UnicodeDecodeError:
- encoded_args = repr(module_args)
- try:
- encoded_complex = repr(complex_args_json.encode('utf-8'))
- except UnicodeDecodeError:
- encoded_complex = repr(complex_args_json.encode('utf-8'))
-
- # these strings should be part of the 'basic' snippet which is required to be included
- module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
- module_data = module_data.replace(REPLACER_SELINUX, ','.join(C.DEFAULT_SELINUX_SPECIAL_FS))
- module_data = module_data.replace(REPLACER_ARGS, encoded_args)
- module_data = module_data.replace(REPLACER_COMPLEX, encoded_complex)
-
- if module_style == 'new':
- facility = C.DEFAULT_SYSLOG_FACILITY
- if 'ansible_syslog_facility' in inject:
- facility = inject['ansible_syslog_facility']
- module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
-
- lines = module_data.split("\n")
- shebang = None
- if lines[0].startswith("#!"):
- shebang = lines[0].strip()
- args = shlex.split(str(shebang[2:]))
- interpreter = args[0]
- interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
-
- if interpreter_config in inject:
- interpreter = to_bytes(inject[interpreter_config], errors='strict')
- lines[0] = shebang = "#!%s %s" % (interpreter, " ".join(args[1:]))
- module_data = "\n".join(lines)
-
- return (module_data, module_style, shebang)
-
diff --git a/v1/ansible/module_utils/__init__.py b/v1/ansible/module_utils/__init__.py
deleted file mode 100644
index 266d06a613..0000000000
--- a/v1/ansible/module_utils/__init__.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# 2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
diff --git a/v1/ansible/module_utils/a10.py b/v1/ansible/module_utils/a10.py
deleted file mode 100644
index cfc217ee61..0000000000
--- a/v1/ansible/module_utils/a10.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-AXAPI_PORT_PROTOCOLS = {
- 'tcp': 2,
- 'udp': 3,
-}
-
-AXAPI_VPORT_PROTOCOLS = {
- 'tcp': 2,
- 'udp': 3,
- 'fast-http': 9,
- 'http': 11,
- 'https': 12,
-}
-
-def a10_argument_spec():
- return dict(
- host=dict(type='str', required=True),
- username=dict(type='str', aliases=['user', 'admin'], required=True),
- password=dict(type='str', aliases=['pass', 'pwd'], required=True, no_log=True),
- write_config=dict(type='bool', default=False)
- )
-
-def axapi_failure(result):
- if 'response' in result and result['response'].get('status') == 'fail':
- return True
- return False
-
-def axapi_call(module, url, post=None):
- '''
- Returns a datastructure based on the result of the API call
- '''
- rsp, info = fetch_url(module, url, data=post)
- if not rsp or info['status'] >= 400:
- module.fail_json(msg="failed to connect (status code %s), error was %s" % (info['status'], info.get('msg', 'no error given')))
- try:
- raw_data = rsp.read()
- data = json.loads(raw_data)
- except ValueError:
- # at least one API call (system.action.write_config) returns
- # XML even when JSON is requested, so do some minimal handling
- # here to prevent failing even when the call succeeded
- if 'status="ok"' in raw_data.lower():
- data = {"response": {"status": "OK"}}
- else:
- data = {"response": {"status": "fail", "err": {"msg": raw_data}}}
- except:
- module.fail_json(msg="could not read the result from the host")
- finally:
- rsp.close()
- return data
-
-def axapi_authenticate(module, base_url, username, password):
- url = '%s&method=authenticate&username=%s&password=%s' % (base_url, username, password)
- result = axapi_call(module, url)
- if axapi_failure(result):
- return module.fail_json(msg=result['response']['err']['msg'])
- sessid = result['session_id']
- return base_url + '&session_id=' + sessid
-
-def axapi_enabled_disabled(flag):
- '''
- The axapi uses 0/1 integer values for flags, rather than strings
- or booleans, so convert the given flag to a 0 or 1. For now, params
- are specified as strings only so thats what we check.
- '''
- if flag == 'enabled':
- return 1
- else:
- return 0
-
-def axapi_get_port_protocol(protocol):
- return AXAPI_PORT_PROTOCOLS.get(protocol.lower(), None)
-
-def axapi_get_vport_protocol(protocol):
- return AXAPI_VPORT_PROTOCOLS.get(protocol.lower(), None)
-
diff --git a/v1/ansible/module_utils/basic.py b/v1/ansible/module_utils/basic.py
deleted file mode 100644
index e772a12efc..0000000000
--- a/v1/ansible/module_utils/basic.py
+++ /dev/null
@@ -1,1631 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# == BEGIN DYNAMICALLY INSERTED CODE ==
-
-ANSIBLE_VERSION = "<<ANSIBLE_VERSION>>"
-
-MODULE_ARGS = "<<INCLUDE_ANSIBLE_MODULE_ARGS>>"
-MODULE_COMPLEX_ARGS = "<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>"
-
-BOOLEANS_TRUE = ['yes', 'on', '1', 'true', 1]
-BOOLEANS_FALSE = ['no', 'off', '0', 'false', 0]
-BOOLEANS = BOOLEANS_TRUE + BOOLEANS_FALSE
-
-SELINUX_SPECIAL_FS="<<SELINUX_SPECIAL_FILESYSTEMS>>"
-
-# ansible modules can be written in any language. To simplify
-# development of Python modules, the functions available here
-# can be inserted in any module source automatically by including
-# #<<INCLUDE_ANSIBLE_MODULE_COMMON>> on a blank line by itself inside
-# of an ansible module. The source of this common code lives
-# in lib/ansible/module_common.py
-
-import locale
-import os
-import re
-import pipes
-import shlex
-import subprocess
-import sys
-import syslog
-import types
-import time
-import select
-import shutil
-import stat
-import tempfile
-import traceback
-import grp
-import pwd
-import platform
-import errno
-import tempfile
-
-try:
- import json
-except ImportError:
- try:
- import simplejson as json
- except ImportError:
- sys.stderr.write('Error: ansible requires a json module, none found!')
- sys.exit(1)
- except SyntaxError:
- sys.stderr.write('SyntaxError: probably due to json and python being for different versions')
- sys.exit(1)
-
-HAVE_SELINUX=False
-try:
- import selinux
- HAVE_SELINUX=True
-except ImportError:
- pass
-
-HAVE_HASHLIB=False
-try:
- from hashlib import sha1 as _sha1
- HAVE_HASHLIB=True
-except ImportError:
- from sha import sha as _sha1
-
-try:
- from hashlib import md5 as _md5
-except ImportError:
- try:
- from md5 import md5 as _md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- _md5 = None
-
-try:
- from hashlib import sha256 as _sha256
-except ImportError:
- pass
-
-try:
- from systemd import journal
- has_journal = True
-except ImportError:
- import syslog
- has_journal = False
-
-try:
- from ast import literal_eval as _literal_eval
-except ImportError:
- # a replacement for literal_eval that works with python 2.4. from:
- # https://mail.python.org/pipermail/python-list/2009-September/551880.html
- # which is essentially a cut/past from an earlier (2.6) version of python's
- # ast.py
- from compiler import parse
- from compiler.ast import *
- def _literal_eval(node_or_string):
- """
- Safely evaluate an expression node or a string containing a Python
- expression. The string or node provided may only consist of the following
- Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
- and None.
- """
- _safe_names = {'None': None, 'True': True, 'False': False}
- if isinstance(node_or_string, basestring):
- node_or_string = parse(node_or_string, mode='eval')
- if isinstance(node_or_string, Expression):
- node_or_string = node_or_string.node
- def _convert(node):
- if isinstance(node, Const) and isinstance(node.value, (basestring, int, float, long, complex)):
- return node.value
- elif isinstance(node, Tuple):
- return tuple(map(_convert, node.nodes))
- elif isinstance(node, List):
- return list(map(_convert, node.nodes))
- elif isinstance(node, Dict):
- return dict((_convert(k), _convert(v)) for k, v in node.items)
- elif isinstance(node, Name):
- if node.name in _safe_names:
- return _safe_names[node.name]
- elif isinstance(node, UnarySub):
- return -_convert(node.expr)
- raise ValueError('malformed string')
- return _convert(node_or_string)
-
-FILE_COMMON_ARGUMENTS=dict(
- src = dict(),
- mode = dict(),
- owner = dict(),
- group = dict(),
- seuser = dict(),
- serole = dict(),
- selevel = dict(),
- setype = dict(),
- follow = dict(type='bool', default=False),
- # not taken by the file module, but other modules call file so it must ignore them.
- content = dict(no_log=True),
- backup = dict(),
- force = dict(),
- remote_src = dict(), # used by assemble
- regexp = dict(), # used by assemble
- delimiter = dict(), # used by assemble
- directory_mode = dict(), # used by copy
-)
-
-PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
-
-def get_platform():
- ''' what's the platform? example: Linux is a platform. '''
- return platform.system()
-
-def get_distribution():
- ''' return the distribution name '''
- if platform.system() == 'Linux':
- try:
- supported_dists = platform._supported_dists + ('arch',)
- distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
- if not distribution and os.path.isfile('/etc/system-release'):
- distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
- if 'Amazon' in distribution:
- distribution = 'Amazon'
- else:
- distribution = 'OtherLinux'
- except:
- # FIXME: MethodMissing, I assume?
- distribution = platform.dist()[0].capitalize()
- else:
- distribution = None
- return distribution
-
-def get_distribution_version():
- ''' return the distribution version '''
- if platform.system() == 'Linux':
- try:
- distribution_version = platform.linux_distribution()[1]
- if not distribution_version and os.path.isfile('/etc/system-release'):
- distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
- except:
- # FIXME: MethodMissing, I assume?
- distribution_version = platform.dist()[1]
- else:
- distribution_version = None
- return distribution_version
-
-def load_platform_subclass(cls, *args, **kwargs):
- '''
- used by modules like User to have different implementations based on detected platform. See User
- module for an example.
- '''
-
- this_platform = get_platform()
- distribution = get_distribution()
- subclass = None
-
- # get the most specific superclass for this platform
- if distribution is not None:
- for sc in cls.__subclasses__():
- if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
- subclass = sc
- if subclass is None:
- for sc in cls.__subclasses__():
- if sc.platform == this_platform and sc.distribution is None:
- subclass = sc
- if subclass is None:
- subclass = cls
-
- return super(cls, subclass).__new__(subclass)
-
-
-def json_dict_unicode_to_bytes(d):
- ''' Recursively convert dict keys and values to byte str
-
- Specialized for json return because this only handles, lists, tuples,
- and dict container types (the containers that the json module returns)
- '''
-
- if isinstance(d, unicode):
- return d.encode('utf-8')
- elif isinstance(d, dict):
- return dict(map(json_dict_unicode_to_bytes, d.iteritems()))
- elif isinstance(d, list):
- return list(map(json_dict_unicode_to_bytes, d))
- elif isinstance(d, tuple):
- return tuple(map(json_dict_unicode_to_bytes, d))
- else:
- return d
-
-def json_dict_bytes_to_unicode(d):
- ''' Recursively convert dict keys and values to byte str
-
- Specialized for json return because this only handles, lists, tuples,
- and dict container types (the containers that the json module returns)
- '''
-
- if isinstance(d, str):
- return unicode(d, 'utf-8')
- elif isinstance(d, dict):
- return dict(map(json_dict_bytes_to_unicode, d.iteritems()))
- elif isinstance(d, list):
- return list(map(json_dict_bytes_to_unicode, d))
- elif isinstance(d, tuple):
- return tuple(map(json_dict_bytes_to_unicode, d))
- else:
- return d
-
-def heuristic_log_sanitize(data):
- ''' Remove strings that look like passwords from log messages '''
- # Currently filters:
- # user:pass@foo/whatever and http://username:pass@wherever/foo
- # This code has false positives and consumes parts of logs that are
- # not passwds
-
- # begin: start of a passwd containing string
- # end: end of a passwd containing string
- # sep: char between user and passwd
- # prev_begin: where in the overall string to start a search for
- # a passwd
- # sep_search_end: where in the string to end a search for the sep
- output = []
- begin = len(data)
- prev_begin = begin
- sep = 1
- while sep:
- # Find the potential end of a passwd
- try:
- end = data.rindex('@', 0, begin)
- except ValueError:
- # No passwd in the rest of the data
- output.insert(0, data[0:begin])
- break
-
- # Search for the beginning of a passwd
- sep = None
- sep_search_end = end
- while not sep:
- # URL-style username+password
- try:
- begin = data.rindex('://', 0, sep_search_end)
- except ValueError:
- # No url style in the data, check for ssh style in the
- # rest of the string
- begin = 0
- # Search for separator
- try:
- sep = data.index(':', begin + 3, end)
- except ValueError:
- # No separator; choices:
- if begin == 0:
- # Searched the whole string so there's no password
- # here. Return the remaining data
- output.insert(0, data[0:begin])
- break
- # Search for a different beginning of the password field.
- sep_search_end = begin
- continue
- if sep:
- # Password was found; remove it.
- output.insert(0, data[end:prev_begin])
- output.insert(0, '********')
- output.insert(0, data[begin:sep + 1])
- prev_begin = begin
-
- return ''.join(output)
-
-
-class AnsibleModule(object):
-
- def __init__(self, argument_spec, bypass_checks=False, no_log=False,
- check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
- required_one_of=None, add_file_common_args=False, supports_check_mode=False,
- required_if=None):
-
- '''
- common code for quickly building an ansible module in Python
- (although you can write modules in anything that can return JSON)
- see library/* for examples
- '''
-
- self.argument_spec = argument_spec
- self.supports_check_mode = supports_check_mode
- self.check_mode = False
- self.no_log = no_log
- self.cleanup_files = []
-
- self.aliases = {}
-
- if add_file_common_args:
- for k, v in FILE_COMMON_ARGUMENTS.iteritems():
- if k not in self.argument_spec:
- self.argument_spec[k] = v
-
- # check the locale as set by the current environment, and
- # reset to LANG=C if it's an invalid/unavailable locale
- self._check_locale()
-
- (self.params, self.args) = self._load_params()
-
- self._legal_inputs = ['CHECKMODE', 'NO_LOG']
-
- self.aliases = self._handle_aliases()
-
- if check_invalid_arguments:
- self._check_invalid_arguments()
- self._check_for_check_mode()
- self._check_for_no_log()
-
- # check exclusive early
- if not bypass_checks:
- self._check_mutually_exclusive(mutually_exclusive)
-
- self._set_defaults(pre=True)
-
- if not bypass_checks:
- self._check_required_arguments()
- self._check_argument_values()
- self._check_argument_types()
- self._check_required_together(required_together)
- self._check_required_one_of(required_one_of)
- self._check_required_if(required_if)
-
- self._set_defaults(pre=False)
- if not self.no_log:
- self._log_invocation()
-
- # finally, make sure we're in a sane working dir
- self._set_cwd()
-
- def load_file_common_arguments(self, params):
- '''
- many modules deal with files, this encapsulates common
- options that the file module accepts such that it is directly
- available to all modules and they can share code.
- '''
-
- path = params.get('path', params.get('dest', None))
- if path is None:
- return {}
- else:
- path = os.path.expanduser(path)
-
- # if the path is a symlink, and we're following links, get
- # the target of the link instead for testing
- if params.get('follow', False) and os.path.islink(path):
- path = os.path.realpath(path)
-
- mode = params.get('mode', None)
- owner = params.get('owner', None)
- group = params.get('group', None)
-
- # selinux related options
- seuser = params.get('seuser', None)
- serole = params.get('serole', None)
- setype = params.get('setype', None)
- selevel = params.get('selevel', None)
- secontext = [seuser, serole, setype]
-
- if self.selinux_mls_enabled():
- secontext.append(selevel)
-
- default_secontext = self.selinux_default_context(path)
- for i in range(len(default_secontext)):
- if i is not None and secontext[i] == '_default':
- secontext[i] = default_secontext[i]
-
- return dict(
- path=path, mode=mode, owner=owner, group=group,
- seuser=seuser, serole=serole, setype=setype,
- selevel=selevel, secontext=secontext,
- )
-
-
- # Detect whether using selinux that is MLS-aware.
- # While this means you can set the level/range with
- # selinux.lsetfilecon(), it may or may not mean that you
- # will get the selevel as part of the context returned
- # by selinux.lgetfilecon().
-
- def selinux_mls_enabled(self):
- if not HAVE_SELINUX:
- return False
- if selinux.is_selinux_mls_enabled() == 1:
- return True
- else:
- return False
-
- def selinux_enabled(self):
- if not HAVE_SELINUX:
- seenabled = self.get_bin_path('selinuxenabled')
- if seenabled is not None:
- (rc,out,err) = self.run_command(seenabled)
- if rc == 0:
- self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
- return False
- if selinux.is_selinux_enabled() == 1:
- return True
- else:
- return False
-
- # Determine whether we need a placeholder for selevel/mls
- def selinux_initial_context(self):
- context = [None, None, None]
- if self.selinux_mls_enabled():
- context.append(None)
- return context
-
- def _to_filesystem_str(self, path):
- '''Returns filesystem path as a str, if it wasn't already.
-
- Used in selinux interactions because it cannot accept unicode
- instances, and specifying complex args in a playbook leaves
- you with unicode instances. This method currently assumes
- that your filesystem encoding is UTF-8.
-
- '''
- if isinstance(path, unicode):
- path = path.encode("utf-8")
- return path
-
- # If selinux fails to find a default, return an array of None
- def selinux_default_context(self, path, mode=0):
- context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
- return context
- try:
- ret = selinux.matchpathcon(self._to_filesystem_str(path), mode)
- except OSError:
- return context
- if ret[0] == -1:
- return context
- # Limit split to 4 because the selevel, the last in the list,
- # may contain ':' characters
- context = ret[1].split(':', 3)
- return context
-
- def selinux_context(self, path):
- context = self.selinux_initial_context()
- if not HAVE_SELINUX or not self.selinux_enabled():
- return context
- try:
- ret = selinux.lgetfilecon_raw(self._to_filesystem_str(path))
- except OSError, e:
- if e.errno == errno.ENOENT:
- self.fail_json(path=path, msg='path %s does not exist' % path)
- else:
- self.fail_json(path=path, msg='failed to retrieve selinux context')
- if ret[0] == -1:
- return context
- # Limit split to 4 because the selevel, the last in the list,
- # may contain ':' characters
- context = ret[1].split(':', 3)
- return context
-
- def user_and_group(self, filename):
- filename = os.path.expanduser(filename)
- st = os.lstat(filename)
- uid = st.st_uid
- gid = st.st_gid
- return (uid, gid)
-
- def find_mount_point(self, path):
- path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
- while not os.path.ismount(path):
- path = os.path.dirname(path)
- return path
-
- def is_special_selinux_path(self, path):
- """
- Returns a tuple containing (True, selinux_context) if the given path is on a
- NFS or other 'special' fs mount point, otherwise the return will be (False, None).
- """
- try:
- f = open('/proc/mounts', 'r')
- mount_data = f.readlines()
- f.close()
- except:
- return (False, None)
- path_mount_point = self.find_mount_point(path)
- for line in mount_data:
- (device, mount_point, fstype, options, rest) = line.split(' ', 4)
-
- if path_mount_point == mount_point:
- for fs in SELINUX_SPECIAL_FS.split(','):
- if fs in fstype:
- special_context = self.selinux_context(path_mount_point)
- return (True, special_context)
-
- return (False, None)
-
- def set_default_selinux_context(self, path, changed):
- if not HAVE_SELINUX or not self.selinux_enabled():
- return changed
- context = self.selinux_default_context(path)
- return self.set_context_if_different(path, context, False)
-
- def set_context_if_different(self, path, context, changed):
-
- if not HAVE_SELINUX or not self.selinux_enabled():
- return changed
- cur_context = self.selinux_context(path)
- new_context = list(cur_context)
- # Iterate over the current context instead of the
- # argument context, which may have selevel.
-
- (is_special_se, sp_context) = self.is_special_selinux_path(path)
- if is_special_se:
- new_context = sp_context
- else:
- for i in range(len(cur_context)):
- if len(context) > i:
- if context[i] is not None and context[i] != cur_context[i]:
- new_context[i] = context[i]
- if context[i] is None:
- new_context[i] = cur_context[i]
-
- if cur_context != new_context:
- try:
- if self.check_mode:
- return True
- rc = selinux.lsetfilecon(self._to_filesystem_str(path),
- str(':'.join(new_context)))
- except OSError:
- self.fail_json(path=path, msg='invalid selinux context', new_context=new_context, cur_context=cur_context, input_was=context)
- if rc != 0:
- self.fail_json(path=path, msg='set selinux context failed')
- changed = True
- return changed
-
- def set_owner_if_different(self, path, owner, changed):
- path = os.path.expanduser(path)
- if owner is None:
- return changed
- orig_uid, orig_gid = self.user_and_group(path)
- try:
- uid = int(owner)
- except ValueError:
- try:
- uid = pwd.getpwnam(owner).pw_uid
- except KeyError:
- self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
- if orig_uid != uid:
- if self.check_mode:
- return True
- try:
- os.lchown(path, uid, -1)
- except OSError:
- self.fail_json(path=path, msg='chown failed')
- changed = True
- return changed
-
- def set_group_if_different(self, path, group, changed):
- path = os.path.expanduser(path)
- if group is None:
- return changed
- orig_uid, orig_gid = self.user_and_group(path)
- try:
- gid = int(group)
- except ValueError:
- try:
- gid = grp.getgrnam(group).gr_gid
- except KeyError:
- self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
- if orig_gid != gid:
- if self.check_mode:
- return True
- try:
- os.lchown(path, -1, gid)
- except OSError:
- self.fail_json(path=path, msg='chgrp failed')
- changed = True
- return changed
-
- def set_mode_if_different(self, path, mode, changed):
- path = os.path.expanduser(path)
- path_stat = os.lstat(path)
-
- if mode is None:
- return changed
-
- if not isinstance(mode, int):
- try:
- mode = int(mode, 8)
- except Exception:
- try:
- mode = self._symbolic_mode_to_octal(path_stat, mode)
- except Exception, e:
- self.fail_json(path=path,
- msg="mode must be in octal or symbolic form",
- details=str(e))
-
- prev_mode = stat.S_IMODE(path_stat.st_mode)
-
- if prev_mode != mode:
- if self.check_mode:
- return True
- # FIXME: comparison against string above will cause this to be executed
- # every time
- try:
- if hasattr(os, 'lchmod'):
- os.lchmod(path, mode)
- else:
- if not os.path.islink(path):
- os.chmod(path, mode)
- else:
- # Attempt to set the perms of the symlink but be
- # careful not to change the perms of the underlying
- # file while trying
- underlying_stat = os.stat(path)
- os.chmod(path, mode)
- new_underlying_stat = os.stat(path)
- if underlying_stat.st_mode != new_underlying_stat.st_mode:
- os.chmod(path, stat.S_IMODE(underlying_stat.st_mode))
- q_stat = os.stat(path)
- except OSError, e:
- if os.path.islink(path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
- pass
- elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
- pass
- else:
- raise e
- except Exception, e:
- self.fail_json(path=path, msg='chmod failed', details=str(e))
-
- path_stat = os.lstat(path)
- new_mode = stat.S_IMODE(path_stat.st_mode)
-
- if new_mode != prev_mode:
- changed = True
- return changed
-
- def _symbolic_mode_to_octal(self, path_stat, symbolic_mode):
- new_mode = stat.S_IMODE(path_stat.st_mode)
-
- mode_re = re.compile(r'^(?P<users>[ugoa]+)(?P<operator>[-+=])(?P<perms>[rwxXst]*|[ugo])$')
- for mode in symbolic_mode.split(','):
- match = mode_re.match(mode)
- if match:
- users = match.group('users')
- operator = match.group('operator')
- perms = match.group('perms')
-
- if users == 'a': users = 'ugo'
-
- for user in users:
- mode_to_apply = self._get_octal_mode_from_symbolic_perms(path_stat, user, perms)
- new_mode = self._apply_operation_to_mode(user, operator, mode_to_apply, new_mode)
- else:
- raise ValueError("bad symbolic permission for mode: %s" % mode)
- return new_mode
-
- def _apply_operation_to_mode(self, user, operator, mode_to_apply, current_mode):
- if operator == '=':
- if user == 'u': mask = stat.S_IRWXU | stat.S_ISUID
- elif user == 'g': mask = stat.S_IRWXG | stat.S_ISGID
- elif user == 'o': mask = stat.S_IRWXO | stat.S_ISVTX
-
- # mask out u, g, or o permissions from current_mode and apply new permissions
- inverse_mask = mask ^ 07777
- new_mode = (current_mode & inverse_mask) | mode_to_apply
- elif operator == '+':
- new_mode = current_mode | mode_to_apply
- elif operator == '-':
- new_mode = current_mode - (current_mode & mode_to_apply)
- return new_mode
-
- def _get_octal_mode_from_symbolic_perms(self, path_stat, user, perms):
- prev_mode = stat.S_IMODE(path_stat.st_mode)
-
- is_directory = stat.S_ISDIR(path_stat.st_mode)
- has_x_permissions = (prev_mode & 00111) > 0
- apply_X_permission = is_directory or has_x_permissions
-
- # Permission bits constants documented at:
- # http://docs.python.org/2/library/stat.html#stat.S_ISUID
- if apply_X_permission:
- X_perms = {
- 'u': {'X': stat.S_IXUSR},
- 'g': {'X': stat.S_IXGRP},
- 'o': {'X': stat.S_IXOTH}
- }
- else:
- X_perms = {
- 'u': {'X': 0},
- 'g': {'X': 0},
- 'o': {'X': 0}
- }
-
- user_perms_to_modes = {
- 'u': {
- 'r': stat.S_IRUSR,
- 'w': stat.S_IWUSR,
- 'x': stat.S_IXUSR,
- 's': stat.S_ISUID,
- 't': 0,
- 'u': prev_mode & stat.S_IRWXU,
- 'g': (prev_mode & stat.S_IRWXG) << 3,
- 'o': (prev_mode & stat.S_IRWXO) << 6 },
- 'g': {
- 'r': stat.S_IRGRP,
- 'w': stat.S_IWGRP,
- 'x': stat.S_IXGRP,
- 's': stat.S_ISGID,
- 't': 0,
- 'u': (prev_mode & stat.S_IRWXU) >> 3,
- 'g': prev_mode & stat.S_IRWXG,
- 'o': (prev_mode & stat.S_IRWXO) << 3 },
- 'o': {
- 'r': stat.S_IROTH,
- 'w': stat.S_IWOTH,
- 'x': stat.S_IXOTH,
- 's': 0,
- 't': stat.S_ISVTX,
- 'u': (prev_mode & stat.S_IRWXU) >> 6,
- 'g': (prev_mode & stat.S_IRWXG) >> 3,
- 'o': prev_mode & stat.S_IRWXO }
- }
-
- # Insert X_perms into user_perms_to_modes
- for key, value in X_perms.items():
- user_perms_to_modes[key].update(value)
-
- or_reduce = lambda mode, perm: mode | user_perms_to_modes[user][perm]
- return reduce(or_reduce, perms, 0)
-
- def set_fs_attributes_if_different(self, file_args, changed):
- # set modes owners and context as needed
- changed = self.set_context_if_different(
- file_args['path'], file_args['secontext'], changed
- )
- changed = self.set_owner_if_different(
- file_args['path'], file_args['owner'], changed
- )
- changed = self.set_group_if_different(
- file_args['path'], file_args['group'], changed
- )
- changed = self.set_mode_if_different(
- file_args['path'], file_args['mode'], changed
- )
- return changed
-
- def set_directory_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
-
- def set_file_attributes_if_different(self, file_args, changed):
- return self.set_fs_attributes_if_different(file_args, changed)
-
- def add_path_info(self, kwargs):
- '''
- for results that are files, supplement the info about the file
- in the return path with stats about the file path.
- '''
-
- path = kwargs.get('path', kwargs.get('dest', None))
- if path is None:
- return kwargs
- if os.path.exists(path):
- (uid, gid) = self.user_and_group(path)
- kwargs['uid'] = uid
- kwargs['gid'] = gid
- try:
- user = pwd.getpwuid(uid)[0]
- except KeyError:
- user = str(uid)
- try:
- group = grp.getgrgid(gid)[0]
- except KeyError:
- group = str(gid)
- kwargs['owner'] = user
- kwargs['group'] = group
- st = os.lstat(path)
- kwargs['mode'] = oct(stat.S_IMODE(st[stat.ST_MODE]))
- # secontext not yet supported
- if os.path.islink(path):
- kwargs['state'] = 'link'
- elif os.path.isdir(path):
- kwargs['state'] = 'directory'
- elif os.stat(path).st_nlink > 1:
- kwargs['state'] = 'hard'
- else:
- kwargs['state'] = 'file'
- if HAVE_SELINUX and self.selinux_enabled():
- kwargs['secontext'] = ':'.join(self.selinux_context(path))
- kwargs['size'] = st[stat.ST_SIZE]
- else:
- kwargs['state'] = 'absent'
- return kwargs
-
- def _check_locale(self):
- '''
- Uses the locale module to test the currently set locale
- (per the LANG and LC_CTYPE environment settings)
- '''
- try:
- # setting the locale to '' uses the default locale
- # as it would be returned by locale.getdefaultlocale()
- locale.setlocale(locale.LC_ALL, '')
- except locale.Error, e:
- # fallback to the 'C' locale, which may cause unicode
- # issues but is preferable to simply failing because
- # of an unknown locale
- locale.setlocale(locale.LC_ALL, 'C')
- os.environ['LANG'] = 'C'
- os.environ['LC_CTYPE'] = 'C'
- os.environ['LC_MESSAGES'] = 'C'
- except Exception, e:
- self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" % e)
-
- def _handle_aliases(self):
- aliases_results = {} #alias:canon
- for (k,v) in self.argument_spec.iteritems():
- self._legal_inputs.append(k)
- aliases = v.get('aliases', None)
- default = v.get('default', None)
- required = v.get('required', False)
- if default is not None and required:
- # not alias specific but this is a good place to check this
- self.fail_json(msg="internal error: required and default are mutually exclusive for %s" % k)
- if aliases is None:
- continue
- if type(aliases) != list:
- self.fail_json(msg='internal error: aliases must be a list')
- for alias in aliases:
- self._legal_inputs.append(alias)
- aliases_results[alias] = k
- if alias in self.params:
- self.params[k] = self.params[alias]
-
- return aliases_results
-
- def _check_for_check_mode(self):
- for (k,v) in self.params.iteritems():
- if k == 'CHECKMODE':
- if not self.supports_check_mode:
- self.exit_json(skipped=True, msg="remote module does not support check mode")
- if self.supports_check_mode:
- self.check_mode = True
-
- def _check_for_no_log(self):
- for (k,v) in self.params.iteritems():
- if k == 'NO_LOG':
- self.no_log = self.boolean(v)
-
- def _check_invalid_arguments(self):
- for (k,v) in self.params.iteritems():
- # these should be in legal inputs already
- #if k in ('CHECKMODE', 'NO_LOG'):
- # continue
- if k not in self._legal_inputs:
- self.fail_json(msg="unsupported parameter for module: %s" % k)
-
- def _count_terms(self, check):
- count = 0
- for term in check:
- if term in self.params:
- count += 1
- return count
-
- def _check_mutually_exclusive(self, spec):
- if spec is None:
- return
- for check in spec:
- count = self._count_terms(check)
- if count > 1:
- self.fail_json(msg="parameters are mutually exclusive: %s" % check)
-
- def _check_required_one_of(self, spec):
- if spec is None:
- return
- for check in spec:
- count = self._count_terms(check)
- if count == 0:
- self.fail_json(msg="one of the following is required: %s" % ','.join(check))
-
- def _check_required_together(self, spec):
- if spec is None:
- return
- for check in spec:
- counts = [ self._count_terms([field]) for field in check ]
- non_zero = [ c for c in counts if c > 0 ]
- if len(non_zero) > 0:
- if 0 in counts:
- self.fail_json(msg="parameters are required together: %s" % check)
-
- def _check_required_arguments(self):
- ''' ensure all required arguments are present '''
- missing = []
- for (k,v) in self.argument_spec.iteritems():
- required = v.get('required', False)
- if required and k not in self.params:
- missing.append(k)
- if len(missing) > 0:
- self.fail_json(msg="missing required arguments: %s" % ",".join(missing))
-
- def _check_required_if(self, spec):
- ''' ensure that parameters which conditionally required are present '''
- if spec is None:
- return
- for (key, val, requirements) in spec:
- missing = []
- if key in self.params and self.params[key] == val:
- for check in requirements:
- count = self._count_terms(check)
- if count == 0:
- missing.append(check)
- if len(missing) > 0:
- self.fail_json(msg="%s is %s but the following are missing: %s" % (key, val, ','.join(missing)))
-
- def _check_argument_values(self):
- ''' ensure all arguments have the requested values, and there are no stray arguments '''
- for (k,v) in self.argument_spec.iteritems():
- choices = v.get('choices',None)
- if choices is None:
- continue
- if type(choices) == list:
- if k in self.params:
- if self.params[k] not in choices:
- choices_str=",".join([str(c) for c in choices])
- msg="value of %s must be one of: %s, got: %s" % (k, choices_str, self.params[k])
- self.fail_json(msg=msg)
- else:
- self.fail_json(msg="internal error: do not know how to interpret argument_spec")
-
- def safe_eval(self, str, locals=None, include_exceptions=False):
-
- # do not allow method calls to modules
- if not isinstance(str, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (str, None)
- return str
- if re.search(r'\w\.\w+\(', str):
- if include_exceptions:
- return (str, None)
- return str
- # do not allow imports
- if re.search(r'import \w+', str):
- if include_exceptions:
- return (str, None)
- return str
- try:
- result = None
- if not locals:
- result = _literal_eval(str)
- else:
- result = _literal_eval(str, None, locals)
- if include_exceptions:
- return (result, None)
- else:
- return result
- except Exception, e:
- if include_exceptions:
- return (str, e)
- return str
-
- def _check_argument_types(self):
- ''' ensure all arguments have the requested type '''
- for (k, v) in self.argument_spec.iteritems():
- wanted = v.get('type', None)
- if wanted is None:
- continue
- if k not in self.params:
- continue
-
- value = self.params[k]
- is_invalid = False
-
- try:
- if wanted == 'str':
- if not isinstance(value, basestring):
- self.params[k] = str(value)
- elif wanted == 'list':
- if not isinstance(value, list):
- if isinstance(value, basestring):
- self.params[k] = value.split(",")
- elif isinstance(value, int) or isinstance(value, float):
- self.params[k] = [ str(value) ]
- else:
- is_invalid = True
- elif wanted == 'dict':
- if not isinstance(value, dict):
- if isinstance(value, basestring):
- if value.startswith("{"):
- try:
- self.params[k] = json.loads(value)
- except:
- (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
- if exc is not None:
- self.fail_json(msg="unable to evaluate dictionary for %s" % k)
- self.params[k] = result
- elif '=' in value:
- self.params[k] = dict([x.strip().split("=", 1) for x in value.split(",")])
- else:
- self.fail_json(msg="dictionary requested, could not parse JSON or key=value")
- else:
- is_invalid = True
- elif wanted == 'bool':
- if not isinstance(value, bool):
- if isinstance(value, basestring):
- self.params[k] = self.boolean(value)
- else:
- is_invalid = True
- elif wanted == 'int':
- if not isinstance(value, int):
- if isinstance(value, basestring):
- self.params[k] = int(value)
- else:
- is_invalid = True
- elif wanted == 'float':
- if not isinstance(value, float):
- if isinstance(value, basestring):
- self.params[k] = float(value)
- else:
- is_invalid = True
- else:
- self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
-
- if is_invalid:
- self.fail_json(msg="argument %s is of invalid type: %s, required: %s" % (k, type(value), wanted))
- except ValueError, e:
- self.fail_json(msg="value of argument %s is not of type %s and we were unable to automatically convert" % (k, wanted))
-
- def _set_defaults(self, pre=True):
- for (k,v) in self.argument_spec.iteritems():
- default = v.get('default', None)
- if pre == True:
- # this prevents setting defaults on required items
- if default is not None and k not in self.params:
- self.params[k] = default
- else:
- # make sure things without a default still get set None
- if k not in self.params:
- self.params[k] = default
-
- def _load_params(self):
- ''' read the input and return a dictionary and the arguments string '''
- args = MODULE_ARGS
- items = shlex.split(args)
- params = {}
- for x in items:
- try:
- (k, v) = x.split("=",1)
- except Exception, e:
- self.fail_json(msg="this module requires key=value arguments (%s)" % (items))
- if k in params:
- self.fail_json(msg="duplicate parameter: %s (value=%s)" % (k, v))
- params[k] = v
- params2 = json_dict_unicode_to_bytes(json.loads(MODULE_COMPLEX_ARGS))
- params2.update(params)
- return (params2, args)
-
- def _log_invocation(self):
- ''' log that ansible ran the module '''
- # TODO: generalize a separate log function and make log_invocation use it
- # Sanitize possible password argument when logging.
- log_args = dict()
- passwd_keys = ['password', 'login_password']
-
- for param in self.params:
- canon = self.aliases.get(param, param)
- arg_opts = self.argument_spec.get(canon, {})
- no_log = arg_opts.get('no_log', False)
-
- if self.boolean(no_log):
- log_args[param] = 'NOT_LOGGING_PARAMETER'
- elif param in passwd_keys:
- log_args[param] = 'NOT_LOGGING_PASSWORD'
- else:
- param_val = self.params[param]
- if not isinstance(param_val, basestring):
- param_val = str(param_val)
- elif isinstance(param_val, unicode):
- param_val = param_val.encode('utf-8')
- log_args[param] = heuristic_log_sanitize(param_val)
-
- module = 'ansible-%s' % os.path.basename(__file__)
- msg = []
- for arg in log_args:
- arg_val = log_args[arg]
- if not isinstance(arg_val, basestring):
- arg_val = str(arg_val)
- elif isinstance(arg_val, unicode):
- arg_val = arg_val.encode('utf-8')
- msg.append('%s=%s ' % (arg, arg_val))
- if msg:
- msg = 'Invoked with %s' % ''.join(msg)
- else:
- msg = 'Invoked'
-
- # 6655 - allow for accented characters
- if isinstance(msg, unicode):
- # We should never get here as msg should be type str, not unicode
- msg = msg.encode('utf-8')
-
- if (has_journal):
- journal_args = [("MODULE", os.path.basename(__file__))]
- for arg in log_args:
- journal_args.append((arg.upper(), str(log_args[arg])))
- try:
- journal.send("%s %s" % (module, msg), **dict(journal_args))
- except IOError, e:
- # fall back to syslog since logging to journal failed
- syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #1
- else:
- syslog.openlog(str(module), 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg) #2
-
- def _set_cwd(self):
- try:
- cwd = os.getcwd()
- if not os.access(cwd, os.F_OK|os.R_OK):
- raise
- return cwd
- except:
- # we don't have access to the cwd, probably because of sudo.
- # Try and move to a neutral location to prevent errors
- for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
- try:
- if os.access(cwd, os.F_OK|os.R_OK):
- os.chdir(cwd)
- return cwd
- except:
- pass
- # we won't error here, as it may *not* be a problem,
- # and we don't want to break modules unnecessarily
- return None
-
- def get_bin_path(self, arg, required=False, opt_dirs=[]):
- '''
- find system executable in PATH.
- Optional arguments:
- - required: if executable is not found and required is true, fail_json
- - opt_dirs: optional list of directories to search in addition to PATH
- if found return full path; otherwise return None
- '''
- sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
- paths = []
- for d in opt_dirs:
- if d is not None and os.path.exists(d):
- paths.append(d)
- paths += os.environ.get('PATH', '').split(os.pathsep)
- bin_path = None
- # mangle PATH to include /sbin dirs
- for p in sbin_paths:
- if p not in paths and os.path.exists(p):
- paths.append(p)
- for d in paths:
- path = os.path.join(d, arg)
- if os.path.exists(path) and self.is_executable(path):
- bin_path = path
- break
- if required and bin_path is None:
- self.fail_json(msg='Failed to find required executable %s' % arg)
- return bin_path
-
- def boolean(self, arg):
- ''' return a bool for the arg '''
- if arg is None or type(arg) == bool:
- return arg
- if type(arg) in types.StringTypes:
- arg = arg.lower()
- if arg in BOOLEANS_TRUE:
- return True
- elif arg in BOOLEANS_FALSE:
- return False
- else:
- self.fail_json(msg='Boolean %s not in either boolean list' % arg)
-
- def jsonify(self, data):
- for encoding in ("utf-8", "latin-1", "unicode_escape"):
- try:
- return json.dumps(data, encoding=encoding)
- # Old systems using simplejson module does not support encoding keyword.
- except TypeError, e:
- return json.dumps(data)
- except UnicodeDecodeError, e:
- continue
- self.fail_json(msg='Invalid unicode encoding encountered')
-
- def from_json(self, data):
- return json.loads(data)
-
- def add_cleanup_file(self, path):
- if path not in self.cleanup_files:
- self.cleanup_files.append(path)
-
- def do_cleanup_files(self):
- for path in self.cleanup_files:
- self.cleanup(path)
-
- def exit_json(self, **kwargs):
- ''' return from the module, without error '''
- self.add_path_info(kwargs)
- if not 'changed' in kwargs:
- kwargs['changed'] = False
- self.do_cleanup_files()
- print self.jsonify(kwargs)
- sys.exit(0)
-
- def fail_json(self, **kwargs):
- ''' return from the module, with an error message '''
- self.add_path_info(kwargs)
- assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
- kwargs['failed'] = True
- self.do_cleanup_files()
- print self.jsonify(kwargs)
- sys.exit(1)
-
- def is_executable(self, path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
- def digest_from_file(self, filename, digest_method):
- ''' Return hex digest of local file for a given digest_method, or None if file is not present. '''
- if not os.path.exists(filename):
- return None
- if os.path.isdir(filename):
- self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
- digest = digest_method
- blocksize = 64 * 1024
- infile = open(filename, 'rb')
- block = infile.read(blocksize)
- while block:
- digest.update(block)
- block = infile.read(blocksize)
- infile.close()
- return digest.hexdigest()
-
- def md5(self, filename):
- ''' Return MD5 hex digest of local file using digest_from_file().
-
- Do not use this function unless you have no other choice for:
- 1) Optional backwards compatibility
- 2) Compatibility with a third party protocol
-
- This function will not work on systems complying with FIPS-140-2.
-
- Most uses of this function can use the module.sha1 function instead.
- '''
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return self.digest_from_file(filename, _md5())
-
- def sha1(self, filename):
- ''' Return SHA1 hex digest of local file using digest_from_file(). '''
- return self.digest_from_file(filename, _sha1())
-
- def sha256(self, filename):
- ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
- if not HAVE_HASHLIB:
- self.fail_json(msg="SHA-256 checksums require hashlib, which is available in Python 2.5 and higher")
- return self.digest_from_file(filename, _sha256())
-
- def backup_local(self, fn):
- '''make a date-marked backup of the specified file, return True or False on success or failure'''
-
- backupdest = ''
- if os.path.exists(fn):
- # backups named basename-YYYY-MM-DD@HH:MM:SS~
- ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
- backupdest = '%s.%s' % (fn, ext)
-
- try:
- shutil.copy2(fn, backupdest)
- except (shutil.Error, IOError), e:
- self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, e))
-
- return backupdest
-
- def cleanup(self, tmpfile):
- if os.path.exists(tmpfile):
- try:
- os.unlink(tmpfile)
- except OSError, e:
- sys.stderr.write("could not cleanup %s: %s" % (tmpfile, e))
-
- def atomic_move(self, src, dest):
- '''atomically move src to dest, copying attributes from dest, returns true on success
- it uses os.rename to ensure this as it is an atomic operation, rest of the function is
- to work around limitations, corner cases and ensure selinux context is saved if possible'''
- context = None
- dest_stat = None
- if os.path.exists(dest):
- try:
- dest_stat = os.stat(dest)
- os.chmod(src, dest_stat.st_mode & 07777)
- os.chown(src, dest_stat.st_uid, dest_stat.st_gid)
- except OSError, e:
- if e.errno != errno.EPERM:
- raise
- if self.selinux_enabled():
- context = self.selinux_context(dest)
- else:
- if self.selinux_enabled():
- context = self.selinux_default_context(dest)
-
- creating = not os.path.exists(dest)
-
- try:
- login_name = os.getlogin()
- except OSError:
- # not having a tty can cause the above to fail, so
- # just get the LOGNAME environment variable instead
- login_name = os.environ.get('LOGNAME', None)
-
- # if the original login_name doesn't match the currently
- # logged-in user, or if the SUDO_USER environment variable
- # is set, then this user has switched their credentials
- switched_user = login_name and login_name != pwd.getpwuid(os.getuid())[0] or os.environ.get('SUDO_USER')
-
- try:
- # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
- os.rename(src, dest)
- except (IOError,OSError), e:
- # only try workarounds for errno 18 (cross device), 1 (not permitted) and 13 (permission denied)
- if e.errno != errno.EPERM and e.errno != errno.EXDEV and e.errno != errno.EACCES:
- self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
-
- dest_dir = os.path.dirname(dest)
- dest_file = os.path.basename(dest)
- try:
- tmp_dest = tempfile.NamedTemporaryFile(
- prefix=".ansible_tmp", dir=dest_dir, suffix=dest_file)
- except (OSError, IOError), e:
- self.fail_json(msg='The destination directory (%s) is not writable by the current user.' % dest_dir)
-
- try: # leaves tmp file behind when sudo and not root
- if switched_user and os.getuid() != 0:
- # cleanup will happen by 'rm' of tempdir
- # copy2 will preserve some metadata
- shutil.copy2(src, tmp_dest.name)
- else:
- shutil.move(src, tmp_dest.name)
- if self.selinux_enabled():
- self.set_context_if_different(
- tmp_dest.name, context, False)
- try:
- tmp_stat = os.stat(tmp_dest.name)
- if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
- os.chown(tmp_dest.name, dest_stat.st_uid, dest_stat.st_gid)
- except OSError, e:
- if e.errno != errno.EPERM:
- raise
- os.rename(tmp_dest.name, dest)
- except (shutil.Error, OSError, IOError), e:
- self.cleanup(tmp_dest.name)
- self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, e))
-
- if creating:
- # make sure the file has the correct permissions
- # based on the current value of umask
- umask = os.umask(0)
- os.umask(umask)
- os.chmod(dest, 0666 & ~umask)
- if switched_user:
- os.chown(dest, os.getuid(), os.getgid())
-
- if self.selinux_enabled():
- # rename might not preserve context
- self.set_context_if_different(dest, context, False)
-
- def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None):
- '''
- Execute a command, returns rc, stdout, and stderr.
- args is the command to run
- If args is a list, the command will be run with shell=False.
- If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
- If args is a string and use_unsafe_shell=True it run with shell=True.
- Other arguments:
- - check_rc (boolean) Whether to call fail_json in case of
- non zero RC. Default is False.
- - close_fds (boolean) See documentation for subprocess.Popen().
- Default is True.
- - executable (string) See documentation for subprocess.Popen().
- Default is None.
- - prompt_regex (string) A regex string (not a compiled regex) which
- can be used to detect prompts in the stdout
- which would otherwise cause the execution
- to hang (especially if no input data is
- specified)
- '''
-
- shell = False
- if isinstance(args, list):
- if use_unsafe_shell:
- args = " ".join([pipes.quote(x) for x in args])
- shell = True
- elif isinstance(args, basestring) and use_unsafe_shell:
- shell = True
- elif isinstance(args, basestring):
- args = shlex.split(args.encode('utf-8'))
- else:
- msg = "Argument 'args' to run_command must be list or string"
- self.fail_json(rc=257, cmd=args, msg=msg)
-
- prompt_re = None
- if prompt_regex:
- try:
- prompt_re = re.compile(prompt_regex, re.MULTILINE)
- except re.error:
- self.fail_json(msg="invalid prompt regular expression given to run_command")
-
- # expand things like $HOME and ~
- if not shell:
- args = [ os.path.expandvars(os.path.expanduser(x)) for x in args ]
-
- rc = 0
- msg = None
- st_in = None
-
- # Set a temporart env path if a prefix is passed
- env=os.environ
- if path_prefix:
- env['PATH']="%s:%s" % (path_prefix, env['PATH'])
-
- # create a printable version of the command for use
- # in reporting later, which strips out things like
- # passwords from the args list
- if isinstance(args, basestring):
- if isinstance(args, unicode):
- b_args = args.encode('utf-8')
- else:
- b_args = args
- to_clean_args = shlex.split(b_args)
- del b_args
- else:
- to_clean_args = args
-
- clean_args = []
- is_passwd = False
- for arg in to_clean_args:
- if is_passwd:
- is_passwd = False
- clean_args.append('********')
- continue
- if PASSWD_ARG_RE.match(arg):
- sep_idx = arg.find('=')
- if sep_idx > -1:
- clean_args.append('%s=********' % arg[:sep_idx])
- continue
- else:
- is_passwd = True
- clean_args.append(heuristic_log_sanitize(arg))
- clean_args = ' '.join(pipes.quote(arg) for arg in clean_args)
-
- if data:
- st_in = subprocess.PIPE
-
- kwargs = dict(
- executable=executable,
- shell=shell,
- close_fds=close_fds,
- stdin=st_in,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE
- )
-
- if path_prefix:
- kwargs['env'] = env
- if cwd and os.path.isdir(cwd):
- kwargs['cwd'] = cwd
-
- # store the pwd
- prev_dir = os.getcwd()
-
- # make sure we're in the right working directory
- if cwd and os.path.isdir(cwd):
- try:
- os.chdir(cwd)
- except (OSError, IOError), e:
- self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, str(e)))
-
- try:
- cmd = subprocess.Popen(args, **kwargs)
-
- # the communication logic here is essentially taken from that
- # of the _communicate() function in ssh.py
-
- stdout = ''
- stderr = ''
- rpipes = [cmd.stdout, cmd.stderr]
-
- if data:
- if not binary_data:
- data += '\n'
- cmd.stdin.write(data)
- cmd.stdin.close()
-
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
- if cmd.stdout in rfd:
- dat = os.read(cmd.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(cmd.stdout)
- if cmd.stderr in rfd:
- dat = os.read(cmd.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(cmd.stderr)
- # if we're checking for prompts, do it now
- if prompt_re:
- if prompt_re.search(stdout) and not data:
- return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and cmd.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually cmd.poll() is always None here if rpipes is empty
- elif not rpipes and cmd.poll() == None:
- cmd.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
-
- cmd.stdout.close()
- cmd.stderr.close()
-
- rc = cmd.returncode
- except (OSError, IOError), e:
- self.fail_json(rc=e.errno, msg=str(e), cmd=clean_args)
- except:
- self.fail_json(rc=257, msg=traceback.format_exc(), cmd=clean_args)
-
- if rc != 0 and check_rc:
- msg = heuristic_log_sanitize(stderr.rstrip())
- self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
-
- # reset the pwd
- os.chdir(prev_dir)
-
- return (rc, stdout, stderr)
-
- def append_to_file(self, filename, str):
- filename = os.path.expandvars(os.path.expanduser(filename))
- fh = open(filename, 'a')
- fh.write(str)
- fh.close()
-
- def pretty_bytes(self,size):
- ranges = (
- (1<<70L, 'ZB'),
- (1<<60L, 'EB'),
- (1<<50L, 'PB'),
- (1<<40L, 'TB'),
- (1<<30L, 'GB'),
- (1<<20L, 'MB'),
- (1<<10L, 'KB'),
- (1, 'Bytes')
- )
- for limit, suffix in ranges:
- if size >= limit:
- break
- return '%.2f %s' % (float(size)/ limit, suffix)
-
-def get_module_path():
- return os.path.dirname(os.path.realpath(__file__))
diff --git a/v1/ansible/module_utils/cloudstack.py b/v1/ansible/module_utils/cloudstack.py
deleted file mode 100644
index 752defec2b..0000000000
--- a/v1/ansible/module_utils/cloudstack.py
+++ /dev/null
@@ -1,368 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# (c) 2015, René Moser <mail@renemoser.net>
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-try:
- from cs import CloudStack, CloudStackException, read_config
- has_lib_cs = True
-except ImportError:
- has_lib_cs = False
-
-
-class AnsibleCloudStack:
-
- def __init__(self, module):
- if not has_lib_cs:
- module.fail_json(msg="python library cs required: pip install cs")
-
- self.result = {
- 'changed': False,
- }
-
- self.module = module
- self._connect()
-
- self.domain = None
- self.account = None
- self.project = None
- self.ip_address = None
- self.zone = None
- self.vm = None
- self.os_type = None
- self.hypervisor = None
- self.capabilities = None
-
-
- def _connect(self):
- api_key = self.module.params.get('api_key')
- api_secret = self.module.params.get('secret_key')
- api_url = self.module.params.get('api_url')
- api_http_method = self.module.params.get('api_http_method')
- api_timeout = self.module.params.get('api_timeout')
-
- if api_key and api_secret and api_url:
- self.cs = CloudStack(
- endpoint=api_url,
- key=api_key,
- secret=api_secret,
- timeout=api_timeout,
- method=api_http_method
- )
- else:
- self.cs = CloudStack(**read_config())
-
-
- def get_or_fallback(self, key=None, fallback_key=None):
- value = self.module.params.get(key)
- if not value:
- value = self.module.params.get(fallback_key)
- return value
-
-
- # TODO: for backward compatibility only, remove if not used anymore
- def _has_changed(self, want_dict, current_dict, only_keys=None):
- return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
-
-
- def has_changed(self, want_dict, current_dict, only_keys=None):
- for key, value in want_dict.iteritems():
-
- # Optionally limit by a list of keys
- if only_keys and key not in only_keys:
- continue;
-
- # Skip None values
- if value is None:
- continue;
-
- if key in current_dict:
-
- # API returns string for int in some cases, just to make sure
- if isinstance(value, int):
- current_dict[key] = int(current_dict[key])
- elif isinstance(value, str):
- current_dict[key] = str(current_dict[key])
-
- # Only need to detect a singe change, not every item
- if value != current_dict[key]:
- return True
- return False
-
-
- def _get_by_key(self, key=None, my_dict={}):
- if key:
- if key in my_dict:
- return my_dict[key]
- self.module.fail_json(msg="Something went wrong: %s not found" % key)
- return my_dict
-
-
- def get_project(self, key=None):
- if self.project:
- return self._get_by_key(key, self.project)
-
- project = self.module.params.get('project')
- if not project:
- return None
- args = {}
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- projects = self.cs.listProjects(**args)
- if projects:
- for p in projects['project']:
- if project.lower() in [ p['name'].lower(), p['id'] ]:
- self.project = p
- return self._get_by_key(key, self.project)
- self.module.fail_json(msg="project '%s' not found" % project)
-
-
- def get_ip_address(self, key=None):
- if self.ip_address:
- return self._get_by_key(key, self.ip_address)
-
- ip_address = self.module.params.get('ip_address')
- if not ip_address:
- self.module.fail_json(msg="IP address param 'ip_address' is required")
-
- args = {}
- args['ipaddress'] = ip_address
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- args['projectid'] = self.get_project(key='id')
- ip_addresses = self.cs.listPublicIpAddresses(**args)
-
- if not ip_addresses:
- self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
-
- self.ip_address = ip_addresses['publicipaddress'][0]
- return self._get_by_key(key, self.ip_address)
-
-
- def get_vm(self, key=None):
- if self.vm:
- return self._get_by_key(key, self.vm)
-
- vm = self.module.params.get('vm')
- if not vm:
- self.module.fail_json(msg="Virtual machine param 'vm' is required")
-
- args = {}
- args['account'] = self.get_account(key='name')
- args['domainid'] = self.get_domain(key='id')
- args['projectid'] = self.get_project(key='id')
- args['zoneid'] = self.get_zone(key='id')
- vms = self.cs.listVirtualMachines(**args)
- if vms:
- for v in vms['virtualmachine']:
- if vm in [ v['name'], v['displayname'], v['id'] ]:
- self.vm = v
- return self._get_by_key(key, self.vm)
- self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
-
-
- def get_zone(self, key=None):
- if self.zone:
- return self._get_by_key(key, self.zone)
-
- zone = self.module.params.get('zone')
- zones = self.cs.listZones()
-
- # use the first zone if no zone param given
- if not zone:
- self.zone = zones['zone'][0]
- return self._get_by_key(key, self.zone)
-
- if zones:
- for z in zones['zone']:
- if zone in [ z['name'], z['id'] ]:
- self.zone = z
- return self._get_by_key(key, self.zone)
- self.module.fail_json(msg="zone '%s' not found" % zone)
-
-
- def get_os_type(self, key=None):
- if self.os_type:
- return self._get_by_key(key, self.zone)
-
- os_type = self.module.params.get('os_type')
- if not os_type:
- return None
-
- os_types = self.cs.listOsTypes()
- if os_types:
- for o in os_types['ostype']:
- if os_type in [ o['description'], o['id'] ]:
- self.os_type = o
- return self._get_by_key(key, self.os_type)
- self.module.fail_json(msg="OS type '%s' not found" % os_type)
-
-
- def get_hypervisor(self):
- if self.hypervisor:
- return self.hypervisor
-
- hypervisor = self.module.params.get('hypervisor')
- hypervisors = self.cs.listHypervisors()
-
- # use the first hypervisor if no hypervisor param given
- if not hypervisor:
- self.hypervisor = hypervisors['hypervisor'][0]['name']
- return self.hypervisor
-
- for h in hypervisors['hypervisor']:
- if hypervisor.lower() == h['name'].lower():
- self.hypervisor = h['name']
- return self.hypervisor
- self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
-
-
- def get_account(self, key=None):
- if self.account:
- return self._get_by_key(key, self.account)
-
- account = self.module.params.get('account')
- if not account:
- return None
-
- domain = self.module.params.get('domain')
- if not domain:
- self.module.fail_json(msg="Account must be specified with Domain")
-
- args = {}
- args['name'] = account
- args['domainid'] = self.get_domain(key='id')
- args['listall'] = True
- accounts = self.cs.listAccounts(**args)
- if accounts:
- self.account = accounts['account'][0]
- return self._get_by_key(key, self.account)
- self.module.fail_json(msg="Account '%s' not found" % account)
-
-
- def get_domain(self, key=None):
- if self.domain:
- return self._get_by_key(key, self.domain)
-
- domain = self.module.params.get('domain')
- if not domain:
- return None
-
- args = {}
- args['listall'] = True
- domains = self.cs.listDomains(**args)
- if domains:
- for d in domains['domain']:
- if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
- self.domain = d
- return self._get_by_key(key, self.domain)
- self.module.fail_json(msg="Domain '%s' not found" % domain)
-
-
- def get_tags(self, resource=None):
- existing_tags = self.cs.listTags(resourceid=resource['id'])
- if existing_tags:
- return existing_tags['tag']
- return []
-
-
- def _delete_tags(self, resource, resource_type, tags):
- existing_tags = resource['tags']
- tags_to_delete = []
- for existing_tag in existing_tags:
- if existing_tag['key'] in tags:
- if existing_tag['value'] != tags[key]:
- tags_to_delete.append(existing_tag)
- else:
- tags_to_delete.append(existing_tag)
- if tags_to_delete:
- self.result['changed'] = True
- if not self.module.check_mode:
- args = {}
- args['resourceids'] = resource['id']
- args['resourcetype'] = resource_type
- args['tags'] = tags_to_delete
- self.cs.deleteTags(**args)
-
-
- def _create_tags(self, resource, resource_type, tags):
- tags_to_create = []
- for i, tag_entry in enumerate(tags):
- tag = {
- 'key': tag_entry['key'],
- 'value': tag_entry['value'],
- }
- tags_to_create.append(tag)
- if tags_to_create:
- self.result['changed'] = True
- if not self.module.check_mode:
- args = {}
- args['resourceids'] = resource['id']
- args['resourcetype'] = resource_type
- args['tags'] = tags_to_create
- self.cs.createTags(**args)
-
-
- def ensure_tags(self, resource, resource_type=None):
- if not resource_type or not resource:
- self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
-
- if 'tags' in resource:
- tags = self.module.params.get('tags')
- if tags is not None:
- self._delete_tags(resource, resource_type, tags)
- self._create_tags(resource, resource_type, tags)
- resource['tags'] = self.get_tags(resource)
- return resource
-
-
- def get_capabilities(self, key=None):
- if self.capabilities:
- return self._get_by_key(key, self.capabilities)
- capabilities = self.cs.listCapabilities()
- self.capabilities = capabilities['capability']
- return self._get_by_key(key, self.capabilities)
-
-
- # TODO: for backward compatibility only, remove if not used anymore
- def _poll_job(self, job=None, key=None):
- return self.poll_job(job=job, key=key)
-
-
- def poll_job(self, job=None, key=None):
- if 'jobid' in job:
- while True:
- res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
- if res['jobstatus'] != 0 and 'jobresult' in res:
- if 'errortext' in res['jobresult']:
- self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
- if key and key in res['jobresult']:
- job = res['jobresult'][key]
- break
- time.sleep(2)
- return job
diff --git a/v1/ansible/module_utils/database.py b/v1/ansible/module_utils/database.py
deleted file mode 100644
index 6170614e90..0000000000
--- a/v1/ansible/module_utils/database.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-class SQLParseError(Exception):
- pass
-
-class UnclosedQuoteError(SQLParseError):
- pass
-
-# maps a type of identifier to the maximum number of dot levels that are
-# allowed to specify that identifier. For example, a database column can be
-# specified by up to 4 levels: database.schema.table.column
-_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1)
-_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
-
-def _find_end_quote(identifier, quote_char):
- accumulate = 0
- while True:
- try:
- quote = identifier.index(quote_char)
- except ValueError:
- raise UnclosedQuoteError
- accumulate = accumulate + quote
- try:
- next_char = identifier[quote+1]
- except IndexError:
- return accumulate
- if next_char == quote_char:
- try:
- identifier = identifier[quote+2:]
- accumulate = accumulate + 2
- except IndexError:
- raise UnclosedQuoteError
- else:
- return accumulate
-
-
-def _identifier_parse(identifier, quote_char):
- if not identifier:
- raise SQLParseError('Identifier name unspecified or unquoted trailing dot')
-
- already_quoted = False
- if identifier.startswith(quote_char):
- already_quoted = True
- try:
- end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
- except UnclosedQuoteError:
- already_quoted = False
- else:
- if end_quote < len(identifier) - 1:
- if identifier[end_quote+1] == '.':
- dot = end_quote + 1
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot+1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- further_identifiers.insert(0, first_identifier)
- else:
- raise SQLParseError('User escaped identifiers must escape extra quotes')
- else:
- further_identifiers = [identifier]
-
- if not already_quoted:
- try:
- dot = identifier.index('.')
- except ValueError:
- identifier = identifier.replace(quote_char, quote_char*2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- if dot == 0 or dot >= len(identifier) - 1:
- identifier = identifier.replace(quote_char, quote_char*2)
- identifier = ''.join((quote_char, identifier, quote_char))
- further_identifiers = [identifier]
- else:
- first_identifier = identifier[:dot]
- next_identifier = identifier[dot+1:]
- further_identifiers = _identifier_parse(next_identifier, quote_char)
- first_identifier = first_identifier.replace(quote_char, quote_char*2)
- first_identifier = ''.join((quote_char, first_identifier, quote_char))
- further_identifiers.insert(0, first_identifier)
-
- return further_identifiers
-
-
-def pg_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='"')
- if len(identifier_fragments) > _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]))
- return '.'.join(identifier_fragments)
-
-def mysql_quote_identifier(identifier, id_type):
- identifier_fragments = _identifier_parse(identifier, quote_char='`')
- if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
- raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))
-
- special_cased_fragments = []
- for fragment in identifier_fragments:
- if fragment == '`*`':
- special_cased_fragments.append('*')
- else:
- special_cased_fragments.append(fragment)
-
- return '.'.join(special_cased_fragments)
diff --git a/v1/ansible/module_utils/ec2.py b/v1/ansible/module_utils/ec2.py
deleted file mode 100644
index d02c3476f2..0000000000
--- a/v1/ansible/module_utils/ec2.py
+++ /dev/null
@@ -1,188 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- from distutils.version import LooseVersion
- HAS_LOOSE_VERSION = True
-except:
- HAS_LOOSE_VERSION = False
-
-
-
-def aws_common_argument_spec():
- return dict(
- ec2_url=dict(),
- aws_secret_key=dict(aliases=['ec2_secret_key', 'secret_key'], no_log=True),
- aws_access_key=dict(aliases=['ec2_access_key', 'access_key']),
- validate_certs=dict(default=True, type='bool'),
- security_token=dict(aliases=['access_token'], no_log=True),
- profile=dict(),
- )
-
-
-def ec2_argument_spec():
- spec = aws_common_argument_spec()
- spec.update(
- dict(
- region=dict(aliases=['aws_region', 'ec2_region']),
- )
- )
- return spec
-
-
-def boto_supports_profile_name():
- return hasattr(boto.ec2.EC2Connection, 'profile_name')
-
-
-def get_aws_connection_info(module):
-
- # Check module args for credentials, then check environment vars
- # access_key
-
- ec2_url = module.params.get('ec2_url')
- access_key = module.params.get('aws_access_key')
- secret_key = module.params.get('aws_secret_key')
- security_token = module.params.get('security_token')
- region = module.params.get('region')
- profile_name = module.params.get('profile')
- validate_certs = module.params.get('validate_certs')
-
- if not ec2_url:
- if 'AWS_URL' in os.environ:
- ec2_url = os.environ['AWS_URL']
- elif 'EC2_URL' in os.environ:
- ec2_url = os.environ['EC2_URL']
-
- if not access_key:
- if 'AWS_ACCESS_KEY_ID' in os.environ:
- access_key = os.environ['AWS_ACCESS_KEY_ID']
- elif 'AWS_ACCESS_KEY' in os.environ:
- access_key = os.environ['AWS_ACCESS_KEY']
- elif 'EC2_ACCESS_KEY' in os.environ:
- access_key = os.environ['EC2_ACCESS_KEY']
- else:
- # in case access_key came in as empty string
- access_key = None
-
- if not secret_key:
- if 'AWS_SECRET_ACCESS_KEY' in os.environ:
- secret_key = os.environ['AWS_SECRET_ACCESS_KEY']
- elif 'AWS_SECRET_KEY' in os.environ:
- secret_key = os.environ['AWS_SECRET_KEY']
- elif 'EC2_SECRET_KEY' in os.environ:
- secret_key = os.environ['EC2_SECRET_KEY']
- else:
- # in case secret_key came in as empty string
- secret_key = None
-
- if not region:
- if 'AWS_REGION' in os.environ:
- region = os.environ['AWS_REGION']
- elif 'EC2_REGION' in os.environ:
- region = os.environ['EC2_REGION']
- else:
- # boto.config.get returns None if config not found
- region = boto.config.get('Boto', 'aws_region')
- if not region:
- region = boto.config.get('Boto', 'ec2_region')
-
- if not security_token:
- if 'AWS_SECURITY_TOKEN' in os.environ:
- security_token = os.environ['AWS_SECURITY_TOKEN']
- elif 'EC2_SECURITY_TOKEN' in os.environ:
- security_token = os.environ['EC2_SECURITY_TOKEN']
- else:
- # in case security_token came in as empty string
- security_token = None
-
- boto_params = dict(aws_access_key_id=access_key,
- aws_secret_access_key=secret_key,
- security_token=security_token)
-
- # profile_name only works as a key in boto >= 2.24
- # so only set profile_name if passed as an argument
- if profile_name:
- if not boto_supports_profile_name():
- module.fail_json("boto does not support profile_name before 2.24")
- boto_params['profile_name'] = profile_name
-
- if validate_certs and HAS_LOOSE_VERSION and LooseVersion(boto.Version) >= LooseVersion("2.6.0"):
- boto_params['validate_certs'] = validate_certs
-
- return region, ec2_url, boto_params
-
-
-def get_ec2_creds(module):
- ''' for compatibility mode with old modules that don't/can't yet
- use ec2_connect method '''
- region, ec2_url, boto_params = get_aws_connection_info(module)
- return ec2_url, boto_params['aws_access_key_id'], boto_params['aws_secret_access_key'], region
-
-
-def boto_fix_security_token_in_profile(conn, profile_name):
- ''' monkey patch for boto issue boto/boto#2100 '''
- profile = 'profile ' + profile_name
- if boto.config.has_option(profile, 'aws_security_token'):
- conn.provider.set_security_token(boto.config.get(profile, 'aws_security_token'))
- return conn
-
-
-def connect_to_aws(aws_module, region, **params):
- conn = aws_module.connect_to_region(region, **params)
- if not conn:
- if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise StandardError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
- else:
- raise StandardError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
- if params.get('profile_name'):
- conn = boto_fix_security_token_in_profile(conn, params['profile_name'])
- return conn
-
-
-def ec2_connect(module):
-
- """ Return an ec2 connection"""
-
- region, ec2_url, boto_params = get_aws_connection_info(module)
-
- # If we have a region specified, connect to its endpoint.
- if region:
- try:
- ec2 = connect_to_aws(boto.ec2, region, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
- module.fail_json(msg=str(e))
- # Otherwise, no region so we fallback to the old connection method
- elif ec2_url:
- try:
- ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
- module.fail_json(msg=str(e))
- else:
- module.fail_json(msg="Either region or ec2_url must be specified")
-
- return ec2
diff --git a/v1/ansible/module_utils/facts.py b/v1/ansible/module_utils/facts.py
deleted file mode 100644
index 1162e05b9c..0000000000
--- a/v1/ansible/module_utils/facts.py
+++ /dev/null
@@ -1,2786 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import stat
-import array
-import errno
-import fcntl
-import fnmatch
-import glob
-import platform
-import re
-import signal
-import socket
-import struct
-import datetime
-import getpass
-import pwd
-import ConfigParser
-import StringIO
-
-from string import maketrans
-
-try:
- import selinux
- HAVE_SELINUX=True
-except ImportError:
- HAVE_SELINUX=False
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-# --------------------------------------------------------------
-# timeout function to make sure some fact gathering
-# steps do not exceed a time limit
-
-class TimeoutError(Exception):
- pass
-
-def timeout(seconds=10, error_message="Timer expired"):
- def decorator(func):
- def _handle_timeout(signum, frame):
- raise TimeoutError(error_message)
-
- def wrapper(*args, **kwargs):
- signal.signal(signal.SIGALRM, _handle_timeout)
- signal.alarm(seconds)
- try:
- result = func(*args, **kwargs)
- finally:
- signal.alarm(0)
- return result
-
- return wrapper
-
- return decorator
-
-# --------------------------------------------------------------
-
-class Facts(object):
- """
- This class should only attempt to populate those facts that
- are mostly generic to all systems. This includes platform facts,
- service facts (e.g. ssh keys or selinux), and distribution facts.
- Anything that requires extensive code or may have more than one
- possible implementation to establish facts for a given topic should
- subclass Facts.
- """
-
- # i86pc is a Solaris and derivatives-ism
- _I386RE = re.compile(r'i([3456]86|86pc)')
- # For the most part, we assume that platform.dist() will tell the truth.
- # This is the fallback to handle unknowns or exceptions
- OSDIST_LIST = ( ('/etc/oracle-release', 'OracleLinux'),
- ('/etc/redhat-release', 'RedHat'),
- ('/etc/vmware-release', 'VMwareESX'),
- ('/etc/openwrt_release', 'OpenWrt'),
- ('/etc/system-release', 'OtherLinux'),
- ('/etc/alpine-release', 'Alpine'),
- ('/etc/release', 'Solaris'),
- ('/etc/arch-release', 'Archlinux'),
- ('/etc/SuSE-release', 'SuSE'),
- ('/etc/os-release', 'SuSE'),
- ('/etc/gentoo-release', 'Gentoo'),
- ('/etc/os-release', 'Debian'),
- ('/etc/lsb-release', 'Mandriva'),
- ('/etc/os-release', 'NA'),
- )
- SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
-
- # A list of dicts. If there is a platform with more than one
- # package manager, put the preferred one last. If there is an
- # ansible module, use that as the value for the 'name' key.
- PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
- { 'path' : '/usr/bin/dnf', 'name' : 'dnf' },
- { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
- { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
- { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
- { 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
- { 'path' : '/bin/opkg', 'name' : 'opkg' },
- { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
- { 'path' : '/opt/local/bin/port', 'name' : 'macports' },
- { 'path' : '/sbin/apk', 'name' : 'apk' },
- { 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
- { 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
- { 'path' : '/usr/bin/emerge', 'name' : 'portage' },
- { 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
- { 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
- ]
-
- def __init__(self, load_on_init=True):
-
- self.facts = {}
-
- if load_on_init:
- self.get_platform_facts()
- self.get_distribution_facts()
- self.get_cmdline()
- self.get_public_ssh_host_keys()
- self.get_selinux_facts()
- self.get_fips_facts()
- self.get_pkg_mgr_facts()
- self.get_lsb_facts()
- self.get_date_time_facts()
- self.get_user_facts()
- self.get_local_facts()
- self.get_env_facts()
-
- def populate(self):
- return self.facts
-
- # Platform
- # platform.system() can be Linux, Darwin, Java, or Windows
- def get_platform_facts(self):
- self.facts['system'] = platform.system()
- self.facts['kernel'] = platform.release()
- self.facts['machine'] = platform.machine()
- self.facts['python_version'] = platform.python_version()
- self.facts['fqdn'] = socket.getfqdn()
- self.facts['hostname'] = platform.node().split('.')[0]
- self.facts['nodename'] = platform.node()
- self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
- arch_bits = platform.architecture()[0]
- self.facts['userspace_bits'] = arch_bits.replace('bit', '')
- if self.facts['machine'] == 'x86_64':
- self.facts['architecture'] = self.facts['machine']
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- elif Facts._I386RE.search(self.facts['machine']):
- self.facts['architecture'] = 'i386'
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- else:
- self.facts['architecture'] = self.facts['machine']
- if self.facts['system'] == 'Linux':
- self.get_distribution_facts()
- elif self.facts['system'] == 'AIX':
- try:
- rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
- data = out.split('\n')
- self.facts['architecture'] = data[0]
- except:
- self.facts['architecture'] = 'Not Available'
- elif self.facts['system'] == 'OpenBSD':
- self.facts['architecture'] = platform.uname()[5]
-
-
- def get_local_facts(self):
-
- fact_path = module.params.get('fact_path', None)
- if not fact_path or not os.path.exists(fact_path):
- return
-
- local = {}
- for fn in sorted(glob.glob(fact_path + '/*.fact')):
- # where it will sit under local facts
- fact_base = os.path.basename(fn).replace('.fact','')
- if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
- # run it
- # try to read it as json first
- # if that fails read it with ConfigParser
- # if that fails, skip it
- rc, out, err = module.run_command(fn)
- else:
- out = get_file_content(fn, default='')
-
- # load raw json
- fact = 'loading %s' % fact_base
- try:
- fact = json.loads(out)
- except ValueError, e:
- # load raw ini
- cp = ConfigParser.ConfigParser()
- try:
- cp.readfp(StringIO.StringIO(out))
- except ConfigParser.Error, e:
- fact="error loading fact - please check content"
- else:
- fact = {}
- #print cp.sections()
- for sect in cp.sections():
- if sect not in fact:
- fact[sect] = {}
- for opt in cp.options(sect):
- val = cp.get(sect, opt)
- fact[sect][opt]=val
-
- local[fact_base] = fact
- if not local:
- return
- self.facts['local'] = local
-
- # platform.dist() is deprecated in 2.6
- # in 2.6 and newer, you should use platform.linux_distribution()
- def get_distribution_facts(self):
-
- # A list with OS Family members
- OS_FAMILY = dict(
- RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
- SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
- OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
- XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', SLES = 'Suse',
- SLED = 'Suse', openSUSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
- Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
- SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
- FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
- )
-
- # TODO: Rewrite this to use the function references in a dict pattern
- # as it's much cleaner than this massive if-else
- if self.facts['system'] == 'AIX':
- self.facts['distribution'] = 'AIX'
- rc, out, err = module.run_command("/usr/bin/oslevel")
- data = out.split('.')
- self.facts['distribution_version'] = data[0]
- self.facts['distribution_release'] = data[1]
- elif self.facts['system'] == 'HP-UX':
- self.facts['distribution'] = 'HP-UX'
- rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
- data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
- if data:
- self.facts['distribution_version'] = data.groups()[0]
- self.facts['distribution_release'] = data.groups()[1]
- elif self.facts['system'] == 'Darwin':
- self.facts['distribution'] = 'MacOSX'
- rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
- data = out.split()[-1]
- self.facts['distribution_version'] = data
- elif self.facts['system'] == 'FreeBSD':
- self.facts['distribution'] = 'FreeBSD'
- self.facts['distribution_release'] = platform.release()
- self.facts['distribution_version'] = platform.version()
- elif self.facts['system'] == 'NetBSD':
- self.facts['distribution'] = 'NetBSD'
- self.facts['distribution_release'] = platform.release()
- self.facts['distribution_version'] = platform.version()
- elif self.facts['system'] == 'OpenBSD':
- self.facts['distribution'] = 'OpenBSD'
- self.facts['distribution_release'] = platform.release()
- rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
- match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
- if match:
- self.facts['distribution_version'] = match.groups()[0]
- else:
- self.facts['distribution_version'] = 'release'
- else:
- dist = platform.dist()
- self.facts['distribution'] = dist[0].capitalize() or 'NA'
- self.facts['distribution_version'] = dist[1] or 'NA'
- self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
- self.facts['distribution_release'] = dist[2] or 'NA'
- # Try to handle the exceptions now ...
- for (path, name) in Facts.OSDIST_LIST:
- if os.path.exists(path):
- if os.path.getsize(path) > 0:
- if self.facts['distribution'] in ('Fedora', ):
- # Once we determine the value is one of these distros
- # we trust the values are always correct
- break
- elif name == 'OracleLinux':
- data = get_file_content(path)
- if 'Oracle Linux' in data:
- self.facts['distribution'] = name
- else:
- self.facts['distribution'] = data.split()[0]
- break
- elif name == 'RedHat':
- data = get_file_content(path)
- if 'Red Hat' in data:
- self.facts['distribution'] = name
- else:
- self.facts['distribution'] = data.split()[0]
- break
- elif name == 'OtherLinux':
- data = get_file_content(path)
- if 'Amazon' in data:
- self.facts['distribution'] = 'Amazon'
- self.facts['distribution_version'] = data.split()[-1]
- break
- elif name == 'OpenWrt':
- data = get_file_content(path)
- if 'OpenWrt' in data:
- self.facts['distribution'] = name
- version = re.search('DISTRIB_RELEASE="(.*)"', data)
- if version:
- self.facts['distribution_version'] = version.groups()[0]
- release = re.search('DISTRIB_CODENAME="(.*)"', data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- break
- elif name == 'Alpine':
- data = get_file_content(path)
- self.facts['distribution'] = name
- self.facts['distribution_version'] = data
- break
- elif name == 'Solaris':
- data = get_file_content(path).split('\n')[0]
- if 'Solaris' in data:
- ora_prefix = ''
- if 'Oracle Solaris' in data:
- data = data.replace('Oracle ','')
- ora_prefix = 'Oracle '
- self.facts['distribution'] = data.split()[0]
- self.facts['distribution_version'] = data.split()[1]
- self.facts['distribution_release'] = ora_prefix + data
- break
-
- uname_rc, uname_out, uname_err = module.run_command(['uname', '-v'])
- distribution_version = None
- if 'SmartOS' in data:
- self.facts['distribution'] = 'SmartOS'
- if os.path.exists('/etc/product'):
- product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
- if 'Image' in product_data:
- distribution_version = product_data.get('Image').split()[-1]
- elif 'OpenIndiana' in data:
- self.facts['distribution'] = 'OpenIndiana'
- elif 'OmniOS' in data:
- self.facts['distribution'] = 'OmniOS'
- distribution_version = data.split()[-1]
- elif uname_rc == 0 and 'NexentaOS_' in uname_out:
- self.facts['distribution'] = 'Nexenta'
- distribution_version = data.split()[-1].lstrip('v')
-
- if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
- self.facts['distribution_release'] = data.strip()
- if distribution_version is not None:
- self.facts['distribution_version'] = distribution_version
- elif uname_rc == 0:
- self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
- break
-
- elif name == 'SuSE':
- data = get_file_content(path)
- if 'suse' in data.lower():
- if path == '/etc/os-release':
- for line in data.splitlines():
- distribution = re.search("^NAME=(.*)", line)
- if distribution:
- self.facts['distribution'] = distribution.group(1).strip('"')
- distribution_version = re.search('^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line) # example pattern are 13.04 13.0 13
- if distribution_version:
- self.facts['distribution_version'] = distribution_version.group(1)
- if 'open' in data.lower():
- release = re.search("^PRETTY_NAME=[^(]+ \(?([^)]+?)\)", line)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- elif 'enterprise' in data.lower():
- release = re.search('^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line) # SLES doesn't got funny release names
- if release:
- release = release.group(1)
- else:
- release = "0" # no minor number, so it is the first release
- self.facts['distribution_release'] = release
- break
- elif path == '/etc/SuSE-release':
- if 'open' in data.lower():
- data = data.splitlines()
- distdata = get_file_content(path).split('\n')[0]
- self.facts['distribution'] = distdata.split()[0]
- for line in data:
- release = re.search('CODENAME *= *([^\n]+)', line)
- if release:
- self.facts['distribution_release'] = release.groups()[0].strip()
- elif 'enterprise' in data.lower():
- lines = data.splitlines()
- distribution = lines[0].split()[0]
- if "Server" in data:
- self.facts['distribution'] = "SLES"
- elif "Desktop" in data:
- self.facts['distribution'] = "SLED"
- for line in lines:
- release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't got funny release names
- if release:
- self.facts['distribution_release'] = release.group(1)
- self.facts['distribution_version'] = self.facts['distribution_version'] + '.' + release.group(1)
- elif name == 'Debian':
- data = get_file_content(path)
- if 'Ubuntu' in data:
- break # Ubuntu gets correct info from python functions
- elif 'Debian' in data or 'Raspbian' in data:
- release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- break
- elif name == 'Mandriva':
- data = get_file_content(path)
- if 'Mandriva' in data:
- version = re.search('DISTRIB_RELEASE="(.*)"', data)
- if version:
- self.facts['distribution_version'] = version.groups()[0]
- release = re.search('DISTRIB_CODENAME="(.*)"', data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- self.facts['distribution'] = name
- break
- elif name == 'NA':
- data = get_file_content(path)
- for line in data.splitlines():
- distribution = re.search("^NAME=(.*)", line)
- if distribution:
- self.facts['distribution'] = distribution.group(1).strip('"')
- version = re.search("^VERSION=(.*)", line)
- if version:
- self.facts['distribution_version'] = version.group(1).strip('"')
- if self.facts['distribution'].lower() == 'coreos':
- data = get_file_content('/etc/coreos/update.conf')
- release = re.search("^GROUP=(.*)", data)
- if release:
- self.facts['distribution_release'] = release.group(1).strip('"')
- else:
- self.facts['distribution'] = name
- machine_id = get_file_content("/var/lib/dbus/machine-id") or get_file_content("/etc/machine-id")
- if machine_id:
- machine_id = machine_id.split('\n')[0]
- self.facts["machine_id"] = machine_id
- self.facts['os_family'] = self.facts['distribution']
- if self.facts['distribution'] in OS_FAMILY:
- self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
-
- def get_cmdline(self):
- data = get_file_content('/proc/cmdline')
- if data:
- self.facts['cmdline'] = {}
- try:
- for piece in shlex.split(data):
- item = piece.split('=', 1)
- if len(item) == 1:
- self.facts['cmdline'][item[0]] = True
- else:
- self.facts['cmdline'][item[0]] = item[1]
- except ValueError, e:
- pass
-
- def get_public_ssh_host_keys(self):
- dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
-
- if self.facts['system'] == 'Darwin':
- dsa_filename = '/etc/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
- dsa = get_file_content(dsa_filename)
- rsa = get_file_content(rsa_filename)
- ecdsa = get_file_content(ecdsa_filename)
- if dsa is None:
- dsa = 'NA'
- else:
- self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
- if rsa is None:
- rsa = 'NA'
- else:
- self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
- if ecdsa is None:
- ecdsa = 'NA'
- else:
- self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
-
- def get_pkg_mgr_facts(self):
- self.facts['pkg_mgr'] = 'unknown'
- for pkg in Facts.PKG_MGRS:
- if os.path.exists(pkg['path']):
- self.facts['pkg_mgr'] = pkg['name']
- if self.facts['system'] == 'OpenBSD':
- self.facts['pkg_mgr'] = 'openbsd_pkg'
-
- def get_lsb_facts(self):
- lsb_path = module.get_bin_path('lsb_release')
- if lsb_path:
- rc, out, err = module.run_command([lsb_path, "-a"])
- if rc == 0:
- self.facts['lsb'] = {}
- for line in out.split('\n'):
- if len(line) < 1 or ':' not in line:
- continue
- value = line.split(':', 1)[1].strip()
- if 'LSB Version:' in line:
- self.facts['lsb']['release'] = value
- elif 'Distributor ID:' in line:
- self.facts['lsb']['id'] = value
- elif 'Description:' in line:
- self.facts['lsb']['description'] = value
- elif 'Release:' in line:
- self.facts['lsb']['release'] = value
- elif 'Codename:' in line:
- self.facts['lsb']['codename'] = value
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
- elif lsb_path is None and os.path.exists('/etc/lsb-release'):
- self.facts['lsb'] = {}
- for line in get_file_lines('/etc/lsb-release'):
- value = line.split('=',1)[1].strip()
- if 'DISTRIB_ID' in line:
- self.facts['lsb']['id'] = value
- elif 'DISTRIB_RELEASE' in line:
- self.facts['lsb']['release'] = value
- elif 'DISTRIB_DESCRIPTION' in line:
- self.facts['lsb']['description'] = value
- elif 'DISTRIB_CODENAME' in line:
- self.facts['lsb']['codename'] = value
- else:
- return self.facts
-
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
-
-
- def get_selinux_facts(self):
- if not HAVE_SELINUX:
- self.facts['selinux'] = False
- return
- self.facts['selinux'] = {}
- if not selinux.is_selinux_enabled():
- self.facts['selinux']['status'] = 'disabled'
- else:
- self.facts['selinux']['status'] = 'enabled'
- try:
- self.facts['selinux']['policyvers'] = selinux.security_policyvers()
- except OSError, e:
- self.facts['selinux']['policyvers'] = 'unknown'
- try:
- (rc, configmode) = selinux.selinux_getenforcemode()
- if rc == 0:
- self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
- else:
- self.facts['selinux']['config_mode'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['config_mode'] = 'unknown'
- try:
- mode = selinux.security_getenforce()
- self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
- except OSError, e:
- self.facts['selinux']['mode'] = 'unknown'
- try:
- (rc, policytype) = selinux.selinux_getpolicytype()
- if rc == 0:
- self.facts['selinux']['type'] = policytype
- else:
- self.facts['selinux']['type'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['type'] = 'unknown'
-
-
- def get_fips_facts(self):
- self.facts['fips'] = False
- data = get_file_content('/proc/sys/crypto/fips_enabled')
- if data and data == '1':
- self.facts['fips'] = True
-
-
- def get_date_time_facts(self):
- self.facts['date_time'] = {}
-
- now = datetime.datetime.now()
- self.facts['date_time']['year'] = now.strftime('%Y')
- self.facts['date_time']['month'] = now.strftime('%m')
- self.facts['date_time']['weekday'] = now.strftime('%A')
- self.facts['date_time']['day'] = now.strftime('%d')
- self.facts['date_time']['hour'] = now.strftime('%H')
- self.facts['date_time']['minute'] = now.strftime('%M')
- self.facts['date_time']['second'] = now.strftime('%S')
- self.facts['date_time']['epoch'] = now.strftime('%s')
- if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
- self.facts['date_time']['epoch'] = str(int(time.time()))
- self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
- self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
- self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
- self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
- self.facts['date_time']['tz'] = time.strftime("%Z")
- self.facts['date_time']['tz_offset'] = time.strftime("%z")
-
-
- # User
- def get_user_facts(self):
- self.facts['user_id'] = getpass.getuser()
- pwent = pwd.getpwnam(getpass.getuser())
- self.facts['user_uid'] = pwent.pw_uid
- self.facts['user_gid'] = pwent.pw_gid
- self.facts['user_gecos'] = pwent.pw_gecos
- self.facts['user_dir'] = pwent.pw_dir
- self.facts['user_shell'] = pwent.pw_shell
-
- def get_env_facts(self):
- self.facts['env'] = {}
- for k,v in os.environ.iteritems():
- self.facts['env'][k] = v
-
-class Hardware(Facts):
- """
- This is a generic Hardware subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this, it
- should define:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Hardware.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxHardware(Hardware):
- """
- Linux-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- In addition, it also defines number of DMI facts and device facts.
- """
-
- platform = 'Linux'
-
- # Originally only had these four as toplevelfacts
- ORIGINAL_MEMORY_FACTS = frozenset(('MemTotal', 'SwapTotal', 'MemFree', 'SwapFree'))
- # Now we have all of these in a dict structure
- MEMORY_FACTS = ORIGINAL_MEMORY_FACTS.union(('Buffers', 'Cached', 'SwapCached'))
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- self.get_uptime_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
-
- memstats = {}
- for line in get_file_lines("/proc/meminfo"):
- data = line.split(":", 1)
- key = data[0]
- if key in self.ORIGINAL_MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- if key in self.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- memstats[key.lower()] = long(val) / 1024
-
- if None not in (memstats.get('memtotal'), memstats.get('memfree')):
- memstats['real:used'] = memstats['memtotal'] - memstats['memfree']
- if None not in (memstats.get('cached'), memstats.get('memfree'), memstats.get('buffers')):
- memstats['nocache:free'] = memstats['cached'] + memstats['memfree'] + memstats['buffers']
- if None not in (memstats.get('memtotal'), memstats.get('nocache:free')):
- memstats['nocache:used'] = memstats['memtotal'] - memstats['nocache:free']
- if None not in (memstats.get('swaptotal'), memstats.get('swapfree')):
- memstats['swap:used'] = memstats['swaptotal'] - memstats['swapfree']
-
- self.facts['memory_mb'] = {
- 'real' : {
- 'total': memstats.get('memtotal'),
- 'used': memstats.get('real:used'),
- 'free': memstats.get('memfree'),
- },
- 'nocache' : {
- 'free': memstats.get('nocache:free'),
- 'used': memstats.get('nocache:used'),
- },
- 'swap' : {
- 'total': memstats.get('swaptotal'),
- 'free': memstats.get('swapfree'),
- 'used': memstats.get('swap:used'),
- 'cached': memstats.get('swapcached'),
- },
- }
-
- def get_cpu_facts(self):
- i = 0
- vendor_id_occurrence = 0
- model_name_occurrence = 0
- physid = 0
- coreid = 0
- sockets = {}
- cores = {}
-
- xen = False
- xen_paravirt = False
- try:
- if os.path.exists('/proc/xen'):
- xen = True
- else:
- for line in get_file_lines('/sys/hypervisor/type'):
- if line.strip() == 'xen':
- xen = True
- # Only interested in the first line
- break
- except IOError:
- pass
-
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in get_file_lines('/proc/cpuinfo'):
- data = line.split(":", 1)
- key = data[0].strip()
-
- if xen:
- if key == 'flags':
- # Check for vme cpu flag, Xen paravirt does not expose this.
- # Need to detect Xen paravirt because it exposes cpuinfo
- # differently than Xen HVM or KVM and causes reporting of
- # only a single cpu core.
- if 'vme' not in data:
- xen_paravirt = True
-
- # model name is for Intel arch, Processor (mind the uppercase P)
- # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor' or key == 'vendor_id':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- if key == 'vendor_id':
- vendor_id_occurrence += 1
- if key == 'model name':
- model_name_occurrence += 1
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'core id':
- coreid = data[1].strip()
- if coreid not in sockets:
- cores[coreid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- elif key == 'siblings':
- cores[coreid] = int(data[1].strip())
- elif key == '# processors':
- self.facts['processor_cores'] = int(data[1].strip())
-
- if vendor_id_occurrence == model_name_occurrence:
- i = vendor_id_occurrence
-
- if self.facts['architecture'] != 's390x':
- if xen_paravirt:
- self.facts['processor_count'] = i
- self.facts['processor_cores'] = i
- self.facts['processor_threads_per_core'] = 1
- self.facts['processor_vcpus'] = i
- else:
- self.facts['processor_count'] = sockets and len(sockets) or i
- self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
- self.facts['processor_threads_per_core'] = ((cores.values() and
- cores.values()[0] or 1) / self.facts['processor_cores'])
- self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
- self.facts['processor_count'] * self.facts['processor_cores'])
-
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Try /sys first for dmi related facts.
- If that is not available, fall back to dmidecode executable '''
-
- if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
- # Use kernel DMI info, if available
-
- # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
- FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
- "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
- "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
- "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
- "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
- "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
- "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
- "CompactPCI", "AdvancedTCA", "Blade" ]
-
- DMI_DICT = {
- 'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
- 'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
- 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
- 'product_name': '/sys/devices/virtual/dmi/id/product_name',
- 'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
- 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
- 'product_version': '/sys/devices/virtual/dmi/id/product_version',
- 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
- }
-
- for (key,path) in DMI_DICT.items():
- data = get_file_content(path)
- if data is not None:
- if key == 'form_factor':
- try:
- self.facts['form_factor'] = FORM_FACTOR[int(data)]
- except IndexError, e:
- self.facts['form_factor'] = 'unknown (%s)' % data
- else:
- self.facts[key] = data
- else:
- self.facts[key] = 'NA'
-
- else:
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = {
- 'bios_date': 'bios-release-date',
- 'bios_version': 'bios-version',
- 'form_factor': 'chassis-type',
- 'product_name': 'system-product-name',
- 'product_serial': 'system-serial-number',
- 'product_uuid': 'system-uuid',
- 'product_version': 'system-version',
- 'system_vendor': 'system-manufacturer'
- }
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(thisvalue)
- except UnicodeDecodeError:
- thisvalue = "NA"
-
- self.facts[k] = thisvalue
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- mtab = get_file_content('/etc/mtab', '')
- for line in mtab.split('\n'):
- if line.startswith('/'):
- fields = line.rstrip('\n').split()
- if(fields[2] != 'none'):
- size_total = None
- size_available = None
- try:
- statvfs_result = os.statvfs(fields[1])
- size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
- size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
- except OSError, e:
- continue
-
- uuid = 'NA'
- lsblkPath = module.get_bin_path("lsblk")
- if lsblkPath:
- rc, out, err = module.run_command("%s -ln --output UUID %s" % (lsblkPath, fields[0]), use_unsafe_shell=True)
-
- if rc == 0:
- uuid = out.strip()
-
- self.facts['mounts'].append(
- {'mount': fields[1],
- 'device':fields[0],
- 'fstype': fields[2],
- 'options': fields[3],
- # statvfs data
- 'size_total': size_total,
- 'size_available': size_available,
- 'uuid': uuid,
- })
-
- def get_device_facts(self):
- self.facts['devices'] = {}
- lspci = module.get_bin_path('lspci')
- if lspci:
- rc, pcidata, err = module.run_command([lspci, '-D'])
- else:
- pcidata = None
-
- try:
- block_devs = os.listdir("/sys/block")
- except OSError:
- return
-
- for block in block_devs:
- virtual = 1
- sysfs_no_links = 0
- try:
- path = os.readlink(os.path.join("/sys/block/", block))
- except OSError, e:
- if e.errno == errno.EINVAL:
- path = block
- sysfs_no_links = 1
- else:
- continue
- if "virtual" in path:
- continue
- sysdir = os.path.join("/sys/block", path)
- if sysfs_no_links == 1:
- for folder in os.listdir(sysdir):
- if "device" in folder:
- virtual = 0
- break
- if virtual:
- continue
- d = {}
- diskname = os.path.basename(sysdir)
- for key in ['vendor', 'model']:
- d[key] = get_file_content(sysdir + "/device/" + key)
-
- for key,test in [ ('removable','/removable'), \
- ('support_discard','/queue/discard_granularity'),
- ]:
- d[key] = get_file_content(sysdir + test)
-
- d['partitions'] = {}
- for folder in os.listdir(sysdir):
- m = re.search("(" + diskname + "\d+)", folder)
- if m:
- part = {}
- partname = m.group(1)
- part_sysdir = sysdir + "/" + partname
-
- part['start'] = get_file_content(part_sysdir + "/start",0)
- part['sectors'] = get_file_content(part_sysdir + "/size",0)
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
- if not part['sectorsize']:
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
- part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
- d['partitions'][partname] = part
-
- d['rotational'] = get_file_content(sysdir + "/queue/rotational")
- d['scheduler_mode'] = ""
- scheduler = get_file_content(sysdir + "/queue/scheduler")
- if scheduler is not None:
- m = re.match(".*?(\[(.*)\])", scheduler)
- if m:
- d['scheduler_mode'] = m.group(2)
-
- d['sectors'] = get_file_content(sysdir + "/size")
- if not d['sectors']:
- d['sectors'] = 0
- d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
- if not d['sectorsize']:
- d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
- d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
-
- d['host'] = ""
-
- # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
- m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
- if m and pcidata:
- pciid = m.group(1)
- did = re.escape(pciid)
- m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
- d['host'] = m.group(1)
-
- d['holders'] = []
- if os.path.isdir(sysdir + "/holders"):
- for folder in os.listdir(sysdir + "/holders"):
- if not folder.startswith("dm-"):
- continue
- name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
- if name:
- d['holders'].append(name)
- else:
- d['holders'].append(folder)
-
- self.facts['devices'][diskname] = d
-
- def get_uptime_facts(self):
- uptime_seconds_string = get_file_content('/proc/uptime').split(' ')[0]
- self.facts['uptime_seconds'] = int(float(uptime_seconds_string))
-
-class SunOSHardware(Hardware):
- """
- In addition to the generic memory and cpu facts, this also sets
- swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
- """
- platform = 'SunOS'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
- physid = 0
- sockets = {}
- rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
- self.facts['processor'] = []
- for line in out.split('\n'):
- if len(line) < 1:
- continue
- data = line.split(None, 1)
- key = data[0].strip()
- # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
- if key == 'module:':
- brand = ''
- elif key == 'brand':
- brand = data[1].strip()
- elif key == 'clock_MHz':
- clock_mhz = data[1].strip()
- elif key == 'implementation':
- processor = brand or data[1].strip()
- # Add clock speed to description for SPARC CPU
- if self.facts['machine'] != 'i86pc':
- processor += " @ " + clock_mhz + "MHz"
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(processor)
- elif key == 'chip_id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- else:
- sockets[physid] += 1
- # Counting cores on Solaris can be complicated.
- # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
- # Treat 'processor_count' as physical sockets and 'processor_cores' as
- # virtual CPUs visisble to Solaris. Not a true count of cores for modern SPARC as
- # these processors have: sockets -> cores -> threads/virtual CPU.
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_cores'] = 'NA'
- self.facts['processor_count'] = len(self.facts['processor'])
-
- def get_memory_facts(self):
- rc, out, err = module.run_command(["/usr/sbin/prtconf"])
- for line in out.split('\n'):
- if 'Memory size' in line:
- self.facts['memtotal_mb'] = line.split()[2]
- rc, out, err = module.run_command("/usr/sbin/swap -s")
- allocated = long(out.split()[1][:-1])
- reserved = long(out.split()[5][:-1])
- used = long(out.split()[8][:-1])
- free = long(out.split()[10][:-1])
- self.facts['swapfree_mb'] = free / 1024
- self.facts['swaptotal_mb'] = (free + used) / 1024
- self.facts['swap_allocated_mb'] = allocated / 1024
- self.facts['swap_reserved_mb'] = reserved / 1024
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- # For a detailed format description see mnttab(4)
- # special mount_point fstype options time
- fstab = get_file_content('/etc/mnttab')
- if fstab:
- for line in fstab.split('\n'):
- fields = line.rstrip('\n').split('\t')
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4]})
-
-class OpenBSDHardware(Hardware):
- """
- OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - processor_speed
- - devices
- """
- platform = 'OpenBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_memory_facts()
- self.get_processor_facts()
- self.get_device_facts()
- self.get_mount_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- (key, value) = line.split('=')
- sysctl[key] = value.strip()
- return sysctl
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- if fields[1] == 'none' or fields[3] == 'xx':
- continue
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
- def get_memory_facts(self):
- # Get free memory. vmstat output looks like:
- # procs memory page disks traps cpu
- # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
- # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
- rc, out, err = module.run_command("/usr/bin/vmstat")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
- self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
-
- # Get swapctl info. swapctl output looks like:
- # total: 69268 1K-blocks allocated, 0 used, 69268 available
- # And for older OpenBSD:
- # total: 69268k bytes allocated = 0k used, 69268k available
- rc, out, err = module.run_command("/sbin/swapctl -sk")
- if rc == 0:
- swaptrans = maketrans(' ', ' ')
- data = out.split()
- self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
- self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
-
- def get_processor_facts(self):
- processor = []
- dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- i = 0
- for line in dmesg_boot.splitlines():
- if line.split(' ', 1)[0] == 'cpu%i:' % i:
- processor.append(line.split(' ', 1)[1])
- i = i + 1
- processor_count = i
- self.facts['processor'] = processor
- self.facts['processor_count'] = processor_count
- # I found no way to figure out the number of Cores per CPU in OpenBSD
- self.facts['processor_cores'] = 'NA'
-
- def get_device_facts(self):
- devices = []
- devices.extend(self.sysctl['hw.disknames'].split(','))
- self.facts['devices'] = devices
-
-class FreeBSDHardware(Hardware):
- """
- FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'FreeBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
- rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
- self.facts['processor_count'] = out.strip()
-
- dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- for line in dmesg_boot.split('\n'):
- if 'CPU:' in line:
- cpu = re.sub(r'CPU:\s+', r"", line)
- self.facts['processor'].append(cpu.strip())
- if 'Logical CPUs per core' in line:
- self.facts['processor_cores'] = line.split()[4]
-
-
- def get_memory_facts(self):
- rc, out, err = module.run_command("/sbin/sysctl vm.stats")
- for line in out.split('\n'):
- data = line.split()
- if 'vm.stats.vm.v_page_size' in line:
- pagesize = long(data[1])
- if 'vm.stats.vm.v_page_count' in line:
- pagecount = long(data[1])
- if 'vm.stats.vm.v_free_count' in line:
- freecount = long(data[1])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
- lines = out.split('\n')
- if len(lines[-1]) == 0:
- lines.pop()
- data = lines[-1].split()
- self.facts['swaptotal_mb'] = data[1]
- self.facts['swapfree_mb'] = data[3]
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
- def get_device_facts(self):
- sysdir = '/dev'
- self.facts['devices'] = {}
- drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
- slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
- if os.path.isdir(sysdir):
- dirlist = sorted(os.listdir(sysdir))
- for device in dirlist:
- d = drives.match(device)
- if d:
- self.facts['devices'][d.group(1)] = []
- s = slices.match(device)
- if s:
- self.facts['devices'][d.group(1)].append(s.group(1))
-
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Use dmidecode executable if available'''
-
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = dict(
- bios_date='bios-release-date',
- bios_version='bios-version',
- form_factor='chassis-type',
- product_name='system-product-name',
- product_serial='system-serial-number',
- product_uuid='system-uuid',
- product_version='system-version',
- system_vendor='system-manufacturer'
- )
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(self.facts[k])
- except UnicodeDecodeError:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
-
-class NetBSDHardware(Hardware):
- """
- NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'NetBSD'
- MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- try:
- self.get_mount_facts()
- except TimeoutError:
- pass
- return self.facts
-
- def get_cpu_facts(self):
-
- i = 0
- physid = 0
- sockets = {}
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in get_file_lines("/proc/cpuinfo"):
- data = line.split(":", 1)
- key = data[0].strip()
- # model name is for Intel arch, Processor (mind the uppercase P)
- # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_count'] = i
- self.facts['processor_cores'] = 'NA'
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
- for line in get_file_lines("/proc/meminfo"):
- data = line.split(":", 1)
- key = data[0]
- if key in NetBSDHardware.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- @timeout(10)
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
-class AIX(Hardware):
- """
- AIX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- """
- platform = 'AIX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
-
-
- rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
- if out:
- i = 0
- for line in out.split('\n'):
-
- if 'Available' in line:
- if i == 0:
- data = line.split(' ')
- cpudev = data[0]
-
- i += 1
- self.facts['processor_count'] = int(i)
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
-
- data = out.split(' ')
- self.facts['processor'] = data[1]
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
-
- data = out.split(' ')
- self.facts['processor_cores'] = int(data[1])
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat -v")
- for line in out.split('\n'):
- data = line.split()
- if 'memory pages' in line:
- pagecount = long(data[0])
- if 'free pages' in line:
- freecount = long(data[0])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/lsps -s")
- if out:
- lines = out.split('\n')
- data = lines[1].split()
- swaptotal_mb = long(data[0].rstrip('MB'))
- percused = int(data[1].rstrip('%'))
- self.facts['swaptotal_mb'] = swaptotal_mb
- self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
-
- def get_dmi_facts(self):
- rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
- data = out.split()
- self.facts['firmware_version'] = data[1].strip('IBM,')
-
-class HPUX(Hardware):
- """
- HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor
- - processor_cores
- - processor_count
- - model
- - firmware
- """
-
- platform = 'HP-UX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_hw_facts()
- return self.facts
-
- def get_cpu_facts(self):
- if self.facts['architecture'] == '9000/800':
- rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip())
- #Working with machinfo mess
- elif self.facts['architecture'] == 'ia64':
- if self.facts['distribution_version'] == "B.11.23":
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split('=')[1])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
- self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
- rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
- self.facts['processor_cores'] = int(out.strip())
- if self.facts['distribution_version'] == "B.11.31":
- #if machinfo return cores strings release B.11.31 > 1204
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
- if out.strip()== '0':
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
- #If hyperthreading is active divide cores by 2
- rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
- data = re.sub(' +',' ',out).strip().split(' ')
- if len(data) == 1:
- hyperthreading = 'OFF'
- else:
- hyperthreading = data[1]
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
- data = out.strip().split(" ")
- if hyperthreading == 'ON':
- self.facts['processor_cores'] = int(data[0])/2
- else:
- if len(data) == 1:
- self.facts['processor_cores'] = self.facts['processor_count']
- else:
- self.facts['processor_cores'] = int(data[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
- self.facts['processor'] = out.strip()
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
- self.facts['processor_cores'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
- self.facts['processor'] = out.strip()
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
- data = int(re.sub(' +',' ',out).split(' ')[5].strip())
- self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
- if self.facts['architecture'] == '9000/800':
- try:
- rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
- data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data) / 1024
- except AttributeError:
- #For systems where memory details aren't sent to syslog or the log has rotated, use parsed
- #adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
- if os.access("/dev/kmem", os.R_OK):
- rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
- if not err:
- data = out
- self.facts['memtotal_mb'] = int(data) / 256
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
- data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data)
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
- self.facts['swaptotal_mb'] = int(out.strip())
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
- swap = 0
- for line in out.strip().split('\n'):
- swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
- self.facts['swapfree_mb'] = swap
-
- def get_hw_facts(self):
- rc, out, err = module.run_command("model")
- self.facts['model'] = out.strip()
- if self.facts['architecture'] == 'ia64':
- separator = ':'
- if self.facts['distribution_version'] == "B.11.23":
- separator = '='
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
- self.facts['firmware_version'] = out.split(separator)[1].strip()
-
-
-class Darwin(Hardware):
- """
- Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- - processor
- - processor_cores
- - memtotal_mb
- - memfree_mb
- - model
- - osversion
- - osrevision
- """
- platform = 'Darwin'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_mac_facts()
- self.get_cpu_facts()
- self.get_memory_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- if line.rstrip("\n"):
- (key, value) = re.split(' = |: ', line, maxsplit=1)
- sysctl[key] = value.strip()
- return sysctl
-
- def get_system_profile(self):
- rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
- if rc != 0:
- return dict()
- system_profile = dict()
- for line in out.splitlines():
- if ': ' in line:
- (key, value) = line.split(': ', 1)
- system_profile[key.strip()] = ' '.join(value.strip().split())
- return system_profile
-
- def get_mac_facts(self):
- rc, out, err = module.run_command("sysctl hw.model")
- if rc == 0:
- self.facts['model'] = out.splitlines()[-1].split()[1]
- self.facts['osversion'] = self.sysctl['kern.osversion']
- self.facts['osrevision'] = self.sysctl['kern.osrevision']
-
- def get_cpu_facts(self):
- if 'machdep.cpu.brand_string' in self.sysctl: # Intel
- self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
- self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
- else: # PowerPC
- system_profile = self.get_system_profile()
- self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
- self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
-
- def get_memory_facts(self):
- self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
-
- rc, out, err = module.run_command("sysctl hw.usermem")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
-
-
-class Network(Facts):
- """
- This is a generic Network subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you must define:
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- IPV6_SCOPE = { '0' : 'global',
- '10' : 'host',
- '20' : 'link',
- '40' : 'admin',
- '50' : 'site',
- '80' : 'organization' }
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Network.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self, module):
- self.module = module
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxNetwork(Network):
- """
- This is a Linux-specific subclass of Network. It defines
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- - ipv4_address and ipv6_address: the first non-local address for each family.
- """
- platform = 'Linux'
-
- def __init__(self, module):
- Network.__init__(self, module)
-
- def populate(self):
- ip_path = self.module.get_bin_path('ip')
- if ip_path is None:
- return self.facts
- default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
- interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
- self.facts['interfaces'] = interfaces.keys()
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
- return self.facts
-
- def get_default_interfaces(self, ip_path):
- # Use the commands:
- # ip -4 route get 8.8.8.8 -> Google public DNS
- # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
- command = dict(
- v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
- v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
- )
- interface = dict(v4 = {}, v6 = {})
- for v in 'v4', 'v6':
- if v == 'v6' and self.facts['os_family'] == 'RedHat' \
- and self.facts['distribution_version'].startswith('4.'):
- continue
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- words = out.split('\n')[0].split()
- # A valid output starts with the queried address on the first line
- if len(words) > 0 and words[0] == command[v][-1]:
- for i in range(len(words) - 1):
- if words[i] == 'dev':
- interface[v]['interface'] = words[i+1]
- elif words[i] == 'src':
- interface[v]['address'] = words[i+1]
- elif words[i] == 'via' and words[i+1] != command[v][-1]:
- interface[v]['gateway'] = words[i+1]
- return interface['v4'], interface['v6']
-
- def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
- interfaces = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
-
- for path in glob.glob('/sys/class/net/*'):
- if not os.path.isdir(path):
- continue
- device = os.path.basename(path)
- interfaces[device] = { 'device': device }
- if os.path.exists(os.path.join(path, 'address')):
- macaddress = get_file_content(os.path.join(path, 'address'), default='')
- if macaddress and macaddress != '00:00:00:00:00:00':
- interfaces[device]['macaddress'] = macaddress
- if os.path.exists(os.path.join(path, 'mtu')):
- interfaces[device]['mtu'] = int(get_file_content(os.path.join(path, 'mtu')))
- if os.path.exists(os.path.join(path, 'operstate')):
- interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
-# if os.path.exists(os.path.join(path, 'carrier')):
-# interfaces[device]['link'] = get_file_content(os.path.join(path, 'carrier')) == '1'
- if os.path.exists(os.path.join(path, 'device','driver', 'module')):
- interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
- if os.path.exists(os.path.join(path, 'type')):
- _type = get_file_content(os.path.join(path, 'type'))
- if _type == '1':
- interfaces[device]['type'] = 'ether'
- elif _type == '512':
- interfaces[device]['type'] = 'ppp'
- elif _type == '772':
- interfaces[device]['type'] = 'loopback'
- if os.path.exists(os.path.join(path, 'bridge')):
- interfaces[device]['type'] = 'bridge'
- interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
- if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
- interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
- if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
- interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
- if os.path.exists(os.path.join(path, 'bonding')):
- interfaces[device]['type'] = 'bonding'
- interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
- interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
- interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
- interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
- primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
- if primary:
- interfaces[device]['primary'] = primary
- path = os.path.join(path, 'bonding', 'all_slaves_active')
- if os.path.exists(path):
- interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
-
- # Check whether an interface is in promiscuous mode
- if os.path.exists(os.path.join(path,'flags')):
- promisc_mode = False
- # The second byte indicates whether the interface is in promiscuous mode.
- # 1 = promisc
- # 0 = no promisc
- data = int(get_file_content(os.path.join(path, 'flags')),16)
- promisc_mode = (data & 0x0100 > 0)
- interfaces[device]['promisc'] = promisc_mode
-
- def parse_ip_output(output, secondary=False):
- for line in output.split('\n'):
- if not line:
- continue
- words = line.split()
- if words[0] == 'inet':
- if '/' in words[1]:
- address, netmask_length = words[1].split('/')
- else:
- # pointopoint interfaces do not have a prefix
- address = words[1]
- netmask_length = "32"
- address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
- netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
- netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
- network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- iface = words[-1]
- if iface != device:
- interfaces[iface] = {}
- if not secondary and "ipv4" not in interfaces[iface]:
- interfaces[iface]['ipv4'] = {'address': address,
- 'netmask': netmask,
- 'network': network}
- else:
- if "ipv4_secondaries" not in interfaces[iface]:
- interfaces[iface]["ipv4_secondaries"] = []
- interfaces[iface]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # add this secondary IP to the main device
- if secondary:
- if "ipv4_secondaries" not in interfaces[device]:
- interfaces[device]["ipv4_secondaries"] = []
- interfaces[device]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # If this is the default address, update default_ipv4
- if 'address' in default_ipv4 and default_ipv4['address'] == address:
- default_ipv4['netmask'] = netmask
- default_ipv4['network'] = network
- default_ipv4['macaddress'] = macaddress
- default_ipv4['mtu'] = interfaces[device]['mtu']
- default_ipv4['type'] = interfaces[device].get("type", "unknown")
- default_ipv4['alias'] = words[-1]
- if not address.startswith('127.'):
- ips['all_ipv4_addresses'].append(address)
- elif words[0] == 'inet6':
- address, prefix = words[1].split('/')
- scope = words[3]
- if 'ipv6' not in interfaces[device]:
- interfaces[device]['ipv6'] = []
- interfaces[device]['ipv6'].append({
- 'address' : address,
- 'prefix' : prefix,
- 'scope' : scope
- })
- # If this is the default address, update default_ipv6
- if 'address' in default_ipv6 and default_ipv6['address'] == address:
- default_ipv6['prefix'] = prefix
- default_ipv6['scope'] = scope
- default_ipv6['macaddress'] = macaddress
- default_ipv6['mtu'] = interfaces[device]['mtu']
- default_ipv6['type'] = interfaces[device].get("type", "unknown")
- if not address == '::1':
- ips['all_ipv6_addresses'].append(address)
-
- ip_path = module.get_bin_path("ip")
-
- args = [ip_path, 'addr', 'show', 'primary', device]
- rc, stdout, stderr = self.module.run_command(args)
- primary_data = stdout
-
- args = [ip_path, 'addr', 'show', 'secondary', device]
- rc, stdout, stderr = self.module.run_command(args)
- secondary_data = stdout
-
- parse_ip_output(primary_data)
- parse_ip_output(secondary_data, secondary=True)
-
- # replace : by _ in interface name since they are hard to use in template
- new_interfaces = {}
- for i in interfaces:
- if ':' in i:
- new_interfaces[i.replace(':','_')] = interfaces[i]
- else:
- new_interfaces[i] = interfaces[i]
- return new_interfaces, ips
-
-class GenericBsdIfconfigNetwork(Network):
- """
- This is a generic BSD subclass of Network using the ifconfig command.
- It defines
- - interfaces (a list of interface names)
- - interface_<name> dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- It currently does not define
- - default_ipv4 and default_ipv6
- - type, mtu and network on interfaces
- """
- platform = 'Generic_BSD_Ifconfig'
-
- def __init__(self, module):
- Network.__init__(self, module)
-
- def populate(self):
-
- ifconfig_path = module.get_bin_path('ifconfig')
-
- if ifconfig_path is None:
- return self.facts
- route_path = module.get_bin_path('route')
-
- if route_path is None:
- return self.facts
-
- default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
- interfaces, ips = self.get_interfaces_info(ifconfig_path)
- self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
- self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
- self.facts['interfaces'] = interfaces.keys()
-
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
-
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
-
- return self.facts
-
- def get_default_interfaces(self, route_path):
-
- # Use the commands:
- # route -n get 8.8.8.8 -> Google public DNS
- # route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
-
- command = dict(
- v4 = [route_path, '-n', 'get', '8.8.8.8'],
- v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
- )
-
- interface = dict(v4 = {}, v6 = {})
-
- for v in 'v4', 'v6':
-
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- lines = out.split('\n')
- for line in lines:
- words = line.split()
- # Collect output from route command
- if len(words) > 1:
- if words[0] == 'interface:':
- interface[v]['interface'] = words[1]
- if words[0] == 'gateway:':
- interface[v]['gateway'] = words[1]
-
- return interface['v4'], interface['v6']
-
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
- # when running the command 'ifconfig'.
- # Solaris must explicitly run the command 'ifconfig -a'.
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if words[0] == 'pass':
- continue
- elif re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- return interfaces, ips
-
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['macaddress'] = 'unknown' # will be overwritten later
-
- if len(words) >= 5 : # Newer FreeBSD versions
- current_if['metric'] = words[3]
- current_if['mtu'] = words[5]
- else:
- current_if['mtu'] = words[3]
-
- return current_if
-
- def parse_options_line(self, words, current_if, ips):
- # Mac has options like this...
- current_if['options'] = self.get_options(words[0])
-
- def parse_nd6_line(self, words, current_if, ips):
- # FreeBSD has options like this...
- current_if['options'] = self.get_options(words[1])
-
- def parse_ether_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = words[1]
- if len(words) > 2:
- current_if['media_select'] = words[2]
- if len(words) > 3:
- current_if['media_type'] = words[3][1:]
- if len(words) > 4:
- current_if['media_options'] = self.get_options(words[4])
-
- def parse_status_line(self, words, current_if, ips):
- current_if['status'] = words[1]
-
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['lladdr'] = words[1]
-
- def parse_inet_line(self, words, current_if, ips):
- address = {'address': words[1]}
- # deal with hex netmask
- if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
- words[3] = '0x' + words[3]
- if words[3].startswith('0x'):
- address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
- else:
- # otherwise assume this is a dotted quad
- address['netmask'] = words[3]
- # calculate the network
- address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
- netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
- address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- # broadcast may be given or we need to calculate
- if len(words) > 5:
- address['broadcast'] = words[5]
- else:
- address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
- # add to our list of addresses
- if not words[1].startswith('127.'):
- ips['all_ipv4_addresses'].append(address['address'])
- current_if['ipv4'].append(address)
-
- def parse_inet6_line(self, words, current_if, ips):
- address = {'address': words[1]}
- if (len(words) >= 4) and (words[2] == 'prefixlen'):
- address['prefix'] = words[3]
- if (len(words) >= 6) and (words[4] == 'scopeid'):
- address['scope'] = words[5]
- localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
- if address['address'] not in localhost6:
- ips['all_ipv6_addresses'].append(address['address'])
- current_if['ipv6'].append(address)
-
- def parse_unknown_line(self, words, current_if, ips):
- # we are going to ignore unknown lines here - this may be
- # a bad idea - but you can override it in your subclass
- pass
-
- def get_options(self, option_string):
- start = option_string.find('<') + 1
- end = option_string.rfind('>')
- if (start > 0) and (end > 0) and (end > start + 1):
- option_csv = option_string[start:end]
- return option_csv.split(',')
- else:
- return []
-
- def merge_default_interface(self, defaults, interfaces, ip_type):
- if not 'interface' in defaults.keys():
- return
- if not defaults['interface'] in interfaces:
- return
- ifinfo = interfaces[defaults['interface']]
- # copy all the interface values across except addresses
- for item in ifinfo.keys():
- if item != 'ipv4' and item != 'ipv6':
- defaults[item] = ifinfo[item]
- if len(ifinfo[ip_type]) > 0:
- for item in ifinfo[ip_type][0].keys():
- defaults[item] = ifinfo[ip_type][0][item]
-
-class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the Mac OS X/Darwin Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged
- """
- platform = 'Darwin'
-
- # media line is different to the default FreeBSD one
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = 'Unknown' # Mac does not give us this
- current_if['media_select'] = words[1]
- if len(words) > 2:
- current_if['media_type'] = words[2][1:-1]
- if len(words) > 3:
- current_if['media_options'] = self.get_options(words[3])
-
-
-class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the FreeBSD Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'FreeBSD'
-
-class AIXNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the AIX Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'AIX'
-
- # AIX 'ifconfig -a' does not have three words in the interface line
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- # only this condition differs from GenericBsdIfconfigNetwork
- if re.match('^\w*\d*:', line):
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
- uname_path = module.get_bin_path('uname')
- if uname_path:
- rc, out, err = module.run_command([uname_path, '-W'])
- # don't bother with wpars it does not work
- # zero means not in wpar
- if out.split()[0] == '0':
- if current_if['macaddress'] == 'unknown' and re.match('^en', current_if['device']):
- entstat_path = module.get_bin_path('entstat')
- if entstat_path:
- rc, out, err = module.run_command([entstat_path, current_if['device'] ])
- if rc != 0:
- break
- for line in out.split('\n'):
- if not line:
- pass
- buff = re.match('^Hardware Address: (.*)', line)
- if buff:
- current_if['macaddress'] = buff.group(1)
-
- buff = re.match('^Device Type:', line)
- if buff and re.match('.*Ethernet', line):
- current_if['type'] = 'ether'
- # device must have mtu attribute in ODM
- if 'mtu' not in current_if:
- lsattr_path = module.get_bin_path('lsattr')
- if lsattr_path:
- rc, out, err = module.run_command([lsattr_path,'-El', current_if['device'] ])
- if rc != 0:
- break
- for line in out.split('\n'):
- if line:
- words = line.split()
- if words[0] == 'mtu':
- current_if['mtu'] = words[1]
- return interfaces, ips
-
- # AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
-class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the OpenBSD Network Class.
- It uses the GenericBsdIfconfigNetwork.
- """
- platform = 'OpenBSD'
-
- # Return macaddress instead of lladdr
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
-class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the SunOS Network Class.
- It uses the GenericBsdIfconfigNetwork.
-
- Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
- so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
- """
- platform = 'SunOS'
-
- # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
- # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
- # 'parse_interface_line()' checks for previously seen interfaces before defining
- # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words, current_if, interfaces)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
- # ipv4/ipv6 lists which is ugly and hard to read.
- # This quick hack merges the dictionaries. Purely cosmetic.
- for iface in interfaces:
- for v in 'ipv4', 'ipv6':
- combined_facts = {}
- for facts in interfaces[iface][v]:
- combined_facts.update(facts)
- if len(combined_facts.keys()) > 0:
- interfaces[iface][v] = [combined_facts]
-
- return interfaces, ips
-
- def parse_interface_line(self, words, current_if, interfaces):
- device = words[0][0:-1]
- if device not in interfaces.keys():
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- else:
- current_if = interfaces[device]
- flags = self.get_options(words[1])
- v = 'ipv4'
- if 'IPv6' in flags:
- v = 'ipv6'
- current_if[v].append({'flags': flags, 'mtu': words[3]})
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
- # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
- # Add leading zero to each octet where needed.
- def parse_ether_line(self, words, current_if, ips):
- macaddress = ''
- for octet in words[1].split(':'):
- octet = ('0' + octet)[-2:None]
- macaddress += (octet + ':')
- current_if['macaddress'] = macaddress[0:-1]
-
-class Virtual(Facts):
- """
- This is a generic Virtual subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you should define:
- - virtualization_type
- - virtualization_role
- - container (e.g. solaris zones, freebsd jails, linux containers)
-
- All subclasses MUST define platform.
- """
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Virtual.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxVirtual(Virtual):
- """
- This is a Linux-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'Linux'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- # For more information, check: http://people.redhat.com/~rjones/virt-what/
- def get_virtual_facts(self):
- if os.path.exists("/proc/xen"):
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- try:
- for line in get_file_lines('/proc/xen/capabilities'):
- if "control_d" in line:
- self.facts['virtualization_role'] = 'host'
- except IOError:
- pass
- return
-
- if os.path.exists('/proc/vz'):
- self.facts['virtualization_type'] = 'openvz'
- if os.path.exists('/proc/bc'):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/1/cgroup'):
- for line in get_file_lines('/proc/1/cgroup'):
- if re.search(r'/docker(/|-[0-9a-f]+\.scope)', line):
- self.facts['virtualization_type'] = 'docker'
- self.facts['virtualization_role'] = 'guest'
- return
- if re.search('/lxc/', line):
- self.facts['virtualization_type'] = 'lxc'
- self.facts['virtualization_role'] = 'guest'
- return
-
- product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
-
- if product_name in ['KVM', 'Bochs']:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'RHEV Hypervisor':
- self.facts['virtualization_type'] = 'RHEV'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'VMware Virtual Platform':
- self.facts['virtualization_type'] = 'VMware'
- self.facts['virtualization_role'] = 'guest'
- return
-
- bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
-
- if bios_vendor == 'Xen':
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if bios_vendor == 'innotek GmbH':
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- return
-
- sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
-
- # FIXME: This does also match hyperv
- if sys_vendor == 'Microsoft Corporation':
- self.facts['virtualization_type'] = 'VirtualPC'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'Parallels Software International Inc.':
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'QEMU':
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'oVirt':
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/self/status'):
- for line in get_file_lines('/proc/self/status'):
- if re.match('^VxID: \d+', line):
- self.facts['virtualization_type'] = 'linux_vserver'
- if re.match('^VxID: 0', line):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/cpuinfo'):
- for line in get_file_lines('/proc/cpuinfo'):
- if re.match('^model name.*QEMU Virtual CPU', line):
- self.facts['virtualization_type'] = 'kvm'
- elif re.match('^vendor_id.*User Mode Linux', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^model name.*UML', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^vendor_id.*PowerVM Lx86', line):
- self.facts['virtualization_type'] = 'powervm_lx86'
- elif re.match('^vendor_id.*IBM/S390', line):
- self.facts['virtualization_type'] = 'PR/SM'
- lscpu = module.get_bin_path('lscpu')
- if lscpu:
- rc, out, err = module.run_command(["lscpu"])
- if rc == 0:
- for line in out.split("\n"):
- data = line.split(":", 1)
- key = data[0].strip()
- if key == 'Hypervisor':
- self.facts['virtualization_type'] = data[1].strip()
- else:
- self.facts['virtualization_type'] = 'ibm_systemz'
- else:
- continue
- if self.facts['virtualization_type'] == 'PR/SM':
- self.facts['virtualization_role'] = 'LPAR'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- # Beware that we can have both kvm and virtualbox running on a single system
- if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
- modules = []
- for line in get_file_lines("/proc/modules"):
- data = line.split(" ", 1)
- modules.append(data[0])
-
- if 'kvm' in modules:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'host'
- return
-
- if 'vboxdrv' in modules:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'host'
- return
-
- # If none of the above matches, return 'NA' for virtualization_type
- # and virtualization_role. This allows for proper grouping.
- self.facts['virtualization_type'] = 'NA'
- self.facts['virtualization_role'] = 'NA'
- return
-
-class FreeBSDVirtual(Virtual):
- """
- This is a FreeBSD-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'FreeBSD'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- self.facts['virtualization_type'] = ''
- self.facts['virtualization_role'] = ''
-
-class OpenBSDVirtual(Virtual):
- """
- This is a OpenBSD-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'OpenBSD'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- self.facts['virtualization_type'] = ''
- self.facts['virtualization_role'] = ''
-
-class HPUXVirtual(Virtual):
- """
- This is a HP-UX specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'HP-UX'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- if os.path.exists('/usr/sbin/vecheck'):
- rc, out, err = module.run_command("/usr/sbin/vecheck")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP vPar'
- if os.path.exists('/opt/hpvm/bin/hpvminfo'):
- rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
- if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM vPar'
- elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM IVM'
- elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
- self.facts['virtualization_type'] = 'host'
- self.facts['virtualization_role'] = 'HPVM'
- if os.path.exists('/usr/sbin/parstatus'):
- rc, out, err = module.run_command("/usr/sbin/parstatus")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP nPar'
-
-
-class SunOSVirtual(Virtual):
- """
- This is a SunOS-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- - container
- """
- platform = 'SunOS'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- rc, out, err = module.run_command("/usr/sbin/prtdiag")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'Parallels' in line:
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- if 'HVM domU' in line:
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- # Check if it's a zone
- if os.path.exists("/usr/bin/zonename"):
- rc, out, err = module.run_command("/usr/bin/zonename")
- if out.rstrip() != "global":
- self.facts['container'] = 'zone'
- # Check if it's a branded zone (i.e. Solaris 8/9 zone)
- if os.path.isdir('/.SUNWnative'):
- self.facts['container'] = 'zone'
- # If it's a zone check if we can detect if our global zone is itself virtualized.
- # Relies on the "guest tools" (e.g. vmware tools) to be installed
- if 'container' in self.facts and self.facts['container'] == 'zone':
- rc, out, err = module.run_command("/usr/sbin/modinfo")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- # Detect domaining on Sparc hardware
- if os.path.exists("/usr/sbin/virtinfo"):
- # The output of virtinfo is different whether we are on a machine with logical
- # domains ('LDoms') on a T-series or domains ('Domains') on a M-series. Try LDoms first.
- rc, out, err = module.run_command("/usr/sbin/virtinfo -p")
- # The output contains multiple lines with different keys like this:
- # DOMAINROLE|impl=LDoms|control=false|io=false|service=false|root=false
- # The output may also be not formatted and the returncode is set to 0 regardless of the error condition:
- # virtinfo can only be run from the global zone
- try:
- for line in out.split('\n'):
- fields = line.split('|')
- if( fields[0] == 'DOMAINROLE' and fields[1] == 'impl=LDoms' ):
- self.facts['virtualization_type'] = 'ldom'
- self.facts['virtualization_role'] = 'guest'
- hostfeatures = []
- for field in fields[2:]:
- arg = field.split('=')
- if( arg[1] == 'true' ):
- hostfeatures.append(arg[0])
- if( len(hostfeatures) > 0 ):
- self.facts['virtualization_role'] = 'host (' + ','.join(hostfeatures) + ')'
- except ValueError, e:
- pass
-
-def get_file_content(path, default=None, strip=True):
- data = default
- if os.path.exists(path) and os.access(path, os.R_OK):
- try:
- datafile = open(path)
- data = datafile.read()
- if strip:
- data = data.strip()
- if len(data) == 0:
- data = default
- finally:
- datafile.close()
- return data
-
-def get_file_lines(path):
- '''file.readlines() that closes the file'''
- datafile = open(path)
- try:
- return datafile.readlines()
- finally:
- datafile.close()
-
-def ansible_facts(module):
- facts = {}
- facts.update(Facts().populate())
- facts.update(Hardware().populate())
- facts.update(Network(module).populate())
- facts.update(Virtual().populate())
- return facts
-
-# ===========================================
-
-def get_all_facts(module):
-
- setup_options = dict(module_setup=True)
- facts = ansible_facts(module)
-
- for (k, v) in facts.items():
- setup_options["ansible_%s" % k.replace('-', '_')] = v
-
- # Look for the path to the facter and ohai binary and set
- # the variable to that path.
-
- facter_path = module.get_bin_path('facter')
- ohai_path = module.get_bin_path('ohai')
-
- # if facter is installed, and we can use --json because
- # ruby-json is ALSO installed, include facter data in the JSON
-
- if facter_path is not None:
- rc, out, err = module.run_command(facter_path + " --json")
- facter = True
- try:
- facter_ds = json.loads(out)
- except:
- facter = False
- if facter:
- for (k,v) in facter_ds.items():
- setup_options["facter_%s" % k] = v
-
- # ditto for ohai
-
- if ohai_path is not None:
- rc, out, err = module.run_command(ohai_path)
- ohai = True
- try:
- ohai_ds = json.loads(out)
- except:
- ohai = False
- if ohai:
- for (k,v) in ohai_ds.items():
- k2 = "ohai_%s" % k.replace('-', '_')
- setup_options[k2] = v
-
- setup_result = { 'ansible_facts': {} }
-
- for (k,v) in setup_options.items():
- if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
- setup_result['ansible_facts'][k] = v
-
- # hack to keep --verbose from showing all the setup module results
- setup_result['verbose_override'] = True
-
- return setup_result
diff --git a/v1/ansible/module_utils/gce.py b/v1/ansible/module_utils/gce.py
deleted file mode 100644
index 37a4bf1dea..0000000000
--- a/v1/ansible/module_utils/gce.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-import pprint
-
-USER_AGENT_PRODUCT="Ansible-gce"
-USER_AGENT_VERSION="v1"
-
-def gce_connect(module, provider=None):
- """Return a Google Cloud Engine connection."""
- service_account_email = module.params.get('service_account_email', None)
- pem_file = module.params.get('pem_file', None)
- project_id = module.params.get('project_id', None)
-
- # If any of the values are not given as parameters, check the appropriate
- # environment variables.
- if not service_account_email:
- service_account_email = os.environ.get('GCE_EMAIL', None)
- if not project_id:
- project_id = os.environ.get('GCE_PROJECT', None)
- if not pem_file:
- pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
-
- # If we still don't have one or more of our credentials, attempt to
- # get the remaining values from the libcloud secrets file.
- if service_account_email is None or pem_file is None:
- try:
- import secrets
- except ImportError:
- secrets = None
-
- if hasattr(secrets, 'GCE_PARAMS'):
- if not service_account_email:
- service_account_email = secrets.GCE_PARAMS[0]
- if not pem_file:
- pem_file = secrets.GCE_PARAMS[1]
- keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
- if not project_id:
- project_id = keyword_params.get('project', None)
-
- # If we *still* don't have the credentials we need, then it's time to
- # just fail out.
- if service_account_email is None or pem_file is None or project_id is None:
- module.fail_json(msg='Missing GCE connection parameters in libcloud '
- 'secrets file.')
- return None
-
- # Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE)
- if provider is None:
- provider = Provider.GCE
-
- try:
- gce = get_driver(provider)(service_account_email, pem_file,
- datacenter=module.params.get('zone', None),
- project=project_id)
- gce.connection.user_agent_append("%s/%s" % (
- USER_AGENT_PRODUCT, USER_AGENT_VERSION))
- except (RuntimeError, ValueError), e:
- module.fail_json(msg=str(e), changed=False)
- except Exception, e:
- module.fail_json(msg=unexpected_error_msg(e), changed=False)
-
- return gce
-
-def unexpected_error_msg(error):
- """Create an error string based on passed in error."""
- return 'Unexpected response: ' + pprint.pformat(vars(error))
diff --git a/v1/ansible/module_utils/known_hosts.py b/v1/ansible/module_utils/known_hosts.py
deleted file mode 100644
index 99dbf2c03a..0000000000
--- a/v1/ansible/module_utils/known_hosts.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import hmac
-import urlparse
-
-try:
- from hashlib import sha1
-except ImportError:
- import sha as sha1
-
-HASHED_KEY_MAGIC = "|1|"
-
-def add_git_host_key(module, url, accept_hostkey=True, create_dir=True):
-
- """ idempotently add a git url hostkey """
-
- fqdn = get_fqdn(url)
-
- if fqdn:
- known_host = check_hostkey(module, fqdn)
- if not known_host:
- if accept_hostkey:
- rc, out, err = add_host_key(module, fqdn, create_dir=create_dir)
- if rc != 0:
- module.fail_json(msg="failed to add %s hostkey: %s" % (fqdn, out + err))
- else:
- module.fail_json(msg="%s has an unknown hostkey. Set accept_hostkey to True or manually add the hostkey prior to running the git module" % fqdn)
-
-def get_fqdn(repo_url):
-
- """ chop the hostname out of a giturl """
-
- result = None
- if "@" in repo_url and "://" not in repo_url:
- # most likely a git@ or ssh+git@ type URL
- repo_url = repo_url.split("@", 1)[1]
- if ":" in repo_url:
- repo_url = repo_url.split(":")[0]
- result = repo_url
- elif "/" in repo_url:
- repo_url = repo_url.split("/")[0]
- result = repo_url
- elif "://" in repo_url:
- # this should be something we can parse with urlparse
- parts = urlparse.urlparse(repo_url)
- if 'ssh' not in parts[0] and 'git' not in parts[0]:
- # don't try and scan a hostname that's not ssh
- return None
- # parts[1] will be empty on python2.4 on ssh:// or git:// urls, so
- # ensure we actually have a parts[1] before continuing.
- if parts[1] != '':
- result = parts[1]
- if ":" in result:
- result = result.split(":")[0]
- if "@" in result:
- result = result.split("@", 1)[1]
-
- return result
-
-def check_hostkey(module, fqdn):
- return not not_in_host_file(module, fqdn)
-
-# this is a variant of code found in connection_plugins/paramiko.py and we should modify
-# the paramiko code to import and use this.
-
-def not_in_host_file(self, host):
-
-
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
-
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if tokens[0].find(HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- return True
-
-
-def add_host_key(module, fqdn, key_type="rsa", create_dir=False):
-
- """ use ssh-keyscan to add the hostkey """
-
- result = False
- keyscan_cmd = module.get_bin_path('ssh-keyscan', True)
-
- if 'USER' in os.environ:
- user_ssh_dir = os.path.expandvars("~${USER}/.ssh/")
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_ssh_dir = "~/.ssh/"
- user_host_file = "~/.ssh/known_hosts"
- user_ssh_dir = os.path.expanduser(user_ssh_dir)
-
- if not os.path.exists(user_ssh_dir):
- if create_dir:
- try:
- os.makedirs(user_ssh_dir, 0700)
- except:
- module.fail_json(msg="failed to create host key directory: %s" % user_ssh_dir)
- else:
- module.fail_json(msg="%s does not exist" % user_ssh_dir)
- elif not os.path.isdir(user_ssh_dir):
- module.fail_json(msg="%s is not a directory" % user_ssh_dir)
-
- this_cmd = "%s -t %s %s" % (keyscan_cmd, key_type, fqdn)
-
- rc, out, err = module.run_command(this_cmd)
- module.append_to_file(user_host_file, out)
-
- return rc, out, err
-
diff --git a/v1/ansible/module_utils/openstack.py b/v1/ansible/module_utils/openstack.py
deleted file mode 100644
index 4069449144..0000000000
--- a/v1/ansible/module_utils/openstack.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-
-
-def openstack_argument_spec():
- # DEPRECATED: This argument spec is only used for the deprecated old
- # OpenStack modules. It turns out that modern OpenStack auth is WAY
- # more complex than this.
- # Consume standard OpenStack environment variables.
- # This is mainly only useful for ad-hoc command line operation as
- # in playbooks one would assume variables would be used appropriately
- OS_AUTH_URL=os.environ.get('OS_AUTH_URL', 'http://127.0.0.1:35357/v2.0/')
- OS_PASSWORD=os.environ.get('OS_PASSWORD', None)
- OS_REGION_NAME=os.environ.get('OS_REGION_NAME', None)
- OS_USERNAME=os.environ.get('OS_USERNAME', 'admin')
- OS_TENANT_NAME=os.environ.get('OS_TENANT_NAME', OS_USERNAME)
-
- spec = dict(
- login_username = dict(default=OS_USERNAME),
- auth_url = dict(default=OS_AUTH_URL),
- region_name = dict(default=OS_REGION_NAME),
- availability_zone = dict(default=None),
- )
- if OS_PASSWORD:
- spec['login_password'] = dict(default=OS_PASSWORD)
- else:
- spec['login_password'] = dict(required=True)
- if OS_TENANT_NAME:
- spec['login_tenant_name'] = dict(default=OS_TENANT_NAME)
- else:
- spec['login_tenant_name'] = dict(required=True)
- return spec
-
-def openstack_find_nova_addresses(addresses, ext_tag, key_name=None):
-
- ret = []
- for (k, v) in addresses.iteritems():
- if key_name and k == key_name:
- ret.extend([addrs['addr'] for addrs in v])
- else:
- for interface_spec in v:
- if 'OS-EXT-IPS:type' in interface_spec and interface_spec['OS-EXT-IPS:type'] == ext_tag:
- ret.append(interface_spec['addr'])
- return ret
-
-def openstack_full_argument_spec(**kwargs):
- spec = dict(
- cloud=dict(default=None),
- auth_type=dict(default=None),
- auth=dict(default=None),
- region_name=dict(default=None),
- availability_zone=dict(default=None),
- verify=dict(default=True, aliases=['validate_certs']),
- cacert=dict(default=None),
- cert=dict(default=None),
- key=dict(default=None),
- wait=dict(default=True, type='bool'),
- timeout=dict(default=180, type='int'),
- api_timeout=dict(default=None, type='int'),
- endpoint_type=dict(
- default='public', choices=['public', 'internal', 'admin']
- )
- )
- spec.update(kwargs)
- return spec
-
-
-def openstack_module_kwargs(**kwargs):
- ret = {}
- for key in ('mutually_exclusive', 'required_together', 'required_one_of'):
- if key in kwargs:
- if key in ret:
- ret[key].extend(kwargs[key])
- else:
- ret[key] = kwargs[key]
-
- return ret
diff --git a/v1/ansible/module_utils/powershell.ps1 b/v1/ansible/module_utils/powershell.ps1
deleted file mode 100644
index a11e316989..0000000000
--- a/v1/ansible/module_utils/powershell.ps1
+++ /dev/null
@@ -1,166 +0,0 @@
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2014, and others
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# Helper function to parse Ansible JSON arguments from a file passed as
-# the single argument to the module
-# Example: $params = Parse-Args $args
-Function Parse-Args($arguments)
-{
- $parameters = New-Object psobject;
- If ($arguments.Length -gt 0)
- {
- $parameters = Get-Content $arguments[0] | ConvertFrom-Json;
- }
- $parameters;
-}
-
-# Helper function to set an "attribute" on a psobject instance in powershell.
-# This is a convenience to make adding Members to the object easier and
-# slightly more pythonic
-# Example: Set-Attr $result "changed" $true
-Function Set-Attr($obj, $name, $value)
-{
- # If the provided $obj is undefined, define one to be nice
- If (-not $obj.GetType)
- {
- $obj = New-Object psobject
- }
-
- $obj | Add-Member -Force -MemberType NoteProperty -Name $name -Value $value
-}
-
-# Helper function to convert a powershell object to JSON to echo it, exiting
-# the script
-# Example: Exit-Json $result
-Function Exit-Json($obj)
-{
- # If the provided $obj is undefined, define one to be nice
- If (-not $obj.GetType)
- {
- $obj = New-Object psobject
- }
-
- echo $obj | ConvertTo-Json -Compress -Depth 99
- Exit
-}
-
-# Helper function to add the "msg" property and "failed" property, convert the
-# powershell object to JSON and echo it, exiting the script
-# Example: Fail-Json $result "This is the failure message"
-Function Fail-Json($obj, $message = $null)
-{
- # If we weren't given 2 args, and the only arg was a string, create a new
- # psobject and use the arg as the failure message
- If ($message -eq $null -and $obj.GetType().Name -eq "String")
- {
- $message = $obj
- $obj = New-Object psobject
- }
- # If the first args is undefined or not an object, make it an object
- ElseIf (-not $obj.GetType -or $obj.GetType().Name -ne "PSCustomObject")
- {
- $obj = New-Object psobject
- }
-
- Set-Attr $obj "msg" $message
- Set-Attr $obj "failed" $true
- echo $obj | ConvertTo-Json -Compress -Depth 99
- Exit 1
-}
-
-# Helper function to get an "attribute" from a psobject instance in powershell.
-# This is a convenience to make getting Members from an object easier and
-# slightly more pythonic
-# Example: $attr = Get-Attr $response "code" -default "1"
-#Note that if you use the failifempty option, you do need to specify resultobject as well.
-Function Get-Attr($obj, $name, $default = $null,$resultobj, $failifempty=$false, $emptyattributefailmessage)
-{
- # Check if the provided Member $name exists in $obj and return it or the
- # default
- If ($obj.$name.GetType)
- {
- $obj.$name
- }
- Elseif($failifempty -eq $false)
- {
- $default
- }
- else
- {
- if (!$emptyattributefailmessage) {$emptyattributefailmessage = "Missing required argument: $name"}
- Fail-Json -obj $resultobj -message $emptyattributefailmessage
- }
- return
-}
-
-# Helper filter/pipeline function to convert a value to boolean following current
-# Ansible practices
-# Example: $is_true = "true" | ConvertTo-Bool
-Function ConvertTo-Bool
-{
- param(
- [parameter(valuefrompipeline=$true)]
- $obj
- )
-
- $boolean_strings = "yes", "on", "1", "true", 1
- $obj_string = [string]$obj
-
- if (($obj.GetType().Name -eq "Boolean" -and $obj) -or $boolean_strings -contains $obj_string.ToLower())
- {
- $true
- }
- Else
- {
- $false
- }
- return
-}
-
-# Helper function to calculate a hash of a file in a way which powershell 3
-# and above can handle:
-Function Get-FileChecksum($path)
-{
- $hash = ""
- If (Test-Path -PathType Leaf $path)
- {
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
- $fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- $hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
- $fp.Dispose();
- }
- ElseIf (Test-Path -PathType Container $path)
- {
- $hash= "3";
- }
- Else
- {
- $hash = "1";
- }
- return $hash
-}
diff --git a/v1/ansible/module_utils/rax.py b/v1/ansible/module_utils/rax.py
deleted file mode 100644
index 73b48cc780..0000000000
--- a/v1/ansible/module_utils/rax.py
+++ /dev/null
@@ -1,328 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by
-# Ansible still belong to the author of the module, and may assign their own
-# license to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-from uuid import UUID
-
-
-FINAL_STATUSES = ('ACTIVE', 'ERROR')
-VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
- 'error', 'error_deleting')
-
-CLB_ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
- 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
-CLB_PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS',
- 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP',
- 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
-
-NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
-PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
-SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
-
-
-def rax_slugify(value):
- """Prepend a key with rax_ and normalize the key name"""
- return 'rax_%s' % (re.sub('[^\w-]', '_', value).lower().lstrip('_'))
-
-
-def rax_clb_node_to_dict(obj):
- """Function to convert a CLB Node object to a dict"""
- if not obj:
- return {}
- node = obj.to_dict()
- node['id'] = obj.id
- node['weight'] = obj.weight
- return node
-
-
-def rax_to_dict(obj, obj_type='standard'):
- """Generic function to convert a pyrax object to a dict
-
- obj_type values:
- standard
- clb
- server
-
- """
- instance = {}
- for key in dir(obj):
- value = getattr(obj, key)
- if obj_type == 'clb' and key == 'nodes':
- instance[key] = []
- for node in value:
- instance[key].append(rax_clb_node_to_dict(node))
- elif (isinstance(value, list) and len(value) > 0 and
- not isinstance(value[0], NON_CALLABLES)):
- instance[key] = []
- for item in value:
- instance[key].append(rax_to_dict(item))
- elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
- if obj_type == 'server':
- if key == 'image':
- if not value:
- instance['rax_boot_source'] = 'volume'
- else:
- instance['rax_boot_source'] = 'local'
- key = rax_slugify(key)
- instance[key] = value
-
- if obj_type == 'server':
- for attr in ['id', 'accessIPv4', 'name', 'status']:
- instance[attr] = instance.get(rax_slugify(attr))
-
- return instance
-
-
-def rax_find_bootable_volume(module, rax_module, server, exit=True):
- """Find a servers bootable volume"""
- cs = rax_module.cloudservers
- cbs = rax_module.cloud_blockstorage
- server_id = rax_module.utils.get_id(server)
- volumes = cs.volumes.get_server_volumes(server_id)
- bootable_volumes = []
- for volume in volumes:
- vol = cbs.get(volume)
- if module.boolean(vol.bootable):
- bootable_volumes.append(vol)
- if not bootable_volumes:
- if exit:
- module.fail_json(msg='No bootable volumes could be found for '
- 'server %s' % server_id)
- else:
- return False
- elif len(bootable_volumes) > 1:
- if exit:
- module.fail_json(msg='Multiple bootable volumes found for server '
- '%s' % server_id)
- else:
- return False
-
- return bootable_volumes[0]
-
-
-def rax_find_image(module, rax_module, image, exit=True):
- """Find a server image by ID or Name"""
- cs = rax_module.cloudservers
- try:
- UUID(image)
- except ValueError:
- try:
- image = cs.images.find(human_id=image)
- except(cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- try:
- image = cs.images.find(name=image)
- except (cs.exceptions.NotFound,
- cs.exceptions.NoUniqueMatch):
- if exit:
- module.fail_json(msg='No matching image found (%s)' %
- image)
- else:
- return False
-
- return rax_module.utils.get_id(image)
-
-
-def rax_find_volume(module, rax_module, name):
- """Find a Block storage volume by ID or name"""
- cbs = rax_module.cloud_blockstorage
- try:
- UUID(name)
- volume = cbs.get(name)
- except ValueError:
- try:
- volume = cbs.find(name=name)
- except rax_module.exc.NotFound:
- volume = None
- except Exception, e:
- module.fail_json(msg='%s' % e)
- return volume
-
-
-def rax_find_network(module, rax_module, network):
- """Find a cloud network by ID or name"""
- cnw = rax_module.cloud_networks
- try:
- UUID(network)
- except ValueError:
- if network.lower() == 'public':
- return cnw.get_server_networks(PUBLIC_NET_ID)
- elif network.lower() == 'private':
- return cnw.get_server_networks(SERVICE_NET_ID)
- else:
- try:
- network_obj = cnw.find_network_by_label(network)
- except (rax_module.exceptions.NetworkNotFound,
- rax_module.exceptions.NetworkLabelNotUnique):
- module.fail_json(msg='No matching network found (%s)' %
- network)
- else:
- return cnw.get_server_networks(network_obj)
- else:
- return cnw.get_server_networks(network)
-
-
-def rax_find_server(module, rax_module, server):
- """Find a Cloud Server by ID or name"""
- cs = rax_module.cloudservers
- try:
- UUID(server)
- server = cs.servers.get(server)
- except ValueError:
- servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
- if not servers:
- module.fail_json(msg='No Server was matched by name, '
- 'try using the Server ID instead')
- if len(servers) > 1:
- module.fail_json(msg='Multiple servers matched by name, '
- 'try using the Server ID instead')
-
- # We made it this far, grab the first and hopefully only server
- # in the list
- server = servers[0]
- return server
-
-
-def rax_find_loadbalancer(module, rax_module, loadbalancer):
- """Find a Cloud Load Balancer by ID or name"""
- clb = rax_module.cloud_loadbalancers
- try:
- found = clb.get(loadbalancer)
- except:
- found = []
- for lb in clb.list():
- if loadbalancer == lb.name:
- found.append(lb)
-
- if not found:
- module.fail_json(msg='No loadbalancer was matched')
-
- if len(found) > 1:
- module.fail_json(msg='Multiple loadbalancers matched')
-
- # We made it this far, grab the first and hopefully only item
- # in the list
- found = found[0]
-
- return found
-
-
-def rax_argument_spec():
- """Return standard base dictionary used for the argument_spec
- argument in AnsibleModule
-
- """
- return dict(
- api_key=dict(type='str', aliases=['password'], no_log=True),
- auth_endpoint=dict(type='str'),
- credentials=dict(type='str', aliases=['creds_file']),
- env=dict(type='str'),
- identity_type=dict(type='str', default='rackspace'),
- region=dict(type='str'),
- tenant_id=dict(type='str'),
- tenant_name=dict(type='str'),
- username=dict(type='str'),
- verify_ssl=dict(choices=BOOLEANS, type='bool'),
- )
-
-
-def rax_required_together():
- """Return the default list used for the required_together argument to
- AnsibleModule"""
- return [['api_key', 'username']]
-
-
-def setup_rax_module(module, rax_module, region_required=True):
- """Set up pyrax in a standard way for all modules"""
- rax_module.USER_AGENT = 'ansible/%s %s' % (ANSIBLE_VERSION,
- rax_module.USER_AGENT)
-
- api_key = module.params.get('api_key')
- auth_endpoint = module.params.get('auth_endpoint')
- credentials = module.params.get('credentials')
- env = module.params.get('env')
- identity_type = module.params.get('identity_type')
- region = module.params.get('region')
- tenant_id = module.params.get('tenant_id')
- tenant_name = module.params.get('tenant_name')
- username = module.params.get('username')
- verify_ssl = module.params.get('verify_ssl')
-
- if env is not None:
- rax_module.set_environment(env)
-
- rax_module.set_setting('identity_type', identity_type)
- if verify_ssl is not None:
- rax_module.set_setting('verify_ssl', verify_ssl)
- if auth_endpoint is not None:
- rax_module.set_setting('auth_endpoint', auth_endpoint)
- if tenant_id is not None:
- rax_module.set_setting('tenant_id', tenant_id)
- if tenant_name is not None:
- rax_module.set_setting('tenant_name', tenant_name)
-
- try:
- username = username or os.environ.get('RAX_USERNAME')
- if not username:
- username = rax_module.get_setting('keyring_username')
- if username:
- api_key = 'USE_KEYRING'
- if not api_key:
- api_key = os.environ.get('RAX_API_KEY')
- credentials = (credentials or os.environ.get('RAX_CREDENTIALS') or
- os.environ.get('RAX_CREDS_FILE'))
- region = (region or os.environ.get('RAX_REGION') or
- rax_module.get_setting('region'))
- except KeyError, e:
- module.fail_json(msg='Unable to load %s' % e.message)
-
- try:
- if api_key and username:
- if api_key == 'USE_KEYRING':
- rax_module.keyring_auth(username, region=region)
- else:
- rax_module.set_credentials(username, api_key=api_key,
- region=region)
- elif credentials:
- credentials = os.path.expanduser(credentials)
- rax_module.set_credential_file(credentials, region=region)
- else:
- raise Exception('No credentials supplied!')
- except Exception, e:
- if e.message:
- msg = str(e.message)
- else:
- msg = repr(e)
- module.fail_json(msg=msg)
-
- if region_required and region not in rax_module.regions:
- module.fail_json(msg='%s is not a valid region, must be one of: %s' %
- (region, ','.join(rax_module.regions)))
-
- return rax_module
diff --git a/v1/ansible/module_utils/redhat.py b/v1/ansible/module_utils/redhat.py
deleted file mode 100644
index bf19ccf390..0000000000
--- a/v1/ansible/module_utils/redhat.py
+++ /dev/null
@@ -1,280 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), James Laska
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import re
-import types
-import ConfigParser
-import shlex
-
-
-class RegistrationBase(object):
- def __init__(self, module, username=None, password=None):
- self.module = module
- self.username = username
- self.password = password
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- redhat_repo = '/etc/yum.repos.d/redhat.repo'
- if os.path.isfile(redhat_repo):
- os.unlink(redhat_repo)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update_plugin_conf(self, plugin, enabled=True):
- plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
- if os.path.isfile(plugin_conf):
- cfg = ConfigParser.ConfigParser()
- cfg.read([plugin_conf])
- if enabled:
- cfg.set('main', 'enabled', 1)
- else:
- cfg.set('main', 'enabled', 0)
- fd = open(plugin_conf, 'rwa+')
- cfg.write(fd)
- fd.close()
-
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
-
-
-class Rhsm(RegistrationBase):
- def __init__(self, module, username=None, password=None):
- RegistrationBase.__init__(self, module, username, password)
- self.config = self._read_config()
- self.module = module
-
- def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
- '''
- Load RHSM configuration from /etc/rhsm/rhsm.conf.
- Returns:
- * ConfigParser object
- '''
-
- # Read RHSM defaults ...
- cp = ConfigParser.ConfigParser()
- cp.read(rhsm_conf)
-
- # Add support for specifying a default value w/o having to standup some configuration
- # Yeah, I know this should be subclassed ... but, oh well
- def get_option_default(self, key, default=''):
- sect, opt = key.split('.', 1)
- if self.has_section(sect) and self.has_option(sect, opt):
- return self.get(sect, opt)
- else:
- return default
-
- cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
-
- return cp
-
- def enable(self):
- '''
- Enable the system to receive updates from subscription-manager.
- This involves updating affected yum plugins and removing any
- conflicting yum repositories.
- '''
- RegistrationBase.enable(self)
- self.update_plugin_conf('rhnplugin', False)
- self.update_plugin_conf('subscription-manager', True)
-
- def configure(self, **kwargs):
- '''
- Configure the system as directed for registration with RHN
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'config']
-
- # Pass supplied **kwargs as parameters to subscription-manager. Ignore
- # non-configuration parameters and replace '_' with '.'. For example,
- # 'server_hostname' becomes '--system.hostname'.
- for k,v in kwargs.items():
- if re.search(r'^(system|rhsm)_', k):
- args.append('--%s=%s' % (k.replace('_','.'), v))
-
- self.module.run_command(args, check_rc=True)
-
- @property
- def is_registered(self):
- '''
- Determine whether the current system
- Returns:
- * Boolean - whether the current system is currently registered to
- RHN.
- '''
- # Quick version...
- if False:
- return os.path.isfile('/etc/pki/consumer/cert.pem') and \
- os.path.isfile('/etc/pki/consumer/key.pem')
-
- args = ['subscription-manager', 'identity']
- rc, stdout, stderr = self.module.run_command(args, check_rc=False)
- if rc == 0:
- return True
- else:
- return False
-
- def register(self, username, password, autosubscribe, activationkey):
- '''
- Register the current system to the provided RHN server
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'register']
-
- # Generate command arguments
- if activationkey:
- args.append('--activationkey "%s"' % activationkey)
- else:
- if autosubscribe:
- args.append('--autosubscribe')
- if username:
- args.extend(['--username', username])
- if password:
- args.extend(['--password', password])
-
- # Do the needful...
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
-
- def unsubscribe(self):
- '''
- Unsubscribe a system from all subscribed channels
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unsubscribe', '--all']
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
-
- def unregister(self):
- '''
- Unregister a currently registered system
- Raises:
- * Exception - if error occurs while running command
- '''
- args = ['subscription-manager', 'unregister']
- rc, stderr, stdout = self.module.run_command(args, check_rc=True)
-
- def subscribe(self, regexp):
- '''
- Subscribe current system to available pools matching the specified
- regular expression
- Raises:
- * Exception - if error occurs while running command
- '''
-
- # Available pools ready for subscription
- available_pools = RhsmPools(self.module)
-
- for pool in available_pools.filter(regexp):
- pool.subscribe()
-
-
-class RhsmPool(object):
- '''
- Convenience class for housing subscription information
- '''
-
- def __init__(self, module, **kwargs):
- self.module = module
- for k,v in kwargs.items():
- setattr(self, k, v)
-
- def __str__(self):
- return str(self.__getattribute__('_name'))
-
- def subscribe(self):
- args = "subscription-manager subscribe --pool %s" % self.PoolId
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
- if rc == 0:
- return True
- else:
- return False
-
-
-class RhsmPools(object):
- """
- This class is used for manipulating pools subscriptions with RHSM
- """
- def __init__(self, module):
- self.module = module
- self.products = self._load_product_list()
-
- def __iter__(self):
- return self.products.__iter__()
-
- def _load_product_list(self):
- """
- Loads list of all available pools for system in data structure
- """
- args = "subscription-manager list --available"
- rc, stdout, stderr = self.module.run_command(args, check_rc=True)
-
- products = []
- for line in stdout.split('\n'):
- # Remove leading+trailing whitespace
- line = line.strip()
- # An empty line implies the end of an output group
- if len(line) == 0:
- continue
- # If a colon ':' is found, parse
- elif ':' in line:
- (key, value) = line.split(':',1)
- key = key.strip().replace(" ", "") # To unify
- value = value.strip()
- if key in ['ProductName', 'SubscriptionName']:
- # Remember the name for later processing
- products.append(RhsmPool(self.module, _name=value, key=value))
- elif products:
- # Associate value with most recently recorded product
- products[-1].__setattr__(key, value)
- # FIXME - log some warning?
- #else:
- # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
- return products
-
- def filter(self, regexp='^$'):
- '''
- Return a list of RhsmPools whose name matches the provided regular expression
- '''
- r = re.compile(regexp)
- for product in self.products:
- if r.search(product._name):
- yield product
-
diff --git a/v1/ansible/module_utils/splitter.py b/v1/ansible/module_utils/splitter.py
deleted file mode 100644
index 899fa8cd92..0000000000
--- a/v1/ansible/module_utils/splitter.py
+++ /dev/null
@@ -1,201 +0,0 @@
-# (c) 2014 James Cammarata, <jcammarata@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-def _get_quote_state(token, quote_char):
- '''
- the goal of this block is to determine if the quoted string
- is unterminated in which case it needs to be put back together
- '''
- # the char before the current one, used to see if
- # the current character is escaped
- prev_char = None
- for idx, cur_char in enumerate(token):
- if idx > 0:
- prev_char = token[idx-1]
- if cur_char in '"\'' and prev_char != '\\':
- if quote_char:
- if cur_char == quote_char:
- quote_char = None
- else:
- quote_char = cur_char
- return quote_char
-
-def _count_jinja2_blocks(token, cur_depth, open_token, close_token):
- '''
- this function counts the number of opening/closing blocks for a
- given opening/closing type and adjusts the current depth for that
- block based on the difference
- '''
- num_open = token.count(open_token)
- num_close = token.count(close_token)
- if num_open != num_close:
- cur_depth += (num_open - num_close)
- if cur_depth < 0:
- cur_depth = 0
- return cur_depth
-
-def split_args(args):
- '''
- Splits args on whitespace, but intelligently reassembles
- those that may have been split over a jinja2 block or quotes.
-
- When used in a remote module, we won't ever have to be concerned about
- jinja2 blocks, however this function is/will be used in the
- core portions as well before the args are templated.
-
- example input: a=b c="foo bar"
- example output: ['a=b', 'c="foo bar"']
-
- Basically this is a variation shlex that has some more intelligence for
- how Ansible needs to use it.
- '''
-
- # the list of params parsed out of the arg string
- # this is going to be the result value when we are donei
- params = []
-
- # here we encode the args, so we have a uniform charset to
- # work with, and split on white space
- args = args.strip()
- try:
- args = args.encode('utf-8')
- do_decode = True
- except UnicodeDecodeError:
- do_decode = False
- items = args.split('\n')
-
- # iterate over the tokens, and reassemble any that may have been
- # split on a space inside a jinja2 block.
- # ex if tokens are "{{", "foo", "}}" these go together
-
- # These variables are used
- # to keep track of the state of the parsing, since blocks and quotes
- # may be nested within each other.
-
- quote_char = None
- inside_quotes = False
- print_depth = 0 # used to count nested jinja2 {{ }} blocks
- block_depth = 0 # used to count nested jinja2 {% %} blocks
- comment_depth = 0 # used to count nested jinja2 {# #} blocks
-
- # now we loop over each split chunk, coalescing tokens if the white space
- # split occurred within quotes or a jinja2 block of some kind
- for itemidx,item in enumerate(items):
-
- # we split on spaces and newlines separately, so that we
- # can tell which character we split on for reassembly
- # inside quotation characters
- tokens = item.strip().split(' ')
-
- line_continuation = False
- for idx,token in enumerate(tokens):
-
- # if we hit a line continuation character, but
- # we're not inside quotes, ignore it and continue
- # on to the next token while setting a flag
- if token == '\\' and not inside_quotes:
- line_continuation = True
- continue
-
- # store the previous quoting state for checking later
- was_inside_quotes = inside_quotes
- quote_char = _get_quote_state(token, quote_char)
- inside_quotes = quote_char is not None
-
- # multiple conditions may append a token to the list of params,
- # so we keep track with this flag to make sure it only happens once
- # append means add to the end of the list, don't append means concatenate
- # it to the end of the last token
- appended = False
-
- # if we're inside quotes now, but weren't before, append the token
- # to the end of the list, since we'll tack on more to it later
- # otherwise, if we're inside any jinja2 block, inside quotes, or we were
- # inside quotes (but aren't now) concat this token to the last param
- if inside_quotes and not was_inside_quotes:
- params.append(token)
- appended = True
- elif print_depth or block_depth or comment_depth or inside_quotes or was_inside_quotes:
- if idx == 0 and not inside_quotes and was_inside_quotes:
- params[-1] = "%s%s" % (params[-1], token)
- elif len(tokens) > 1:
- spacer = ''
- if idx > 0:
- spacer = ' '
- params[-1] = "%s%s%s" % (params[-1], spacer, token)
- else:
- spacer = ''
- if not params[-1].endswith('\n') and idx == 0:
- spacer = '\n'
- params[-1] = "%s%s%s" % (params[-1], spacer, token)
- appended = True
-
- # if the number of paired block tags is not the same, the depth has changed, so we calculate that here
- # and may append the current token to the params (if we haven't previously done so)
- prev_print_depth = print_depth
- print_depth = _count_jinja2_blocks(token, print_depth, "{{", "}}")
- if print_depth != prev_print_depth and not appended:
- params.append(token)
- appended = True
-
- prev_block_depth = block_depth
- block_depth = _count_jinja2_blocks(token, block_depth, "{%", "%}")
- if block_depth != prev_block_depth and not appended:
- params.append(token)
- appended = True
-
- prev_comment_depth = comment_depth
- comment_depth = _count_jinja2_blocks(token, comment_depth, "{#", "#}")
- if comment_depth != prev_comment_depth and not appended:
- params.append(token)
- appended = True
-
- # finally, if we're at zero depth for all blocks and not inside quotes, and have not
- # yet appended anything to the list of params, we do so now
- if not (print_depth or block_depth or comment_depth) and not inside_quotes and not appended and token != '':
- params.append(token)
-
- # if this was the last token in the list, and we have more than
- # one item (meaning we split on newlines), add a newline back here
- # to preserve the original structure
- if len(items) > 1 and itemidx != len(items) - 1 and not line_continuation:
- if not params[-1].endswith('\n') or item == '':
- params[-1] += '\n'
-
- # always clear the line continuation flag
- line_continuation = False
-
- # If we're done and things are not at zero depth or we're still inside quotes,
- # raise an error to indicate that the args were unbalanced
- if print_depth or block_depth or comment_depth or inside_quotes:
- raise Exception("error while splitting arguments, either an unbalanced jinja2 block or quotes")
-
- # finally, we decode each param back to the unicode it was in the arg string
- if do_decode:
- params = [x.decode('utf-8') for x in params]
-
- return params
-
-def is_quoted(data):
- return len(data) > 0 and (data[0] == '"' and data[-1] == '"' or data[0] == "'" and data[-1] == "'")
-
-def unquote(data):
- ''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
- if is_quoted(data):
- return data[1:-1]
- return data
-
diff --git a/v1/ansible/module_utils/urls.py b/v1/ansible/module_utils/urls.py
deleted file mode 100644
index 18317e86ae..0000000000
--- a/v1/ansible/module_utils/urls.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-try:
- import urllib
- HAS_URLLIB = True
-except:
- HAS_URLLIB = False
-
-try:
- import urllib2
- HAS_URLLIB2 = True
-except:
- HAS_URLLIB2 = False
-
-try:
- import urlparse
- HAS_URLPARSE = True
-except:
- HAS_URLPARSE = False
-
-try:
- import ssl
- HAS_SSL=True
-except:
- HAS_SSL=False
-
-HAS_MATCH_HOSTNAME = True
-try:
- from ssl import match_hostname, CertificateError
-except ImportError:
- try:
- from backports.ssl_match_hostname import match_hostname, CertificateError
- except ImportError:
- HAS_MATCH_HOSTNAME = False
-
-import httplib
-import os
-import re
-import socket
-import tempfile
-
-
-# This is a dummy cacert provided for Mac OS since you need at least 1
-# ca cert, regardless of validity, for Python on Mac OS to use the
-# keychain functionality in OpenSSL for validating SSL certificates.
-# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
-DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
-MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
-BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
-MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
-MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
-VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
-gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
-gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
-4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
-gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
-FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
-CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
-aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
-MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
-qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
-zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
------END CERTIFICATE-----
-"""
-
-class CustomHTTPSConnection(httplib.HTTPSConnection):
- def connect(self):
- "Connect to a host on a given (SSL) port."
-
- if hasattr(self, 'source_address'):
- sock = socket.create_connection((self.host, self.port), self.timeout, self.source_address)
- else:
- sock = socket.create_connection((self.host, self.port), self.timeout)
- if self._tunnel_host:
- self.sock = sock
- self._tunnel()
- self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1)
-
-class CustomHTTPSHandler(urllib2.HTTPSHandler):
-
- def https_open(self, req):
- return self.do_open(CustomHTTPSConnection, req)
-
- https_request = urllib2.AbstractHTTPHandler.do_request_
-
-def generic_urlparse(parts):
- '''
- Returns a dictionary of url parts as parsed by urlparse,
- but accounts for the fact that older versions of that
- library do not support named attributes (ie. .netloc)
- '''
- generic_parts = dict()
- if hasattr(parts, 'netloc'):
- # urlparse is newer, just read the fields straight
- # from the parts object
- generic_parts['scheme'] = parts.scheme
- generic_parts['netloc'] = parts.netloc
- generic_parts['path'] = parts.path
- generic_parts['params'] = parts.params
- generic_parts['query'] = parts.query
- generic_parts['fragment'] = parts.fragment
- generic_parts['username'] = parts.username
- generic_parts['password'] = parts.password
- generic_parts['hostname'] = parts.hostname
- generic_parts['port'] = parts.port
- else:
- # we have to use indexes, and then parse out
- # the other parts not supported by indexing
- generic_parts['scheme'] = parts[0]
- generic_parts['netloc'] = parts[1]
- generic_parts['path'] = parts[2]
- generic_parts['params'] = parts[3]
- generic_parts['query'] = parts[4]
- generic_parts['fragment'] = parts[5]
- # get the username, password, etc.
- try:
- netloc_re = re.compile(r'^((?:\w)+(?::(?:\w)+)?@)?([A-Za-z0-9.-]+)(:\d+)?$')
- (auth, hostname, port) = netloc_re.match(parts[1])
- if port:
- # the capture group for the port will include the ':',
- # so remove it and convert the port to an integer
- port = int(port[1:])
- if auth:
- # the capture group above inclues the @, so remove it
- # and then split it up based on the first ':' found
- auth = auth[:-1]
- username, password = auth.split(':', 1)
- generic_parts['username'] = username
- generic_parts['password'] = password
- generic_parts['hostname'] = hostnme
- generic_parts['port'] = port
- except:
- generic_parts['username'] = None
- generic_parts['password'] = None
- generic_parts['hostname'] = None
- generic_parts['port'] = None
- return generic_parts
-
-class RequestWithMethod(urllib2.Request):
- '''
- Workaround for using DELETE/PUT/etc with urllib2
- Originally contained in library/net_infrastructure/dnsmadeeasy
- '''
-
- def __init__(self, url, method, data=None, headers={}):
- self._method = method
- urllib2.Request.__init__(self, url, data, headers)
-
- def get_method(self):
- if self._method:
- return self._method
- else:
- return urllib2.Request.get_method(self)
-
-
-class SSLValidationHandler(urllib2.BaseHandler):
- '''
- A custom handler class for SSL validation.
-
- Based on:
- http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
- http://techknack.net/python-urllib2-handlers/
- '''
- CONNECT_COMMAND = "CONNECT %s:%s HTTP/1.0\r\nConnection: close\r\n"
-
- def __init__(self, module, hostname, port):
- self.module = module
- self.hostname = hostname
- self.port = port
-
- def get_ca_certs(self):
- # tries to find a valid CA cert in one of the
- # standard locations for the current distribution
-
- ca_certs = []
- paths_checked = []
- platform = get_platform()
- distribution = get_distribution()
-
- # build a list of paths to check for .crt/.pem files
- # based on the platform type
- paths_checked.append('/etc/ssl/certs')
- if platform == 'Linux':
- paths_checked.append('/etc/pki/ca-trust/extracted/pem')
- paths_checked.append('/etc/pki/tls/certs')
- paths_checked.append('/usr/share/ca-certificates/cacert.org')
- elif platform == 'FreeBSD':
- paths_checked.append('/usr/local/share/certs')
- elif platform == 'OpenBSD':
- paths_checked.append('/etc/ssl')
- elif platform == 'NetBSD':
- ca_certs.append('/etc/openssl/certs')
- elif platform == 'SunOS':
- paths_checked.append('/opt/local/etc/openssl/certs')
-
- # fall back to a user-deployed cert in a standard
- # location if the OS platform one is not available
- paths_checked.append('/etc/ansible')
-
- tmp_fd, tmp_path = tempfile.mkstemp()
-
- # Write the dummy ca cert if we are running on Mac OS X
- if platform == 'Darwin':
- os.write(tmp_fd, DUMMY_CA_CERT)
- # Default Homebrew path for OpenSSL certs
- paths_checked.append('/usr/local/etc/openssl')
-
- # for all of the paths, find any .crt or .pem files
- # and compile them into single temp file for use
- # in the ssl check to speed up the test
- for path in paths_checked:
- if os.path.exists(path) and os.path.isdir(path):
- dir_contents = os.listdir(path)
- for f in dir_contents:
- full_path = os.path.join(path, f)
- if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
- try:
- cert_file = open(full_path, 'r')
- os.write(tmp_fd, cert_file.read())
- os.write(tmp_fd, '\n')
- cert_file.close()
- except:
- pass
-
- return (tmp_path, paths_checked)
-
- def validate_proxy_response(self, response, valid_codes=[200]):
- '''
- make sure we get back a valid code from the proxy
- '''
- try:
- (http_version, resp_code, msg) = re.match(r'(HTTP/\d\.\d) (\d\d\d) (.*)', response).groups()
- if int(resp_code) not in valid_codes:
- raise Exception
- except:
- self.module.fail_json(msg='Connection to proxy failed')
-
- def detect_no_proxy(self, url):
- '''
- Detect if the 'no_proxy' environment variable is set and honor those locations.
- '''
- env_no_proxy = os.environ.get('no_proxy')
- if env_no_proxy:
- env_no_proxy = env_no_proxy.split(',')
- netloc = urlparse.urlparse(url).netloc
-
- for host in env_no_proxy:
- if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
- # Our requested URL matches something in no_proxy, so don't
- # use the proxy for this
- return False
- return True
-
- def http_request(self, req):
- tmp_ca_cert_path, paths_checked = self.get_ca_certs()
- https_proxy = os.environ.get('https_proxy')
-
- # Detect if 'no_proxy' environment variable is set and if our URL is included
- use_proxy = self.detect_no_proxy(req.get_full_url())
-
- if not use_proxy:
- # ignore proxy settings for this host request
- return req
-
- try:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if https_proxy:
- proxy_parts = generic_urlparse(urlparse.urlparse(https_proxy))
- s.connect((proxy_parts.get('hostname'), proxy_parts.get('port')))
- if proxy_parts.get('scheme') == 'http':
- s.sendall(self.CONNECT_COMMAND % (self.hostname, self.port))
- if proxy_parts.get('username'):
- credentials = "%s:%s" % (proxy_parts.get('username',''), proxy_parts.get('password',''))
- s.sendall('Proxy-Authorization: Basic %s\r\n' % credentials.encode('base64').strip())
- s.sendall('\r\n')
- connect_result = s.recv(4096)
- self.validate_proxy_response(connect_result)
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
- match_hostname(ssl_s.getpeercert(), self.hostname)
- else:
- self.module.fail_json(msg='Unsupported proxy scheme: %s. Currently ansible only supports HTTP proxies.' % proxy_parts.get('scheme'))
- else:
- s.connect((self.hostname, self.port))
- ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
- match_hostname(ssl_s.getpeercert(), self.hostname)
- # close the ssl connection
- #ssl_s.unwrap()
- s.close()
- except (ssl.SSLError, socket.error), e:
- # fail if we tried all of the certs but none worked
- if 'connection refused' in str(e).lower():
- self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
- else:
- self.module.fail_json(
- msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
- 'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
- 'Paths checked for this platform: %s' % ", ".join(paths_checked)
- )
- except CertificateError:
- self.module.fail_json(msg="SSL Certificate does not belong to %s. Make sure the url has a certificate that belongs to it or use validate_certs=no (insecure)" % self.hostname)
-
- try:
- # cleanup the temp file created, don't worry
- # if it fails for some reason
- os.remove(tmp_ca_cert_path)
- except:
- pass
-
- return req
-
- https_request = http_request
-
-
-def url_argument_spec():
- '''
- Creates an argument spec that can be used with any module
- that will be requesting content via urllib/urllib2
- '''
- return dict(
- url = dict(),
- force = dict(default='no', aliases=['thirsty'], type='bool'),
- http_agent = dict(default='ansible-httpget'),
- use_proxy = dict(default='yes', type='bool'),
- validate_certs = dict(default='yes', type='bool'),
- url_username = dict(required=False),
- url_password = dict(required=False),
- )
-
-
-def fetch_url(module, url, data=None, headers=None, method=None,
- use_proxy=True, force=False, last_mod_time=None, timeout=10):
- '''
- Fetches a file from an HTTP/FTP server using urllib2
- '''
-
- if not HAS_URLLIB:
- module.fail_json(msg='urllib is not installed')
- if not HAS_URLLIB2:
- module.fail_json(msg='urllib2 is not installed')
- elif not HAS_URLPARSE:
- module.fail_json(msg='urlparse is not installed')
-
- r = None
- handlers = []
- info = dict(url=url)
-
- distribution = get_distribution()
- # Get validate_certs from the module params
- validate_certs = module.params.get('validate_certs', True)
-
- # FIXME: change the following to use the generic_urlparse function
- # to remove the indexed references for 'parsed'
- parsed = urlparse.urlparse(url)
- if parsed[0] == 'https' and validate_certs:
- if not HAS_SSL:
- if distribution == 'Redhat':
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended. You can also install python-ssl from EPEL')
- else:
- module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
- if not HAS_MATCH_HOSTNAME:
- module.fail_json(msg='Available SSL validation does not check that the certificate matches the hostname. You can install backports.ssl_match_hostname or update your managed machine to python-2.7.9 or newer. You could also use validate_certs=no, however this is unsafe and not recommended')
-
- # do the cert validation
- netloc = parsed[1]
- if '@' in netloc:
- netloc = netloc.split('@', 1)[1]
- if ':' in netloc:
- hostname, port = netloc.split(':', 1)
- port = int(port)
- else:
- hostname = netloc
- port = 443
- # create the SSL validation handler and
- # add it to the list of handlers
- ssl_handler = SSLValidationHandler(module, hostname, port)
- handlers.append(ssl_handler)
-
- if parsed[0] != 'ftp':
- username = module.params.get('url_username', '')
- if username:
- password = module.params.get('url_password', '')
- netloc = parsed[1]
- elif '@' in parsed[1]:
- credentials, netloc = parsed[1].split('@', 1)
- if ':' in credentials:
- username, password = credentials.split(':', 1)
- else:
- username = credentials
- password = ''
-
- parsed = list(parsed)
- parsed[1] = netloc
-
- # reconstruct url without credentials
- url = urlparse.urlunparse(parsed)
-
- if username:
- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
-
- # this creates a password manager
- passman.add_password(None, netloc, username, password)
-
- # because we have put None at the start it will always
- # use this username/password combination for urls
- # for which `theurl` is a super-url
- authhandler = urllib2.HTTPBasicAuthHandler(passman)
-
- # create the AuthHandler
- handlers.append(authhandler)
-
- if not use_proxy:
- proxyhandler = urllib2.ProxyHandler({})
- handlers.append(proxyhandler)
-
- # pre-2.6 versions of python cannot use the custom https
- # handler, since the socket class is lacking this method
- if hasattr(socket, 'create_connection'):
- handlers.append(CustomHTTPSHandler)
-
- opener = urllib2.build_opener(*handlers)
- urllib2.install_opener(opener)
-
- if method:
- if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
- module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
- request = RequestWithMethod(url, method.upper(), data)
- else:
- request = urllib2.Request(url, data)
-
- # add the custom agent header, to help prevent issues
- # with sites that block the default urllib agent string
- request.add_header('User-agent', module.params.get('http_agent'))
-
- # if we're ok with getting a 304, set the timestamp in the
- # header, otherwise make sure we don't get a cached copy
- if last_mod_time and not force:
- tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
- request.add_header('If-Modified-Since', tstamp)
- else:
- request.add_header('cache-control', 'no-cache')
-
- # user defined headers now, which may override things we've set above
- if headers:
- if not isinstance(headers, dict):
- module.fail_json("headers provided to fetch_url() must be a dict")
- for header in headers:
- request.add_header(header, headers[header])
-
- try:
- if sys.version_info < (2,6,0):
- # urlopen in python prior to 2.6.0 did not
- # have a timeout parameter
- r = urllib2.urlopen(request, None)
- else:
- r = urllib2.urlopen(request, None, timeout)
- info.update(r.info())
- info['url'] = r.geturl() # The URL goes in too, because of redirects.
- info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
- except urllib2.HTTPError, e:
- info.update(dict(msg=str(e), status=e.code))
- except urllib2.URLError, e:
- code = int(getattr(e, 'code', -1))
- info.update(dict(msg="Request failed: %s" % str(e), status=code))
- except socket.error, e:
- info.update(dict(msg="Connection failure: %s" % str(e), status=-1))
- except Exception, e:
- info.update(dict(msg="An unknown error occurred: %s" % str(e), status=-1))
-
- return r, info
-
diff --git a/v1/ansible/modules/__init__.py b/v1/ansible/modules/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/modules/__init__.py
+++ /dev/null
diff --git a/v1/ansible/modules/core b/v1/ansible/modules/core
deleted file mode 160000
-Subproject f8d8af17cdc72500af8319c96004b86ac702a0a
diff --git a/v1/ansible/modules/extras b/v1/ansible/modules/extras
deleted file mode 160000
-Subproject 495ad450e53feb1cd26218dc68056cc34d1ea9f
diff --git a/v1/ansible/playbook/__init__.py b/v1/ansible/playbook/__init__.py
deleted file mode 100644
index 24ba2d3c6e..0000000000
--- a/v1/ansible/playbook/__init__.py
+++ /dev/null
@@ -1,874 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.inventory
-import ansible.constants as C
-import ansible.runner
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.callbacks
-import ansible.cache
-import os
-import shlex
-import collections
-from play import Play
-import StringIO
-import pipes
-
-# the setup cache stores all variables about a host
-# gathered during the setup step, while the vars cache
-# holds all other variables about a host
-SETUP_CACHE = ansible.cache.FactCache()
-VARS_CACHE = collections.defaultdict(dict)
-RESERVED_TAGS = ['all','tagged','untagged','always']
-
-
-class PlayBook(object):
- '''
- runs an ansible playbook, given as a datastructure or YAML filename.
- A playbook is a deployment, config management, or automation based
- set of commands to run in series.
-
- multiple plays/tasks do not execute simultaneously, but tasks in each
- pattern do execute in parallel (according to the number of forks
- requested) among the hosts they address
- '''
-
- # *****************************************************
-
- def __init__(self,
- playbook = None,
- host_list = C.DEFAULT_HOST_LIST,
- module_path = None,
- forks = C.DEFAULT_FORKS,
- timeout = C.DEFAULT_TIMEOUT,
- remote_user = C.DEFAULT_REMOTE_USER,
- remote_pass = C.DEFAULT_REMOTE_PASS,
- remote_port = None,
- transport = C.DEFAULT_TRANSPORT,
- private_key_file = C.DEFAULT_PRIVATE_KEY_FILE,
- callbacks = None,
- runner_callbacks = None,
- stats = None,
- extra_vars = None,
- only_tags = None,
- skip_tags = None,
- subset = C.DEFAULT_SUBSET,
- inventory = None,
- check = False,
- diff = False,
- any_errors_fatal = False,
- vault_password = False,
- force_handlers = False,
- # privilege escalation
- become = C.DEFAULT_BECOME,
- become_method = C.DEFAULT_BECOME_METHOD,
- become_user = C.DEFAULT_BECOME_USER,
- become_pass = None,
- ):
-
- """
- playbook: path to a playbook file
- host_list: path to a file like /etc/ansible/hosts
- module_path: path to ansible modules, like /usr/share/ansible/
- forks: desired level of parallelism
- timeout: connection timeout
- remote_user: run as this user if not specified in a particular play
- remote_pass: use this remote password (for all plays) vs using SSH keys
- remote_port: default remote port to use if not specified with the host or play
- transport: how to connect to hosts that don't specify a transport (local, paramiko, etc)
- callbacks output callbacks for the playbook
- runner_callbacks: more callbacks, this time for the runner API
- stats: holds aggregrate data about events occurring to each host
- inventory: can be specified instead of host_list to use a pre-existing inventory object
- check: don't change anything, just try to detect some potential changes
- any_errors_fatal: terminate the entire execution immediately when one of the hosts has failed
- force_handlers: continue to notify and run handlers even if a task fails
- """
-
- self.SETUP_CACHE = SETUP_CACHE
- self.VARS_CACHE = VARS_CACHE
-
- arguments = []
- if playbook is None:
- arguments.append('playbook')
- if callbacks is None:
- arguments.append('callbacks')
- if runner_callbacks is None:
- arguments.append('runner_callbacks')
- if stats is None:
- arguments.append('stats')
- if arguments:
- raise Exception('PlayBook missing required arguments: %s' % ', '.join(arguments))
-
- if extra_vars is None:
- extra_vars = {}
- if only_tags is None:
- only_tags = [ 'all' ]
- if skip_tags is None:
- skip_tags = []
-
- self.check = check
- self.diff = diff
- self.module_path = module_path
- self.forks = forks
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.transport = transport
- self.callbacks = callbacks
- self.runner_callbacks = runner_callbacks
- self.stats = stats
- self.extra_vars = extra_vars
- self.global_vars = {}
- self.private_key_file = private_key_file
- self.only_tags = only_tags
- self.skip_tags = skip_tags
- self.any_errors_fatal = any_errors_fatal
- self.vault_password = vault_password
- self.force_handlers = force_handlers
-
- self.become = become
- self.become_method = become_method
- self.become_user = become_user
- self.become_pass = become_pass
-
- self.callbacks.playbook = self
- self.runner_callbacks.playbook = self
-
- if inventory is None:
- self.inventory = ansible.inventory.Inventory(host_list)
- self.inventory.subset(subset)
- else:
- self.inventory = inventory
-
- if self.module_path is not None:
- utils.plugins.module_finder.add_directory(self.module_path)
-
- self.basedir = os.path.dirname(playbook) or '.'
- utils.plugins.push_basedir(self.basedir)
-
- # let inventory know the playbook basedir so it can load more vars
- self.inventory.set_playbook_basedir(self.basedir)
-
- vars = extra_vars.copy()
- vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.inventory.basedir() is not None:
- vars['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- vars['inventory_file'] = self.inventory.src()
-
- self.filename = playbook
- (self.playbook, self.play_basedirs) = self._load_playbook_from_file(playbook, vars)
- ansible.callbacks.load_callback_plugins()
- ansible.callbacks.set_playbook(self.callbacks, self)
-
- self._ansible_version = utils.version_info(gitinfo=True)
-
- # *****************************************************
-
- def _get_playbook_vars(self, play_ds, existing_vars):
- '''
- Gets the vars specified with the play and blends them
- with any existing vars that have already been read in
- '''
- new_vars = existing_vars.copy()
- if 'vars' in play_ds:
- if isinstance(play_ds['vars'], dict):
- new_vars.update(play_ds['vars'])
- elif isinstance(play_ds['vars'], list):
- for v in play_ds['vars']:
- new_vars.update(v)
- return new_vars
-
- # *****************************************************
-
- def _get_include_info(self, play_ds, basedir, existing_vars={}):
- '''
- Gets any key=value pairs specified with the included file
- name and returns the merged vars along with the path
- '''
- new_vars = existing_vars.copy()
- tokens = split_args(play_ds.get('include', ''))
- for t in tokens[1:]:
- try:
- (k,v) = unquote(t).split("=", 1)
- new_vars[k] = template(basedir, v, new_vars)
- except ValueError, e:
- raise errors.AnsibleError('included playbook variables must be in the form k=v, got: %s' % t)
-
- return (new_vars, unquote(tokens[0]))
-
- # *****************************************************
-
- def _get_playbook_vars_files(self, play_ds, existing_vars_files):
- new_vars_files = list(existing_vars_files)
- if 'vars_files' in play_ds:
- new_vars_files = utils.list_union(new_vars_files, play_ds['vars_files'])
- return new_vars_files
-
- # *****************************************************
-
- def _extend_play_vars(self, play, vars={}):
- '''
- Extends the given play's variables with the additional specified vars.
- '''
-
- if 'vars' not in play or not play['vars']:
- # someone left out or put an empty "vars:" entry in their playbook
- return vars.copy()
-
- play_vars = None
- if isinstance(play['vars'], dict):
- play_vars = play['vars'].copy()
- play_vars.update(vars)
- elif isinstance(play['vars'], list):
- # nobody should really do this, but handle vars: a=1 b=2
- play_vars = play['vars'][:]
- play_vars.extend([{k:v} for k,v in vars.iteritems()])
-
- return play_vars
-
- # *****************************************************
-
- def _load_playbook_from_file(self, path, vars={}, vars_files=[]):
- '''
- run top level error checking on playbooks and allow them to include other playbooks.
- '''
-
- playbook_data = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
- accumulated_plays = []
- play_basedirs = []
-
- if type(playbook_data) != list:
- raise errors.AnsibleError("parse error: playbooks must be formatted as a YAML list, got %s" % type(playbook_data))
-
- basedir = os.path.dirname(path) or '.'
- utils.plugins.push_basedir(basedir)
- for play in playbook_data:
- if type(play) != dict:
- raise errors.AnsibleError("parse error: each play in a playbook must be a YAML dictionary (hash), received: %s" % play)
-
- if 'include' in play:
- # a playbook (list of plays) decided to include some other list of plays
- # from another file. The result is a flat list of plays in the end.
-
- play_vars = self._get_playbook_vars(play, vars)
- play_vars_files = self._get_playbook_vars_files(play, vars_files)
- inc_vars, inc_path = self._get_include_info(play, basedir, play_vars)
- play_vars.update(inc_vars)
-
- included_path = utils.path_dwim(basedir, template(basedir, inc_path, play_vars))
- (plays, basedirs) = self._load_playbook_from_file(included_path, vars=play_vars, vars_files=play_vars_files)
- for p in plays:
- # support for parameterized play includes works by passing
- # those variables along to the subservient play
- p['vars'] = self._extend_play_vars(p, play_vars)
- # now add in the vars_files
- p['vars_files'] = utils.list_union(p.get('vars_files', []), play_vars_files)
-
- accumulated_plays.extend(plays)
- play_basedirs.extend(basedirs)
-
- else:
-
- # this is a normal (non-included play)
- accumulated_plays.append(play)
- play_basedirs.append(basedir)
-
- return (accumulated_plays, play_basedirs)
-
- # *****************************************************
-
- def run(self):
- ''' run all patterns in the playbook '''
- plays = []
- matched_tags_all = set()
- unmatched_tags_all = set()
-
- # loop through all patterns and run them
- self.callbacks.on_start()
- for (play_ds, play_basedir) in zip(self.playbook, self.play_basedirs):
- play = Play(self, play_ds, play_basedir, vault_password=self.vault_password)
- assert play is not None
-
- matched_tags, unmatched_tags = play.compare_tags(self.only_tags)
-
- matched_tags_all = matched_tags_all | matched_tags
- unmatched_tags_all = unmatched_tags_all | unmatched_tags
-
- # Remove tasks we wish to skip
- matched_tags = matched_tags - set(self.skip_tags)
-
- # if we have matched_tags, the play must be run.
- # if the play contains no tasks, assume we just want to gather facts
- # in this case there are actually 3 meta tasks (handler flushes) not 0
- # tasks, so that's why there's a check against 3
- if (len(matched_tags) > 0 or len(play.tasks()) == 3):
- plays.append(play)
-
- # if the playbook is invoked with --tags or --skip-tags that don't
- # exist at all in the playbooks then we need to raise an error so that
- # the user can correct the arguments.
- unknown_tags = ((set(self.only_tags) | set(self.skip_tags)) -
- (matched_tags_all | unmatched_tags_all))
-
- for t in RESERVED_TAGS:
- unknown_tags.discard(t)
-
- if len(unknown_tags) > 0:
- for t in RESERVED_TAGS:
- unmatched_tags_all.discard(t)
- msg = 'tag(s) not found in playbook: %s. possible values: %s'
- unknown = ','.join(sorted(unknown_tags))
- unmatched = ','.join(sorted(unmatched_tags_all))
- raise errors.AnsibleError(msg % (unknown, unmatched))
-
- for play in plays:
- ansible.callbacks.set_play(self.callbacks, play)
- ansible.callbacks.set_play(self.runner_callbacks, play)
- if not self._run_play(play):
- break
-
- ansible.callbacks.set_play(self.callbacks, None)
- ansible.callbacks.set_play(self.runner_callbacks, None)
-
- # summarize the results
- results = {}
- for host in self.stats.processed.keys():
- results[host] = self.stats.summarize(host)
- return results
-
- # *****************************************************
-
- def _async_poll(self, poller, async_seconds, async_poll_interval):
- ''' launch an async job, if poll_interval is set, wait for completion '''
-
- results = poller.wait(async_seconds, async_poll_interval)
-
- # mark any hosts that are still listed as started as failed
- # since these likely got killed by async_wrapper
- for host in poller.hosts_to_poll:
- reason = { 'failed' : 1, 'rc' : None, 'msg' : 'timed out' }
- self.runner_callbacks.on_async_failed(host, reason, poller.runner.vars_cache[host]['ansible_job_id'])
- results['contacted'][host] = reason
-
- return results
-
- # *****************************************************
-
- def _trim_unavailable_hosts(self, hostlist=[], keep_failed=False):
- ''' returns a list of hosts that haven't failed and aren't dark '''
-
- return [ h for h in hostlist if (keep_failed or h not in self.stats.failures) and (h not in self.stats.dark)]
-
- # *****************************************************
-
- def _run_task_internal(self, task, include_failed=False):
- ''' run a particular module step in a playbook '''
-
- hosts = self._trim_unavailable_hosts(self.inventory.list_hosts(task.play._play_hosts), keep_failed=include_failed)
- self.inventory.restrict_to(hosts)
-
- runner = ansible.runner.Runner(
- pattern=task.play.hosts,
- inventory=self.inventory,
- module_name=task.module_name,
- module_args=task.module_args,
- forks=self.forks,
- remote_pass=self.remote_pass,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=task.remote_user,
- remote_port=task.play.remote_port,
- module_vars=task.module_vars,
- play_vars=task.play_vars,
- play_file_vars=task.play_file_vars,
- role_vars=task.role_vars,
- role_params=task.role_params,
- default_vars=task.default_vars,
- extra_vars=self.extra_vars,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- basedir=task.play.basedir,
- conditional=task.when,
- callbacks=self.runner_callbacks,
- transport=task.transport,
- is_playbook=True,
- check=self.check,
- diff=self.diff,
- environment=task.environment,
- complex_args=task.args,
- accelerate=task.play.accelerate,
- accelerate_port=task.play.accelerate_port,
- accelerate_ipv6=task.play.accelerate_ipv6,
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR,
- vault_pass = self.vault_password,
- run_hosts=hosts,
- no_log=task.no_log,
- run_once=task.run_once,
- become=task.become,
- become_method=task.become_method,
- become_user=task.become_user,
- become_pass=task.become_pass,
- )
-
- runner.module_vars.update({'play_hosts': hosts})
- runner.module_vars.update({'ansible_version': self._ansible_version})
-
- if task.async_seconds == 0:
- results = runner.run()
- else:
- results, poller = runner.run_async(task.async_seconds)
- self.stats.compute(results)
- if task.async_poll_interval > 0:
- # if not polling, playbook requested fire and forget, so don't poll
- results = self._async_poll(poller, task.async_seconds, task.async_poll_interval)
- else:
- for (host, res) in results.get('contacted', {}).iteritems():
- self.runner_callbacks.on_async_ok(host, res, poller.runner.vars_cache[host]['ansible_job_id'])
-
- contacted = results.get('contacted',{})
- dark = results.get('dark', {})
-
- self.inventory.lift_restriction()
-
- if len(contacted.keys()) == 0 and len(dark.keys()) == 0:
- return None
-
- return results
-
- # *****************************************************
-
- def _run_task(self, play, task, is_handler):
- ''' run a single task in the playbook and recursively run any subtasks. '''
-
- ansible.callbacks.set_task(self.callbacks, task)
- ansible.callbacks.set_task(self.runner_callbacks, task)
-
- if task.role_name:
- name = '%s | %s' % (task.role_name, task.name)
- else:
- name = task.name
-
- try:
- # v1 HACK: we don't have enough information to template many names
- # at this point. Rather than making this work for all cases in
- # v1, just make this degrade gracefully. Will fix in v2
- name = template(play.basedir, name, task.module_vars, lookup_fatal=False, filter_fatal=False)
- except:
- pass
-
- self.callbacks.on_task_start(name, is_handler)
- if hasattr(self.callbacks, 'skip_task') and self.callbacks.skip_task:
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return True
-
- # template ignore_errors
- # TODO: Is this needed here? cond is templated again in
- # check_conditional after some more manipulations.
- # TODO: we don't have enough information here to template cond either
- # (see note on templating name above)
- cond = template(play.basedir, task.ignore_errors, task.module_vars, expand_lists=False)
- task.ignore_errors = utils.check_conditional(cond, play.basedir, task.module_vars, fail_on_undefined=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR)
-
- # load up an appropriate ansible runner to run the task in parallel
- include_failed = is_handler and play.force_handlers
- results = self._run_task_internal(task, include_failed=include_failed)
-
- # if no hosts are matched, carry on
- hosts_remaining = True
- if results is None:
- hosts_remaining = False
- results = {}
-
- contacted = results.get('contacted', {})
- self.stats.compute(results, ignore_errors=task.ignore_errors)
-
- def _register_play_vars(host, result):
- # when 'register' is used, persist the result in the vars cache
- # rather than the setup cache - vars should be transient between
- # playbook executions
- if 'stdout' in result and 'stdout_lines' not in result:
- result['stdout_lines'] = result['stdout'].splitlines()
- utils.update_hash(self.VARS_CACHE, host, {task.register: result})
-
- def _save_play_facts(host, facts):
- # saves play facts in SETUP_CACHE, unless the module executed was
- # set_fact, in which case we add them to the VARS_CACHE
- if task.module_name in ('set_fact', 'include_vars'):
- utils.update_hash(self.VARS_CACHE, host, facts)
- else:
- utils.update_hash(self.SETUP_CACHE, host, facts)
-
- # add facts to the global setup cache
- for host, result in contacted.iteritems():
- if 'results' in result:
- # task ran with_ lookup plugin, so facts are encapsulated in
- # multiple list items in the results key
- for res in result['results']:
- if type(res) == dict:
- facts = res.get('ansible_facts', {})
- _save_play_facts(host, facts)
- else:
- # when facts are returned, persist them in the setup cache
- facts = result.get('ansible_facts', {})
- _save_play_facts(host, facts)
-
- # if requested, save the result into the registered variable name
- if task.register:
- _register_play_vars(host, result)
-
- # also have to register some failed, but ignored, tasks
- if task.ignore_errors and task.register:
- failed = results.get('failed', {})
- for host, result in failed.iteritems():
- _register_play_vars(host, result)
-
- # flag which notify handlers need to be run
- if len(task.notify) > 0:
- for host, results in results.get('contacted',{}).iteritems():
- if results.get('changed', False):
- for handler_name in task.notify:
- self._flag_handler(play, template(play.basedir, handler_name, task.module_vars), host)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
- return hosts_remaining
-
- # *****************************************************
-
- def _flag_handler(self, play, handler_name, host):
- '''
- if a task has any notify elements, flag handlers for run
- at end of execution cycle for hosts that have indicated
- changes have been made
- '''
-
- found = False
- for x in play.handlers():
- if handler_name == template(play.basedir, x.name, x.module_vars):
- found = True
- self.callbacks.on_notify(host, x.name)
- x.notified_by.append(host)
- if not found:
- raise errors.AnsibleError("change handler (%s) is not defined" % handler_name)
-
- # *****************************************************
-
- def _do_setup_step(self, play):
- ''' get facts from the remote system '''
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- if play.gather_facts is None and C.DEFAULT_GATHERING == 'smart':
- host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
- if len(host_list) == 0:
- return {}
- elif play.gather_facts is False or (play.gather_facts is None and C.DEFAULT_GATHERING == 'explicit'):
- return {}
-
- self.callbacks.on_setup()
- self.inventory.restrict_to(host_list)
-
- ansible.callbacks.set_task(self.callbacks, None)
- ansible.callbacks.set_task(self.runner_callbacks, None)
-
- # push any variables down to the system
- setup_results = ansible.runner.Runner(
- basedir=self.basedir,
- pattern=play.hosts,
- module_name='setup',
- module_args={},
- inventory=self.inventory,
- forks=self.forks,
- module_path=self.module_path,
- timeout=self.timeout,
- remote_user=play.remote_user,
- remote_pass=self.remote_pass,
- remote_port=play.remote_port,
- private_key_file=self.private_key_file,
- setup_cache=self.SETUP_CACHE,
- vars_cache=self.VARS_CACHE,
- callbacks=self.runner_callbacks,
- become=play.become,
- become_method=play.become_method,
- become_user=play.become_user,
- become_pass=self.become_pass,
- vault_pass=self.vault_password,
- transport=play.transport,
- is_playbook=True,
- module_vars=play.vars,
- play_vars=play.vars,
- play_file_vars=play.vars_file_vars,
- role_vars=play.role_vars,
- default_vars=play.default_vars,
- check=self.check,
- diff=self.diff,
- accelerate=play.accelerate,
- accelerate_port=play.accelerate_port,
- ).run()
- self.stats.compute(setup_results, setup=True)
-
- self.inventory.lift_restriction()
-
- # now for each result, load into the setup cache so we can
- # let runner template out future commands
- setup_ok = setup_results.get('contacted', {})
- for (host, result) in setup_ok.iteritems():
- utils.update_hash(self.SETUP_CACHE, host, {'module_setup': True})
- utils.update_hash(self.SETUP_CACHE, host, result.get('ansible_facts', {}))
- return setup_results
-
- # *****************************************************
-
-
- def generate_retry_inventory(self, replay_hosts):
- '''
- called by /usr/bin/ansible when a playbook run fails. It generates an inventory
- that allows re-running on ONLY the failed hosts. This may duplicate some
- variable information in group_vars/host_vars but that is ok, and expected.
- '''
-
- buf = StringIO.StringIO()
- for x in replay_hosts:
- buf.write("%s\n" % x)
- basedir = C.shell_expand_path(C.RETRY_FILES_SAVE_PATH)
- filename = "%s.retry" % os.path.basename(self.filename)
- filename = filename.replace(".yml","")
- filename = os.path.join(basedir, filename)
-
- try:
- if not os.path.exists(basedir):
- os.makedirs(basedir)
-
- fd = open(filename, 'w')
- fd.write(buf.getvalue())
- fd.close()
- except:
- ansible.callbacks.display(
- "\nERROR: could not create retry file. Check the value of \n"
- + "the configuration variable 'retry_files_save_path' or set \n"
- + "'retry_files_enabled' to False to avoid this message.\n",
- color='red'
- )
- return None
-
- return filename
-
- # *****************************************************
- def tasks_to_run_in_play(self, play):
-
- tasks = []
-
- for task in play.tasks():
- # only run the task if the requested tags match or has 'always' tag
- u = set(['untagged'])
- task_set = set(task.tags)
-
- if 'always' in task.tags:
- should_run = True
- else:
- if 'all' in self.only_tags:
- should_run = True
- else:
- should_run = False
- if 'tagged' in self.only_tags:
- if task_set != u:
- should_run = True
- elif 'untagged' in self.only_tags:
- if task_set == u:
- should_run = True
- else:
- if task_set.intersection(self.only_tags):
- should_run = True
-
- # Check for tags that we need to skip
- if 'all' in self.skip_tags:
- should_run = False
- else:
- if 'tagged' in self.skip_tags:
- if task_set != u:
- should_run = False
- elif 'untagged' in self.skip_tags:
- if task_set == u:
- should_run = False
- else:
- if should_run:
- if task_set.intersection(self.skip_tags):
- should_run = False
-
- if should_run:
- tasks.append(task)
-
- return tasks
-
- # *****************************************************
- def _run_play(self, play):
- ''' run a list of tasks for a given pattern, in order '''
-
- self.callbacks.on_play_start(play.name)
- # Get the hosts for this play
- play._play_hosts = self.inventory.list_hosts(play.hosts)
- # if no hosts matches this play, drop out
- if not play._play_hosts:
- self.callbacks.on_no_hosts_matched()
- return True
-
- # get facts from system
- self._do_setup_step(play)
-
- # now with that data, handle contentional variable file imports!
- all_hosts = self._trim_unavailable_hosts(play._play_hosts)
- play.update_vars_files(all_hosts, vault_password=self.vault_password)
- hosts_count = len(all_hosts)
-
- if play.serial.endswith("%"):
-
- # This is a percentage, so calculate it based on the
- # number of hosts
- serial_pct = int(play.serial.replace("%",""))
- serial = int((serial_pct/100.0) * len(all_hosts))
-
- # Ensure that no matter how small the percentage, serial
- # can never fall below 1, so that things actually happen
- serial = max(serial, 1)
- else:
- serial = int(play.serial)
-
- serialized_batch = []
- if serial <= 0:
- serialized_batch = [all_hosts]
- else:
- # do N forks all the way through before moving to next
- while len(all_hosts) > 0:
- play_hosts = []
- for x in range(serial):
- if len(all_hosts) > 0:
- play_hosts.append(all_hosts.pop(0))
- serialized_batch.append(play_hosts)
-
- task_errors = False
- for on_hosts in serialized_batch:
-
- # restrict the play to just the hosts we have in our on_hosts block that are
- # available.
- play._play_hosts = self._trim_unavailable_hosts(on_hosts)
- self.inventory.also_restrict_to(on_hosts)
-
- for task in self.tasks_to_run_in_play(play):
-
- if task.meta is not None:
- # meta tasks can force handlers to run mid-play
- if task.meta == 'flush_handlers':
- self.run_handlers(play)
-
- # skip calling the handler till the play is finished
- continue
-
- if not self._run_task(play, task, False):
- # whether no hosts matched is fatal or not depends if it was on the initial step.
- # if we got exactly no hosts on the first step (setup!) then the host group
- # just didn't match anything and that's ok
- return False
-
- # Get a new list of what hosts are left as available, the ones that
- # did not go fail/dark during the task
- host_list = self._trim_unavailable_hosts(play._play_hosts)
-
- # Set max_fail_pct to 0, So if any hosts fails, bail out
- if task.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
-
- # If threshold for max nodes failed is exceeded, bail out.
- if play.serial > 0:
- # if serial is set, we need to shorten the size of host_count
- play_count = len(play._play_hosts)
- if (play_count - len(host_list)) > int((play.max_fail_pct)/100.0 * play_count):
- host_list = None
- else:
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
-
- # if no hosts remain, drop out
- if not host_list:
- if play.force_handlers:
- task_errors = True
- break
- else:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- # lift restrictions after each play finishes
- self.inventory.lift_also_restriction()
-
- if task_errors and not play.force_handlers:
- # if there were failed tasks and handler execution
- # is not forced, quit the play with an error
- return False
- else:
- # no errors, go ahead and execute all handlers
- if not self.run_handlers(play):
- return False
-
- return True
-
-
- def run_handlers(self, play):
- on_hosts = play._play_hosts
- hosts_count = len(on_hosts)
- for task in play.tasks():
- if task.meta is not None:
-
- fired_names = {}
- for handler in play.handlers():
- if len(handler.notified_by) > 0:
- self.inventory.restrict_to(handler.notified_by)
-
- # Resolve the variables first
- handler_name = template(play.basedir, handler.name, handler.module_vars)
- if handler_name not in fired_names:
- self._run_task(play, handler, True)
- # prevent duplicate handler includes from running more than once
- fired_names[handler_name] = 1
-
- host_list = self._trim_unavailable_hosts(play._play_hosts)
- if handler.any_errors_fatal and len(host_list) < hosts_count:
- play.max_fail_pct = 0
- if (hosts_count - len(host_list)) > int((play.max_fail_pct)/100.0 * hosts_count):
- host_list = None
- if not host_list and not play.force_handlers:
- self.callbacks.on_no_hosts_remaining()
- return False
-
- self.inventory.lift_restriction()
- new_list = handler.notified_by[:]
- for host in handler.notified_by:
- if host in on_hosts:
- while host in new_list:
- new_list.remove(host)
- handler.notified_by = new_list
-
- continue
-
- return True
diff --git a/v1/ansible/playbook/play.py b/v1/ansible/playbook/play.py
deleted file mode 100644
index 6ee85e0bf4..0000000000
--- a/v1/ansible/playbook/play.py
+++ /dev/null
@@ -1,949 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#############################################
-
-from ansible.utils.template import template
-from ansible import utils
-from ansible import errors
-from ansible.playbook.task import Task
-from ansible.module_utils.splitter import split_args, unquote
-import ansible.constants as C
-import pipes
-import shlex
-import os
-import sys
-import uuid
-
-
-class Play(object):
-
- _pb_common = [
- 'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
- 'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
- 'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
- 'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
- 'vault_password',
- ]
-
- __slots__ = _pb_common + [
- '_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
- 'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
- 'role_vars', 'transport', 'vars_file_vars',
- ]
-
- # to catch typos and so forth -- these are userland names
- # and don't line up 1:1 with how they are stored
- VALID_KEYS = frozenset(_pb_common + [
- 'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
- 'pre_tasks', 'role_names', 'tasks', 'user',
- ])
-
- # *************************************************
-
- def __init__(self, playbook, ds, basedir, vault_password=None):
- ''' constructor loads from a play datastructure '''
-
- for x in ds.keys():
- if not x in Play.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
-
- # allow all playbook keys to be set by --extra-vars
- self.vars = ds.get('vars', {})
- self.vars_prompt = ds.get('vars_prompt', {})
- self.playbook = playbook
- self.vars = self._get_vars()
- self.vars_file_vars = dict() # these are vars read in from vars_files:
- self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
- self.basedir = basedir
- self.roles = ds.get('roles', None)
- self.tags = ds.get('tags', None)
- self.vault_password = vault_password
- self.environment = ds.get('environment', {})
-
- if self.tags is None:
- self.tags = []
- elif type(self.tags) in [ str, unicode ]:
- self.tags = self.tags.split(",")
- elif type(self.tags) != list:
- self.tags = []
-
- # make sure we have some special internal variables set, which
- # we use later when loading tasks and handlers
- load_vars = dict()
- load_vars['playbook_dir'] = os.path.abspath(self.basedir)
- if self.playbook.inventory.basedir() is not None:
- load_vars['inventory_dir'] = self.playbook.inventory.basedir()
- if self.playbook.inventory.src() is not None:
- load_vars['inventory_file'] = self.playbook.inventory.src()
-
- # We first load the vars files from the datastructure
- # so we have the default variables to pass into the roles
- self.vars_files = ds.get('vars_files', [])
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
- processed_vars_files = self._update_vars_files_for_host(None)
-
- # now we load the roles into the datastructure
- self.included_roles = []
- ds = self._load_roles(self.roles, ds)
-
- # and finally re-process the vars files as they may have been updated
- # by the included roles, but exclude any which have been processed
- self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
- if not isinstance(self.vars_files, list):
- raise errors.AnsibleError('vars_files must be a list')
-
- self._update_vars_files_for_host(None)
-
- # template everything to be efficient, but do not pre-mature template
- # tasks/handlers as they may have inventory scope overrides. We also
- # create a set of temporary variables for templating, so we don't
- # trample on the existing vars structures
- _tasks = ds.pop('tasks', [])
- _handlers = ds.pop('handlers', [])
-
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
-
- try:
- ds = template(basedir, ds, temp_vars)
- except errors.AnsibleError, e:
- utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
-
- ds['tasks'] = _tasks
- ds['handlers'] = _handlers
-
- self._ds = ds
-
- hosts = ds.get('hosts')
- if hosts is None:
- raise errors.AnsibleError('hosts declaration is required')
- elif isinstance(hosts, list):
- try:
- hosts = ';'.join(hosts)
- except TypeError,e:
- raise errors.AnsibleError('improper host declaration: %s' % str(e))
-
- self.serial = str(ds.get('serial', 0))
- self.hosts = hosts
- self.name = ds.get('name', self.hosts)
- self._tasks = ds.get('tasks', [])
- self._handlers = ds.get('handlers', [])
- self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
- self.remote_port = ds.get('port', self.playbook.remote_port)
- self.transport = ds.get('connection', self.playbook.transport)
- self.remote_port = self.remote_port
- self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
- self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
- self.accelerate_port = ds.get('accelerate_port', None)
- self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
- self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
- self.no_log = utils.boolean(ds.get('no_log', 'false'))
- self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
-
- # Fail out if user specifies conflicting privilege escalations
- if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
- if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
- if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
- raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
-
- # become settings are inherited and updated normally
- self.become = ds.get('become', self.playbook.become)
- self.become_method = ds.get('become_method', self.playbook.become_method)
- self.become_user = ds.get('become_user', self.playbook.become_user)
-
- # Make sure current play settings are reflected in become fields
- if 'sudo' in ds:
- self.become=ds['sudo']
- self.become_method='sudo'
- if 'sudo_user' in ds:
- self.become_user=ds['sudo_user']
- elif 'su' in ds:
- self.become=True
- self.become=ds['su']
- self.become_method='su'
- if 'su_user' in ds:
- self.become_user=ds['su_user']
-
- # gather_facts is not a simple boolean, as None means that a 'smart'
- # fact gathering mode will be used, so we need to be careful here as
- # calling utils.boolean(None) returns False
- self.gather_facts = ds.get('gather_facts', None)
- if self.gather_facts is not None:
- self.gather_facts = utils.boolean(self.gather_facts)
-
- load_vars['role_names'] = ds.get('role_names', [])
-
- self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
- self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
-
- # apply any missing tags to role tasks
- self._late_merge_role_tags()
-
- # place holder for the discovered hosts to be used in this play
- self._play_hosts = None
-
- # *************************************************
-
- def _get_role_path(self, role):
- """
- Returns the path on disk to the directory containing
- the role directories like tasks, templates, etc. Also
- returns any variables that were included with the role
- """
- orig_path = template(self.basedir,role,self.vars)
-
- role_vars = {}
- if type(orig_path) == dict:
- # what, not a path?
- role_name = orig_path.get('role', None)
- if role_name is None:
- raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
- role_vars = orig_path
- else:
- role_name = utils.role_spec_parse(orig_path)["name"]
-
- role_path = None
-
- possible_paths = [
- utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
- utils.path_dwim(self.basedir, role_name)
- ]
-
- if C.DEFAULT_ROLES_PATH:
- search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
- for loc in search_locations:
- loc = os.path.expanduser(loc)
- possible_paths.append(utils.path_dwim(loc, role_name))
-
- for path_option in possible_paths:
- if os.path.isdir(path_option):
- role_path = path_option
- break
-
- if role_path is None:
- raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
-
- return (role_path, role_vars)
-
- def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
- # this number is arbitrary, but it seems sane
- if level > 20:
- raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
- for role in roles:
- role_path,role_vars = self._get_role_path(role)
-
- # save just the role params for this role, which exclude the special
- # keywords 'role', 'tags', and 'when'.
- role_params = role_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in role_params:
- del role_params[item]
-
- role_vars = utils.combine_vars(passed_vars, role_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- if not isinstance(vars_data, dict):
- raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
- role_vars = utils.combine_vars(vars_data, role_vars)
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
- defaults_data = {}
- if os.path.isfile(defaults):
- defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
-
- # the meta directory contains the yaml that should
- # hold the list of dependencies (if any)
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
- if os.path.isfile(meta):
- data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if data:
- dependencies = data.get('dependencies',[])
- if dependencies is None:
- dependencies = []
- for dep in dependencies:
- allow_dupes = False
- (dep_path,dep_vars) = self._get_role_path(dep)
-
- # save the dep params, just as we did above
- dep_params = dep_vars.copy()
- for item in ('role', 'tags', 'when'):
- if item in dep_params:
- del dep_params[item]
-
- meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
- if os.path.isfile(meta):
- meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
- if meta_data:
- allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
-
- # if any tags were specified as role/dep variables, merge
- # them into the current dep_vars so they're passed on to any
- # further dependencies too, and so we only have one place
- # (dep_vars) to look for tags going forward
- def __merge_tags(var_obj):
- old_tags = dep_vars.get('tags', [])
- if isinstance(old_tags, basestring):
- old_tags = [old_tags, ]
- if isinstance(var_obj, dict):
- new_tags = var_obj.get('tags', [])
- if isinstance(new_tags, basestring):
- new_tags = [new_tags, ]
- else:
- new_tags = []
- return list(set(old_tags).union(set(new_tags)))
-
- dep_vars['tags'] = __merge_tags(role_vars)
- dep_vars['tags'] = __merge_tags(passed_vars)
-
- # if tags are set from this role, merge them
- # into the tags list for the dependent role
- if "tags" in passed_vars:
- for included_role_dep in dep_stack:
- included_dep_name = included_role_dep[0]
- included_dep_vars = included_role_dep[2]
- if included_dep_name == dep:
- if "tags" in included_dep_vars:
- included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
- else:
- included_dep_vars["tags"] = passed_vars["tags"][:]
-
- dep_vars = utils.combine_vars(passed_vars, dep_vars)
- dep_vars = utils.combine_vars(role_vars, dep_vars)
-
- vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
- vars_data = {}
- if os.path.isfile(vars):
- vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
- if vars_data:
- dep_vars = utils.combine_vars(dep_vars, vars_data)
- pass
-
- defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
- dep_defaults_data = {}
- if os.path.isfile(defaults):
- dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
- if 'role' in dep_vars:
- del dep_vars['role']
-
- if not allow_dupes:
- if dep in self.included_roles:
- # skip back to the top, since we don't want to
- # do anything else with this role
- continue
- else:
- self.included_roles.append(dep)
-
- def _merge_conditional(cur_conditionals, new_conditionals):
- if isinstance(new_conditionals, (basestring, bool)):
- cur_conditionals.append(new_conditionals)
- elif isinstance(new_conditionals, list):
- cur_conditionals.extend(new_conditionals)
-
- # pass along conditionals from roles to dep roles
- passed_when = passed_vars.get('when')
- role_when = role_vars.get('when')
- dep_when = dep_vars.get('when')
-
- tmpcond = []
- _merge_conditional(tmpcond, passed_when)
- _merge_conditional(tmpcond, role_when)
- _merge_conditional(tmpcond, dep_when)
-
- if len(tmpcond) > 0:
- dep_vars['when'] = tmpcond
-
- self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
- dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
-
- # only add the current role when we're at the top level,
- # otherwise we'll end up in a recursive loop
- if level == 0:
- self.included_roles.append(role)
- dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
- return dep_stack
-
- def _load_role_vars_files(self, vars_files):
- # process variables stored in vars/main.yml files
- role_vars = {}
- for filename in vars_files:
- if os.path.exists(filename):
- new_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_vars:
- if type(new_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_vars)))
- role_vars = utils.combine_vars(role_vars, new_vars)
-
- return role_vars
-
- def _load_role_defaults(self, defaults_files):
- # process default variables
- default_vars = {}
- for filename in defaults_files:
- if os.path.exists(filename):
- new_default_vars = utils.parse_yaml_from_file(filename, vault_password=self.vault_password)
- if new_default_vars:
- if type(new_default_vars) != dict:
- raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (filename, type(new_default_vars)))
- default_vars = utils.combine_vars(default_vars, new_default_vars)
-
- return default_vars
-
- def _load_roles(self, roles, ds):
- # a role is a name that auto-includes the following if they exist
- # <rolename>/tasks/main.yml
- # <rolename>/handlers/main.yml
- # <rolename>/vars/main.yml
- # <rolename>/library
- # and it auto-extends tasks/handlers/vars_files/module paths as appropriate if found
-
- if roles is None:
- roles = []
- if type(roles) != list:
- raise errors.AnsibleError("value of 'roles:' must be a list")
-
- new_tasks = []
- new_handlers = []
- role_vars_files = []
- defaults_files = []
-
- pre_tasks = ds.get('pre_tasks', None)
- if type(pre_tasks) != list:
- pre_tasks = []
- for x in pre_tasks:
- new_tasks.append(x)
-
- # flush handlers after pre_tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- roles = self._build_role_dependencies(roles, [], {})
-
- # give each role an uuid and
- # make role_path available as variable to the task
- for idx, val in enumerate(roles):
- this_uuid = str(uuid.uuid4())
- roles[idx][-3]['role_uuid'] = this_uuid
- roles[idx][-3]['role_path'] = roles[idx][1]
-
- role_names = []
-
- for (role, role_path, role_vars, role_params, default_vars) in roles:
- # special vars must be extracted from the dict to the included tasks
- special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
- special_vars = {}
- for k in special_keys:
- if k in role_vars:
- special_vars[k] = role_vars[k]
-
- task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
- handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
- vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
- meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
- defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
-
- task = self._resolve_main(task_basepath)
- handler = self._resolve_main(handler_basepath)
- vars_file = self._resolve_main(vars_basepath)
- meta_file = self._resolve_main(meta_basepath)
- defaults_file = self._resolve_main(defaults_basepath)
-
- library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
-
- missing = lambda f: not os.path.isfile(f)
- if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
- raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
-
- if isinstance(role, dict):
- role_name = role['role']
- else:
- role_name = utils.role_spec_parse(role)["name"]
-
- role_names.append(role_name)
- if os.path.isfile(task):
- nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_tasks.append(nt)
- if os.path.isfile(handler):
- nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
- for k in special_keys:
- if k in special_vars:
- nt[k] = special_vars[k]
- new_handlers.append(nt)
- if os.path.isfile(vars_file):
- role_vars_files.append(vars_file)
- if os.path.isfile(defaults_file):
- defaults_files.append(defaults_file)
- if os.path.isdir(library):
- utils.plugins.module_finder.add_directory(library)
-
- tasks = ds.get('tasks', None)
- post_tasks = ds.get('post_tasks', None)
- handlers = ds.get('handlers', None)
- vars_files = ds.get('vars_files', None)
-
- if type(tasks) != list:
- tasks = []
- if type(handlers) != list:
- handlers = []
- if type(vars_files) != list:
- vars_files = []
- if type(post_tasks) != list:
- post_tasks = []
-
- new_tasks.extend(tasks)
- # flush handlers after tasks + role tasks
- new_tasks.append(dict(meta='flush_handlers'))
- new_tasks.extend(post_tasks)
- # flush handlers after post tasks
- new_tasks.append(dict(meta='flush_handlers'))
-
- new_handlers.extend(handlers)
-
- ds['tasks'] = new_tasks
- ds['handlers'] = new_handlers
- ds['role_names'] = role_names
-
- self.role_vars = self._load_role_vars_files(role_vars_files)
- self.default_vars = self._load_role_defaults(defaults_files)
-
- return ds
-
- # *************************************************
-
- def _resolve_main(self, basepath):
- ''' flexibly handle variations in main filenames '''
- # these filenames are acceptable:
- mains = (
- os.path.join(basepath, 'main'),
- os.path.join(basepath, 'main.yml'),
- os.path.join(basepath, 'main.yaml'),
- os.path.join(basepath, 'main.json'),
- )
- if sum([os.path.isfile(x) for x in mains]) > 1:
- raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
- else:
- for m in mains:
- if os.path.isfile(m):
- return m # exactly one main file
- return mains[0] # zero mains (we still need to return something)
-
- # *************************************************
-
- def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
- additional_conditions=None, original_file=None, role_name=None):
- ''' handle task and handler include statements '''
-
- results = []
- if tasks is None:
- # support empty handler files, and the like.
- tasks = []
- if additional_conditions is None:
- additional_conditions = []
- if vars is None:
- vars = {}
- if role_params is None:
- role_params = {}
- if default_vars is None:
- default_vars = {}
- if become_vars is None:
- become_vars = {}
-
- old_conditions = list(additional_conditions)
-
- for x in tasks:
-
- # prevent assigning the same conditions to each task on an include
- included_additional_conditions = list(old_conditions)
-
- if not isinstance(x, dict):
- raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
-
- # evaluate privilege escalation vars for current and child tasks
- included_become_vars = {}
- for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
- if k in x:
- included_become_vars[k] = x[k]
- elif k in become_vars:
- included_become_vars[k] = become_vars[k]
- x[k] = become_vars[k]
-
- task_vars = vars.copy()
- if original_file:
- task_vars['_original_file'] = original_file
-
- if 'meta' in x:
- if x['meta'] == 'flush_handlers':
- if role_name and 'role_name' not in x:
- x['role_name'] = role_name
- results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
- continue
-
- if 'include' in x:
- tokens = split_args(str(x['include']))
- included_additional_conditions = list(additional_conditions)
- include_vars = {}
- for k in x:
- if k.startswith("with_"):
- if original_file:
- offender = " (in %s)" % original_file
- else:
- offender = ""
- utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
- elif k.startswith("when_"):
- utils.deprecated("\"when_<criteria>:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
- elif k == 'when':
- if isinstance(x[k], (basestring, bool)):
- included_additional_conditions.append(x[k])
- elif type(x[k]) is list:
- included_additional_conditions.extend(x[k])
- elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
- continue
- else:
- include_vars[k] = x[k]
-
- # get any role parameters specified
- role_params = x.get('role_params', {})
-
- # get any role default variables specified
- default_vars = x.get('default_vars', {})
- if not default_vars:
- default_vars = self.default_vars
- else:
- default_vars = utils.combine_vars(self.default_vars, default_vars)
-
- # append the vars defined with the include (from above)
- # as well as the old-style 'vars' element. The old-style
- # vars are given higher precedence here (just in case)
- task_vars = utils.combine_vars(task_vars, include_vars)
- if 'vars' in x:
- task_vars = utils.combine_vars(task_vars, x['vars'])
-
- new_role = None
- if 'role_name' in x:
- new_role = x['role_name']
-
- mv = task_vars.copy()
- for t in tokens[1:]:
- (k,v) = t.split("=", 1)
- v = unquote(v)
- mv[k] = template(self.basedir, v, mv)
- dirname = self.basedir
- if original_file:
- dirname = os.path.dirname(original_file)
-
- # temp vars are used here to avoid trampling on the existing vars structures
- temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
- temp_vars = utils.combine_vars(temp_vars, mv)
- temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
- include_file = template(dirname, tokens[0], temp_vars)
- include_filename = utils.path_dwim(dirname, include_file)
-
- data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
- if 'role_name' in x and data is not None:
- for y in data:
- if isinstance(y, dict) and 'include' in y:
- y['role_name'] = new_role
- loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
- results += loaded
- elif type(x) == dict:
- task = Task(
- self, x,
- module_vars=task_vars,
- play_vars=self.vars,
- play_file_vars=self.vars_file_vars,
- role_vars=self.role_vars,
- role_params=role_params,
- default_vars=default_vars,
- additional_conditions=list(additional_conditions),
- role_name=role_name
- )
- results.append(task)
- else:
- raise Exception("unexpected task type")
-
- for x in results:
- if self.tags is not None:
- x.tags.extend(self.tags)
-
- return results
-
- # *************************************************
-
- def tasks(self):
- ''' return task objects for this play '''
- return self._tasks
-
- def handlers(self):
- ''' return handler objects for this play '''
- return self._handlers
-
- # *************************************************
-
- def _get_vars(self):
- ''' load the vars section from a play, accounting for all sorts of variable features
- including loading from yaml files, prompting, and conditional includes of the first
- file found in a list. '''
-
- if self.vars is None:
- self.vars = {}
-
- if type(self.vars) not in [dict, list]:
- raise errors.AnsibleError("'vars' section must contain only key/value pairs")
-
- vars = {}
-
- # translate a list of vars into a dict
- if type(self.vars) == list:
- for item in self.vars:
- if getattr(item, 'items', None) is None:
- raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
- k, v = item.items()[0]
- vars[k] = v
- else:
- vars.update(self.vars)
-
- if type(self.vars_prompt) == list:
- for var in self.vars_prompt:
- if not 'name' in var:
- raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
-
- vname = var['name']
- prompt = var.get("prompt", vname)
- default = var.get("default", None)
- private = var.get("private", True)
-
- confirm = var.get("confirm", False)
- encrypt = var.get("encrypt", None)
- salt_size = var.get("salt_size", None)
- salt = var.get("salt", None)
-
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- vname, private, prompt, encrypt, confirm, salt_size, salt, default
- )
-
- elif type(self.vars_prompt) == dict:
- for (vname, prompt) in self.vars_prompt.iteritems():
- prompt_msg = "%s: " % prompt
- if vname not in self.playbook.extra_vars:
- vars[vname] = self.playbook.callbacks.on_vars_prompt(
- varname=vname, private=False, prompt=prompt_msg, default=None
- )
-
- else:
- raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
-
- if type(self.playbook.extra_vars) == dict:
- vars = utils.combine_vars(vars, self.playbook.extra_vars)
-
- return vars
-
- # *************************************************
-
- def update_vars_files(self, hosts, vault_password=None):
- ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
-
- # now loop through all the hosts...
- for h in hosts:
- self._update_vars_files_for_host(h, vault_password=vault_password)
-
- # *************************************************
-
- def compare_tags(self, tags):
- ''' given a list of tags that the user has specified, return two lists:
- matched_tags: tags were found within the current play and match those given
- by the user
- unmatched_tags: tags that were found within the current play but do not match
- any provided by the user '''
-
- # gather all the tags in all the tasks and handlers into one list
- # FIXME: isn't this in self.tags already?
-
- all_tags = []
- for task in self._tasks:
- if not task.meta:
- all_tags.extend(task.tags)
- for handler in self._handlers:
- all_tags.extend(handler.tags)
-
- # compare the lists of tags using sets and return the matched and unmatched
- all_tags_set = set(all_tags)
- tags_set = set(tags)
-
- matched_tags = all_tags_set.intersection(tags_set)
- unmatched_tags = all_tags_set.difference(tags_set)
-
- a = set(['always'])
- u = set(['untagged'])
- if 'always' in all_tags_set:
- matched_tags = matched_tags.union(a)
- unmatched_tags = all_tags_set.difference(a)
-
- if 'all' in tags_set:
- matched_tags = matched_tags.union(all_tags_set)
- unmatched_tags = set()
-
- if 'tagged' in tags_set:
- matched_tags = all_tags_set.difference(u)
- unmatched_tags = u
-
- if 'untagged' in tags_set and 'untagged' in all_tags_set:
- matched_tags = matched_tags.union(u)
- unmatched_tags = unmatched_tags.difference(u)
-
- return matched_tags, unmatched_tags
-
- # *************************************************
-
- def _late_merge_role_tags(self):
- # build a local dict of tags for roles
- role_tags = {}
- for task in self._ds['tasks']:
- if 'role_name' in task:
- this_role = task['role_name'] + "-" + task['vars']['role_uuid']
-
- if this_role not in role_tags:
- role_tags[this_role] = []
-
- if 'tags' in task['vars']:
- if isinstance(task['vars']['tags'], basestring):
- role_tags[this_role] += shlex.split(task['vars']['tags'])
- else:
- role_tags[this_role] += task['vars']['tags']
-
- # apply each role's tags to its tasks
- for idx, val in enumerate(self._tasks):
- if getattr(val, 'role_name', None) is not None:
- this_role = val.role_name + "-" + val.module_vars['role_uuid']
- if this_role in role_tags:
- self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
-
- # *************************************************
-
- def _update_vars_files_for_host(self, host, vault_password=None):
-
- def generate_filenames(host, inject, filename):
-
- """ Render the raw filename into 3 forms """
-
- # filename2 is the templated version of the filename, which will
- # be fully rendered if any variables contained within it are
- # non-inventory related
- filename2 = template(self.basedir, filename, self.vars)
-
- # filename3 is the same as filename2, but when the host object is
- # available, inventory variables will be expanded as well since the
- # name is templated with the injected variables
- filename3 = filename2
- if host is not None:
- filename3 = template(self.basedir, filename2, inject)
-
- # filename4 is the dwim'd path, but may also be mixed-scope, so we use
- # both play scoped vars and host scoped vars to template the filepath
- if utils.contains_vars(filename3) and host is not None:
- inject.update(self.vars)
- filename4 = template(self.basedir, filename3, inject)
- filename4 = utils.path_dwim(self.basedir, filename4)
- else:
- filename4 = utils.path_dwim(self.basedir, filename3)
-
- return filename2, filename3, filename4
-
-
- def update_vars_cache(host, data, target_filename=None):
-
- """ update a host's varscache with new var data """
-
- self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
- if target_filename:
- self.playbook.callbacks.on_import_for_host(host, target_filename)
-
- def process_files(filename, filename2, filename3, filename4, host=None):
-
- """ pseudo-algorithm for deciding where new vars should go """
-
- data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
- if data:
- if type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
- if host is not None:
- target_filename = None
- if utils.contains_vars(filename2):
- if not utils.contains_vars(filename3):
- target_filename = filename3
- else:
- target_filename = filename4
- update_vars_cache(host, data, target_filename=target_filename)
- else:
- self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
- # we did process this file
- return True
- # we did not process this file
- return False
-
- # Enforce that vars_files is always a list
- if type(self.vars_files) != list:
- self.vars_files = [ self.vars_files ]
-
- # Build an inject if this is a host run started by self.update_vars_files
- if host is not None:
- inject = {}
- inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
- inject.update(self.playbook.SETUP_CACHE.get(host, {}))
- inject.update(self.playbook.VARS_CACHE.get(host, {}))
- else:
- inject = None
-
- processed = []
- for filename in self.vars_files:
- if type(filename) == list:
- # loop over all filenames, loading the first one, and failing if none found
- found = False
- sequence = []
- for real_filename in filename:
- filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
- sequence.append(filename4)
- if os.path.exists(filename4):
- found = True
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
- elif host is not None:
- self.playbook.callbacks.on_not_import_for_host(host, filename4)
- if found:
- break
- if not found and host is not None:
- raise errors.AnsibleError(
- "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
- )
- else:
- # just one filename supplied, load it!
- filename2, filename3, filename4 = generate_filenames(host, inject, filename)
- if utils.contains_vars(filename4):
- continue
- if process_files(filename, filename2, filename3, filename4, host=host):
- processed.append(filename)
-
- return processed
diff --git a/v1/ansible/playbook/task.py b/v1/ansible/playbook/task.py
deleted file mode 100644
index 70c1bc8df6..0000000000
--- a/v1/ansible/playbook/task.py
+++ /dev/null
@@ -1,346 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import errors
-from ansible import utils
-from ansible.module_utils.splitter import split_args
-import os
-import ansible.utils.template as template
-import sys
-
-class Task(object):
-
- _t_common = [
- 'action', 'always_run', 'any_errors_fatal', 'args', 'become', 'become_method', 'become_pass',
- 'become_user', 'changed_when', 'delay', 'delegate_to', 'environment', 'failed_when',
- 'first_available_file', 'ignore_errors', 'local_action', 'meta', 'name', 'no_log',
- 'notify', 'register', 'remote_user', 'retries', 'run_once', 'su', 'su_pass', 'su_user',
- 'sudo', 'sudo_pass', 'sudo_user', 'tags', 'transport', 'until', 'when',
- ]
-
- __slots__ = [
- 'async_poll_interval', 'async_seconds', 'default_vars', 'first_available_file',
- 'items_lookup_plugin', 'items_lookup_terms', 'module_args', 'module_name', 'module_vars',
- 'notified_by', 'play', 'play_file_vars', 'play_vars', 'role_name', 'role_params', 'role_vars',
- ] + _t_common
-
- # to prevent typos and such
- VALID_KEYS = frozenset([
- 'async', 'connection', 'include', 'poll',
- ] + _t_common)
-
- def __init__(self, play, ds, module_vars=None, play_vars=None, play_file_vars=None, role_vars=None, role_params=None, default_vars=None, additional_conditions=None, role_name=None):
- ''' constructor loads from a task or handler datastructure '''
-
- # meta directives are used to tell things like ansible/playbook to run
- # operations like handler execution. Meta tasks are not executed
- # normally.
- if 'meta' in ds:
- self.meta = ds['meta']
- self.tags = []
- self.module_vars = module_vars
- self.role_name = role_name
- return
- else:
- self.meta = None
-
-
- library = os.path.join(play.basedir, 'library')
- if os.path.exists(library):
- utils.plugins.module_finder.add_directory(library)
-
- for x in ds.keys():
-
- # code to allow for saying "modulename: args" versus "action: modulename args"
- if x in utils.plugins.module_finder:
-
- if 'action' in ds:
- raise errors.AnsibleError("multiple actions specified in task: '%s' and '%s'" % (x, ds.get('name', ds['action'])))
- if isinstance(ds[x], dict):
- if 'args' in ds:
- raise errors.AnsibleError("can't combine args: and a dict for %s: in task %s" % (x, ds.get('name', "%s: %s" % (x, ds[x]))))
- ds['args'] = ds[x]
- ds[x] = ''
- elif ds[x] is None:
- ds[x] = ''
- if not isinstance(ds[x], basestring):
- raise errors.AnsibleError("action specified for task %s has invalid type %s" % (ds.get('name', "%s: %s" % (x, ds[x])), type(ds[x])))
- ds['action'] = x + " " + ds[x]
- ds.pop(x)
-
- # code to allow "with_glob" and to reference a lookup plugin named glob
- elif x.startswith("with_"):
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
-
- plugin_name = x.replace("with_","")
- if plugin_name in utils.plugins.lookup_loader:
- ds['items_lookup_plugin'] = plugin_name
- ds['items_lookup_terms'] = ds[x]
- ds.pop(x)
- else:
- raise errors.AnsibleError("cannot find lookup plugin named %s for usage in with_%s" % (plugin_name, plugin_name))
-
- elif x in [ 'changed_when', 'failed_when', 'when']:
- if isinstance(ds[x], basestring):
- param = ds[x].strip()
- # Only a variable, no logic
- if (param.startswith('{{') and
- param.find('}}') == len(ds[x]) - 2 and
- param.find('|') == -1):
- utils.warning("It is unnecessary to use '{{' in conditionals, leave variables in loop expressions bare.")
- elif x.startswith("when_"):
- utils.deprecated("The 'when_' conditional has been removed. Switch to using the regular unified 'when' statements as described on docs.ansible.com.","1.5", removed=True)
-
- if 'when' in ds:
- raise errors.AnsibleError("multiple when_* statements specified in task %s" % (ds.get('name', ds['action'])))
- when_name = x.replace("when_","")
- ds['when'] = "%s %s" % (when_name, ds[x])
- ds.pop(x)
- elif not x in Task.VALID_KEYS:
- raise errors.AnsibleError("%s is not a legal parameter in an Ansible task or handler" % x)
-
- self.module_vars = module_vars
- self.play_vars = play_vars
- self.play_file_vars = play_file_vars
- self.role_vars = role_vars
- self.role_params = role_params
- self.default_vars = default_vars
- self.play = play
-
- # load various attributes
- self.name = ds.get('name', None)
- self.tags = [ 'untagged' ]
- self.register = ds.get('register', None)
- self.environment = ds.get('environment', play.environment)
- self.role_name = role_name
- self.no_log = utils.boolean(ds.get('no_log', "false")) or self.play.no_log
- self.run_once = utils.boolean(ds.get('run_once', 'false'))
-
- #Code to allow do until feature in a Task
- if 'until' in ds:
- if not ds.get('register'):
- raise errors.AnsibleError("register keyword is mandatory when using do until feature")
- self.module_vars['delay'] = ds.get('delay', 5)
- self.module_vars['retries'] = ds.get('retries', 3)
- self.module_vars['register'] = ds.get('register', None)
- self.until = ds.get('until')
- self.module_vars['until'] = self.until
-
- # rather than simple key=value args on the options line, these represent structured data and the values
- # can be hashes and lists, not just scalars
- self.args = ds.get('args', {})
-
- # get remote_user for task, then play, then playbook
- if ds.get('remote_user') is not None:
- self.remote_user = ds.get('remote_user')
- elif ds.get('remote_user', play.remote_user) is not None:
- self.remote_user = ds.get('remote_user', play.remote_user)
- else:
- self.remote_user = ds.get('remote_user', play.playbook.remote_user)
-
- # Fail out if user specifies privilege escalation params in conflict
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('become') or ds.get('become_user') or ds.get('become_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("become", "become_user", "become_pass") and su params "su", "su_user", "sudo_pass" in task: %s' % self.name)
-
- if (ds.get('sudo') or ds.get('sudo_user') or ds.get('sudo_pass')) and (ds.get('su') or ds.get('su_user') or ds.get('su_pass')):
- raise errors.AnsibleError('incompatible parameters ("su", "su_user", "su_pass") and sudo params "sudo", "sudo_user", "sudo_pass" in task: %s' % self.name)
-
- self.become = utils.boolean(ds.get('become', play.become))
- self.become_method = ds.get('become_method', play.become_method)
- self.become_user = ds.get('become_user', play.become_user)
- self.become_pass = ds.get('become_pass', play.playbook.become_pass)
-
- # set only if passed in current task data
- if 'sudo' in ds or 'sudo_user' in ds:
- self.become_method='sudo'
-
- if 'sudo' in ds:
- self.become=ds['sudo']
- del ds['sudo']
- else:
- self.become=True
- if 'sudo_user' in ds:
- self.become_user = ds['sudo_user']
- del ds['sudo_user']
- if 'sudo_pass' in ds:
- self.become_pass = ds['sudo_pass']
- del ds['sudo_pass']
-
- elif 'su' in ds or 'su_user' in ds:
- self.become_method='su'
-
- if 'su' in ds:
- self.become=ds['su']
- else:
- self.become=True
- del ds['su']
- if 'su_user' in ds:
- self.become_user = ds['su_user']
- del ds['su_user']
- if 'su_pass' in ds:
- self.become_pass = ds['su_pass']
- del ds['su_pass']
-
- # Both are defined
- if ('action' in ds) and ('local_action' in ds):
- raise errors.AnsibleError("the 'action' and 'local_action' attributes can not be used together")
- # Both are NOT defined
- elif (not 'action' in ds) and (not 'local_action' in ds):
- raise errors.AnsibleError("'action' or 'local_action' attribute missing in task \"%s\"" % ds.get('name', '<Unnamed>'))
- # Only one of them is defined
- elif 'local_action' in ds:
- self.action = ds.get('local_action', '')
- self.delegate_to = '127.0.0.1'
- else:
- self.action = ds.get('action', '')
- self.delegate_to = ds.get('delegate_to', None)
- self.transport = ds.get('connection', ds.get('transport', play.transport))
-
- if isinstance(self.action, dict):
- if 'module' not in self.action:
- raise errors.AnsibleError("'module' attribute missing from action in task \"%s\"" % ds.get('name', '%s' % self.action))
- if self.args:
- raise errors.AnsibleError("'args' cannot be combined with dict 'action' in task \"%s\"" % ds.get('name', '%s' % self.action))
- self.args = self.action
- self.action = self.args.pop('module')
-
- # delegate_to can use variables
- if not (self.delegate_to is None):
- # delegate_to: localhost should use local transport
- if self.delegate_to in ['127.0.0.1', 'localhost']:
- self.transport = 'local'
-
- # notified by is used by Playbook code to flag which hosts
- # need to run a notifier
- self.notified_by = []
-
- # if no name is specified, use the action line as the name
- if self.name is None:
- self.name = self.action
-
- # load various attributes
- self.when = ds.get('when', None)
- self.changed_when = ds.get('changed_when', None)
- self.failed_when = ds.get('failed_when', None)
-
- # combine the default and module vars here for use in templating
- all_vars = self.default_vars.copy()
- all_vars = utils.combine_vars(all_vars, self.play_vars)
- all_vars = utils.combine_vars(all_vars, self.play_file_vars)
- all_vars = utils.combine_vars(all_vars, self.role_vars)
- all_vars = utils.combine_vars(all_vars, self.module_vars)
- all_vars = utils.combine_vars(all_vars, self.role_params)
-
- self.async_seconds = ds.get('async', 0) # not async by default
- self.async_seconds = template.template_from_string(play.basedir, self.async_seconds, all_vars)
- self.async_seconds = int(self.async_seconds)
- self.async_poll_interval = ds.get('poll', 10) # default poll = 10 seconds
- self.async_poll_interval = template.template_from_string(play.basedir, self.async_poll_interval, all_vars)
- self.async_poll_interval = int(self.async_poll_interval)
- self.notify = ds.get('notify', [])
- self.first_available_file = ds.get('first_available_file', None)
-
- self.items_lookup_plugin = ds.get('items_lookup_plugin', None)
- self.items_lookup_terms = ds.get('items_lookup_terms', None)
-
-
- self.ignore_errors = ds.get('ignore_errors', False)
- self.any_errors_fatal = ds.get('any_errors_fatal', play.any_errors_fatal)
-
- self.always_run = ds.get('always_run', False)
-
- # action should be a string
- if not isinstance(self.action, basestring):
- raise errors.AnsibleError("action is of type '%s' and not a string in task. name: %s" % (type(self.action).__name__, self.name))
-
- # notify can be a string or a list, store as a list
- if isinstance(self.notify, basestring):
- self.notify = [ self.notify ]
-
- # split the action line into a module name + arguments
- try:
- tokens = split_args(self.action)
- except Exception, e:
- if "unbalanced" in str(e):
- raise errors.AnsibleError("There was an error while parsing the task %s.\n" % repr(self.action) + \
- "Make sure quotes are matched or escaped properly")
- else:
- raise
- if len(tokens) < 1:
- raise errors.AnsibleError("invalid/missing action in task. name: %s" % self.name)
- self.module_name = tokens[0]
- self.module_args = ''
- if len(tokens) > 1:
- self.module_args = " ".join(tokens[1:])
-
- import_tags = self.module_vars.get('tags',[])
- if type(import_tags) in [int,float]:
- import_tags = str(import_tags)
- elif type(import_tags) in [str,unicode]:
- # allow the user to list comma delimited tags
- import_tags = import_tags.split(",")
-
- # handle mutually incompatible options
- incompatibles = [ x for x in [ self.first_available_file, self.items_lookup_plugin ] if x is not None ]
- if len(incompatibles) > 1:
- raise errors.AnsibleError("with_(plugin), and first_available_file are mutually incompatible in a single task")
-
- # make first_available_file accessible to Runner code
- if self.first_available_file:
- self.module_vars['first_available_file'] = self.first_available_file
- # make sure that the 'item' variable is set when using
- # first_available_file (issue #8220)
- if 'item' not in self.module_vars:
- self.module_vars['item'] = ''
-
- if self.items_lookup_plugin is not None:
- self.module_vars['items_lookup_plugin'] = self.items_lookup_plugin
- self.module_vars['items_lookup_terms'] = self.items_lookup_terms
-
- # allow runner to see delegate_to option
- self.module_vars['delegate_to'] = self.delegate_to
-
- # make some task attributes accessible to Runner code
- self.module_vars['ignore_errors'] = self.ignore_errors
- self.module_vars['register'] = self.register
- self.module_vars['changed_when'] = self.changed_when
- self.module_vars['failed_when'] = self.failed_when
- self.module_vars['always_run'] = self.always_run
-
- # tags allow certain parts of a playbook to be run without running the whole playbook
- apply_tags = ds.get('tags', None)
- if apply_tags is not None:
- if type(apply_tags) in [ str, unicode ]:
- self.tags.append(apply_tags)
- elif type(apply_tags) in [ int, float ]:
- self.tags.append(str(apply_tags))
- elif type(apply_tags) == list:
- self.tags.extend(apply_tags)
- self.tags.extend(import_tags)
-
- if len(self.tags) > 1:
- self.tags.remove('untagged')
-
- if additional_conditions:
- new_conditions = additional_conditions[:]
- if self.when:
- new_conditions.append(self.when)
- self.when = new_conditions
diff --git a/v1/ansible/runner/__init__.py b/v1/ansible/runner/__init__.py
deleted file mode 100644
index 4ff273778c..0000000000
--- a/v1/ansible/runner/__init__.py
+++ /dev/null
@@ -1,1517 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import multiprocessing
-import signal
-import os
-import pwd
-import Queue
-import random
-import traceback
-import tempfile
-import time
-import collections
-import socket
-import base64
-import sys
-import pipes
-import jinja2
-import subprocess
-import getpass
-
-import ansible.constants as C
-import ansible.inventory
-from ansible import utils
-from ansible.utils import template
-from ansible.utils import check_conditional
-from ansible.utils import string_functions
-from ansible import errors
-from ansible import module_common
-import poller
-import connection
-from return_data import ReturnData
-from ansible.callbacks import DefaultRunnerCallbacks, vv
-from ansible.module_common import ModuleReplacer
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.cache import FactCache
-from ansible.utils import update_hash
-
-module_replacer = ModuleReplacer(strip_comments=False)
-
-try:
- from hashlib import sha1
-except ImportError:
- from sha import sha as sha1
-
-HAS_ATFORK=True
-try:
- from Crypto.Random import atfork
-except ImportError:
- HAS_ATFORK=False
-
-multiprocessing_runner = None
-
-OUTPUT_LOCKFILE = tempfile.TemporaryFile()
-PROCESS_LOCKFILE = tempfile.TemporaryFile()
-
-################################################
-
-def _executor_hook(job_queue, result_queue, new_stdin):
-
- # attempt workaround of https://github.com/newsapps/beeswithmachineguns/issues/17
- # this function also not present in CentOS 6
- if HAS_ATFORK:
- atfork()
-
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- while not job_queue.empty():
- try:
- host = job_queue.get(block=False)
- return_data = multiprocessing_runner._executor(host, new_stdin)
- result_queue.put(return_data)
- except Queue.Empty:
- pass
- except:
- traceback.print_exc()
-
-class HostVars(dict):
- ''' A special view of vars_cache that adds values from the inventory when needed. '''
-
- def __init__(self, vars_cache, inventory, vault_password=None):
- self.vars_cache = vars_cache
- self.inventory = inventory
- self.lookup = {}
- self.update(vars_cache)
- self.vault_password = vault_password
-
- def __getitem__(self, host):
- if host not in self.lookup:
- result = self.inventory.get_variables(host, vault_password=self.vault_password).copy()
- result.update(self.vars_cache.get(host, {}))
- self.lookup[host] = template.template('.', result, self.vars_cache)
- return self.lookup[host]
-
-
-class Runner(object):
- ''' core API interface to ansible '''
-
- # see bin/ansible for how this is used...
-
- def __init__(self,
- host_list=C.DEFAULT_HOST_LIST, # ex: /etc/ansible/hosts, legacy usage
- module_path=None, # ex: /usr/share/ansible
- module_name=C.DEFAULT_MODULE_NAME, # ex: copy
- module_args=C.DEFAULT_MODULE_ARGS, # ex: "src=/tmp/a dest=/tmp/b"
- forks=C.DEFAULT_FORKS, # parallelism level
- timeout=C.DEFAULT_TIMEOUT, # SSH timeout
- pattern=C.DEFAULT_PATTERN, # which hosts? ex: 'all', 'acme.example.org'
- remote_user=C.DEFAULT_REMOTE_USER, # ex: 'username'
- remote_pass=C.DEFAULT_REMOTE_PASS, # ex: 'password123' or None if using key
- remote_port=None, # if SSH on different ports
- private_key_file=C.DEFAULT_PRIVATE_KEY_FILE, # if not using keys/passwords
- background=0, # async poll every X seconds, else 0 for non-async
- basedir=None, # directory of playbook, if applicable
- setup_cache=None, # used to share fact data w/ other tasks
- vars_cache=None, # used to store variables about hosts
- transport=C.DEFAULT_TRANSPORT, # 'ssh', 'paramiko', 'local'
- conditional='True', # run only if this fact expression evals to true
- callbacks=None, # used for output
- module_vars=None, # a playbooks internals thing
- play_vars=None, #
- play_file_vars=None, #
- role_vars=None, #
- role_params=None, #
- default_vars=None, #
- extra_vars=None, # extra vars specified with he playbook(s)
- is_playbook=False, # running from playbook or not?
- inventory=None, # reference to Inventory object
- subset=None, # subset pattern
- check=False, # don't make any changes, just try to probe for potential changes
- diff=False, # whether to show diffs for template files that change
- environment=None, # environment variables (as dict) to use inside the command
- complex_args=None, # structured data in addition to module_args, must be a dict
- error_on_undefined_vars=C.DEFAULT_UNDEFINED_VAR_BEHAVIOR, # ex. False
- accelerate=False, # use accelerated connection
- accelerate_ipv6=False, # accelerated connection w/ IPv6
- accelerate_port=None, # port to use with accelerated connection
- vault_pass=None,
- run_hosts=None, # an optional list of pre-calculated hosts to run on
- no_log=False, # option to enable/disable logging for a given task
- run_once=False, # option to enable/disable host bypass loop for a given task
- become=False, # whether to run privilege escalation or not
- become_method=C.DEFAULT_BECOME_METHOD,
- become_user=C.DEFAULT_BECOME_USER, # ex: 'root'
- become_pass=C.DEFAULT_BECOME_PASS, # ex: 'password123' or None
- become_exe=C.DEFAULT_BECOME_EXE, # ex: /usr/local/bin/sudo
- ):
-
- # used to lock multiprocess inputs and outputs at various levels
- self.output_lockfile = OUTPUT_LOCKFILE
- self.process_lockfile = PROCESS_LOCKFILE
-
- if not complex_args:
- complex_args = {}
-
- # storage & defaults
- self.check = check
- self.diff = diff
- self.setup_cache = utils.default(setup_cache, lambda: ansible.cache.FactCache())
- self.vars_cache = utils.default(vars_cache, lambda: collections.defaultdict(dict))
- self.basedir = utils.default(basedir, lambda: os.getcwd())
- self.callbacks = utils.default(callbacks, lambda: DefaultRunnerCallbacks())
- self.generated_jid = str(random.randint(0, 999999999999))
- self.transport = transport
- self.inventory = utils.default(inventory, lambda: ansible.inventory.Inventory(host_list))
-
- self.module_vars = utils.default(module_vars, lambda: {})
- self.play_vars = utils.default(play_vars, lambda: {})
- self.play_file_vars = utils.default(play_file_vars, lambda: {})
- self.role_vars = utils.default(role_vars, lambda: {})
- self.role_params = utils.default(role_params, lambda: {})
- self.default_vars = utils.default(default_vars, lambda: {})
- self.extra_vars = utils.default(extra_vars, lambda: {})
-
- self.always_run = None
- self.connector = connection.Connector(self)
- self.conditional = conditional
- self.delegate_to = None
- self.module_name = module_name
- self.forks = int(forks)
- self.pattern = pattern
- self.module_args = module_args
- self.timeout = timeout
- self.remote_user = remote_user
- self.remote_pass = remote_pass
- self.remote_port = remote_port
- self.private_key_file = private_key_file
- self.background = background
- self.become = become
- self.become_method = become_method
- self.become_user_var = become_user
- self.become_user = None
- self.become_pass = become_pass
- self.become_exe = become_exe
- self.is_playbook = is_playbook
- self.environment = environment
- self.complex_args = complex_args
- self.error_on_undefined_vars = error_on_undefined_vars
- self.accelerate = accelerate
- self.accelerate_port = accelerate_port
- self.accelerate_ipv6 = accelerate_ipv6
- self.callbacks.runner = self
- self.omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()
- self.vault_pass = vault_pass
- self.no_log = no_log
- self.run_once = run_once
-
- if self.transport == 'smart':
- # If the transport is 'smart', check to see if certain conditions
- # would prevent us from using ssh, and fallback to paramiko.
- # 'smart' is the default since 1.2.1/1.3
- self.transport = "ssh"
- if sys.platform.startswith('darwin') and self.remote_pass:
- # due to a current bug in sshpass on OSX, which can trigger
- # a kernel panic even for non-privileged users, we revert to
- # paramiko on that OS when a SSH password is specified
- self.transport = "paramiko"
- else:
- # see if SSH can support ControlPersist if not use paramiko
- cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
- if "Bad configuration option" in err:
- self.transport = "paramiko"
-
- # save the original transport, in case it gets
- # changed later via options like accelerate
- self.original_transport = self.transport
-
- # misc housekeeping
- if subset and self.inventory._subset is None:
- # don't override subset when passed from playbook
- self.inventory.subset(subset)
-
- # If we get a pre-built list of hosts to run on, from say a playbook, use them.
- # Also where we will store the hosts to run on once discovered
- self.run_hosts = run_hosts
-
- if self.transport == 'local':
- self.remote_user = pwd.getpwuid(os.geteuid())[0]
-
- if module_path is not None:
- for i in module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- utils.plugins.push_basedir(self.basedir)
-
- # ensure we are using unique tmp paths
- random.seed()
- # *****************************************************
-
- def _complex_args_hack(self, complex_args, module_args):
- """
- ansible-playbook both allows specifying key=value string arguments and complex arguments
- however not all modules use our python common module system and cannot
- access these. An example might be a Bash module. This hack allows users to still pass "args"
- as a hash of simple scalars to those arguments and is short term. We could technically
- just feed JSON to the module, but that makes it hard on Bash consumers. The way this is implemented
- it does mean values in 'args' have LOWER priority than those on the key=value line, allowing
- args to provide yet another way to have pluggable defaults.
- """
- if complex_args is None:
- return module_args
- if not isinstance(complex_args, dict):
- raise errors.AnsibleError("complex arguments are not a dictionary: %s" % complex_args)
- for (k,v) in complex_args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- return module_args
-
- # *****************************************************
-
- def _transfer_str(self, conn, tmp, name, data):
- ''' transfer string to remote file '''
-
- if type(data) == dict:
- data = utils.jsonify(data)
-
- afd, afile = tempfile.mkstemp()
- afo = os.fdopen(afd, 'w')
- try:
- if not isinstance(data, unicode):
- #ensure the data is valid UTF-8
- data.decode('utf-8')
- else:
- data = data.encode('utf-8')
- afo.write(data)
- except:
- raise errors.AnsibleError("failure encoding into utf-8")
- afo.flush()
- afo.close()
-
- remote = conn.shell.join_path(tmp, name)
- try:
- conn.put_file(afile, remote)
- finally:
- os.unlink(afile)
- return remote
-
- # *****************************************************
-
- def _compute_environment_string(self, conn, inject=None):
- ''' what environment variables to use when running the command? '''
-
- enviro = {}
- if self.environment:
- enviro = template.template(self.basedir, self.environment, inject, convert_bare=True)
- enviro = utils.safe_eval(enviro)
- if type(enviro) != dict:
- raise errors.AnsibleError("environment must be a dictionary, received %s" % enviro)
-
- return conn.shell.env_prefix(**enviro)
-
- # *****************************************************
-
- def _compute_delegate(self, password, remote_inject):
-
- """ Build a dictionary of all attributes for the delegate host """
-
- delegate = {}
-
- # allow delegated host to be templated
- delegate['inject'] = remote_inject.copy()
-
- # set any interpreters
- interpreters = []
- for i in delegate['inject']:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- interpreters.append(i)
- for i in interpreters:
- del delegate['inject'][i]
- port = C.DEFAULT_REMOTE_PORT
-
- # get the vars for the delegate by its name
- try:
- this_info = delegate['inject']['hostvars'][self.delegate_to]
- except:
- # make sure the inject is empty for non-inventory hosts
- this_info = {}
-
- # get the real ssh_address for the delegate
- # and allow ansible_ssh_host to be templated
- delegate['ssh_host'] = template.template(
- self.basedir,
- this_info.get('ansible_ssh_host', self.delegate_to),
- this_info,
- fail_on_undefined=True
- )
-
- delegate['port'] = this_info.get('ansible_ssh_port', port)
- delegate['user'] = self._compute_delegate_user(self.delegate_to, delegate['inject'])
- delegate['pass'] = this_info.get('ansible_ssh_pass', password)
- delegate['private_key_file'] = this_info.get('ansible_ssh_private_key_file', self.private_key_file)
- delegate['transport'] = this_info.get('ansible_connection', self.transport)
- delegate['become_pass'] = this_info.get('ansible_become_pass', this_info.get('ansible_ssh_pass', self.become_pass))
-
- # Last chance to get private_key_file from global variables.
- # this is useful if delegated host is not defined in the inventory
- if delegate['private_key_file'] is None:
- delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
-
- if delegate['private_key_file'] is not None:
- delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])
-
- for i in this_info:
- if i.startswith("ansible_") and i.endswith("_interpreter"):
- delegate['inject'][i] = this_info[i]
-
- return delegate
-
- def _compute_delegate_user(self, host, inject):
-
- """ Calculate the remote user based on an order of preference """
-
- # inventory > playbook > original_host
-
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- thisuser = None
-
- try:
- if host in inject['hostvars']:
- if inject['hostvars'][host].get('ansible_ssh_user'):
- # user for delegate host in inventory
- thisuser = inject['hostvars'][host].get('ansible_ssh_user')
- else:
- # look up the variables for the host directly from inventory
- host_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- if 'ansible_ssh_user' in host_vars:
- thisuser = host_vars['ansible_ssh_user']
- except errors.AnsibleError, e:
- # the hostname was not found in the inventory, so
- # we just ignore this and try the next method
- pass
-
- if thisuser is None and self.remote_user:
- # user defined by play/runner
- thisuser = self.remote_user
-
- if thisuser is not None:
- actual_user = thisuser
- else:
- # fallback to the inventory user of the play host
- #actual_user = inject.get('ansible_ssh_user', actual_user)
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
-
- return actual_user
-
- def _count_module_args(self, args, allow_dupes=False):
- '''
- Count the number of k=v pairs in the supplied module args. This is
- basically a specialized version of parse_kv() from utils with a few
- minor changes.
- '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except Exception, e:
- if "unbalanced jinja2 block or quotes" in str(e):
- raise errors.AnsibleError("error parsing argument string '%s', try quoting the entire line." % args)
- else:
- raise
- for x in vargs:
- quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
- if "=" in x and not quoted:
- k, v = x.split("=",1)
- is_shell_module = self.module_name in ('command', 'shell')
- is_shell_param = k in ('creates', 'removes', 'chdir', 'executable')
- if k in options and not allow_dupes:
- if not(is_shell_module and not is_shell_param):
- raise errors.AnsibleError("a duplicate parameter was found in the argument string (%s)" % k)
- if is_shell_module and is_shell_param or not is_shell_module:
- options[k] = v
- return len(options)
-
-
- # *****************************************************
-
- def _execute_module(self, conn, tmp, module_name, args,
- async_jid=None, async_module=None, async_limit=None, inject=None, persist_files=False, complex_args=None, delete_remote_tmp=True):
-
- ''' transfer and run a module along with its arguments on the remote side'''
-
- # hack to support fireball mode
- if module_name == 'fireball':
- args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
- if 'port' not in args:
- args += " port=%s" % C.ZEROMQ_PORT
-
- (
- module_style,
- shebang,
- module_data
- ) = self._configure_module(conn, module_name, args, inject, complex_args)
-
- # a remote tmp path may be necessary and not already created
- if self._late_needs_tmp_path(conn, tmp, module_style):
- tmp = self._make_tmp_path(conn)
-
- remote_module_path = conn.shell.join_path(tmp, module_name)
-
- if (module_style != 'new'
- or async_jid is not None
- or not conn.has_pipelining
- or not C.ANSIBLE_SSH_PIPELINING
- or C.DEFAULT_KEEP_REMOTE_FILES
- or self.become_method == 'su'):
- self._transfer_str(conn, tmp, module_name, module_data)
-
- environment_string = self._compute_environment_string(conn, inject)
-
- if "tmp" in tmp and (self.become and self.become_user != 'root'):
- # deal with possible umask issues once you become another user
- self._remote_chmod(conn, 'a+r', remote_module_path, tmp)
-
- cmd = ""
- in_data = None
- if module_style != 'new':
- if 'CHECKMODE=True' in args:
- # if module isn't using AnsibleModuleCommon infrastructure we can't be certain it knows how to
- # do --check mode, so to be safe we will not run it.
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot yet run check mode against old-style modules"))
- elif 'NO_LOG' in args:
- return ReturnData(conn=conn, result=dict(skipped=True, msg="cannot use no_log: with old-style modules"))
-
- args = template.template(self.basedir, args, inject)
-
- # decide whether we need to transfer JSON or key=value
- argsfile = None
- if module_style == 'non_native_want_json':
- if complex_args:
- complex_args.update(utils.parse_kv(args))
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(complex_args))
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', utils.jsonify(utils.parse_kv(args)))
-
- else:
- argsfile = self._transfer_str(conn, tmp, 'arguments', args)
-
- if self.become and self.become_user != 'root':
- # deal with possible umask issues once become another user
- self._remote_chmod(conn, 'a+r', argsfile, tmp)
-
- if async_jid is None:
- cmd = "%s %s" % (remote_module_path, argsfile)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
- else:
- if async_jid is None:
- if conn.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES and not self.become_method == 'su':
- in_data = module_data
- else:
- cmd = "%s" % (remote_module_path)
- else:
- cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
-
- if not shebang:
- raise errors.AnsibleError("module is missing interpreter line")
-
- rm_tmp = None
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if not self.become or self.become_user == 'root':
- # not sudoing or sudoing to root, so can cleanup files in the same step
- rm_tmp = tmp
-
- cmd = conn.shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
- cmd = cmd.strip()
-
- sudoable = True
- if module_name == "accelerate":
- # always run the accelerate module as the user
- # specified in the play, not the become_user
- sudoable = False
-
- res = self._low_level_exec_command(conn, cmd, tmp, become=self.become, sudoable=sudoable, in_data=in_data)
-
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
- if self.become and self.become_user != 'root':
- # not becoming root, so maybe can't delete files as that other user
- # have to clean up temp files as original user in a second step
- cmd2 = conn.shell.remove(tmp, recurse=True)
- self._low_level_exec_command(conn, cmd2, tmp, sudoable=False)
-
- data = utils.parse_json(res['stdout'], from_remote=True, no_exceptions=True)
- if 'parsed' in data and data['parsed'] == False:
- data['msg'] += res['stderr']
- return ReturnData(conn=conn, result=data)
-
- # *****************************************************
-
- def _executor(self, host, new_stdin):
- ''' handler for multiprocessing library '''
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- try:
- self._new_stdin = new_stdin
- if not new_stdin and fileno is not None:
- try:
- self._new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
-
- exec_rc = self._executor_internal(host, new_stdin)
- if type(exec_rc) != ReturnData:
- raise Exception("unexpected return type: %s" % type(exec_rc))
- # redundant, right?
- if not exec_rc.comm_ok:
- self.callbacks.on_unreachable(host, exec_rc.result)
- return exec_rc
- except errors.AnsibleError, ae:
- msg = str(ae)
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
- except Exception:
- msg = traceback.format_exc()
- self.callbacks.on_unreachable(host, msg)
- return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
-
- # *****************************************************
-
- def get_combined_cache(self):
- # merge the VARS and SETUP caches for this host
- combined_cache = self.setup_cache.copy()
- return utils.merge_hash(combined_cache, self.vars_cache)
-
- def get_inject_vars(self, host):
- host_variables = self.inventory.get_variables(host, vault_password=self.vault_pass)
- combined_cache = self.get_combined_cache()
-
- # use combined_cache and host_variables to template the module_vars
- # we update the inject variables with the data we're about to template
- # since some of the variables we'll be replacing may be contained there too
- module_vars_inject = utils.combine_vars(host_variables, combined_cache.get(host, {}))
- module_vars_inject = utils.combine_vars(self.module_vars, module_vars_inject)
- module_vars = template.template(self.basedir, self.module_vars, module_vars_inject)
-
- # remove bad variables from the module vars, which may be in there due
- # the way role declarations are specified in playbooks
- if 'tags' in module_vars:
- del module_vars['tags']
- if 'when' in module_vars:
- del module_vars['when']
-
- # start building the dictionary of injected variables
- inject = {}
-
- # default vars are the lowest priority
- inject = utils.combine_vars(inject, self.default_vars)
- # next come inventory variables for the host
- inject = utils.combine_vars(inject, host_variables)
- # then the setup_cache which contains facts gathered
- inject = utils.combine_vars(inject, self.setup_cache.get(host, {}))
- # next come variables from vars and vars files
- inject = utils.combine_vars(inject, self.play_vars)
- inject = utils.combine_vars(inject, self.play_file_vars)
- # next come variables from role vars/main.yml files
- inject = utils.combine_vars(inject, self.role_vars)
- # then come the module variables
- inject = utils.combine_vars(inject, module_vars)
- # followed by vars_cache things (set_fact, include_vars, and
- # vars_files which had host-specific templating done)
- inject = utils.combine_vars(inject, self.vars_cache.get(host, {}))
- # role parameters next
- inject = utils.combine_vars(inject, self.role_params)
- # and finally -e vars are the highest priority
- inject = utils.combine_vars(inject, self.extra_vars)
- # and then special vars
- inject.setdefault('ansible_ssh_user', self.remote_user)
- inject['group_names'] = host_variables.get('group_names', [])
- inject['groups'] = self.inventory.groups_list()
- inject['vars'] = self.module_vars
- inject['defaults'] = self.default_vars
- inject['environment'] = self.environment
- inject['playbook_dir'] = os.path.abspath(self.basedir)
- inject['omit'] = self.omit_token
- inject['combined_cache'] = combined_cache
-
- return inject
-
- def _executor_internal(self, host, new_stdin):
- ''' executes any module one or more times '''
-
- # We build the proper injected dictionary for all future
- # templating operations in this run
- inject = self.get_inject_vars(host)
-
- # Then we selectively merge some variable dictionaries down to a
- # single dictionary, used to template the HostVars for this host
- temp_vars = self.inventory.get_variables(host, vault_password=self.vault_pass)
- temp_vars = utils.combine_vars(temp_vars, inject['combined_cache'] )
- temp_vars = utils.combine_vars(temp_vars, {'groups': inject['groups']})
- temp_vars = utils.combine_vars(temp_vars, self.play_vars)
- temp_vars = utils.combine_vars(temp_vars, self.play_file_vars)
- temp_vars = utils.combine_vars(temp_vars, self.extra_vars)
-
- hostvars = HostVars(temp_vars, self.inventory, vault_password=self.vault_pass)
-
- # and we save the HostVars in the injected dictionary so they
- # may be referenced from playbooks/templates
- inject['hostvars'] = hostvars
-
- host_connection = inject.get('ansible_connection', self.transport)
- if host_connection in [ 'paramiko', 'ssh', 'accelerate' ]:
- port = hostvars.get('ansible_ssh_port', self.remote_port)
- if port is None:
- port = C.DEFAULT_REMOTE_PORT
- else:
- # fireball, local, etc
- port = self.remote_port
-
- if self.inventory.basedir() is not None:
- inject['inventory_dir'] = self.inventory.basedir()
-
- if self.inventory.src() is not None:
- inject['inventory_file'] = self.inventory.src()
-
- # could be already set by playbook code
- inject.setdefault('ansible_version', utils.version_info(gitinfo=False))
-
- # allow with_foo to work in playbooks...
- items = None
- items_plugin = self.module_vars.get('items_lookup_plugin', None)
-
- if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
-
- basedir = self.basedir
- if '_original_file' in inject:
- basedir = os.path.dirname(inject['_original_file'])
- filesdir = os.path.join(basedir, '..', 'files')
- if os.path.exists(filesdir):
- basedir = filesdir
-
- try:
- items_terms = self.module_vars.get('items_lookup_terms', '')
- items_terms = template.template(basedir, items_terms, inject)
- items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=basedir).run(items_terms, inject=inject)
- except errors.AnsibleUndefinedVariable, e:
- if 'has no attribute' in str(e):
- # the undefined variable was an attribute of a variable that does
- # exist, so try and run this through the conditional check to see
- # if the user wanted to skip something on being undefined
- if utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=True):
- # the conditional check passed, so we have to fail here
- raise
- else:
- # the conditional failed, so we skip this task
- result = utils.jsonify(dict(changed=False, skipped=True))
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, result=result)
- except errors.AnsibleError, e:
- raise
- except Exception, e:
- raise errors.AnsibleError("Unexpected error while executing task: %s" % str(e))
-
- # strip out any jinja2 template syntax within
- # the data returned by the lookup plugin
- items = utils._clean_data_struct(items, from_remote=True)
- if items is None:
- items = []
- else:
- if type(items) != list:
- raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
-
- if len(items) and utils.is_list_of_strings(items) and self.module_name in ( 'apt', 'yum', 'pkgng', 'zypper', 'dnf' ):
- # hack for apt, yum, and pkgng so that with_items maps back into a single module call
- use_these_items = []
- for x in items:
- inject['item'] = x
- if not self.conditional or utils.check_conditional(self.conditional, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- use_these_items.append(x)
- inject['item'] = ",".join(use_these_items)
- items = None
-
- def _safe_template_complex_args(args, inject):
- # Ensure the complex args here are a dictionary, but
- # first template them if they contain a variable
-
- returned_args = args
- if isinstance(args, basestring):
- # If the complex_args were evaluated to a dictionary and there are
- # more keys in the templated version than the evaled version, some
- # param inserted additional keys (the template() call also runs
- # safe_eval on the var if it looks like it's a datastructure). If the
- # evaled_args are not a dict, it's most likely a whole variable (ie.
- # args: {{var}}), in which case there's no way to detect the proper
- # count of params in the dictionary.
-
- templated_args = template.template(self.basedir, args, inject, convert_bare=True)
- evaled_args = utils.safe_eval(args)
-
- if isinstance(evaled_args, dict) and len(evaled_args) > 0 and len(evaled_args) != len(templated_args):
- raise errors.AnsibleError("a variable tried to insert extra parameters into the args for this task")
-
- # set the returned_args to the templated_args
- returned_args = templated_args
-
- # and a final check to make sure the complex args are a dict
- if returned_args is not None and not isinstance(returned_args, dict):
- raise errors.AnsibleError("args must be a dictionary, received %s" % returned_args)
-
- return returned_args
-
- # logic to decide how to run things depends on whether with_items is used
- if items is None:
- complex_args = _safe_template_complex_args(self.complex_args, inject)
- return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port, complex_args=complex_args)
- elif len(items) > 0:
-
- # executing using with_items, so make multiple calls
- # TODO: refactor
-
- if self.background > 0:
- raise errors.AnsibleError("lookup plugins (with_*) cannot be used with async tasks")
-
- all_comm_ok = True
- all_changed = False
- all_failed = False
- results = []
- for x in items:
- # use a fresh inject for each item
- this_inject = inject.copy()
- this_inject['item'] = x
-
- complex_args = _safe_template_complex_args(self.complex_args, this_inject)
-
- result = self._executor_internal_inner(
- host,
- self.module_name,
- self.module_args,
- this_inject,
- port,
- complex_args=complex_args
- )
-
- if 'stdout' in result.result and 'stdout_lines' not in result.result:
- result.result['stdout_lines'] = result.result['stdout'].splitlines()
-
- results.append(result.result)
- if result.comm_ok == False:
- all_comm_ok = False
- all_failed = True
- break
- for x in results:
- if x.get('changed') == True:
- all_changed = True
- if (x.get('failed') == True) or ('failed_when_result' in x and [x['failed_when_result']] or [('rc' in x) and (x['rc'] != 0)])[0]:
- all_failed = True
- break
- msg = 'All items completed'
- if all_failed:
- msg = "One or more items failed."
- rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
- if not all_failed:
- del rd_result['failed']
- return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
- else:
- self.callbacks.on_skipped(host, None)
- return ReturnData(host=host, comm_ok=True, result=dict(changed=False, skipped=True))
-
- # *****************************************************
-
- def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False, complex_args=None):
- ''' decides how to invoke a module '''
-
- # late processing of parameterized become_user (with_items,..)
- if self.become_user_var is not None:
- self.become_user = template.template(self.basedir, self.become_user_var, inject)
-
- # module_name may be dynamic (but cannot contain {{ ansible_ssh_user }})
- module_name = template.template(self.basedir, module_name, inject)
-
- if module_name in utils.plugins.action_loader:
- if self.background != 0:
- raise errors.AnsibleError("async mode is not supported with the %s module" % module_name)
- handler = utils.plugins.action_loader.get(module_name, self)
- elif self.background == 0:
- handler = utils.plugins.action_loader.get('normal', self)
- else:
- handler = utils.plugins.action_loader.get('async', self)
-
- if type(self.conditional) != list:
- self.conditional = [ self.conditional ]
-
- for cond in self.conditional:
-
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result = dict(changed=False, skipped=True)
- if self.no_log:
- result = utils.censor_unlogged_data(result)
- self.callbacks.on_skipped(host, result)
- else:
- self.callbacks.on_skipped(host, inject.get('item',None))
- return ReturnData(host=host, result=utils.jsonify(result))
-
- if getattr(handler, 'setup', None) is not None:
- handler.setup(module_name, inject)
- conn = None
- actual_host = inject.get('ansible_ssh_host', host)
- # allow ansible_ssh_host to be templated
- actual_host = template.template(self.basedir, actual_host, inject, fail_on_undefined=True)
- actual_port = port
- actual_user = inject.get('ansible_ssh_user', self.remote_user)
- actual_pass = inject.get('ansible_ssh_pass', self.remote_pass)
- actual_transport = inject.get('ansible_connection', self.transport)
- actual_private_key_file = inject.get('ansible_ssh_private_key_file', self.private_key_file)
- actual_private_key_file = template.template(self.basedir, actual_private_key_file, inject, fail_on_undefined=True)
-
- self.become = utils.boolean(inject.get('ansible_become', inject.get('ansible_sudo', inject.get('ansible_su', self.become))))
- self.become_user = inject.get('ansible_become_user', inject.get('ansible_sudo_user', inject.get('ansible_su_user',self.become_user)))
- self.become_pass = inject.get('ansible_become_pass', inject.get('ansible_sudo_pass', inject.get('ansible_su_pass', self.become_pass)))
- self.become_exe = inject.get('ansible_become_exe', inject.get('ansible_sudo_exe', self.become_exe))
- self.become_method = inject.get('ansible_become_method', self.become_method)
-
- # select default root user in case self.become requested
- # but no user specified; happens e.g. in host vars when
- # just ansible_become=True is specified
- if self.become and self.become_user is None:
- self.become_user = 'root'
-
- if actual_private_key_file is not None:
- actual_private_key_file = os.path.expanduser(actual_private_key_file)
-
- if self.accelerate and actual_transport != 'local':
- #Fix to get the inventory name of the host to accelerate plugin
- if inject.get('ansible_ssh_host', None):
- self.accelerate_inventory_host = host
- else:
- self.accelerate_inventory_host = None
- # if we're using accelerated mode, force the
- # transport to accelerate
- actual_transport = "accelerate"
- if not self.accelerate_port:
- self.accelerate_port = C.ACCELERATE_PORT
-
- actual_port = inject.get('ansible_ssh_port', port)
-
- # the delegated host may have different SSH port configured, etc
- # and we need to transfer those, and only those, variables
- self.delegate_to = inject.get('delegate_to', None)
- if self.delegate_to:
- self.delegate_to = template.template(self.basedir, self.delegate_to, inject)
-
- if self.delegate_to is not None:
- delegate = self._compute_delegate(actual_pass, inject)
- actual_transport = delegate['transport']
- actual_host = delegate['ssh_host']
- actual_port = delegate['port']
- actual_user = delegate['user']
- actual_pass = delegate['pass']
- actual_private_key_file = delegate['private_key_file']
- self.become_pass = delegate.get('become_pass',delegate.get('sudo_pass'))
- inject = delegate['inject']
- # set resolved delegate_to into inject so modules can call _remote_checksum
- inject['delegate_to'] = self.delegate_to
-
- # user/pass may still contain variables at this stage
- actual_user = template.template(self.basedir, actual_user, inject)
- try:
- actual_pass = template.template(self.basedir, actual_pass, inject)
- self.become_pass = template.template(self.basedir, self.become_pass, inject)
- except:
- # ignore password template errors, could be triggered by password charaters #10468
- pass
-
- # make actual_user available as __magic__ ansible_ssh_user variable
- inject['ansible_ssh_user'] = actual_user
-
- try:
- if actual_transport == 'accelerate':
- # for accelerate, we stuff both ports into a single
- # variable so that we don't have to mangle other function
- # calls just to accommodate this one case
- actual_port = [actual_port, self.accelerate_port]
- elif actual_port is not None:
- actual_port = int(template.template(self.basedir, actual_port, inject))
- except ValueError, e:
- result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port)
- return ReturnData(host=host, comm_ok=False, result=result)
-
- try:
- if self.delegate_to or host != actual_host:
- delegate_host = host
- else:
- delegate_host = None
- conn = self.connector.connect(actual_host, actual_port, actual_user, actual_pass, actual_transport, actual_private_key_file, delegate_host)
-
- default_shell = getattr(conn, 'default_shell', '')
- shell_type = inject.get('ansible_shell_type')
- if not shell_type:
- if default_shell:
- shell_type = default_shell
- else:
- shell_type = os.path.basename(C.DEFAULT_EXECUTABLE)
-
- shell_plugin = utils.plugins.shell_loader.get(shell_type)
- if shell_plugin is None:
- shell_plugin = utils.plugins.shell_loader.get('sh')
- conn.shell = shell_plugin
-
- except errors.AnsibleConnectionFailed, e:
- result = dict(failed=True, msg="FAILED: %s" % str(e))
- return ReturnData(host=host, comm_ok=False, result=result)
-
- tmp = ''
- # action plugins may DECLARE via TRANSFERS_FILES = True that they need a remote tmp path working dir
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
-
- # allow module args to work as a dictionary
- # though it is usually a string
- if isinstance(module_args, dict):
- module_args = utils.serialize_args(module_args)
-
- # render module_args and complex_args templates
- try:
- # When templating module_args, we need to be careful to ensure
- # that no variables inadvertently (or maliciously) add params
- # to the list of args. We do this by counting the number of k=v
- # pairs before and after templating.
- num_args_pre = self._count_module_args(module_args, allow_dupes=True)
- module_args = template.template(self.basedir, module_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- num_args_post = self._count_module_args(module_args)
- if num_args_pre != num_args_post:
- raise errors.AnsibleError("A variable inserted a new parameter into the module args. " + \
- "Be sure to quote variables if they contain equal signs (for example: \"{{var}}\").")
- # And we also make sure nothing added in special flags for things
- # like the command/shell module (ie. #USE_SHELL)
- if '#USE_SHELL' in module_args:
- raise errors.AnsibleError("A variable tried to add #USE_SHELL to the module arguments.")
- complex_args = template.template(self.basedir, complex_args, inject, fail_on_undefined=self.error_on_undefined_vars)
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
-
- # filter omitted arguments out from complex_args
- if complex_args:
- complex_args = dict(filter(lambda x: x[1] != self.omit_token, complex_args.iteritems()))
-
- # Filter omitted arguments out from module_args.
- # We do this with split_args instead of parse_kv to ensure
- # that things are not unquoted/requoted incorrectly
- args = split_args(module_args)
- final_args = []
- for arg in args:
- if '=' in arg:
- k,v = arg.split('=', 1)
- if unquote(v) != self.omit_token:
- final_args.append(arg)
- else:
- # not a k=v param, append it
- final_args.append(arg)
- module_args = ' '.join(final_args)
-
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- # Code for do until feature
- until = self.module_vars.get('until', None)
- if until is not None and result.comm_ok:
- inject[self.module_vars.get('register')] = result.result
-
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- retries = template.template(self.basedir, self.module_vars.get('retries'), inject, expand_lists=False)
- delay = self.module_vars.get('delay')
- for x in range(1, int(retries) + 1):
- # template the delay, cast to float and sleep
- delay = template.template(self.basedir, delay, inject, expand_lists=False)
- delay = float(delay)
- time.sleep(delay)
- tmp = ''
- if self._early_needs_tmp_path(module_name, handler):
- tmp = self._make_tmp_path(conn)
- result = handler.run(conn, tmp, module_name, module_args, inject, complex_args)
- result.result['attempts'] = x
- vv("Result from run %i is: %s" % (x, result.result))
- inject[self.module_vars.get('register')] = result.result
- cond = template.template(self.basedir, until, inject, expand_lists=False)
- if utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- break
- if result.result['attempts'] == retries and not utils.check_conditional(cond, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars):
- result.result['failed'] = True
- result.result['msg'] = "Task failed as maximum retries was encountered"
- else:
- result.result['attempts'] = 0
- conn.close()
-
- if not result.comm_ok:
- # connection or parsing errors...
- self.callbacks.on_unreachable(host, result.result)
- else:
- data = result.result
-
- # https://github.com/ansible/ansible/issues/4958
- if hasattr(sys.stdout, "isatty"):
- if "stdout" in data and sys.stdout.isatty():
- if not string_functions.isprintable(data['stdout']):
- data['stdout'] = ''.join(c for c in data['stdout'] if string_functions.isprintable(c))
-
- if 'item' in inject:
- result.result['item'] = inject['item']
-
- result.result['invocation'] = dict(
- module_args=module_args,
- module_name=module_name
- )
-
- changed_when = self.module_vars.get('changed_when')
- failed_when = self.module_vars.get('failed_when')
- if (changed_when is not None or failed_when is not None) and self.background == 0:
- register = self.module_vars.get('register')
- if register is not None:
- if 'stdout' in data:
- data['stdout_lines'] = data['stdout'].splitlines()
- inject[register] = data
- # only run the final checks if the async_status has finished,
- # or if we're not running an async_status check at all
- if (module_name == 'async_status' and "finished" in data) or module_name != 'async_status':
- if changed_when is not None and 'skipped' not in data:
- data['changed'] = utils.check_conditional(changed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
- if failed_when is not None and 'skipped' not in data:
- data['failed_when_result'] = data['failed'] = utils.check_conditional(failed_when, self.basedir, inject, fail_on_undefined=self.error_on_undefined_vars)
-
-
- if is_chained:
- # no callbacks
- return result
- if 'skipped' in data:
- self.callbacks.on_skipped(host, inject.get('item',None))
-
- if self.no_log:
- data = utils.censor_unlogged_data(data)
-
- if not result.is_successful():
- ignore_errors = self.module_vars.get('ignore_errors', False)
- self.callbacks.on_failed(host, data, ignore_errors)
- else:
- if self.diff:
- self.callbacks.on_file_diff(conn.host, result.diff)
- self.callbacks.on_ok(host, data)
-
- return result
-
- def _early_needs_tmp_path(self, module_name, handler):
- ''' detect if a tmp path should be created before the handler is called '''
- if module_name in utils.plugins.action_loader:
- return getattr(handler, 'TRANSFERS_FILES', False)
- # other modules never need tmp path at early stage
- return False
-
- def _late_needs_tmp_path(self, conn, tmp, module_style):
- if "tmp" in tmp:
- # tmp has already been created
- return False
- if not conn.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self.become_method == 'su':
- # tmp is necessary to store module source code
- return True
- if not conn.has_pipelining:
- # tmp is necessary to store the module source code
- # or we want to keep the files on the target system
- return True
- if module_style != "new":
- # even when conn has pipelining, old style modules need tmp to store arguments
- return True
- return False
-
-
- # *****************************************************
-
- def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False,
- executable=None, become=False, in_data=None):
- ''' execute a command string over SSH, return the output '''
- # this can be skipped with powershell modules when there is no analog to a Windows command (like chmod)
- if cmd:
-
- if executable is None:
- executable = C.DEFAULT_EXECUTABLE
-
- become_user = self.become_user
-
- # compare connection user to (su|sudo)_user and disable if the same
- # assume connection type is local if no user attribute
- this_user = getattr(conn, 'user', getpass.getuser())
- if (not become and this_user == become_user):
- sudoable = False
- become = False
-
- rc, stdin, stdout, stderr = conn.exec_command(cmd,
- tmp,
- become_user=become_user,
- sudoable=sudoable,
- executable=executable,
- in_data=in_data)
-
- if type(stdout) not in [ str, unicode ]:
- out = ''.join(stdout.readlines())
- else:
- out = stdout
-
- if type(stderr) not in [ str, unicode ]:
- err = ''.join(stderr.readlines())
- else:
- err = stderr
-
- if rc is not None:
- return dict(rc=rc, stdout=out, stderr=err)
- else:
- return dict(stdout=out, stderr=err)
-
- return dict(rc=None, stdout='', stderr='')
-
-
- # *****************************************************
-
- def _remote_chmod(self, conn, mode, path, tmp, sudoable=False, become=False):
- ''' issue a remote chmod command '''
- cmd = conn.shell.chmod(mode, path)
- return self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable, become=become)
-
- # *****************************************************
-
- def _remote_expand_user(self, conn, path, tmp):
- ''' takes a remote path and performs tilde expansion on the remote host '''
- if not path.startswith('~'):
- return path
-
- split_path = path.split(os.path.sep, 1)
- expand_path = split_path[0]
- if expand_path == '~':
- if self.become and self.become_user:
- expand_path = '~%s' % self.become_user
-
- cmd = conn.shell.expand_user(expand_path)
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False, become=False)
- initial_fragment = utils.last_non_blank_line(data['stdout'])
-
- if not initial_fragment:
- # Something went wrong trying to expand the path remotely. Return
- # the original string
- return path
-
- if len(split_path) > 1:
- return conn.shell.join_path(initial_fragment, *split_path[1:])
- else:
- return initial_fragment
-
- # *****************************************************
-
- def _remote_checksum(self, conn, tmp, path, inject):
- ''' takes a remote checksum and returns 1 if no file '''
-
- # Lookup the python interp from the host or delegate
-
- # host == inven_host when there is no delegate
- host = inject['inventory_hostname']
- if 'delegate_to' in inject:
- delegate = inject['delegate_to']
- if delegate:
- # host == None when the delegate is not in inventory
- host = None
- # delegate set, check whether the delegate has inventory vars
- delegate = template.template(self.basedir, delegate, inject)
- if delegate in inject['hostvars']:
- # host == delegate if we need to lookup the
- # python_interpreter from the delegate's inventory vars
- host = delegate
-
- if host:
- python_interp = inject['hostvars'][host].get('ansible_python_interpreter', 'python')
- else:
- python_interp = 'python'
-
- cmd = conn.shell.checksum(path, python_interp)
-
- #TODO: remove this horrible hack and find way to get checksum to work with other privilege escalation methods
- if self.become_method == 'sudo':
- sudoable = True
- else:
- sudoable = False
- data = self._low_level_exec_command(conn, cmd, tmp, sudoable=sudoable)
- data2 = utils.last_non_blank_line(data['stdout'])
- try:
- if data2 == '':
- # this may happen if the connection to the remote server
- # failed, so just return "INVALIDCHECKSUM" to avoid errors
- return "INVALIDCHECKSUM"
- else:
- return data2.split()[0]
- except IndexError:
- sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
- sys.stderr.write("command: %s\n" % cmd)
- sys.stderr.write("----\n")
- sys.stderr.write("output: %s\n" % data)
- sys.stderr.write("----\n")
- # this will signal that it changed and allow things to keep going
- return "INVALIDCHECKSUM"
-
- # *****************************************************
-
- def _make_tmp_path(self, conn):
- ''' make and return a temporary path on a remote box '''
- basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
- use_system_tmp = False
- if self.become and self.become_user != 'root':
- use_system_tmp = True
-
- tmp_mode = None
- if self.remote_user != 'root' or (self.become and self.become_user != 'root'):
- tmp_mode = 'a+rx'
-
- cmd = conn.shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
- result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
-
- # error handling on this seems a little aggressive?
- if result['rc'] != 0:
- if result['rc'] == 5:
- output = 'Authentication failure.'
- elif result['rc'] == 255 and self.transport in ['ssh']:
- if utils.VERBOSITY > 3:
- output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
- else:
- output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
- elif 'No space left on device' in result['stderr']:
- output = result['stderr']
- else:
- output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
- if 'stdout' in result and result['stdout'] != '':
- output = output + ": %s" % result['stdout']
- raise errors.AnsibleError(output)
-
- rc = conn.shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
- # Catch failure conditions, files should never be
- # written to locations in /.
- if rc == '/':
- raise errors.AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basetmp, cmd))
- return rc
-
- # *****************************************************
-
- def _remove_tmp_path(self, conn, tmp_path):
- ''' Remove a tmp_path. '''
- if "-tmp-" in tmp_path:
- cmd = conn.shell.remove(tmp_path, recurse=True)
- self._low_level_exec_command(conn, cmd, None, sudoable=False)
- # If we have gotten here we have a working ssh configuration.
- # If ssh breaks we could leave tmp directories out on the remote system.
-
- # *****************************************************
-
- def _copy_module(self, conn, tmp, module_name, module_args, inject, complex_args=None):
- ''' transfer a module over SFTP, does not run it '''
- (
- module_style,
- module_shebang,
- module_data
- ) = self._configure_module(conn, module_name, module_args, inject, complex_args)
- module_remote_path = conn.shell.join_path(tmp, module_name)
-
- self._transfer_str(conn, tmp, module_name, module_data)
-
- return (module_remote_path, module_style, module_shebang)
-
- # *****************************************************
-
- def _configure_module(self, conn, module_name, module_args, inject, complex_args=None):
- ''' find module and configure it '''
-
- # Search module path(s) for named module.
- module_suffixes = getattr(conn, 'default_suffixes', None)
- module_path = utils.plugins.module_finder.find_plugin(module_name, module_suffixes)
- if module_path is None:
- module_path2 = utils.plugins.module_finder.find_plugin('ping', module_suffixes)
- if module_path2 is not None:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths" % (module_name))
- else:
- raise errors.AnsibleFileNotFound("module %s not found in configured module paths. Additionally, core modules are missing. If this is a checkout, run 'git submodule update --init --recursive' to correct this problem." % (module_name))
-
-
- # insert shared code and arguments into the module
- (module_data, module_style, module_shebang) = module_replacer.modify_module(
- module_path, complex_args, module_args, inject
- )
-
- return (module_style, module_shebang, module_data)
-
-
- # *****************************************************
-
-
- def _parallel_exec(self, hosts):
- ''' handles mulitprocessing when more than 1 fork is required '''
-
- manager = multiprocessing.Manager()
- job_queue = manager.Queue()
- for host in hosts:
- job_queue.put(host)
- result_queue = manager.Queue()
-
- try:
- fileno = sys.stdin.fileno()
- except ValueError:
- fileno = None
-
- workers = []
- for i in range(self.forks):
- new_stdin = None
- if fileno is not None:
- try:
- new_stdin = os.fdopen(os.dup(fileno))
- except OSError, e:
- # couldn't dupe stdin, most likely because it's
- # not a valid file descriptor, so we just rely on
- # using the one that was passed in
- pass
- prc = multiprocessing.Process(target=_executor_hook,
- args=(job_queue, result_queue, new_stdin))
- prc.start()
- workers.append(prc)
-
- try:
- for worker in workers:
- worker.join()
- except KeyboardInterrupt:
- for worker in workers:
- worker.terminate()
- worker.join()
-
- results = []
- try:
- while not result_queue.empty():
- results.append(result_queue.get(block=False))
- except socket.error:
- raise errors.AnsibleError("<interrupted>")
- return results
-
- # *****************************************************
-
- def _partition_results(self, results):
- ''' separate results by ones we contacted & ones we didn't '''
-
- if results is None:
- return None
- results2 = dict(contacted={}, dark={})
-
- for result in results:
- host = result.host
- if host is None:
- raise Exception("internal error, host not set")
- if result.communicated_ok():
- results2["contacted"][host] = result.result
- else:
- results2["dark"][host] = result.result
-
- # hosts which were contacted but never got a chance to return
- for host in self.run_hosts:
- if not (host in results2['dark'] or host in results2['contacted']):
- results2["dark"][host] = {}
- return results2
-
- # *****************************************************
-
- def run(self):
- ''' xfer & run module on all matched hosts '''
-
- # find hosts that match the pattern
- if not self.run_hosts:
- self.run_hosts = self.inventory.list_hosts(self.pattern)
- hosts = self.run_hosts
- if len(hosts) == 0:
- self.callbacks.on_no_hosts()
- return dict(contacted={}, dark={})
-
- global multiprocessing_runner
- multiprocessing_runner = self
- results = None
-
- # Check if this is an action plugin. Some of them are designed
- # to be ran once per group of hosts. Example module: pause,
- # run once per hostgroup, rather than pausing once per each
- # host.
- p = utils.plugins.action_loader.get(self.module_name, self)
-
- if self.forks == 0 or self.forks > len(hosts):
- self.forks = len(hosts)
-
- if (p and (getattr(p, 'BYPASS_HOST_LOOP', None)) or self.run_once):
-
- # Expose the current hostgroup to the bypassing plugins
- self.host_set = hosts
- # We aren't iterating over all the hosts in this
- # group. So, just choose the "delegate_to" host if that is defined and is
- # one of the targeted hosts, otherwise pick the first host in our group to
- # construct the conn object with.
- if self.delegate_to is not None and self.delegate_to in hosts:
- host = self.delegate_to
- else:
- host = hosts[0]
-
- result_data = self._executor(host, None).result
- # Create a ResultData item for each host in this group
- # using the returned result. If we didn't do this we would
- # get false reports of dark hosts.
- results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
- for h in hosts ]
- del self.host_set
-
- elif self.forks > 1:
- try:
- results = self._parallel_exec(hosts)
- except IOError, ie:
- print ie.errno
- if ie.errno == 32:
- # broken pipe from Ctrl+C
- raise errors.AnsibleError("interrupted")
- raise
- else:
- results = [ self._executor(h, None) for h in hosts ]
-
- return self._partition_results(results)
-
- # *****************************************************
-
- def run_async(self, time_limit):
- ''' Run this module asynchronously and return a poller. '''
-
- self.background = time_limit
- results = self.run()
- return results, poller.AsyncPoller(results, self)
-
- # *****************************************************
-
- def noop_on_check(self, inject):
- ''' Should the runner run in check mode or not ? '''
-
- # initialize self.always_run on first call
- if self.always_run is None:
- self.always_run = self.module_vars.get('always_run', False)
- self.always_run = check_conditional(
- self.always_run, self.basedir, inject, fail_on_undefined=True)
-
- return (self.check and not self.always_run)
diff --git a/v1/ansible/runner/action_plugins/__init__.py b/v1/ansible/runner/action_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/action_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/action_plugins/add_host.py b/v1/ansible/runner/action_plugins/add_host.py
deleted file mode 100644
index 995b205b62..0000000000
--- a/v1/ansible/runner/action_plugins/add_host.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright 2012, Seth Vidal <skvidal@fedoraproject.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, combine_vars
-from ansible.inventory.host import Host
-from ansible.inventory.group import Group
-
-class ActionModule(object):
- ''' Create inventory hosts and groups in the memory inventory'''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'hostname' in args and not 'name' in args:
- raise ae("'name' is a required argument.")
-
- result = {}
-
- # Parse out any hostname:port patterns
- new_name = args.get('name', args.get('hostname', None))
- vv("creating host via 'add_host': hostname=%s" % new_name)
-
- if ":" in new_name:
- new_name, new_port = new_name.split(":")
- args['ansible_ssh_port'] = new_port
-
- # redefine inventory and get group "all"
- inventory = self.runner.inventory
- allgroup = inventory.get_group('all')
-
- # check if host in cache, add if not
- if new_name in inventory._hosts_cache:
- new_host = inventory._hosts_cache[new_name]
- else:
- new_host = Host(new_name)
- # only groups can be added directly to inventory
- inventory._hosts_cache[new_name] = new_host
- allgroup.add_host(new_host)
-
- groupnames = args.get('groupname', args.get('groups', args.get('group', '')))
- # add it to the group if that was specified
- if groupnames:
- for group_name in groupnames.split(","):
- group_name = group_name.strip()
- if not inventory.get_group(group_name):
- new_group = Group(group_name)
- inventory.add_group(new_group)
- new_group.vars = inventory.get_group_variables(group_name, vault_password=inventory._vault_password)
- grp = inventory.get_group(group_name)
- grp.add_host(new_host)
-
- # add this host to the group cache
- if inventory._groups_list is not None:
- if group_name in inventory._groups_list:
- if new_host.name not in inventory._groups_list[group_name]:
- inventory._groups_list[group_name].append(new_host.name)
-
- vv("added host to group via add_host module: %s" % group_name)
- result['new_groups'] = groupnames.split(",")
-
-
- # actually load host vars
- new_host.vars = combine_vars(new_host.vars, inventory.get_host_variables(new_name, update_cached=True, vault_password=inventory._vault_password))
-
- # Add any passed variables to the new_host
- for k in args.keys():
- if not k in [ 'name', 'hostname', 'groupname', 'groups' ]:
- new_host.set_variable(k, args[k])
-
- result['new_host'] = new_name
-
- # clear pattern caching completely since it's unpredictable what
- # patterns may have referenced the group
- inventory.clear_pattern_cache()
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
-
-
diff --git a/v1/ansible/runner/action_plugins/assemble.py b/v1/ansible/runner/action_plugins/assemble.py
deleted file mode 100644
index 33a4838e32..0000000000
--- a/v1/ansible/runner/action_plugins/assemble.py
+++ /dev/null
@@ -1,158 +0,0 @@
-# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
-# Stephen Fromm <sfromm@gmail.com>
-# Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-import os.path
-import pipes
-import shutil
-import tempfile
-import base64
-import re
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def _assemble_from_fragments(self, src_path, delimiter=None, compiled_regexp=None):
- ''' assemble a file from a directory of fragments '''
- tmpfd, temp_path = tempfile.mkstemp()
- tmp = os.fdopen(tmpfd,'w')
- delimit_me = False
- add_newline = False
-
- for f in sorted(os.listdir(src_path)):
- if compiled_regexp and not compiled_regexp.search(f):
- continue
- fragment = "%s/%s" % (src_path, f)
- if not os.path.isfile(fragment):
- continue
- fragment_content = file(fragment).read()
-
- # always put a newline between fragments if the previous fragment didn't end with a newline.
- if add_newline:
- tmp.write('\n')
-
- # delimiters should only appear between fragments
- if delimit_me:
- if delimiter:
- # un-escape anything like newlines
- delimiter = delimiter.decode('unicode-escape')
- tmp.write(delimiter)
- # always make sure there's a newline after the
- # delimiter, so lines don't run together
- if delimiter[-1] != '\n':
- tmp.write('\n')
-
- tmp.write(fragment_content)
- delimit_me = True
- if fragment_content.endswith('\n'):
- add_newline = False
- else:
- add_newline = True
-
- tmp.close()
- return temp_path
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- delimiter = options.get('delimiter', None)
- remote_src = utils.boolean(options.get('remote_src', 'yes'))
- regexp = options.get('regexp', None)
-
-
- if src is None or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'assemble', module_args, inject=inject, complex_args=complex_args)
- elif '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- # the source is local, so expand it here
- src = os.path.expanduser(src)
-
- _re = None
- if regexp is not None:
- _re = re.compile(regexp)
-
- # Does all work assembling the file
- path = self._assemble_from_fragments(src, delimiter, _re)
-
- path_checksum = utils.checksum_s(path)
- dest = self.runner._remote_expand_user(conn, dest, tmp)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if path_checksum != remote_checksum:
- resultant = file(path).read()
- if self.runner.diff:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- xfered = self.runner._transfer_str(conn, tmp, 'src', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=src, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject)
- res.diff = dict(after=resultant)
- return res
- else:
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(src),
- )
-
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'file', module_args_tmp, inject=inject)
diff --git a/v1/ansible/runner/action_plugins/assert.py b/v1/ansible/runner/action_plugins/assert.py
deleted file mode 100644
index a0e02dedb0..0000000000
--- a/v1/ansible/runner/action_plugins/assert.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils, errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Fail with custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the fail module does not need to pay attention to check mode
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
-
- msg = None
- if 'msg' in args:
- msg = args['msg']
-
- if not 'that' in args:
- raise errors.AnsibleError('conditional required in "that" string')
-
- if not isinstance(args['that'], list):
- args['that'] = [ args['that'] ]
-
- for that in args['that']:
- test_result = utils.check_conditional(that, self.runner.basedir, inject, fail_on_undefined=True)
- if not test_result:
- result = dict(
- failed = True,
- evaluated_to = test_result,
- assertion = that,
- )
- if msg:
- result['msg'] = msg
- return ReturnData(conn=conn, result=result)
-
- return ReturnData(conn=conn, result=dict(msg='all assertions passed'))
-
diff --git a/v1/ansible/runner/action_plugins/async.py b/v1/ansible/runner/action_plugins/async.py
deleted file mode 100644
index dc53d6fa6c..0000000000
--- a/v1/ansible/runner/action_plugins/async.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer the given module name, plus the async module, then run it '''
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # shell and command module are the same
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- if "tmp" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
- self.runner._remote_chmod(conn, 'a+rx', module_path, tmp)
-
- return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
- async_module=module_path,
- async_jid=self.runner.generated_jid,
- async_limit=self.runner.background,
- inject=inject
- )
-
diff --git a/v1/ansible/runner/action_plugins/copy.py b/v1/ansible/runner/action_plugins/copy.py
deleted file mode 100644
index a6a5cb5a27..0000000000
--- a/v1/ansible/runner/action_plugins/copy.py
+++ /dev/null
@@ -1,381 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile in case we set it after finding content populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Get the amount of spaces to remove to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's recursive copy, destination is always a dir,
- # explicitly mark it so (note - copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A register for if we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the file if there is one file.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is kind of optimization - if user told us destination is
- # dir, do path manipulation right away, otherwise we still check
- # for dest being a dir via remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '4':
- result = dict(msg="python isn't present on the system. Unable to compute checksum", failed=True)
- return ReturnData(conn=conn, result=result)
-
- if remote_checksum != '1' and not force:
- # remote_file exists so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after original and override them
- # we pass dest only to make sure it includes trailing slash in case of recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file, already correct hash, but still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed') == True:
- return module_return
- if module_result.get('changed') == True:
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
diff --git a/v1/ansible/runner/action_plugins/debug.py b/v1/ansible/runner/action_plugins/debug.py
deleted file mode 100644
index eaf1364c3f..0000000000
--- a/v1/ansible/runner/action_plugins/debug.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.utils import template
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Print statements during execution '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
- self.basedir = runner.basedir
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- args = {}
- if complex_args:
- args.update(complex_args)
-
- # attempt to prevent confusing messages when the variable didn't interpolate
- module_args = module_args.replace("{{ ","{{").replace(" }}","}}")
-
- kv = utils.parse_kv(module_args)
- args.update(kv)
-
- if not 'msg' in args and not 'var' in args:
- args['msg'] = 'Hello world!'
-
- result = {}
- if 'msg' in args:
- if 'fail' in args and utils.boolean(args['fail']):
- result = dict(failed=True, msg=args['msg'])
- else:
- result = dict(msg=args['msg'])
- elif 'var' in args and not utils.LOOKUP_REGEX.search(args['var']):
- results = template.template(self.basedir, args['var'], inject, convert_bare=True)
- result['var'] = { args['var']: results }
-
- # force flag to make debug output module always verbose
- result['verbose_always'] = True
-
- return ReturnData(conn=conn, result=result)
diff --git a/v1/ansible/runner/action_plugins/fail.py b/v1/ansible/runner/action_plugins/fail.py
deleted file mode 100644
index 2bbaf40313..0000000000
--- a/v1/ansible/runner/action_plugins/fail.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2012, Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- ''' Fail with custom message '''
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # note: the fail module does not need to pay attention to check mode
- # it always runs.
-
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(utils.parse_kv(module_args))
- if not 'msg' in args:
- args['msg'] = 'Failed as requested from task'
-
- result = dict(failed=True, msg=args['msg'])
- return ReturnData(conn=conn, result=result)
diff --git a/v1/ansible/runner/action_plugins/fetch.py b/v1/ansible/runner/action_plugins/fetch.py
deleted file mode 100644
index 27d2f6b3c6..0000000000
--- a/v1/ansible/runner/action_plugins/fetch.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import random
-import traceback
-import tempfile
-import base64
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for fetch operations '''
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not (yet) supported for this module'))
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- flat = options.get('flat', False)
- flat = utils.boolean(flat)
- fail_on_missing = options.get('fail_on_missing', False)
- fail_on_missing = utils.boolean(fail_on_missing)
- validate_checksum = options.get('validate_checksum', None)
- if validate_checksum is not None:
- validate_checksum = utils.boolean(validate_checksum)
- # Alias for validate_checksum (old way of specifying it)
- validate_md5 = options.get('validate_md5', None)
- if validate_md5 is not None:
- validate_md5 = utils.boolean(validate_md5)
- if validate_md5 is None and validate_checksum is None:
- # Default
- validate_checksum = True
- elif validate_checksum is None:
- validate_checksum = validate_md5
- elif validate_md5 is not None and validate_checksum is not None:
- results = dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")
- return ReturnData(conn, result=results)
-
- if source is None or dest is None:
- results = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, result=results)
-
- source = conn.shell.join_path(source)
- source = self.runner._remote_expand_user(conn, source, tmp)
-
- # calculate checksum for the remote file
- remote_checksum = self.runner._remote_checksum(conn, tmp, source, inject)
-
- # use slurp if sudo and permissions are lacking
- remote_data = None
- if remote_checksum in ('1', '2') or self.runner.become:
- slurpres = self.runner._execute_module(conn, tmp, 'slurp', 'src=%s' % source, inject=inject)
- if slurpres.is_successful():
- if slurpres.result['encoding'] == 'base64':
- remote_data = base64.b64decode(slurpres.result['content'])
- if remote_data is not None:
- remote_checksum = utils.checksum_s(remote_data)
- # the source path may have been expanded on the
- # target system, so we compare it here and use the
- # expanded version if it's different
- remote_source = slurpres.result.get('source')
- if remote_source and remote_source != source:
- source = remote_source
-
- # calculate the destination name
- if os.path.sep not in conn.shell.join_path('a', ''):
- source_local = source.replace('\\', '/')
- else:
- source_local = source
-
- dest = os.path.expanduser(dest)
- if flat:
- if dest.endswith("/"):
- # if the path ends with "/", we'll use the source filename as the
- # destination filename
- base = os.path.basename(source_local)
- dest = os.path.join(dest, base)
- if not dest.startswith("/"):
- # if dest does not start with "/", we'll assume a relative path
- dest = utils.path_dwim(self.runner.basedir, dest)
- else:
- # files are saved in dest dir, with a subdir for each host, then the filename
- dest = "%s/%s/%s" % (utils.path_dwim(self.runner.basedir, dest), inject['inventory_hostname'], source_local)
-
- dest = dest.replace("//","/")
-
- if remote_checksum in ('0', '1', '2', '3', '4'):
- # these don't fail because you may want to transfer a log file that possibly MAY exist
- # but keep going to fetch other log files
- if remote_checksum == '0':
- result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
- elif remote_checksum == '1':
- if fail_on_missing:
- result = dict(failed=True, msg="the remote file does not exist", file=source)
- else:
- result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
- elif remote_checksum == '2':
- result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
- elif remote_checksum == '3':
- result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
- elif remote_checksum == '4':
- result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
- return ReturnData(conn=conn, result=result)
-
- # calculate checksum for the local file
- local_checksum = utils.checksum(dest)
-
- if remote_checksum != local_checksum:
- # create the containing directories, if needed
- if not os.path.isdir(os.path.dirname(dest)):
- os.makedirs(os.path.dirname(dest))
-
- # fetch the file and check for changes
- if remote_data is None:
- conn.fetch_file(source, dest)
- else:
- f = open(dest, 'w')
- f.write(remote_data)
- f.close()
- new_checksum = utils.secure_hash(dest)
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
- try:
- new_md5 = utils.md5(dest)
- except ValueError:
- new_md5 = None
-
- if validate_checksum and new_checksum != remote_checksum:
- result = dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
- result = dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
- return ReturnData(conn=conn, result=result)
- else:
- # For backwards compatibility. We'll return None on FIPS enabled
- # systems
- try:
- local_md5 = utils.md5(dest)
- except ValueError:
- local_md5 = None
-
- result = dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
- return ReturnData(conn=conn, result=result)
-
diff --git a/v1/ansible/runner/action_plugins/group_by.py b/v1/ansible/runner/action_plugins/group_by.py
deleted file mode 100644
index 25c2073fa0..0000000000
--- a/v1/ansible/runner/action_plugins/group_by.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import parse_kv, check_conditional
-import ansible.utils.template as template
-
-class ActionModule(object):
- ''' Create inventory groups based on variables '''
-
- ### We need to be able to modify the inventory
- BYPASS_HOST_LOOP = True
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- # the group_by module does not need to pay attention to check mode.
- # it always runs.
-
- # module_args and complex_args have already been templated for the first host.
- # Use them here only to check that a key argument is provided.
- args = {}
- if complex_args:
- args.update(complex_args)
- args.update(parse_kv(module_args))
- if not 'key' in args:
- raise ae("'key' is a required argument.")
-
- vv("created 'group_by' ActionModule: key=%s"%(args['key']))
-
- inventory = self.runner.inventory
-
- result = {'changed': False}
-
- ### find all groups
- groups = {}
-
- for host in self.runner.host_set:
- data = {}
- data.update(inject)
- data.update(inject['hostvars'][host])
- conds = self.runner.conditional
- if type(conds) != list:
- conds = [ conds ]
- next_host = False
- for cond in conds:
- if not check_conditional(cond, self.runner.basedir, data, fail_on_undefined=self.runner.error_on_undefined_vars):
- next_host = True
- break
- if next_host:
- continue
-
- # Template original module_args and complex_args from runner for each host.
- host_module_args = template.template(self.runner.basedir, self.runner.module_args, data)
- host_complex_args = template.template(self.runner.basedir, self.runner.complex_args, data)
- host_args = {}
- if host_complex_args:
- host_args.update(host_complex_args)
- host_args.update(parse_kv(host_module_args))
-
- group_name = host_args['key']
- group_name = group_name.replace(' ','-')
- if group_name not in groups:
- groups[group_name] = []
- groups[group_name].append(host)
-
- result['groups'] = groups
-
- ### add to inventory
- for group, hosts in groups.items():
- inv_group = inventory.get_group(group)
- if not inv_group:
- inv_group = ansible.inventory.Group(name=group)
- inventory.add_group(inv_group)
- inventory.get_group('all').add_child_group(inv_group)
- inv_group.vars = inventory.get_group_variables(group, update_cached=False, vault_password=inventory._vault_password)
- for host in hosts:
- if host in self.runner.inventory._vars_per_host:
- del self.runner.inventory._vars_per_host[host]
- inv_host = inventory.get_host(host)
- if not inv_host:
- inv_host = ansible.inventory.Host(name=host)
- if inv_group not in inv_host.get_groups():
- result['changed'] = True
- inv_group.add_host(inv_host)
-
- return ReturnData(conn=conn, comm_ok=True, result=result)
diff --git a/v1/ansible/runner/action_plugins/include_vars.py b/v1/ansible/runner/action_plugins/include_vars.py
deleted file mode 100644
index d6ce52cf00..0000000000
--- a/v1/ansible/runner/action_plugins/include_vars.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# (c) 2013-2014, Benno Joy <benno@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if not module_args:
- result = dict(failed=True, msg="No source file given")
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- source = module_args
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'vars', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if os.path.exists(source):
- data = utils.parse_yaml_from_file(source, vault_password=self.runner.vault_pass)
- if data and type(data) != dict:
- raise errors.AnsibleError("%s must be stored as a dictionary/hash" % source)
- elif data is None:
- data = {}
- result = dict(ansible_facts=data)
- return ReturnData(conn=conn, comm_ok=True, result=result)
- else:
- result = dict(failed=True, msg="Source file not found.", file=source)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
diff --git a/v1/ansible/runner/action_plugins/normal.py b/v1/ansible/runner/action_plugins/normal.py
deleted file mode 100644
index 8500c6641c..0000000000
--- a/v1/ansible/runner/action_plugins/normal.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pwd
-import random
-import traceback
-import tempfile
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import module_common
-from ansible.runner.return_data import ReturnData
-from ansible.callbacks import vv, vvv
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' transfer & execute a module that is not 'copy' or 'template' '''
-
- module_args = self.runner._complex_args_hack(complex_args, module_args)
-
- if self.runner.noop_on_check(inject):
- if module_name in [ 'shell', 'command' ]:
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for %s' % module_name))
- # else let the module parsing code decide, though this will only be allowed for AnsibleModuleCommon using
- # python modules for now
- module_args += " CHECKMODE=True"
-
- if self.runner.no_log:
- module_args += " NO_LOG=True"
-
- # shell and command are the same module
- if module_name == 'shell':
- module_name = 'command'
- module_args += " #USE_SHELL"
-
- vv("REMOTE_MODULE %s %s" % (module_name, module_args), host=conn.host)
- return self.runner._execute_module(conn, tmp, module_name, module_args, inject=inject, complex_args=complex_args)
-
-
diff --git a/v1/ansible/runner/action_plugins/patch.py b/v1/ansible/runner/action_plugins/patch.py
deleted file mode 100644
index 29d4f7eca5..0000000000
--- a/v1/ansible/runner/action_plugins/patch.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-
-import os
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- remote_src = utils.boolean(options.get('remote_src', 'no'))
-
- if src is None:
- result = dict(failed=True, msg="src is required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- if remote_src:
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
-
- # Source is local
- if '_original_file' in inject:
- src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
- else:
- src = utils.path_dwim(self.runner.basedir, src)
-
- if tmp is None or "-tmp-" not in tmp:
- tmp = self.runner._make_tmp_path(conn)
-
- tmp_src = conn.shell.join_path(tmp, os.path.basename(src))
- conn.put_file(src, tmp_src)
-
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
-
- new_module_args = dict(
- src=tmp_src,
- )
-
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
-
- return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
diff --git a/v1/ansible/runner/action_plugins/pause.py b/v1/ansible/runner/action_plugins/pause.py
deleted file mode 100644
index d0c9b53db2..0000000000
--- a/v1/ansible/runner/action_plugins/pause.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.callbacks import vv
-from ansible.errors import AnsibleError as ae
-from ansible.runner.return_data import ReturnData
-from ansible.utils import getch, parse_kv
-import ansible.utils.template as template
-from termios import tcflush, TCIFLUSH
-import datetime
-import sys
-import time
-
-
-class ActionModule(object):
- ''' pauses execution for a length or time, or until input is received '''
-
- PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
- BYPASS_HOST_LOOP = True
-
- def __init__(self, runner):
- self.runner = runner
- # Set defaults
- self.duration_unit = 'minutes'
- self.prompt = None
- self.seconds = None
- self.result = {'changed': False,
- 'rc': 0,
- 'stderr': '',
- 'stdout': '',
- 'start': None,
- 'stop': None,
- 'delta': None,
- }
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' run the pause action module '''
-
- # note: this module does not need to pay attention to the 'check'
- # flag, it always runs
-
- hosts = ', '.join(self.runner.host_set)
- args = {}
- if complex_args:
- args.update(complex_args)
- # extra template call unneeded?
- args.update(parse_kv(template.template(self.runner.basedir, module_args, inject)))
-
- # Are 'minutes' or 'seconds' keys that exist in 'args'?
- if 'minutes' in args or 'seconds' in args:
- try:
- if 'minutes' in args:
- self.pause_type = 'minutes'
- # The time() command operates in seconds so we need to
- # recalculate for minutes=X values.
- self.seconds = int(args['minutes']) * 60
- else:
- self.pause_type = 'seconds'
- self.seconds = int(args['seconds'])
- self.duration_unit = 'seconds'
- except ValueError, e:
- raise ae("non-integer value given for prompt duration:\n%s" % str(e))
- # Is 'prompt' a key in 'args'?
- elif 'prompt' in args:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\n%s:\n" % (hosts, args['prompt'])
- # Is 'args' empty, then this is the default prompted pause
- elif len(args.keys()) == 0:
- self.pause_type = 'prompt'
- self.prompt = "[%s]\nPress enter to continue:\n" % hosts
- # I have no idea what you're trying to do. But it's so wrong.
- else:
- raise ae("invalid pause type given. must be one of: %s" % \
- ", ".join(self.PAUSE_TYPES))
-
- vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
- (self.pause_type, self.duration_unit, self.seconds, self.prompt))
-
- ########################################################################
- # Begin the hard work!
- try:
- self._start()
- if not self.pause_type == 'prompt':
- print "[%s]\nPausing for %s seconds" % (hosts, self.seconds)
- time.sleep(self.seconds)
- else:
- # Clear out any unflushed buffered input which would
- # otherwise be consumed by raw_input() prematurely.
- tcflush(sys.stdin, TCIFLUSH)
- self.result['user_input'] = raw_input(self.prompt.encode(sys.stdout.encoding))
- except KeyboardInterrupt:
- while True:
- print '\nAction? (a)bort/(c)ontinue: '
- c = getch()
- if c == 'c':
- # continue playbook evaluation
- break
- elif c == 'a':
- # abort further playbook evaluation
- raise ae('user requested abort!')
- finally:
- self._stop()
-
- return ReturnData(conn=conn, result=self.result)
-
- def _start(self):
- ''' mark the time of execution for duration calculations later '''
- self.start = time.time()
- self.result['start'] = str(datetime.datetime.now())
- if not self.pause_type == 'prompt':
- print "(^C-c = continue early, ^C-a = abort)"
-
- def _stop(self):
- ''' calculate the duration we actually paused for and then
- finish building the task result string '''
- duration = time.time() - self.start
- self.result['stop'] = str(datetime.datetime.now())
- self.result['delta'] = int(duration)
-
- if self.duration_unit == 'minutes':
- duration = round(duration / 60.0, 2)
- else:
- duration = round(duration, 2)
-
- self.result['stdout'] = "Paused for %s %s" % (duration, self.duration_unit)
diff --git a/v1/ansible/runner/action_plugins/raw.py b/v1/ansible/runner/action_plugins/raw.py
deleted file mode 100644
index e52296b2e7..0000000000
--- a/v1/ansible/runner/action_plugins/raw.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
-
- if self.runner.noop_on_check(inject):
- # in --check mode, always skip this module execution
- return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True))
-
- executable = ''
- # From library/command, keep in sync
- r = re.compile(r'(^|\s)(executable)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)\s|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "executable":
- executable = v
- module_args = r.sub("", module_args)
-
- result = self.runner._low_level_exec_command(conn, module_args, tmp, sudoable=True, executable=executable,
- become=self.runner.become)
- # for some modules (script, raw), the sudo success key
- # may leak into the stdout due to the way the sudo/su
- # command is constructed, so we filter that out here
- if result.get('stdout','').strip().startswith('BECOME-SUCCESS-'):
- result['stdout'] = re.sub(r'^((\r)?\n)?BECOME-SUCCESS.*(\r)?\n', '', result['stdout'])
-
- return ReturnData(conn=conn, result=result)
diff --git a/v1/ansible/runner/action_plugins/script.py b/v1/ansible/runner/action_plugins/script.py
deleted file mode 100644
index 1b1aadc7aa..0000000000
--- a/v1/ansible/runner/action_plugins/script.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import shlex
-
-import ansible.constants as C
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-
-class ActionModule(object):
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- if self.runner.noop_on_check(inject):
- # in check mode, always skip this module
- return ReturnData(conn=conn, comm_ok=True,
- result=dict(skipped=True, msg='check mode not supported for this module'))
-
- # extract ansible reserved parameters
- # From library/command keep in sync
- creates = None
- removes = None
- r = re.compile(r'(^|\s)(creates|removes)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
- for m in r.finditer(module_args):
- v = m.group(4).replace("\\", "")
- if m.group(2) == "creates":
- creates = v
- elif m.group(2) == "removes":
- removes = v
- module_args = r.sub("", module_args)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % creates
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
- if removes:
- # do not run the command if the line contains removes=filename
- # and the filename does not exist. This allows idempotence
- # of command executions.
- module_args_tmp = "path=%s" % removes
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args, persist_files=True)
- stat = module_return.result.get('stat', None)
- if stat and not stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s does not exist" % removes)
- )
- )
-
- # Decode the result of shlex.split() to UTF8 to get around a bug in that's been fixed in Python 2.7 but not Python 2.6.
- # See: http://bugs.python.org/issue6988
- tokens = shlex.split(module_args.encode('utf8'))
- tokens = [s.decode('utf8') for s in tokens]
- # extract source script
- source = tokens[0]
-
- # FIXME: error handling
- args = " ".join(tokens[1:])
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # transfer the file to a remote tmp location
- source = source.replace('\x00', '') # why does this happen here?
- args = args.replace('\x00', '') # why does this happen here?
- tmp_src = conn.shell.join_path(tmp, os.path.basename(source))
- tmp_src = tmp_src.replace('\x00', '')
-
- conn.put_file(source, tmp_src)
-
- sudoable = True
- # set file permissions, more permissive when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- chmod_mode = 'a+rx'
- sudoable = False
- else:
- chmod_mode = '+rx'
- self.runner._remote_chmod(conn, chmod_mode, tmp_src, tmp, sudoable=sudoable, become=self.runner.become)
-
- # add preparation steps to one ssh roundtrip executing the script
- env_string = self.runner._compute_environment_string(conn, inject)
- module_args = ' '.join([env_string, tmp_src, args])
-
- handler = utils.plugins.action_loader.get('raw', self.runner)
- result = handler.run(conn, tmp, 'raw', module_args, inject)
-
- # clean up after
- if "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
- self.runner._remove_tmp_path(conn, tmp)
-
- result.result['changed'] = True
-
- return result
diff --git a/v1/ansible/runner/action_plugins/set_fact.py b/v1/ansible/runner/action_plugins/set_fact.py
deleted file mode 100644
index 7ac972cac6..0000000000
--- a/v1/ansible/runner/action_plugins/set_fact.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright 2013 Dag Wieers <dag@wieers.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-from ansible.runner.return_data import ReturnData
-
-class ActionModule(object):
-
- TRANSFERS_FILES = False
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for running operations on master '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
-
- # parse the k=v arguments and convert any special boolean
- # strings into proper booleans (issue #8629)
- parsed_args = utils.parse_kv(module_args)
- for k,v in parsed_args.iteritems():
- # convert certain strings to boolean values
- if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
- parsed_args[k] = utils.boolean(v)
-
- # and finally update the options with the parsed/modified args
- options.update(parsed_args)
-
- return ReturnData(conn=conn, result=dict(ansible_facts=options))
diff --git a/v1/ansible/runner/action_plugins/synchronize.py b/v1/ansible/runner/action_plugins/synchronize.py
deleted file mode 100644
index fb82194b00..0000000000
--- a/v1/ansible/runner/action_plugins/synchronize.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os.path
-
-from ansible import utils
-from ansible import constants
-from ansible.runner.return_data import ReturnData
-import ansible.utils.template as template
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
- self.inject = None
-
- def _get_absolute_path(self, path=None):
- if 'vars' in self.inject:
- if '_original_file' in self.inject['vars']:
- # roles
- original_path = path
- path = utils.path_dwim_relative(self.inject['_original_file'], 'files', path, self.runner.basedir)
- if original_path and original_path[-1] == '/' and path[-1] != '/':
- # make sure the dwim'd path ends in a trailing "/"
- # if the original path did
- path += '/'
-
- return path
-
- def _process_origin(self, host, path, user):
-
- if not host in ['127.0.0.1', 'localhost']:
- if user:
- return '%s@%s:%s' % (user, host, path)
- else:
- return '%s:%s' % (host, path)
- else:
- if not ':' in path:
- if not path.startswith('/'):
- path = self._get_absolute_path(path=path)
- return path
-
- def _process_remote(self, host, path, user):
- transport = self.runner.transport
- return_data = None
- if not host in ['127.0.0.1', 'localhost'] or transport != "local":
- if user:
- return_data = '%s@%s:%s' % (user, host, path)
- else:
- return_data = '%s:%s' % (host, path)
- else:
- return_data = path
-
- if not ':' in return_data:
- if not return_data.startswith('/'):
- return_data = self._get_absolute_path(path=return_data)
-
- return return_data
-
- def setup(self, module_name, inject):
- ''' Always default to localhost as delegate if None defined '''
-
- self.inject = inject
-
- # Store original transport and sudo values.
- self.original_transport = inject.get('ansible_connection', self.runner.transport)
- self.original_become = self.runner.become
- self.transport_overridden = False
-
- if inject.get('delegate_to') is None:
- inject['delegate_to'] = '127.0.0.1'
- # IF original transport is not local, override transport and disable sudo.
- if self.original_transport != 'local':
- inject['ansible_connection'] = 'local'
- self.transport_overridden = True
- self.runner.become = False
-
- def run(self, conn, tmp, module_name, module_args,
- inject, complex_args=None, **kwargs):
-
- ''' generates params and passes them on to the rsync module '''
-
- self.inject = inject
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- src = options.get('src', None)
- dest = options.get('dest', None)
- use_ssh_args = options.pop('use_ssh_args', None)
-
- src = template.template(self.runner.basedir, src, inject)
- dest = template.template(self.runner.basedir, dest, inject)
- use_ssh_args = template.template(self.runner.basedir, use_ssh_args, inject)
-
- try:
- options['local_rsync_path'] = inject['ansible_rsync_path']
- except KeyError:
- pass
-
- # from the perspective of the rsync call the delegate is the localhost
- src_host = '127.0.0.1'
- dest_host = inject.get('ansible_ssh_host', inject['inventory_hostname'])
-
- # allow ansible_ssh_host to be templated
- dest_host = template.template(self.runner.basedir, dest_host, inject, fail_on_undefined=True)
- dest_is_local = dest_host in ['127.0.0.1', 'localhost']
-
- # CHECK FOR NON-DEFAULT SSH PORT
- dest_port = options.get('dest_port')
- inv_port = inject.get('ansible_ssh_port', inject['inventory_hostname'])
- if inv_port != dest_port and inv_port != inject['inventory_hostname']:
- options['dest_port'] = inv_port
-
- # edge case: explicit delegate and dest_host are the same
- if dest_host == inject['delegate_to']:
- dest_host = '127.0.0.1'
-
- # SWITCH SRC AND DEST PER MODE
- if options.get('mode', 'push') == 'pull':
- (dest_host, src_host) = (src_host, dest_host)
-
- # CHECK DELEGATE HOST INFO
- use_delegate = False
- if conn.delegate != conn.host:
- if 'hostvars' in inject:
- if conn.delegate in inject['hostvars'] and self.original_transport != 'local':
- # use a delegate host instead of localhost
- use_delegate = True
-
- # COMPARE DELEGATE, HOST AND TRANSPORT
- process_args = False
- if not dest_host is src_host and self.original_transport != 'local':
- # interpret and inject remote host info into src or dest
- process_args = True
-
- # MUNGE SRC AND DEST PER REMOTE_HOST INFO
- if process_args or use_delegate:
-
- user = None
- if utils.boolean(options.get('set_remote_user', 'yes')):
- if use_delegate:
- user = inject['hostvars'][conn.delegate].get('ansible_ssh_user')
-
- if not use_delegate or not user:
- user = inject.get('ansible_ssh_user',
- self.runner.remote_user)
-
- if use_delegate:
- # FIXME
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
- else:
- private_key = inject.get('ansible_ssh_private_key_file', self.runner.private_key_file)
-
- private_key = template.template(self.runner.basedir, private_key, inject, fail_on_undefined=True)
-
- if not private_key is None:
- private_key = os.path.expanduser(private_key)
- options['private_key'] = private_key
-
- # use the mode to define src and dest's url
- if options.get('mode', 'push') == 'pull':
- # src is a remote path: <user>@<host>, dest is a local path
- src = self._process_remote(src_host, src, user)
- dest = self._process_origin(dest_host, dest, user)
- else:
- # src is a local path, dest is a remote path: <user>@<host>
- src = self._process_origin(src_host, src, user)
- dest = self._process_remote(dest_host, dest, user)
-
- options['src'] = src
- options['dest'] = dest
- if 'mode' in options:
- del options['mode']
- if use_ssh_args:
- options['ssh_args'] = constants.ANSIBLE_SSH_ARGS
-
- # Allow custom rsync path argument.
- rsync_path = options.get('rsync_path', None)
-
- # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument.
- if not rsync_path and self.transport_overridden and self.original_become and not dest_is_local and self.runner.become_method == 'sudo':
- rsync_path = 'sudo rsync'
-
- # make sure rsync path is quoted.
- if rsync_path:
- options['rsync_path'] = '"' + rsync_path + '"'
-
- module_args = ""
- if self.runner.noop_on_check(inject):
- module_args = "CHECKMODE=True"
-
- # run the module and store the result
- result = self.runner._execute_module(conn, tmp, 'synchronize', module_args, complex_args=options, inject=inject)
-
- # reset the sudo property
- self.runner.become = self.original_become
-
- return result
-
diff --git a/v1/ansible/runner/action_plugins/template.py b/v1/ansible/runner/action_plugins/template.py
deleted file mode 100644
index 5c9be9e079..0000000000
--- a/v1/ansible/runner/action_plugins/template.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def get_checksum(self, conn, tmp, dest, inject, try_directory=False, source=None):
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if remote_checksum in ('0', '2', '3', '4'):
- # Note: 1 means the file is not present which is fine; template
- # will create it. 3 means directory was specified instead of file
- # which requires special handling
- if try_directory and remote_checksum == '3' and source:
- # If the user specified a directory name as their dest then we
- # have to check the checksum of dest/basename(src). This is
- # the same behaviour as cp foo.txt /var/tmp/ so users expect
- # it to work.
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, try_directory=False)
- if remote_checksum not in ('0', '2', '3', '4'):
- return remote_checksum
-
- result = dict(failed=True, msg="failed to checksum remote file."
- " Checksum error code: %s" % remote_checksum)
- return ReturnData(conn=conn, comm_ok=True, result=result)
-
- return remote_checksum
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # Expand any user home dir specification
- dest = self.runner._remote_expand_user(conn, dest, tmp)
-
- directory_prepended = False
- if dest.endswith("/"): # CCTODO: Fix path for Windows hosts.
- directory_prepended = True
- base = os.path.basename(source)
- dest = os.path.join(dest, base)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.get_checksum(conn, tmp, dest, inject, not directory_prepended, source=source)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links. When doing that, we have to set
- # original_basename to the template just in case the dest is
- # a directory.
- module_args = ''
- new_module_args = dict(
- src=None,
- original_basename=os.path.basename(source),
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- options.update(new_module_args)
- return self.runner._execute_module(conn, tmp, 'file', module_args, inject=inject, complex_args=options)
-
diff --git a/v1/ansible/runner/action_plugins/unarchive.py b/v1/ansible/runner/action_plugins/unarchive.py
deleted file mode 100644
index 312a2265c0..0000000000
--- a/v1/ansible/runner/action_plugins/unarchive.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-import pipes
-
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- dest = options.get('dest', None)
- copy = utils.boolean(options.get('copy', 'yes'))
- creates = options.get('creates', None)
-
- if source is None or dest is None:
- result = dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
-
- if creates:
- # do not run the command if the line contains creates=filename
- # and the filename already exists. This allows idempotence
- # of command executions.
- module_args_tmp = ""
- complex_args_tmp = dict(path=creates, get_md5=False, get_checksum=False)
- module_return = self.runner._execute_module(conn, tmp, 'stat', module_args_tmp, inject=inject,
- complex_args=complex_args_tmp, delete_remote_tmp=False)
- stat = module_return.result.get('stat', None)
- if stat and stat.get('exists', False):
- return ReturnData(
- conn=conn,
- comm_ok=True,
- result=dict(
- changed=False,
- msg=("skipped, since %s exists" % creates)
- )
- )
-
- dest = self.runner._remote_expand_user(conn, dest, tmp) # CCTODO: Fix path for Windows hosts.
- source = template.template(self.runner.basedir, os.path.expanduser(source), inject)
- if copy:
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
- if remote_checksum == '4':
- result = dict(failed=True, msg="python isn't present on the system. Unable to compute checksum")
- return ReturnData(conn=conn, result=result)
- if remote_checksum != '3':
- result = dict(failed=True, msg="dest '%s' must be an existing dir" % dest)
- return ReturnData(conn=conn, result=result)
-
- if copy:
- # transfer the file to a remote tmp location
- tmp_src = tmp + 'source'
- conn.put_file(source, tmp_src)
-
- # handle diff mode client side
- # handle check mode client side
- # fix file permissions when the copy is done as a different user
- if copy:
- if self.runner.become and self.runner.become_user != 'root':
- if not self.runner.noop_on_check(inject):
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- original_basename=os.path.basename(source),
- )
-
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
-
- module_args = utils.merge_module_args(module_args, new_module_args)
- else:
- module_args = "%s original_basename=%s" % (module_args, pipes.quote(os.path.basename(source)))
- # make sure checkmod is passed on correctly
- if self.runner.noop_on_check(inject):
- module_args += " CHECKMODE=True"
- return self.runner._execute_module(conn, tmp, 'unarchive', module_args, inject=inject, complex_args=complex_args)
diff --git a/v1/ansible/runner/action_plugins/win_copy.py b/v1/ansible/runner/action_plugins/win_copy.py
deleted file mode 100644
index a62dfb9985..0000000000
--- a/v1/ansible/runner/action_plugins/win_copy.py
+++ /dev/null
@@ -1,377 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-
-from ansible import utils
-import ansible.constants as C
-import ansible.utils.template as template
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-import json
-import stat
-import tempfile
-import pipes
-
-## fixes https://github.com/ansible/ansible/issues/3518
-# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-
-class ActionModule(object):
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for file transfer operations '''
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
- source = options.get('src', None)
- content = options.get('content', None)
- dest = options.get('dest', None)
- raw = utils.boolean(options.get('raw', 'no'))
- force = utils.boolean(options.get('force', 'yes'))
-
- # content with newlines is going to be escaped to safely load in yaml
- # now we need to unescape it so that the newlines are evaluated properly
- # when writing the file to disk
- if content:
- if isinstance(content, unicode):
- try:
- content = content.decode('unicode-escape')
- except UnicodeDecodeError:
- pass
-
- if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
- result=dict(failed=True, msg="src (or content) and dest are required")
- return ReturnData(conn=conn, result=result)
- elif (source is not None or 'first_available_file' in inject) and content is not None:
- result=dict(failed=True, msg="src and content are mutually exclusive")
- return ReturnData(conn=conn, result=result)
-
- # Check if the source ends with a "/"
- source_trailing_slash = False
- if source:
- source_trailing_slash = source.endswith("/")
-
- # Define content_tempfile in case we set it after finding content populated.
- content_tempfile = None
-
- # If content is defined make a temp file and write the content into it.
- if content is not None:
- try:
- # If content comes to us as a dict it should be decoded json.
- # We need to encode it back into a string to write it out.
- if type(content) is dict:
- content_tempfile = self._create_content_tempfile(json.dumps(content))
- else:
- content_tempfile = self._create_content_tempfile(content)
- source = content_tempfile
- except Exception, err:
- result = dict(failed=True, msg="could not write content temp file: %s" % err)
- return ReturnData(conn=conn, result=result)
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
- elif 'first_available_file' in inject:
- found = False
- for fn in inject.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- results = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, result=results)
- else:
- source = template.template(self.runner.basedir, source, inject)
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
- source_files = []
-
- # If source is a directory populate our list else source is a file and translate it to a tuple.
- if os.path.isdir(source):
- # Get the amount of spaces to remove to get the relative path.
- if source_trailing_slash:
- sz = len(source) + 1
- else:
- sz = len(source.rsplit('/', 1)[0]) + 1
-
- # Walk the directory and append the file tuples to source_files.
- for base_path, sub_folders, files in os.walk(source):
- for file in files:
- full_path = os.path.join(base_path, file)
- rel_path = full_path[sz:]
- source_files.append((full_path, rel_path))
-
- # If it's recursive copy, destination is always a dir,
- # explicitly mark it so (note - copy module relies on this).
- if not conn.shell.path_has_trailing_slash(dest):
- dest = conn.shell.join_path(dest, '')
- else:
- source_files.append((source, os.path.basename(source)))
-
- changed = False
- diffs = []
- module_result = {"changed": False}
-
- # A register for if we executed a module.
- # Used to cut down on command calls when not recursive.
- module_executed = False
-
- # Tell _execute_module to delete the file if there is one file.
- delete_remote_tmp = (len(source_files) == 1)
-
- # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
- if not delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- # expand any user home dir specifier
- dest = self.runner._remote_expand_user(conn, dest, tmp_path)
-
- for source_full, source_rel in source_files:
- # Generate a hash of the local file.
- local_checksum = utils.checksum(source_full)
-
- # If local_checksum is not defined we can't find the file so we should fail out.
- if local_checksum is None:
- result = dict(failed=True, msg="could not find src=%s" % source_full)
- return ReturnData(conn=conn, result=result)
-
- # This is kind of optimization - if user told us destination is
- # dir, do path manipulation right away, otherwise we still check
- # for dest being a dir via remote call below.
- if conn.shell.path_has_trailing_slash(dest):
- dest_file = conn.shell.join_path(dest, source_rel)
- else:
- dest_file = conn.shell.join_path(dest)
-
- # Attempt to get the remote checksum
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum == '3':
- # The remote_checksum was executed on a directory.
- if content is not None:
- # If source was defined as content remove the temporary file and fail out.
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- result = dict(failed=True, msg="can not use content with a dir as dest")
- return ReturnData(conn=conn, result=result)
- else:
- # Append the relative source location to the destination and retry remote_checksum.
- dest_file = conn.shell.join_path(dest, source_rel)
- remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)
-
- if remote_checksum != '1' and not force:
- # remote_file does not exist so continue to next iteration.
- continue
-
- if local_checksum != remote_checksum:
- # The checksums don't match and we will change or error out.
- changed = True
-
- # Create a tmp_path if missing only if this is not recursive.
- # If this is recursive we already have a tmp_path.
- if delete_remote_tmp:
- if "-tmp-" not in tmp_path:
- tmp_path = self.runner._make_tmp_path(conn)
-
- if self.runner.diff and not raw:
- diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
- else:
- diff = {}
-
- if self.runner.noop_on_check(inject):
- self._remove_tempfile_if_content_defined(content, content_tempfile)
- diffs.append(diff)
- changed = True
- module_result = dict(changed=True)
- continue
-
- # Define a remote directory that we will copy the file to.
- tmp_src = tmp_path + 'source'
-
- if not raw:
- conn.put_file(source_full, tmp_src)
- else:
- conn.put_file(source_full, dest_file)
-
- # We have copied the file remotely and no longer require our content_tempfile
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root' and not raw:
- self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)
-
- if raw:
- # Continue to next iteration if raw is defined.
- continue
-
- # Run the copy module
-
- # src and dest here come after original and override them
- # we pass dest only to make sure it includes trailing slash in case of recursive copy
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- else:
- # no need to transfer the file, already correct md5, but still need to call
- # the file module in case we want to change attributes
- self._remove_tempfile_if_content_defined(content, content_tempfile)
-
- if raw:
- # Continue to next iteration if raw is defined.
- # self.runner._remove_tmp_path(conn, tmp_path)
- continue
-
- tmp_src = tmp_path + source_rel
-
- # Build temporary module_args.
- new_module_args = dict(
- src=tmp_src,
- dest=dest,
- original_basename=source_rel
- )
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- if self.runner.no_log:
- new_module_args['NO_LOG'] = True
-
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- # Execute the file module.
- module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
- module_executed = True
-
- module_result = module_return.result
- if not module_result.get('checksum'):
- module_result['checksum'] = local_checksum
- if module_result.get('failed') == True:
- return module_return
- if module_result.get('changed') == True:
- changed = True
-
- # Delete tmp_path if we were recursive or if we did not execute a module.
- if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
- or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
- self.runner._remove_tmp_path(conn, tmp_path)
-
- # the file module returns the file path as 'path', but
- # the copy module uses 'dest', so add it if it's not there
- if 'path' in module_result and 'dest' not in module_result:
- module_result['dest'] = module_result['path']
-
- # TODO: Support detailed status/diff for multiple files
- if len(source_files) == 1:
- result = module_result
- else:
- result = dict(dest=dest, src=source, changed=changed)
- if len(diffs) == 1:
- return ReturnData(conn=conn, result=result, diff=diffs[0])
- else:
- return ReturnData(conn=conn, result=result)
-
- def _create_content_tempfile(self, content):
- ''' Create a tempfile containing defined content '''
- fd, content_tempfile = tempfile.mkstemp()
- f = os.fdopen(fd, 'w')
- try:
- f.write(content)
- except Exception, err:
- os.remove(content_tempfile)
- raise Exception(err)
- finally:
- f.close()
- return content_tempfile
-
- def _get_diff_data(self, conn, tmp, inject, destination, source):
- peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)
-
- if not peek_result.is_successful():
- return {}
-
- diff = {}
- if peek_result.result['state'] == 'absent':
- diff['before'] = ''
- elif peek_result.result['appears_binary']:
- diff['dst_binary'] = 1
- elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
- diff['before_header'] = destination
- diff['before'] = dest_contents
-
- src = open(source)
- src_contents = src.read(8192)
- st = os.stat(source)
- if "\x00" in src_contents:
- diff['src_binary'] = 1
- elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
- diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
- else:
- src.seek(0)
- diff['after_header'] = source
- diff['after'] = src.read()
-
- return diff
-
- def _remove_tempfile_if_content_defined(self, content, content_tempfile):
- if content is not None:
- os.remove(content_tempfile)
-
-
- def _result_key_merge(self, options, results):
- # add keys to file module results to mimic copy
- if 'path' in results.result and 'dest' not in results.result:
- results.result['dest'] = results.result['path']
- del results.result['path']
- return results
diff --git a/v1/ansible/runner/action_plugins/win_template.py b/v1/ansible/runner/action_plugins/win_template.py
deleted file mode 100644
index 7bde4bd510..0000000000
--- a/v1/ansible/runner/action_plugins/win_template.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import pipes
-from ansible.utils import template
-from ansible import utils
-from ansible import errors
-from ansible.runner.return_data import ReturnData
-import base64
-
-class ActionModule(object):
-
- TRANSFERS_FILES = True
-
- def __init__(self, runner):
- self.runner = runner
-
- def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
- ''' handler for template operations '''
-
- if not self.runner.is_playbook:
- raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks")
-
- # load up options
- options = {}
- if complex_args:
- options.update(complex_args)
- options.update(utils.parse_kv(module_args))
-
- source = options.get('src', None)
- dest = options.get('dest', None)
-
- if (source is None and 'first_available_file' not in inject) or dest is None:
- result = dict(failed=True, msg="src and dest are required")
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- # if we have first_available_file in our vars
- # look up the files and use the first one we find as src
-
- if 'first_available_file' in inject:
- found = False
- for fn in self.runner.module_vars.get('first_available_file'):
- fn_orig = fn
- fnt = template.template(self.runner.basedir, fn, inject)
- fnd = utils.path_dwim(self.runner.basedir, fnt)
- if not os.path.exists(fnd) and '_original_file' in inject:
- fnd = utils.path_dwim_relative(inject['_original_file'], 'templates', fnt, self.runner.basedir, check=False)
- if os.path.exists(fnd):
- source = fnd
- found = True
- break
- if not found:
- result = dict(failed=True, msg="could not find src in first_available_file list")
- return ReturnData(conn=conn, comm_ok=False, result=result)
- else:
- source = template.template(self.runner.basedir, source, inject)
-
- if '_original_file' in inject:
- source = utils.path_dwim_relative(inject['_original_file'], 'templates', source, self.runner.basedir)
- else:
- source = utils.path_dwim(self.runner.basedir, source)
-
- if conn.shell.path_has_trailing_slash(dest):
- base = os.path.basename(source)
- dest = conn.shell.join_path(dest, base)
-
- # template the source data locally & get ready to transfer
- try:
- resultant = template.template_from_file(self.runner.basedir, source, inject, vault_password=self.runner.vault_pass)
- except Exception, e:
- result = dict(failed=True, msg=type(e).__name__ + ": " + str(e))
- return ReturnData(conn=conn, comm_ok=False, result=result)
-
- local_checksum = utils.checksum_s(resultant)
- remote_checksum = self.runner._remote_checksum(conn, tmp, dest, inject)
-
- if local_checksum != remote_checksum:
-
- # template is different from the remote value
-
- # if showing diffs, we need to get the remote value
- dest_contents = ''
-
- if self.runner.diff:
- # using persist_files to keep the temp directory around to avoid needing to grab another
- dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % dest, inject=inject, persist_files=True)
- if 'content' in dest_result.result:
- dest_contents = dest_result.result['content']
- if dest_result.result['encoding'] == 'base64':
- dest_contents = base64.b64decode(dest_contents)
- else:
- raise Exception("unknown encoding, failed: %s" % dest_result.result)
-
- xfered = self.runner._transfer_str(conn, tmp, 'source', resultant)
-
- # fix file permissions when the copy is done as a different user
- if self.runner.become and self.runner.become_user != 'root':
- self.runner._remote_chmod(conn, 'a+r', xfered, tmp)
-
- # run the copy module
- new_module_args = dict(
- src=xfered,
- dest=dest,
- original_basename=os.path.basename(source),
- follow=True,
- )
- module_args_tmp = utils.merge_module_args(module_args, new_module_args)
-
- if self.runner.noop_on_check(inject):
- return ReturnData(conn=conn, comm_ok=True, result=dict(changed=True), diff=dict(before_header=dest, after_header=source, before=dest_contents, after=resultant))
- else:
- res = self.runner._execute_module(conn, tmp, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args)
- if res.result.get('changed', False):
- res.diff = dict(before=dest_contents, after=resultant)
- return res
- else:
- # when running the file module based on the template data, we do
- # not want the source filename (the name of the template) to be used,
- # since this would mess up links, so we clear the src param and tell
- # the module to follow links
- new_module_args = dict(
- src=None,
- follow=True,
- )
- # be sure to inject the check mode param into the module args and
- # rely on the file module to report its changed status
- if self.runner.noop_on_check(inject):
- new_module_args['CHECKMODE'] = True
- module_args = utils.merge_module_args(module_args, new_module_args)
- return self.runner._execute_module(conn, tmp, 'win_file', module_args, inject=inject, complex_args=complex_args)
-
diff --git a/v1/ansible/runner/connection.py b/v1/ansible/runner/connection.py
deleted file mode 100644
index 2ea484f70b..0000000000
--- a/v1/ansible/runner/connection.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# (c) 2012-2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-################################################
-
-import os
-import stat
-import errno
-
-from ansible import utils
-from ansible.errors import AnsibleError
-
-class Connector(object):
- ''' Handles abstract connections to remote hosts '''
-
- def __init__(self, runner):
- self.runner = runner
-
- def connect(self, host, port, user, password, transport, private_key_file, delegate_host):
- conn = utils.plugins.connection_loader.get(transport, self.runner, host, port, user=user, password=password, private_key_file=private_key_file)
- if conn is None:
- raise AnsibleError("unsupported connection type: %s" % transport)
- conn.delegate = delegate_host
- if private_key_file:
- # If private key is readable by user other than owner, flag an error
- st = None
- try:
- st = os.stat(private_key_file)
- except (IOError, OSError), e:
- if e.errno != errno.ENOENT: # file is missing, might be agent
- raise(e)
-
- if st is not None and st.st_mode & (stat.S_IRGRP | stat.S_IROTH):
- raise AnsibleError("private_key_file (%s) is group-readable or world-readable and thus insecure - "
- "you will probably get an SSH failure"
- % (private_key_file,))
- self.active = conn.connect()
- return self.active
diff --git a/v1/ansible/runner/connection_plugins/__init__.py b/v1/ansible/runner/connection_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/connection_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/connection_plugins/accelerate.py b/v1/ansible/runner/connection_plugins/accelerate.py
deleted file mode 100644
index 0627267c16..0000000000
--- a/v1/ansible/runner/connection_plugins/accelerate.py
+++ /dev/null
@@ -1,372 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import base64
-import socket
-import struct
-import time
-from ansible.callbacks import vvv, vvvv
-from ansible.errors import AnsibleError, AnsibleFileNotFound
-from ansible.runner.connection_plugins.ssh import Connection as SSHConnection
-from ansible.runner.connection_plugins.paramiko_ssh import Connection as ParamikoConnection
-from ansible import utils
-from ansible import constants
-
-# the chunk size to read and send, assuming mtu 1500 and
-# leaving room for base64 (+33%) encoding and header (8 bytes)
-# ((1400-8)/4)*3) = 1044
-# which leaves room for the TCP/IP header. We set this to a
-# multiple of the value to speed up file reads.
-CHUNK_SIZE=1044*20
-
-class Connection(object):
- ''' raw socket accelerated connection '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-
- self.runner = runner
- self.host = host
- self.context = None
- self.conn = None
- self.user = user
- self.key = utils.key_for_hostname(host)
- self.port = port[0]
- self.accport = port[1]
- self.is_connected = False
- self.has_pipelining = False
- self.become_methods_supported=['sudo']
-
- if not self.port:
- self.port = constants.DEFAULT_REMOTE_PORT
- elif not isinstance(self.port, int):
- self.port = int(self.port)
-
- if not self.accport:
- self.accport = constants.ACCELERATE_PORT
- elif not isinstance(self.accport, int):
- self.accport = int(self.accport)
-
- if self.runner.original_transport == "paramiko":
- self.ssh = ParamikoConnection(
- runner=self.runner,
- host=self.host,
- port=self.port,
- user=self.user,
- password=password,
- private_key_file=private_key_file
- )
- else:
- self.ssh = SSHConnection(
- runner=self.runner,
- host=self.host,
- port=self.port,
- user=self.user,
- password=password,
- private_key_file=private_key_file
- )
-
- if not getattr(self.ssh, 'shell', None):
- self.ssh.shell = utils.plugins.shell_loader.get('sh')
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- def _execute_accelerate_module(self):
- args = "password=%s port=%s minutes=%d debug=%d ipv6=%s" % (
- base64.b64encode(self.key.__str__()),
- str(self.accport),
- constants.ACCELERATE_DAEMON_TIMEOUT,
- int(utils.VERBOSITY),
- self.runner.accelerate_ipv6,
- )
- if constants.ACCELERATE_MULTI_KEY:
- args += " multi_key=yes"
- inject = dict(password=self.key)
- if getattr(self.runner, 'accelerate_inventory_host', False):
- inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.runner.accelerate_inventory_host))
- else:
- inject = utils.combine_vars(inject, self.runner.inventory.get_variables(self.host))
- vvvv("attempting to start up the accelerate daemon...")
- self.ssh.connect()
- tmp_path = self.runner._make_tmp_path(self.ssh)
- return self.runner._execute_module(self.ssh, tmp_path, 'accelerate', args, inject=inject)
-
- def connect(self, allow_ssh=True):
- ''' activates the connection object '''
-
- try:
- if not self.is_connected:
- wrong_user = False
- tries = 3
- self.conn = socket.socket()
- self.conn.settimeout(constants.ACCELERATE_CONNECT_TIMEOUT)
- vvvv("attempting connection to %s via the accelerated port %d" % (self.host,self.accport))
- while tries > 0:
- try:
- self.conn.connect((self.host,self.accport))
- break
- except socket.error:
- vvvv("connection to %s failed, retrying..." % self.host)
- time.sleep(0.1)
- tries -= 1
- if tries == 0:
- vvv("Could not connect via the accelerated connection, exceeded # of tries")
- raise AnsibleError("FAILED")
- elif wrong_user:
- vvv("Restarting daemon with a different remote_user")
- raise AnsibleError("WRONG_USER")
-
- self.conn.settimeout(constants.ACCELERATE_TIMEOUT)
- if not self.validate_user():
- # the accelerated daemon was started with a
- # different remote_user. The above command
- # should have caused the accelerate daemon to
- # shutdown, so we'll reconnect.
- wrong_user = True
-
- except AnsibleError, e:
- if allow_ssh:
- if "WRONG_USER" in e:
- vvv("Switching users, waiting for the daemon on %s to shutdown completely..." % self.host)
- time.sleep(5)
- vvv("Falling back to ssh to startup accelerated mode")
- res = self._execute_accelerate_module()
- if not res.is_successful():
- raise AnsibleError("Failed to launch the accelerated daemon on %s (reason: %s)" % (self.host,res.result.get('msg')))
- return self.connect(allow_ssh=False)
- else:
- raise AnsibleError("Failed to connect to %s:%s" % (self.host,self.accport))
- self.is_connected = True
- return self
-
- def send_data(self, data):
- packed_len = struct.pack('!Q',len(data))
- return self.conn.sendall(packed_len + data)
-
- def recv_data(self):
- header_len = 8 # size of a packed unsigned long long
- data = b""
- try:
- vvvv("%s: in recv_data(), waiting for the header" % self.host)
- while len(data) < header_len:
- d = self.conn.recv(header_len - len(data))
- if not d:
- vvvv("%s: received nothing, bailing out" % self.host)
- return None
- data += d
- vvvv("%s: got the header, unpacking" % self.host)
- data_len = struct.unpack('!Q',data[:header_len])[0]
- data = data[header_len:]
- vvvv("%s: data received so far (expecting %d): %d" % (self.host,data_len,len(data)))
- while len(data) < data_len:
- d = self.conn.recv(data_len - len(data))
- if not d:
- vvvv("%s: received nothing, bailing out" % self.host)
- return None
- vvvv("%s: received %d bytes" % (self.host, len(d)))
- data += d
- vvvv("%s: received all of the data, returning" % self.host)
- return data
- except socket.timeout:
- raise AnsibleError("timed out while waiting to receive data")
-
- def validate_user(self):
- '''
- Checks the remote uid of the accelerated daemon vs. the
- one specified for this play and will cause the accel
- daemon to exit if they don't match
- '''
-
- vvvv("%s: sending request for validate_user" % self.host)
- data = dict(
- mode='validate_user',
- username=self.user,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("Failed to send command to %s" % self.host)
-
- vvvv("%s: waiting for validate_user response" % self.host)
- while True:
- # we loop here while waiting for the response, because a
- # long running command may cause us to receive keepalive packets
- # ({"pong":"true"}) rather than the response we want.
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if "pong" in response:
- # it's a keepalive, go back to waiting
- vvvv("%s: received a keepalive packet" % self.host)
- continue
- else:
- vvvv("%s: received the validate_user response: %s" % (self.host, response))
- break
-
- if response.get('failed'):
- return False
- else:
- return response.get('rc') == 0
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- if executable == "":
- executable = constants.DEFAULT_EXECUTABLE
-
- if self.runner.become and sudoable:
- cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
-
- vvv("EXEC COMMAND %s" % cmd)
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("Failed to send command to %s" % self.host)
-
- while True:
- # we loop here while waiting for the response, because a
- # long running command may cause us to receive keepalive packets
- # ({"pong":"true"}) rather than the response we want.
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if "pong" in response:
- # it's a keepalive, go back to waiting
- vvvv("%s: received a keepalive packet" % self.host)
- continue
- else:
- vvvv("%s: received the response" % self.host)
- break
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
-
- fd = file(in_path, 'rb')
- fstat = os.stat(in_path)
- try:
- vvv("PUT file is %d bytes" % fstat.st_size)
- last = False
- while fd.tell() <= fstat.st_size and not last:
- vvvv("file position currently %ld, file size is %ld" % (fd.tell(), fstat.st_size))
- data = fd.read(CHUNK_SIZE)
- if fd.tell() >= fstat.st_size:
- last = True
- data = dict(mode='put', data=base64.b64encode(data), out_path=out_path, last=last)
- if self.runner.become:
- data['user'] = self.runner.become_user
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
-
- if self.send_data(data):
- raise AnsibleError("failed to send the file to %s" % self.host)
-
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- if response.get('failed',False):
- raise AnsibleError("failed to put the file in the requested location")
- finally:
- fd.close()
- vvvv("waiting for final response after PUT")
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- if response.get('failed',False):
- raise AnsibleError("failed to put the file in the requested location")
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("failed to initiate the file fetch with %s" % self.host)
-
- fh = open(out_path, "w")
- try:
- bytes = 0
- while True:
- response = self.recv_data()
- if not response:
- raise AnsibleError("Failed to get a response from %s" % self.host)
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- if response.get('failed', False):
- raise AnsibleError("Error during file fetch, aborting")
- out = base64.b64decode(response['data'])
- fh.write(out)
- bytes += len(out)
- # send an empty response back to signify we
- # received the last chunk without errors
- data = utils.jsonify(dict())
- data = utils.encrypt(self.key, data)
- if self.send_data(data):
- raise AnsibleError("failed to send ack during file fetch")
- if response.get('last', False):
- break
- finally:
- # we don't currently care about this final response,
- # we just receive it and drop it. It may be used at some
- # point in the future or we may just have the put/fetch
- # operations not send back a final response at all
- response = self.recv_data()
- vvv("FETCH wrote %d bytes to %s" % (bytes, out_path))
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.conn.close()
- except:
- pass
-
diff --git a/v1/ansible/runner/connection_plugins/chroot.py b/v1/ansible/runner/connection_plugins/chroot.py
deleted file mode 100644
index 3e96047287..0000000000
--- a/v1/ansible/runner/connection_plugins/chroot.py
+++ /dev/null
@@ -1,132 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local chroot based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.chroot = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("chroot connection requires running as root")
-
- # we're running as root on the local system so do some
- # trivial checks for ensuring 'host' is actually a chroot'able dir
- if not os.path.isdir(self.chroot):
- raise errors.AnsibleError("%s is not a directory" % self.chroot)
-
- chrootsh = os.path.join(self.chroot, 'bin/sh')
- if not utils.is_executable(chrootsh):
- raise errors.AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
-
- self.chroot_cmd = distutils.spawn.find_executable('chroot')
- if not self.chroot_cmd:
- raise errors.AnsibleError("chroot command not found in PATH")
-
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the chroot; nothing to do here '''
-
- vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We enter chroot as root so we ignore privlege escalation?
-
- if executable:
- local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.chroot_cmd, self.chroot, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.chroot)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to chroot '''
-
- if not out_path.startswith(os.path.sep):
- out_path = os.path.join(os.path.sep, out_path)
- normpath = os.path.normpath(out_path)
- out_path = os.path.join(self.chroot, normpath[1:])
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from chroot to local '''
-
- if not in_path.startswith(os.path.sep):
- in_path = os.path.join(os.path.sep, in_path)
- normpath = os.path.normpath(in_path)
- in_path = os.path.join(self.chroot, normpath[1:])
-
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/connection_plugins/fireball.py b/v1/ansible/runner/connection_plugins/fireball.py
deleted file mode 100644
index 562fc2eccf..0000000000
--- a/v1/ansible/runner/connection_plugins/fireball.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import json
-import os
-import base64
-from ansible.callbacks import vvv
-from ansible import utils
-from ansible import errors
-from ansible import constants
-
-HAVE_ZMQ=False
-
-try:
- import zmq
- HAVE_ZMQ=True
-except ImportError:
- pass
-
-class Connection(object):
- ''' ZeroMQ accelerated connection '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
-
- self.runner = runner
- self.has_pipelining = False
-
- # attempt to work around shared-memory funness
- if getattr(self.runner, 'aes_keys', None):
- utils.AES_KEYS = self.runner.aes_keys
-
- self.host = host
- self.key = utils.key_for_hostname(host)
- self.context = None
- self.socket = None
-
- if port is None:
- self.port = constants.ZEROMQ_PORT
- else:
- self.port = port
-
- self.become_methods_supported=[]
-
- def connect(self):
- ''' activates the connection object '''
-
- if not HAVE_ZMQ:
- raise errors.AnsibleError("zmq is not installed")
-
- # this is rough/temporary and will likely be optimized later ...
- self.context = zmq.Context()
- socket = self.context.socket(zmq.REQ)
- addr = "tcp://%s:%s" % (self.host, self.port)
- socket.connect(addr)
- self.socket = socket
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- vvv("EXEC COMMAND %s" % cmd)
-
- if self.runner.become and sudoable:
- raise errors.AnsibleError(
- "When using fireball, do not specify sudo or su to run your tasks. " +
- "Instead sudo the fireball action with sudo. " +
- "Task will communicate with the fireball already running in sudo mode."
- )
-
- data = dict(
- mode='command',
- cmd=cmd,
- tmp_path=tmp_path,
- executable=executable,
- )
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr',''))
-
- def put_file(self, in_path, out_path):
-
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- data = file(in_path).read()
- data = base64.b64encode(data)
-
- data = dict(mode='put', data=data, out_path=out_path)
- # TODO: support chunked file transfer
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
-
- # no meaningful response needed for this
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- data = dict(mode='fetch', in_path=in_path)
- data = utils.jsonify(data)
- data = utils.encrypt(self.key, data)
- self.socket.send(data)
-
- response = self.socket.recv()
- response = utils.decrypt(self.key, response)
- response = utils.parse_json(response)
- response = response['data']
- response = base64.b64decode(response)
-
- fh = open(out_path, "w")
- fh.write(response)
- fh.close()
-
- def close(self):
- ''' terminate the connection '''
- # Be a good citizen
- try:
- self.socket.close()
- self.context.term()
- except:
- pass
-
diff --git a/v1/ansible/runner/connection_plugins/funcd.py b/v1/ansible/runner/connection_plugins/funcd.py
deleted file mode 100644
index 92b7f53605..0000000000
--- a/v1/ansible/runner/connection_plugins/funcd.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# ---
-# The func transport permit to use ansible over func. For people who have already setup
-# func and that wish to play with ansible, this permit to move gradually to ansible
-# without having to redo completely the setup of the network.
-
-HAVE_FUNC=False
-try:
- import func.overlord.client as fc
- HAVE_FUNC=True
-except ImportError:
- pass
-
-import os
-from ansible.callbacks import vvv
-from ansible import errors
-import tempfile
-import shutil
-
-
-class Connection(object):
- ''' Func-based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- # port is unused, this go on func
- self.port = port
-
- def connect(self, port=None):
- if not HAVE_FUNC:
- raise errors.AnsibleError("func is not installed")
-
- self.client = fc.Client(self.host)
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False,
- executable='/bin/sh', in_data=None):
- ''' run a command on the remote minion '''
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # totally ignores privlege escalation
- vvv("EXEC %s" % (cmd), host=self.host)
- p = self.client.command.run(cmd)[self.host]
- return (p[0], '', p[1], p[2])
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
-
- out_path = self._normalize_path(out_path, '/')
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- self.client.local.copyfile.send(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
-
- in_path = self._normalize_path(in_path, '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- # need to use a tmp dir due to difference of semantic for getfile
- # ( who take a # directory as destination) and fetch_file, who
- # take a file directly
- tmpdir = tempfile.mkdtemp(prefix="func_ansible")
- self.client.local.getfile.get(in_path, tmpdir)
- shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
- out_path)
- shutil.rmtree(tmpdir)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/connection_plugins/jail.py b/v1/ansible/runner/connection_plugins/jail.py
deleted file mode 100644
index c7b61bc638..0000000000
--- a/v1/ansible/runner/connection_plugins/jail.py
+++ /dev/null
@@ -1,153 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local chroot based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def list_jails(self):
- p = subprocess.Popen([self.jls_cmd, '-q', 'name'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
-
- return stdout.split()
-
- def get_jail_path(self):
- p = subprocess.Popen([self.jls_cmd, '-j', self.jail, '-q', 'path'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- # remove \n
- return stdout[:-1]
-
-
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.jail = host
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("jail connection requires running as root")
-
- self.jls_cmd = self._search_executable('jls')
- self.jexec_cmd = self._search_executable('jexec')
-
- if not self.jail in self.list_jails():
- raise errors.AnsibleError("incorrect jail name %s" % self.jail)
-
-
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the chroot; nothing to do here '''
-
- vvv("THIS IS A LOCAL CHROOT DIR", host=self.jail)
-
- return self
-
- # a modifier
- def _generate_cmd(self, executable, cmd):
- if executable:
- local_cmd = [self.jexec_cmd, self.jail, executable, '-c', cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.jexec_cmd, self.jail, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # Ignores privilege escalation
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.jail)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to chroot '''
-
- out_path = self._normalize_path(out_path, self.get_jail_path())
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.jail)
-
- self._copy_file(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from chroot to local '''
-
- in_path = self._normalize_path(in_path, self.get_jail_path())
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.jail)
-
- self._copy_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/connection_plugins/libvirt_lxc.py b/v1/ansible/runner/connection_plugins/libvirt_lxc.py
deleted file mode 100644
index 832b78251c..0000000000
--- a/v1/ansible/runner/connection_plugins/libvirt_lxc.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# (c) 2013, Michael Scherer <misc@zarb.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import os
-import subprocess
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local lxc based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def _check_domain(self, domain):
- p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- if p.returncode:
- raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.lxc = host
-
- self.cmd = self._search_executable('virsh')
-
- self._check_domain(host)
-
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
- self.become_methods_supported=C.BECOME_METHODS
-
- def connect(self, port=None):
- ''' connect to the lxc; nothing to do here '''
-
- vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
-
- return self
-
- def _generate_cmd(self, executable, cmd):
- if executable:
- local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
- else:
- local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the chroot '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We ignore privilege escalation!
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to lxc '''
-
- out_path = self._normalize_path(out_path, '/')
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
-
- local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
-
- p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate(open(in_path,'rb').read())
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from lxc to local '''
-
- in_path = self._normalize_path(in_path, '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
-
- local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
- vvv("EXEC %s" % (local_cmd), host=self.lxc)
-
- p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = p.communicate()
- open(out_path,'wb').write(stdout)
-
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/connection_plugins/local.py b/v1/ansible/runner/connection_plugins/local.py
deleted file mode 100644
index beaeb1ae50..0000000000
--- a/v1/ansible/runner/connection_plugins/local.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import traceback
-import os
-import pipes
-import shutil
-import subprocess
-import select
-import fcntl
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv
-
-
-class Connection(object):
- ''' Local based connections '''
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.runner = runner
- self.host = host
- # port is unused, since this is local
- self.port = port
- self.has_pipelining = False
-
- # TODO: add su(needs tty), pbrun, pfexec
- self.become_methods_supported=['sudo']
-
- def connect(self, port=None):
- ''' connect to the local host; nothing to do here '''
-
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the local host '''
-
- # su requires to be run from a terminal, and therefore isn't supported here (yet?)
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- if self.runner.become and sudoable:
- local_cmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '-H', self.runner.become_exe)
- else:
- if executable:
- local_cmd = executable.split() + ['-c', cmd]
- else:
- local_cmd = cmd
- executable = executable.split()[0] if executable else None
-
- vvv("EXEC %s" % (local_cmd), host=self.host)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir, executable=executable,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- if self.runner.become and sudoable and self.runner.become_pass:
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- while success_key not in become_output:
-
- if prompt and become_output.endswith(prompt):
- break
- if utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout, p.stderr], self.runner.timeout)
- if p.stdout in rfd:
- chunk = p.stdout.read()
- elif p.stderr in rfd:
- chunk = p.stderr.read()
- else:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('timeout waiting for %s password prompt:\n' % self.runner.become_method + become_output)
- if not chunk:
- stdout, stderr = p.communicate()
- raise errors.AnsibleError('%s output closed while waiting for password prompt:\n' % self.runner.become_method + become_output)
- become_output += chunk
- if success_key not in become_output:
- p.stdin.write(self.runner.become_pass + '\n')
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to local '''
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- ''' fetch a file from local to local -- for copatibility '''
- self.put_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/connection_plugins/paramiko_ssh.py b/v1/ansible/runner/connection_plugins/paramiko_ssh.py
deleted file mode 100644
index 8eaf97c3f6..0000000000
--- a/v1/ansible/runner/connection_plugins/paramiko_ssh.py
+++ /dev/null
@@ -1,419 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# ---
-# The paramiko transport is provided because many distributions, in particular EL6 and before
-# do not support ControlPersist in their SSH implementations. This is needed on the Ansible
-# control machine to be reasonably efficient with connections. Thus paramiko is faster
-# for most users on these platforms. Users with ControlPersist capability can consider
-# using -c ssh or configuring the transport in ansible.cfg.
-
-import warnings
-import os
-import pipes
-import socket
-import random
-import logging
-import tempfile
-import traceback
-import fcntl
-import re
-import sys
-from termios import tcflush, TCIFLUSH
-from binascii import hexlify
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-from ansible import constants as C
-
-AUTHENTICITY_MSG="""
-paramiko: The authenticity of host '%s' can't be established.
-The %s key fingerprint is %s.
-Are you sure you want to continue connecting (yes/no)?
-"""
-
-# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
-HAVE_PARAMIKO=False
-with warnings.catch_warnings():
- warnings.simplefilter("ignore")
- try:
- import paramiko
- HAVE_PARAMIKO=True
- logging.getLogger("paramiko").setLevel(logging.WARNING)
- except ImportError:
- pass
-
-class MyAddPolicy(object):
- """
- Based on AutoAddPolicy in paramiko so we can determine when keys are added
- and also prompt for input.
-
- Policy for automatically adding the hostname and new host key to the
- local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
- """
-
- def __init__(self, runner):
- self.runner = runner
-
- def missing_host_key(self, client, hostname, key):
-
- if C.HOST_KEY_CHECKING:
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- old_stdin = sys.stdin
- sys.stdin = self.runner._new_stdin
- fingerprint = hexlify(key.get_fingerprint())
- ktype = key.get_name()
-
- # clear out any premature input on sys.stdin
- tcflush(sys.stdin, TCIFLUSH)
-
- inp = raw_input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
- sys.stdin = old_stdin
- if inp not in ['yes','y','']:
- fcntl.flock(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.flock(self.runner.process_lockfile, fcntl.LOCK_UN)
- raise errors.AnsibleError("host connection rejected by user")
-
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
-
- key._added_by_ansible_this_time = True
-
- # existing implementation below:
- client._host_keys.add(hostname, key.get_name(), key)
-
- # host keys are actually saved in close() function below
- # in order to control ordering.
-
-
-# keep connection objects on a per host basis to avoid repeated attempts to reconnect
-
-SSH_CONNECTION_CACHE = {}
-SFTP_CONNECTION_CACHE = {}
-
-class Connection(object):
- ''' SSH based connections with Paramiko '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
-
- self.ssh = None
- self.sftp = None
- self.runner = runner
- self.host = host
- self.port = port or 22
- self.user = user
- self.password = password
- self.private_key_file = private_key_file
- self.has_pipelining = False
-
- # TODO: add pbrun, pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
-
- def _cache_key(self):
- return "%s__%s__" % (self.host, self.user)
-
- def connect(self):
- cache_key = self._cache_key()
- if cache_key in SSH_CONNECTION_CACHE:
- self.ssh = SSH_CONNECTION_CACHE[cache_key]
- else:
- self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
- return self
-
- def _connect_uncached(self):
- ''' activates the connection object '''
-
- if not HAVE_PARAMIKO:
- raise errors.AnsibleError("paramiko is not installed")
-
- vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self.user, self.port, self.host), host=self.host)
-
- ssh = paramiko.SSHClient()
-
- self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
-
- if C.HOST_KEY_CHECKING:
- ssh.load_system_host_keys()
-
- ssh.set_missing_host_key_policy(MyAddPolicy(self.runner))
-
- allow_agent = True
-
- if self.password is not None:
- allow_agent = False
-
- try:
-
- if self.private_key_file:
- key_filename = os.path.expanduser(self.private_key_file)
- elif self.runner.private_key_file:
- key_filename = os.path.expanduser(self.runner.private_key_file)
- else:
- key_filename = None
- ssh.connect(self.host, username=self.user, allow_agent=allow_agent, look_for_keys=True,
- key_filename=key_filename, password=self.password,
- timeout=self.runner.timeout, port=self.port)
-
- except Exception, e:
-
- msg = str(e)
- if "PID check failed" in msg:
- raise errors.AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
- elif "Private key file is encrypted" in msg:
- msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
- self.user, self.host, self.port, msg)
- raise errors.AnsibleConnectionFailed(msg)
- else:
- raise errors.AnsibleConnectionFailed(msg)
-
- return ssh
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if self.runner.become and sudoable and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- bufsize = 4096
-
- try:
-
- self.ssh.get_transport().set_keepalive(5)
- chan = self.ssh.get_transport().open_session()
-
- except Exception, e:
-
- msg = "Failed to open session"
- if len(str(e)) > 0:
- msg += ": %s" % str(e)
- raise errors.AnsibleConnectionFailed(msg)
-
- no_prompt_out = ''
- no_prompt_err = ''
- if not (self.runner.become and sudoable):
-
- if executable:
- quoted_command = executable + ' -c ' + pipes.quote(cmd)
- else:
- quoted_command = cmd
- vvv("EXEC %s" % quoted_command, host=self.host)
- chan.exec_command(quoted_command)
-
- else:
-
- # sudo usually requires a PTY (cf. requiretty option), therefore
- # we give it one by default (pty=True in ansble.cfg), and we try
- # to initialise from the calling environment
- if C.PARAMIKO_PTY:
- chan.get_pty(term=os.getenv('TERM', 'vt100'),
- width=int(os.getenv('COLUMNS', 0)),
- height=int(os.getenv('LINES', 0)))
- if self.runner.become and sudoable:
- shcmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
-
- vvv("EXEC %s" % shcmd, host=self.host)
- become_output = ''
-
- try:
-
- chan.exec_command(shcmd)
-
- if self.runner.become_pass:
-
- while True:
-
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
- chunk = chan.recv(bufsize)
-
- if not chunk:
- if 'unknown user' in become_output:
- raise errors.AnsibleError(
- 'user %s does not exist' % become_user)
- else:
- raise errors.AnsibleError('ssh connection ' +
- 'closed waiting for password prompt')
- become_output += chunk
-
- if success_key not in become_output:
-
- if sudoable:
- chan.sendall(self.runner.become_pass + '\n')
- else:
- no_prompt_out += become_output
- no_prompt_err += become_output
-
- except socket.timeout:
-
- raise errors.AnsibleError('ssh timed out waiting for privilege escalation.\n' + become_output)
-
- stdout = ''.join(chan.makefile('rb', bufsize))
- stderr = ''.join(chan.makefile_stderr('rb', bufsize))
-
- return (chan.recv_exit_status(), '', no_prompt_out + stdout, no_prompt_out + stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
-
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
-
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
-
- try:
- self.sftp = self.ssh.open_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)" % e)
-
- try:
- self.sftp.put(in_path, out_path)
- except IOError:
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def _connect_sftp(self):
-
- cache_key = "%s__%s__" % (self.host, self.user)
- if cache_key in SFTP_CONNECTION_CACHE:
- return SFTP_CONNECTION_CACHE[cache_key]
- else:
- result = SFTP_CONNECTION_CACHE[cache_key] = self.connect().ssh.open_sftp()
- return result
-
- def fetch_file(self, in_path, out_path):
- ''' save a remote file to the specified path '''
-
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
-
- try:
- self.sftp = self._connect_sftp()
- except Exception, e:
- raise errors.AnsibleError("failed to open a SFTP connection (%s)", e)
-
- try:
- self.sftp.get(in_path, out_path)
- except IOError:
- raise errors.AnsibleError("failed to transfer file from %s" % in_path)
-
- def _any_keys_added(self):
-
- added_any = False
- for hostname, keys in self.ssh._host_keys.iteritems():
- for keytype, key in keys.iteritems():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- return True
- return False
-
- def _save_ssh_host_keys(self, filename):
- '''
- not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
- don't complain about it :)
- '''
-
- if not self._any_keys_added():
- return False
-
- path = os.path.expanduser("~/.ssh")
- if not os.path.exists(path):
- os.makedirs(path)
-
- f = open(filename, 'w')
-
- for hostname, keys in self.ssh._host_keys.iteritems():
-
- for keytype, key in keys.iteritems():
-
- # was f.write
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if not added_this_time:
- f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
-
- for hostname, keys in self.ssh._host_keys.iteritems():
-
- for keytype, key in keys.iteritems():
- added_this_time = getattr(key, '_added_by_ansible_this_time', False)
- if added_this_time:
- f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
-
- f.close()
-
- def close(self):
- ''' terminate the connection '''
-
- cache_key = self._cache_key()
- SSH_CONNECTION_CACHE.pop(cache_key, None)
- SFTP_CONNECTION_CACHE.pop(cache_key, None)
-
- if self.sftp is not None:
- self.sftp.close()
-
- if C.HOST_KEY_CHECKING and C.PARAMIKO_RECORD_HOST_KEYS and self._any_keys_added():
-
- # add any new SSH host keys -- warning -- this could be slow
- lockfile = self.keyfile.replace("known_hosts",".known_hosts.lock")
- dirname = os.path.dirname(self.keyfile)
- if not os.path.exists(dirname):
- os.makedirs(dirname)
-
- KEY_LOCK = open(lockfile, 'w')
- fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
-
- try:
- # just in case any were added recently
-
- self.ssh.load_system_host_keys()
- self.ssh._host_keys.update(self.ssh._system_host_keys)
-
- # gather information about the current key file, so
- # we can ensure the new file has the correct mode/owner
-
- key_dir = os.path.dirname(self.keyfile)
- key_stat = os.stat(self.keyfile)
-
- # Save the new keys to a temporary file and move it into place
- # rather than rewriting the file. We set delete=False because
- # the file will be moved into place rather than cleaned up.
-
- tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
- os.chmod(tmp_keyfile.name, key_stat.st_mode & 07777)
- os.chown(tmp_keyfile.name, key_stat.st_uid, key_stat.st_gid)
-
- self._save_ssh_host_keys(tmp_keyfile.name)
- tmp_keyfile.close()
-
- os.rename(tmp_keyfile.name, self.keyfile)
-
- except:
-
- # unable to save keys, including scenario when key was invalid
- # and caught earlier
- traceback.print_exc()
- pass
- fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
-
- self.ssh.close()
-
diff --git a/v1/ansible/runner/connection_plugins/ssh.py b/v1/ansible/runner/connection_plugins/ssh.py
deleted file mode 100644
index 036175f6a9..0000000000
--- a/v1/ansible/runner/connection_plugins/ssh.py
+++ /dev/null
@@ -1,460 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import re
-import subprocess
-import shlex
-import pipes
-import random
-import select
-import fcntl
-import hmac
-import pwd
-import gettext
-import pty
-from hashlib import sha1
-import ansible.constants as C
-from ansible.callbacks import vvv
-from ansible import errors
-from ansible import utils
-
-
-class Connection(object):
- ''' ssh based connections '''
-
- def __init__(self, runner, host, port, user, password, private_key_file, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.ipv6 = ':' in self.host
- self.port = port
- self.user = str(user)
- self.password = password
- self.private_key_file = private_key_file
- self.HASHED_KEY_MAGIC = "|1|"
- self.has_pipelining = True
-
- # TODO: add pbrun, pfexec
- self.become_methods_supported=['sudo', 'su', 'pbrun']
-
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- self.cp_dir = utils.prepare_writeable_dir('$HOME/.ansible/cp',mode=0700)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
-
- def connect(self):
- ''' connect to the remote host '''
-
- vvv("ESTABLISH CONNECTION FOR USER: %s" % self.user, host=self.host)
-
- self.common_args = []
- extra_args = C.ANSIBLE_SSH_ARGS
- if extra_args is not None:
- # make sure there is no empty string added as this can produce weird errors
- self.common_args += [x.strip() for x in shlex.split(extra_args) if x.strip()]
- else:
- self.common_args += ["-o", "ControlMaster=auto",
- "-o", "ControlPersist=60s",
- "-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- cp_in_use = False
- cp_path_set = False
- for arg in self.common_args:
- if "ControlPersist" in arg:
- cp_in_use = True
- if "ControlPath" in arg:
- cp_path_set = True
-
- if cp_in_use and not cp_path_set:
- self.common_args += ["-o", "ControlPath=\"%s\"" % (C.ANSIBLE_SSH_CONTROL_PATH % dict(directory=self.cp_dir))]
-
- if not C.HOST_KEY_CHECKING:
- self.common_args += ["-o", "StrictHostKeyChecking=no"]
-
- if self.port is not None:
- self.common_args += ["-o", "Port=%d" % (self.port)]
- if self.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.private_key_file)]
- elif self.runner.private_key_file is not None:
- self.common_args += ["-o", "IdentityFile=\"%s\"" % os.path.expanduser(self.runner.private_key_file)]
- if self.password:
- self.common_args += ["-o", "GSSAPIAuthentication=no",
- "-o", "PubkeyAuthentication=no"]
- else:
- self.common_args += ["-o", "KbdInteractiveAuthentication=no",
- "-o", "PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey",
- "-o", "PasswordAuthentication=no"]
- if self.user != pwd.getpwuid(os.geteuid())[0]:
- self.common_args += ["-o", "User="+self.user]
- self.common_args += ["-o", "ConnectTimeout=%d" % self.runner.timeout]
-
- return self
-
- def _run(self, cmd, indata):
- if indata:
- # do not use pseudo-pty
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
- else:
- # try to use upseudo-pty
- try:
- # Make sure stdin is a proper (pseudo) pty to avoid: tcgetattr errors
- master, slave = pty.openpty()
- p = subprocess.Popen(cmd, stdin=slave,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = os.fdopen(master, 'w', 0)
- os.close(slave)
- except:
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdin = p.stdin
-
- return (p, stdin)
-
- def _password_cmd(self):
- if self.password:
- try:
- p = subprocess.Popen(["sshpass"], stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- p.communicate()
- except OSError:
- raise errors.AnsibleError("to use the 'ssh' connection type with passwords, you must install the sshpass program")
- (self.rfd, self.wfd) = os.pipe()
- return ["sshpass", "-d%d" % self.rfd]
- return []
-
- def _send_password(self):
- if self.password:
- os.close(self.rfd)
- os.write(self.wfd, "%s\n" % self.password)
- os.close(self.wfd)
-
- def _communicate(self, p, stdin, indata, sudoable=False, prompt=None):
- fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
- # We can't use p.communicate here because the ControlMaster may have stdout open as well
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- if indata:
- try:
- stdin.write(indata)
- stdin.close()
- except:
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- # Read stdout/stderr from process
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- # fail early if the become password is wrong
- if self.runner.become and sudoable:
- incorrect_password = gettext.dgettext(self.runner.become_method, C.BECOME_ERROR_STRINGS[self.runner.become_method])
-
- if prompt:
- if self.runner.become_pass:
- if stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if stdout.endswith(prompt):
- raise errors.AnsibleError('Missing become password')
- elif stdout.endswith("%s\r\n%s" % (incorrect_password, prompt)):
- raise errors.AnsibleError('Incorrect become password')
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), 9000)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), 9000)
- stderr += dat
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if no pipes are left to read or
- # the pipes are completely read and
- # the process is terminated
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # No pipes are left to read but process is not yet terminated
- # Only then it is safe to wait for the process to be finished
- # NOTE: Actually p.poll() is always None here if rpipes is empty
- elif not rpipes and p.poll() == None:
- p.wait()
- # The process is terminated. Since no pipes to read from are
- # left, there is no need to call select() again.
- break
- # close stdin after process is terminated and stdout/stderr are read
- # completely (see also issue #848)
- stdin.close()
- return (p.returncode, stdout, stderr)
-
- def not_in_host_file(self, host):
- if 'USER' in os.environ:
- user_host_file = os.path.expandvars("~${USER}/.ssh/known_hosts")
- else:
- user_host_file = "~/.ssh/known_hosts"
- user_host_file = os.path.expanduser(user_host_file)
-
- host_file_list = []
- host_file_list.append(user_host_file)
- host_file_list.append("/etc/ssh/ssh_known_hosts")
- host_file_list.append("/etc/ssh/ssh_known_hosts2")
-
- hfiles_not_found = 0
- for hf in host_file_list:
- if not os.path.exists(hf):
- hfiles_not_found += 1
- continue
- try:
- host_fh = open(hf)
- except IOError, e:
- hfiles_not_found += 1
- continue
- else:
- data = host_fh.read()
- host_fh.close()
-
- for line in data.split("\n"):
- line = line.strip()
- if line is None or " " not in line:
- continue
- tokens = line.split()
- if not tokens:
- continue
- if tokens[0].find(self.HASHED_KEY_MAGIC) == 0:
- # this is a hashed known host entry
- try:
- (kn_salt,kn_host) = tokens[0][len(self.HASHED_KEY_MAGIC):].split("|",2)
- hash = hmac.new(kn_salt.decode('base64'), digestmod=sha1)
- hash.update(host)
- if hash.digest() == kn_host.decode('base64'):
- return False
- except:
- # invalid hashed host key, skip it
- continue
- else:
- # standard host file entry
- if host in tokens[0]:
- return False
-
- if (hfiles_not_found == len(host_file_list)):
- vvv("EXEC previous known host file not found for %s" % host)
- return True
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable='/bin/sh', in_data=None):
- ''' run a command on the remote host '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- ssh_cmd = self._password_cmd()
- ssh_cmd += ["ssh", "-C"]
- if not in_data:
- # we can only use tty when we are not pipelining the modules. piping data into /usr/bin/python
- # inside a tty automatically invokes the python interactive-mode but the modules are not
- # compatible with the interactive-mode ("unexpected indent" mainly because of empty lines)
- ssh_cmd += ["-tt"]
- if utils.VERBOSITY > 3:
- ssh_cmd += ["-vvv"]
- else:
- if self.runner.module_name == 'raw':
- ssh_cmd += ["-q"]
- else:
- ssh_cmd += ["-v"]
- ssh_cmd += self.common_args
-
- if self.ipv6:
- ssh_cmd += ['-6']
- ssh_cmd += [self.host]
-
- if self.runner.become and sudoable:
- becomecmd, prompt, success_key = utils.make_become_cmd(cmd, become_user, executable, self.runner.become_method, '', self.runner.become_exe)
- ssh_cmd.append(becomecmd)
- else:
- prompt = None
- if executable:
- ssh_cmd.append(executable + ' -c ' + pipes.quote(cmd))
- else:
- ssh_cmd.append(cmd)
-
- vvv("EXEC %s" % ' '.join(ssh_cmd), host=self.host)
-
- not_in_host_file = self.not_in_host_file(self.host)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_EX)
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_EX)
-
- # create process
- (p, stdin) = self._run(ssh_cmd, in_data)
-
- self._send_password()
-
- no_prompt_out = ''
- no_prompt_err = ''
- if sudoable and self.runner.become and self.runner.become_pass:
- # several cases are handled for escalated privileges with password
- # * NOPASSWD (tty & no-tty): detect success_key on stdout
- # * without NOPASSWD:
- # * detect prompt on stdout (tty)
- # * detect prompt on stderr (no-tty)
- fcntl.fcntl(p.stdout, fcntl.F_SETFL,
- fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
- fcntl.fcntl(p.stderr, fcntl.F_SETFL,
- fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
- become_output = ''
- become_errput = ''
-
- while True:
- if success_key in become_output or \
- (prompt and become_output.endswith(prompt)) or \
- utils.su_prompts.check_su_prompt(become_output):
- break
-
- rfd, wfd, efd = select.select([p.stdout, p.stderr], [],
- [p.stdout], self.runner.timeout)
- if p.stderr in rfd:
- chunk = p.stderr.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for a privilege escalation password prompt')
- become_errput += chunk
- incorrect_password = gettext.dgettext(
- "become", "Sorry, try again.")
- if become_errput.strip().endswith("%s%s" % (prompt, incorrect_password)):
- raise errors.AnsibleError('Incorrect become password')
- elif prompt and become_errput.endswith(prompt):
- stdin.write(self.runner.become_pass + '\n')
-
- if p.stdout in rfd:
- chunk = p.stdout.read()
- if not chunk:
- raise errors.AnsibleError('ssh connection closed waiting for %s password prompt' % self.runner.become_method)
- become_output += chunk
-
- if not rfd:
- # timeout. wrap up process communication
- stdout = p.communicate()
- raise errors.AnsibleError('ssh connection error while waiting for %s password prompt' % self.runner.become_method)
-
- if success_key in become_output:
- no_prompt_out += become_output
- no_prompt_err += become_errput
- elif sudoable:
- stdin.write(self.runner.become_pass + '\n')
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, in_data, sudoable=sudoable, prompt=prompt)
-
- if C.HOST_KEY_CHECKING and not_in_host_file:
- # lock around the initial SSH connectivity so the user prompt about whether to add
- # the host to known hosts is not intermingled with multiprocess output.
- fcntl.lockf(self.runner.output_lockfile, fcntl.LOCK_UN)
- fcntl.lockf(self.runner.process_lockfile, fcntl.LOCK_UN)
- controlpersisterror = 'Bad configuration option: ControlPersist' in stderr or \
- 'unknown configuration option: ControlPersist' in stderr
-
- if C.HOST_KEY_CHECKING:
- if ssh_cmd[0] == "sshpass" and p.returncode == 6:
- raise errors.AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
-
- if p.returncode != 0 and controlpersisterror:
- raise errors.AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
- if p.returncode == 255 and (in_data or self.runner.module_name == 'raw'):
- raise errors.AnsibleError('SSH Error: data could not be sent to the remote host. Make sure this host can be reached over ssh')
- if p.returncode == 255:
- ip = None
- port = None
- for line in stderr.splitlines():
- match = re.search(
- 'Connecting to .*\[(\d+\.\d+\.\d+\.\d+)\] port (\d+)',
- line)
- if match:
- ip = match.group(1)
- port = match.group(2)
- if 'UNPROTECTED PRIVATE KEY FILE' in stderr:
- lines = [line for line in stderr.splitlines()
- if 'ignore key:' in line]
- else:
- lines = stderr.splitlines()[-1:]
- if ip and port:
- lines.append(' while connecting to %s:%s' % (ip, port))
- lines.append(
- 'It is sometimes useful to re-run the command using -vvvv, '
- 'which prints SSH debug output to help diagnose the issue.')
- raise errors.AnsibleError('SSH Error: %s' % '\n'.join(lines))
-
- return (p.returncode, '', no_prompt_out + stdout, no_prompt_err + stderr)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to remote '''
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [in_path,host + ":" + pipes.quote(out_path)]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "put %s %s\n" % (pipes.quote(in_path), pipes.quote(out_path))
-
- (p, stdin) = self._run(cmd, indata)
-
- self._send_password()
-
- (returncode, stdout, stderr) = self._communicate(p, stdin, indata)
-
- if returncode != 0:
- raise errors.AnsibleError("failed to transfer file to %s:\n%s\n%s" % (out_path, stdout, stderr))
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from remote to local '''
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- cmd = self._password_cmd()
-
- host = self.host
- if self.ipv6:
- host = '[%s]' % host
-
- if C.DEFAULT_SCP_IF_SSH:
- cmd += ["scp"] + self.common_args
- cmd += [host + ":" + in_path, out_path]
- indata = None
- else:
- cmd += ["sftp"] + self.common_args + [host]
- indata = "get %s %s\n" % (in_path, out_path)
-
- p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- self._send_password()
- stdout, stderr = p.communicate(indata)
-
- if p.returncode != 0:
- raise errors.AnsibleError("failed to transfer file from %s:\n%s\n%s" % (in_path, stdout, stderr))
-
- def close(self):
- ''' not applicable since we're executing openssh binaries '''
- pass
-
diff --git a/v1/ansible/runner/connection_plugins/winrm.py b/v1/ansible/runner/connection_plugins/winrm.py
deleted file mode 100644
index b41a74c8e1..0000000000
--- a/v1/ansible/runner/connection_plugins/winrm.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import base64
-import os
-import re
-import shlex
-import traceback
-import urlparse
-from ansible import errors
-from ansible import utils
-from ansible.callbacks import vvv, vvvv, verbose
-from ansible.runner.shell_plugins import powershell
-
-try:
- from winrm import Response
- from winrm.exceptions import WinRMTransportError
- from winrm.protocol import Protocol
-except ImportError:
- raise errors.AnsibleError("winrm is not installed")
-
-HAVE_KERBEROS = False
-try:
- import kerberos
- HAVE_KERBEROS = True
-except ImportError:
- pass
-
-def vvvvv(msg, host=None):
- verbose(msg, host=host, caplevel=4)
-
-class Connection(object):
- '''WinRM connections over HTTP/HTTPS.'''
-
- transport_schemes = {
- 'http': [('kerberos', 'http'), ('plaintext', 'http'), ('plaintext', 'https')],
- 'https': [('kerberos', 'https'), ('plaintext', 'https')],
- }
-
- def __init__(self, runner, host, port, user, password, *args, **kwargs):
- self.runner = runner
- self.host = host
- self.port = port
- self.user = user
- self.password = password
- self.has_pipelining = False
- self.default_shell = 'powershell'
- self.default_suffixes = ['.ps1', '']
- self.protocol = None
- self.shell_id = None
- self.delegate = None
-
- # Add runas support
- #self.become_methods_supported=['runas']
- self.become_methods_supported=[]
-
- def _winrm_connect(self):
- '''
- Establish a WinRM connection over HTTP/HTTPS.
- '''
- port = self.port or 5986
- vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
- (self.user, port, self.host), host=self.host)
- netloc = '%s:%d' % (self.host, port)
- exc = None
- for transport, scheme in self.transport_schemes['http' if port == 5985 else 'https']:
- if transport == 'kerberos' and (not HAVE_KERBEROS or not '@' in self.user):
- continue
- if transport == 'kerberos':
- realm = self.user.split('@', 1)[1].strip() or None
- else:
- realm = None
- endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
- vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
- host=self.host)
- protocol = Protocol(endpoint, transport=transport,
- username=self.user, password=self.password,
- realm=realm)
- try:
- protocol.send_message('')
- return protocol
- except WinRMTransportError, exc:
- err_msg = str(exc)
- if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
- raise errors.AnsibleError("the connection attempt timed out")
- m = re.search(r'Code\s+?(\d{3})', err_msg)
- if m:
- code = int(m.groups()[0])
- if code == 401:
- raise errors.AnsibleError("the username/password specified for this server was incorrect")
- elif code == 411:
- return protocol
- vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
- continue
- if exc:
- raise errors.AnsibleError(str(exc))
-
- def _winrm_exec(self, command, args=(), from_exec=False):
- if from_exec:
- vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
- else:
- vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
- if not self.protocol:
- self.protocol = self._winrm_connect()
- if not self.shell_id:
- self.shell_id = self.protocol.open_shell()
- command_id = None
- try:
- command_id = self.protocol.run_command(self.shell_id, command, args)
- response = Response(self.protocol.get_command_output(self.shell_id, command_id))
- if from_exec:
- vvvv('WINRM RESULT %r' % response, host=self.host)
- else:
- vvvvv('WINRM RESULT %r' % response, host=self.host)
- vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
- vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
- return response
- finally:
- if command_id:
- self.protocol.cleanup_command(self.shell_id, command_id)
-
- def connect(self):
- if not self.protocol:
- self.protocol = self._winrm_connect()
- return self
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
- if '-EncodedCommand' in cmd_parts:
- encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
- decoded_cmd = base64.b64decode(encoded_cmd)
- vvv("EXEC %s" % decoded_cmd, host=self.host)
- else:
- vvv("EXEC %s" % cmd, host=self.host)
- # For script/raw support.
- if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
- script = powershell._build_file_cmd(cmd_parts, quote_args=False)
- cmd_parts = powershell._encode_script(script, as_list=True)
- try:
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
- except Exception, e:
- traceback.print_exc()
- raise errors.AnsibleError("failed to exec cmd %s" % cmd)
- return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
-
- def put_file(self, in_path, out_path):
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- with open(in_path) as in_file:
- in_size = os.path.getsize(in_path)
- script_template = '''
- $s = [System.IO.File]::OpenWrite("%s");
- [void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
- $b = [System.Convert]::FromBase64String("%s");
- [void]$s.Write($b, 0, $b.length);
- [void]$s.SetLength(%d);
- [void]$s.Close();
- '''
- # Determine max size of data we can pass per command.
- script = script_template % (powershell._escape(out_path), in_size, '', in_size)
- cmd = powershell._encode_script(script)
- # Encode script with no data, subtract its length from 8190 (max
- # windows command length), divide by 2.67 (UTF16LE base64 command
- # encoding), then by 1.35 again (data base64 encoding).
- buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
- for offset in xrange(0, in_size, buffer_size):
- try:
- out_data = in_file.read(buffer_size)
- if offset == 0:
- if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
- out_path = out_path + '.ps1'
- b64_data = base64.b64encode(out_data)
- script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
- vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
- if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
- except Exception:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def fetch_file(self, in_path, out_path):
- out_path = out_path.replace('\\', '/')
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
- buffer_size = 2**19 # 0.5MB chunks
- if not os.path.exists(os.path.dirname(out_path)):
- os.makedirs(os.path.dirname(out_path))
- out_file = None
- try:
- offset = 0
- while True:
- try:
- script = '''
- If (Test-Path -PathType Leaf "%(path)s")
- {
- $stream = [System.IO.File]::OpenRead("%(path)s");
- $stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
- $buffer = New-Object Byte[] %(buffer_size)d;
- $bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
- $bytes = $buffer[0..($bytesRead-1)];
- [System.Convert]::ToBase64String($bytes);
- $stream.Close() | Out-Null;
- }
- ElseIf (Test-Path -PathType Container "%(path)s")
- {
- Write-Host "[DIR]";
- }
- Else
- {
- Write-Error "%(path)s does not exist";
- Exit 1;
- }
- ''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
- vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
- cmd_parts = powershell._encode_script(script, as_list=True)
- result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
- if result.status_code != 0:
- raise IOError(result.std_err.encode('utf-8'))
- if result.std_out.strip() == '[DIR]':
- data = None
- else:
- data = base64.b64decode(result.std_out.strip())
- if data is None:
- if not os.path.exists(out_path):
- os.makedirs(out_path)
- break
- else:
- if not out_file:
- # If out_path is a directory and we're expecting a file, bail out now.
- if os.path.isdir(out_path):
- break
- out_file = open(out_path, 'wb')
- out_file.write(data)
- if len(data) < buffer_size:
- break
- offset += len(data)
- except Exception:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
- finally:
- if out_file:
- out_file.close()
-
- def close(self):
- if self.protocol and self.shell_id:
- self.protocol.close_shell(self.shell_id)
- self.shell_id = None
diff --git a/v1/ansible/runner/connection_plugins/zone.py b/v1/ansible/runner/connection_plugins/zone.py
deleted file mode 100644
index fd3242cb6e..0000000000
--- a/v1/ansible/runner/connection_plugins/zone.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# and chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-# and jail.py (c) 2013, Michael Scherer <misc@zarb.org>
-# (c) 2015, Dagobert Michelsen <dam@baltic-online.de>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import distutils.spawn
-import traceback
-import os
-import shutil
-import subprocess
-from subprocess import Popen,PIPE
-from ansible import errors
-from ansible.callbacks import vvv
-import ansible.constants as C
-
-class Connection(object):
- ''' Local zone based connections '''
-
- def _search_executable(self, executable):
- cmd = distutils.spawn.find_executable(executable)
- if not cmd:
- raise errors.AnsibleError("%s command not found in PATH") % executable
- return cmd
-
- def list_zones(self):
- pipe = subprocess.Popen([self.zoneadm_cmd, 'list', '-ip'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- #stdout, stderr = p.communicate()
- zones = []
- for l in pipe.stdout.readlines():
- # 1:work:running:/zones/work:3126dc59-9a07-4829-cde9-a816e4c5040e:native:shared
- s = l.split(':')
- if s[1] != 'global':
- zones.append(s[1])
-
- return zones
-
- def get_zone_path(self):
- #solaris10vm# zoneadm -z cswbuild list -p
- #-:cswbuild:installed:/zones/cswbuild:479f3c4b-d0c6-e97b-cd04-fd58f2c0238e:native:shared
- pipe = subprocess.Popen([self.zoneadm_cmd, '-z', self.zone, 'list', '-p'],
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- #stdout, stderr = p.communicate()
- path = pipe.stdout.readlines()[0].split(':')[3]
- return path + '/root'
-
- def __init__(self, runner, host, port, *args, **kwargs):
- self.zone = host
- self.runner = runner
- self.host = host
- self.has_pipelining = False
- self.become_methods_supported=C.BECOME_METHODS
-
- if os.geteuid() != 0:
- raise errors.AnsibleError("zone connection requires running as root")
-
- self.zoneadm_cmd = self._search_executable('zoneadm')
- self.zlogin_cmd = self._search_executable('zlogin')
-
- if not self.zone in self.list_zones():
- raise errors.AnsibleError("incorrect zone name %s" % self.zone)
-
-
- self.host = host
- # port is unused, since this is local
- self.port = port
-
- def connect(self, port=None):
- ''' connect to the zone; nothing to do here '''
-
- vvv("THIS IS A LOCAL ZONE DIR", host=self.zone)
-
- return self
-
- # a modifier
- def _generate_cmd(self, executable, cmd):
- if executable:
- local_cmd = [self.zlogin_cmd, self.zone, executable, cmd]
- else:
- local_cmd = '%s "%s" %s' % (self.zlogin_cmd, self.zone, cmd)
- return local_cmd
-
- def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False, executable=None, in_data=None):
- ''' run a command on the zone '''
-
- if sudoable and self.runner.become and self.runner.become_method not in self.become_methods_supported:
- raise errors.AnsibleError("Internal Error: this module does not support running commands via %s" % self.runner.become_method)
-
- if in_data:
- raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
-
- # We happily ignore privilege escalation
- if executable == '/bin/sh':
- executable = None
- local_cmd = self._generate_cmd(executable, cmd)
-
- vvv("EXEC %s" % (local_cmd), host=self.zone)
- p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
- cwd=self.runner.basedir,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout, stderr = p.communicate()
- return (p.returncode, '', stdout, stderr)
-
- def _normalize_path(self, path, prefix):
- if not path.startswith(os.path.sep):
- path = os.path.join(os.path.sep, path)
- normpath = os.path.normpath(path)
- return os.path.join(prefix, normpath[1:])
-
- def _copy_file(self, in_path, out_path):
- if not os.path.exists(in_path):
- raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
- try:
- shutil.copyfile(in_path, out_path)
- except shutil.Error:
- traceback.print_exc()
- raise errors.AnsibleError("failed to copy: %s and %s are the same" % (in_path, out_path))
- except IOError:
- traceback.print_exc()
- raise errors.AnsibleError("failed to transfer file to %s" % out_path)
-
- def put_file(self, in_path, out_path):
- ''' transfer a file from local to zone '''
-
- out_path = self._normalize_path(out_path, self.get_zone_path())
- vvv("PUT %s TO %s" % (in_path, out_path), host=self.zone)
-
- self._copy_file(in_path, out_path)
-
- def fetch_file(self, in_path, out_path):
- ''' fetch a file from zone to local '''
-
- in_path = self._normalize_path(in_path, self.get_zone_path())
- vvv("FETCH %s TO %s" % (in_path, out_path), host=self.zone)
-
- self._copy_file(in_path, out_path)
-
- def close(self):
- ''' terminate the connection; nothing to do here '''
- pass
diff --git a/v1/ansible/runner/filter_plugins/__init__.py b/v1/ansible/runner/filter_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/filter_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/filter_plugins/core.py b/v1/ansible/runner/filter_plugins/core.py
deleted file mode 100644
index f81da6f894..0000000000
--- a/v1/ansible/runner/filter_plugins/core.py
+++ /dev/null
@@ -1,431 +0,0 @@
-# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import sys
-import base64
-import json
-import os.path
-import types
-import pipes
-import glob
-import re
-import crypt
-import hashlib
-import string
-from functools import partial
-import operator as py_operator
-from random import SystemRandom, shuffle
-import uuid
-
-import yaml
-from jinja2.filters import environmentfilter
-from distutils.version import LooseVersion, StrictVersion
-
-from ansible import errors
-from ansible.utils.hashing import md5s, checksum_s
-from ansible.utils.unicode import unicode_wrap, to_unicode
-
-
-UUID_NAMESPACE_ANSIBLE = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
-
-
-def to_nice_yaml(*a, **kw):
- '''Make verbose, human readable yaml'''
- transformed = yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)
- return to_unicode(transformed)
-
-def to_json(a, *args, **kw):
- ''' Convert the value to JSON '''
- return json.dumps(a, *args, **kw)
-
-def to_nice_json(a, *args, **kw):
- '''Make verbose, human readable JSON'''
- # python-2.6's json encoder is buggy (can't encode hostvars)
- if sys.version_info < (2, 7):
- try:
- import simplejson
- except ImportError:
- pass
- else:
- try:
- major = int(simplejson.__version__.split('.')[0])
- except:
- pass
- else:
- if major >= 2:
- return simplejson.dumps(a, indent=4, sort_keys=True, *args, **kw)
- # Fallback to the to_json filter
- return to_json(a, *args, **kw)
- return json.dumps(a, indent=4, sort_keys=True, *args, **kw)
-
-def failed(*a, **kw):
- ''' Test if task result yields failed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|failed expects a dictionary")
- rc = item.get('rc',0)
- failed = item.get('failed',False)
- if rc != 0 or failed:
- return True
- else:
- return False
-
-def success(*a, **kw):
- ''' Test if task result yields success '''
- return not failed(*a, **kw)
-
-def changed(*a, **kw):
- ''' Test if task result yields changed '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|changed expects a dictionary")
- if not 'changed' in item:
- changed = False
- if ('results' in item # some modules return a 'results' key
- and type(item['results']) == list
- and type(item['results'][0]) == dict):
- for result in item['results']:
- changed = changed or result.get('changed', False)
- else:
- changed = item.get('changed', False)
- return changed
-
-def skipped(*a, **kw):
- ''' Test if task result yields skipped '''
- item = a[0]
- if type(item) != dict:
- raise errors.AnsibleFilterError("|skipped expects a dictionary")
- skipped = item.get('skipped', False)
- return skipped
-
-def mandatory(a):
- ''' Make a variable mandatory '''
- try:
- a
- except NameError:
- raise errors.AnsibleFilterError('Mandatory variable not defined.')
- else:
- return a
-
-def bool(a):
- ''' return a bool for the arg '''
- if a is None or type(a) == bool:
- return a
- if type(a) in types.StringTypes:
- a = a.lower()
- if a in ['yes', 'on', '1', 'true', 1]:
- return True
- else:
- return False
-
-def quote(a):
- ''' return its argument quoted for shell usage '''
- return pipes.quote(a)
-
-def fileglob(pathname):
- ''' return list of matched files for glob '''
- return glob.glob(pathname)
-
-def regex(value='', pattern='', ignorecase=False, match_type='search'):
- ''' Expose `re` as a boolean filter using the `search` method by default.
- This is likely only useful for `search` and `match` which already
- have their own filters.
- '''
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- _bool = __builtins__.get('bool')
- return _bool(getattr(_re, match_type, 'search')(value))
-
-def match(value, pattern='', ignorecase=False):
- ''' Perform a `re.match` returning a boolean '''
- return regex(value, pattern, ignorecase, 'match')
-
-def search(value, pattern='', ignorecase=False):
- ''' Perform a `re.search` returning a boolean '''
- return regex(value, pattern, ignorecase, 'search')
-
-def regex_replace(value='', pattern='', replacement='', ignorecase=False):
- ''' Perform a `re.sub` returning a string '''
-
- if not isinstance(value, basestring):
- value = str(value)
-
- if ignorecase:
- flags = re.I
- else:
- flags = 0
- _re = re.compile(pattern, flags=flags)
- return _re.sub(replacement, value)
-
-def ternary(value, true_val, false_val):
- ''' value ? true_val : false_val '''
- if value:
- return true_val
- else:
- return false_val
-
-
-def version_compare(value, version, operator='eq', strict=False):
- ''' Perform a version comparison on a value '''
- op_map = {
- '==': 'eq', '=': 'eq', 'eq': 'eq',
- '<': 'lt', 'lt': 'lt',
- '<=': 'le', 'le': 'le',
- '>': 'gt', 'gt': 'gt',
- '>=': 'ge', 'ge': 'ge',
- '!=': 'ne', '<>': 'ne', 'ne': 'ne'
- }
-
- if strict:
- Version = StrictVersion
- else:
- Version = LooseVersion
-
- if operator in op_map:
- operator = op_map[operator]
- else:
- raise errors.AnsibleFilterError('Invalid operator type')
-
- try:
- method = getattr(py_operator, operator)
- return method(Version(str(value)), Version(str(version)))
- except Exception, e:
- raise errors.AnsibleFilterError('Version comparison: %s' % e)
-
-@environmentfilter
-def rand(environment, end, start=None, step=None):
- r = SystemRandom()
- if isinstance(end, (int, long)):
- if not start:
- start = 0
- if not step:
- step = 1
- return r.randrange(start, end, step)
- elif hasattr(end, '__iter__'):
- if start or step:
- raise errors.AnsibleFilterError('start and step can only be used with integer values')
- return r.choice(end)
- else:
- raise errors.AnsibleFilterError('random can only be used on sequences and integers')
-
-def randomize_list(mylist):
- try:
- mylist = list(mylist)
- shuffle(mylist)
- except:
- pass
- return mylist
-
-def get_hash(data, hashtype='sha1'):
-
- try: # see if hash is supported
- h = hashlib.new(hashtype)
- except:
- return None
-
- h.update(data)
- return h.hexdigest()
-
-def get_encrypted_password(password, hashtype='sha512', salt=None):
-
- # TODO: find a way to construct dynamically from system
- cryptmethod= {
- 'md5': '1',
- 'blowfish': '2a',
- 'sha256': '5',
- 'sha512': '6',
- }
-
- hastype = hashtype.lower()
- if hashtype in cryptmethod:
- if salt is None:
- r = SystemRandom()
- salt = ''.join([r.choice(string.ascii_letters + string.digits) for _ in range(16)])
-
- saltstring = "$%s$%s" % (cryptmethod[hashtype],salt)
- encrypted = crypt.crypt(password,saltstring)
- return encrypted
-
- return None
-
-def to_uuid(string):
- return str(uuid.uuid5(UUID_NAMESPACE_ANSIBLE, str(string)))
-
-def comment(text, style='plain', **kw):
- # Predefined comment types
- comment_styles = {
- 'plain': {
- 'decoration': '# '
- },
- 'erlang': {
- 'decoration': '% '
- },
- 'c': {
- 'decoration': '// '
- },
- 'cblock': {
- 'beginning': '/*',
- 'decoration': ' * ',
- 'end': ' */'
- },
- 'xml': {
- 'beginning': '<!--',
- 'decoration': ' - ',
- 'end': '-->'
- }
- }
-
- # Pointer to the right comment type
- style_params = comment_styles[style]
-
- if 'decoration' in kw:
- prepostfix = kw['decoration']
- else:
- prepostfix = style_params['decoration']
-
- # Default params
- p = {
- 'newline': '\n',
- 'beginning': '',
- 'prefix': (prepostfix).rstrip(),
- 'prefix_count': 1,
- 'decoration': '',
- 'postfix': (prepostfix).rstrip(),
- 'postfix_count': 1,
- 'end': ''
- }
-
- # Update default params
- p.update(style_params)
- p.update(kw)
-
- # Compose substrings for the final string
- str_beginning = ''
- if p['beginning']:
- str_beginning = "%s%s" % (p['beginning'], p['newline'])
- str_prefix = str(
- "%s%s" % (p['prefix'], p['newline'])) * int(p['prefix_count'])
- str_text = ("%s%s" % (
- p['decoration'],
- # Prepend each line of the text with the decorator
- text.replace(
- p['newline'], "%s%s" % (p['newline'], p['decoration'])))).replace(
- # Remove trailing spaces when only decorator is on the line
- "%s%s" % (p['decoration'], p['newline']),
- "%s%s" % (p['decoration'].rstrip(), p['newline']))
- str_postfix = p['newline'].join(
- [''] + [p['postfix'] for x in range(p['postfix_count'])])
- str_end = ''
- if p['end']:
- str_end = "%s%s" % (p['newline'], p['end'])
-
- # Return the final string
- return "%s%s%s%s%s" % (
- str_beginning,
- str_prefix,
- str_text,
- str_postfix,
- str_end)
-
-
-class FilterModule(object):
- ''' Ansible core jinja2 filters '''
-
- def filters(self):
- return {
- # base 64
- 'b64decode': partial(unicode_wrap, base64.b64decode),
- 'b64encode': partial(unicode_wrap, base64.b64encode),
-
- # uuid
- 'to_uuid': to_uuid,
-
- # json
- 'to_json': to_json,
- 'to_nice_json': to_nice_json,
- 'from_json': json.loads,
-
- # yaml
- 'to_yaml': yaml.safe_dump,
- 'to_nice_yaml': to_nice_yaml,
- 'from_yaml': yaml.safe_load,
-
- # path
- 'basename': partial(unicode_wrap, os.path.basename),
- 'dirname': partial(unicode_wrap, os.path.dirname),
- 'expanduser': partial(unicode_wrap, os.path.expanduser),
- 'realpath': partial(unicode_wrap, os.path.realpath),
- 'relpath': partial(unicode_wrap, os.path.relpath),
-
- # failure testing
- 'failed' : failed,
- 'success' : success,
-
- # changed testing
- 'changed' : changed,
-
- # skip testing
- 'skipped' : skipped,
-
- # variable existence
- 'mandatory': mandatory,
-
- # value as boolean
- 'bool': bool,
-
- # quote string for shell usage
- 'quote': quote,
-
- # hash filters
- # md5 hex digest of string
- 'md5': md5s,
- # sha1 hex digeset of string
- 'sha1': checksum_s,
- # checksum of string as used by ansible for checksuming files
- 'checksum': checksum_s,
- # generic hashing
- 'password_hash': get_encrypted_password,
- 'hash': get_hash,
-
- # file glob
- 'fileglob': fileglob,
-
- # regex
- 'match': match,
- 'search': search,
- 'regex': regex,
- 'regex_replace': regex_replace,
-
- # ? : ;
- 'ternary': ternary,
-
- # list
- # version comparison
- 'version_compare': version_compare,
-
- # random stuff
- 'random': rand,
- 'shuffle': randomize_list,
-
- # comment-style decoration of string
- 'comment': comment,
- }
diff --git a/v1/ansible/runner/filter_plugins/ipaddr.py b/v1/ansible/runner/filter_plugins/ipaddr.py
deleted file mode 100644
index 5d9d6e3136..0000000000
--- a/v1/ansible/runner/filter_plugins/ipaddr.py
+++ /dev/null
@@ -1,659 +0,0 @@
-# (c) 2014, Maciej Delmanowski <drybjed@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from functools import partial
-
-try:
- import netaddr
-except ImportError:
- # in this case, we'll make the filters return error messages (see bottom)
- netaddr = None
-else:
- class mac_linux(netaddr.mac_unix):
- pass
- mac_linux.word_fmt = '%.2x'
-
-from ansible import errors
-
-
-# ---- IP address and network query helpers ----
-
-def _empty_ipaddr_query(v, vtype):
- # We don't have any query to process, so just check what type the user
- # expects, and return the IP address in a correct format
- if v:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
-def _6to4_query(v, vtype, value):
- if v.version == 4:
-
- if v.size == 1:
- ipconv = str(v.ip)
- elif v.size > 1:
- if v.ip != v.network:
- ipconv = str(v.ip)
- else:
- ipconv = False
-
- if ipaddr(ipconv, 'public'):
- numbers = list(map(int, ipconv.split('.')))
-
- try:
- return '2002:{:02x}{:02x}:{:02x}{:02x}::1/48'.format(*numbers)
- except:
- return False
-
- elif v.version == 6:
- if vtype == 'address':
- if ipaddr(str(v), '2002::/16'):
- return value
- elif vtype == 'network':
- if v.ip != v.network:
- if ipaddr(str(v.ip), '2002::/16'):
- return value
- else:
- return False
-
-def _ip_query(v):
- if v.size == 1:
- return str(v.ip)
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip)
-
-def _gateway_query(v):
- if v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _bool_ipaddr_query(v):
- if v:
- return True
-
-def _broadcast_query(v):
- if v.size > 1:
- return str(v.broadcast)
-
-def _cidr_query(v):
- return str(v)
-
-def _cidr_lookup_query(v, iplist, value):
- try:
- if v in iplist:
- return value
- except:
- return False
-
-def _host_query(v):
- if v.size == 1:
- return str(v)
- elif v.size > 1:
- if v.ip != v.network:
- return str(v.ip) + '/' + str(v.prefixlen)
-
-def _hostmask_query(v):
- return str(v.hostmask)
-
-def _int_query(v, vtype):
- if vtype == 'address':
- return int(v.ip)
- elif vtype == 'network':
- return str(int(v.ip)) + '/' + str(int(v.prefixlen))
-
-def _ipv4_query(v, value):
- if v.version == 6:
- try:
- return str(v.ipv4())
- except:
- return False
- else:
- return value
-
-def _ipv6_query(v, value):
- if v.version == 4:
- return str(v.ipv6())
- else:
- return value
-
-def _link_local_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v.version == 4:
- if ipaddr(str(v_ip), '169.254.0.0/24'):
- return value
-
- elif v.version == 6:
- if ipaddr(str(v_ip), 'fe80::/10'):
- return value
-
-def _loopback_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_loopback():
- return value
-
-def _multicast_query(v, value):
- if v.is_multicast():
- return value
-
-def _net_query(v):
- if v.size > 1:
- if v.ip == v.network:
- return str(v.network) + '/' + str(v.prefixlen)
-
-def _netmask_query(v):
- if v.size > 1:
- return str(v.netmask)
-
-def _network_query(v):
- if v.size > 1:
- return str(v.network)
-
-def _prefix_query(v):
- return int(v.prefixlen)
-
-def _private_query(v, value):
- if v.is_private():
- return value
-
-def _public_query(v, value):
- v_ip = netaddr.IPAddress(str(v.ip))
- if v_ip.is_unicast() and not v_ip.is_private() and \
- not v_ip.is_loopback() and not v_ip.is_netmask() and \
- not v_ip.is_hostmask():
- return value
-
-def _revdns_query(v):
- v_ip = netaddr.IPAddress(str(v.ip))
- return v_ip.reverse_dns
-
-def _size_query(v):
- return v.size
-
-def _subnet_query(v):
- return str(v.cidr)
-
-def _type_query(v):
- if v.size == 1:
- return 'address'
- if v.size > 1:
- if v.ip != v.network:
- return 'address'
- else:
- return 'network'
-
-def _unicast_query(v, value):
- if v.is_unicast():
- return value
-
-def _version_query(v):
- return v.version
-
-def _wrap_query(v, vtype, value):
- if v.version == 6:
- if vtype == 'address':
- return '[' + str(v.ip) + ']'
- elif vtype == 'network':
- return '[' + str(v.ip) + ']/' + str(v.prefixlen)
- else:
- return value
-
-
-# ---- HWaddr query helpers ----
-def _bare_query(v):
- v.dialect = netaddr.mac_bare
- return str(v)
-
-def _bool_hwaddr_query(v):
- if v:
- return True
-
-def _cisco_query(v):
- v.dialect = netaddr.mac_cisco
- return str(v)
-
-def _empty_hwaddr_query(v, value):
- if v:
- return value
-
-def _linux_query(v):
- v.dialect = mac_linux
- return str(v)
-
-def _postgresql_query(v):
- v.dialect = netaddr.mac_pgsql
- return str(v)
-
-def _unix_query(v):
- v.dialect = netaddr.mac_unix
- return str(v)
-
-def _win_query(v):
- v.dialect = netaddr.mac_eui48
- return str(v)
-
-
-# ---- IP address and network filters ----
-
-def ipaddr(value, query = '', version = False, alias = 'ipaddr'):
- ''' Check if string is an IP address or network and filter it '''
-
- query_func_extra_args = {
- '': ('vtype',),
- '6to4': ('vtype', 'value'),
- 'cidr_lookup': ('iplist', 'value'),
- 'int': ('vtype',),
- 'ipv4': ('value',),
- 'ipv6': ('value',),
- 'link-local': ('value',),
- 'loopback': ('value',),
- 'lo': ('value',),
- 'multicast': ('value',),
- 'private': ('value',),
- 'public': ('value',),
- 'unicast': ('value',),
- 'wrap': ('vtype', 'value'),
- }
- query_func_map = {
- '': _empty_ipaddr_query,
- '6to4': _6to4_query,
- 'address': _ip_query,
- 'address/prefix': _gateway_query,
- 'bool': _bool_ipaddr_query,
- 'broadcast': _broadcast_query,
- 'cidr': _cidr_query,
- 'cidr_lookup': _cidr_lookup_query,
- 'gateway': _gateway_query,
- 'gw': _gateway_query,
- 'host': _host_query,
- 'host/prefix': _gateway_query,
- 'hostmask': _hostmask_query,
- 'hostnet': _gateway_query,
- 'int': _int_query,
- 'ip': _ip_query,
- 'ipv4': _ipv4_query,
- 'ipv6': _ipv6_query,
- 'link-local': _link_local_query,
- 'lo': _loopback_query,
- 'loopback': _loopback_query,
- 'multicast': _multicast_query,
- 'net': _net_query,
- 'netmask': _netmask_query,
- 'network': _network_query,
- 'prefix': _prefix_query,
- 'private': _private_query,
- 'public': _public_query,
- 'revdns': _revdns_query,
- 'router': _gateway_query,
- 'size': _size_query,
- 'subnet': _subnet_query,
- 'type': _type_query,
- 'unicast': _unicast_query,
- 'v4': _ipv4_query,
- 'v6': _ipv6_query,
- 'version': _version_query,
- 'wrap': _wrap_query,
- }
-
- vtype = None
-
- if not value:
- return False
-
- elif value == True:
- return False
-
- # Check if value is a list and parse each element
- elif isinstance(value, (list, tuple)):
-
- _ret = []
- for element in value:
- if ipaddr(element, str(query), version):
- _ret.append(ipaddr(element, str(query), version))
-
- if _ret:
- return _ret
- else:
- return list()
-
- # Check if value is a number and convert it to an IP address
- elif str(value).isdigit():
-
- # We don't know what IP version to assume, so let's check IPv4 first,
- # then IPv6
- try:
- if ((not version) or (version and version == 4)):
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = int(value)
- v.prefixlen = 32
- elif version and version == 6:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # IPv4 didn't work the first time, so it definitely has to be IPv6
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = int(value)
- v.prefixlen = 128
-
- # The value is too big for IPv6. Are you a nanobot?
- except:
- return False
-
- # We got an IP address, let's mark it as such
- value = str(v)
- vtype = 'address'
-
- # value has not been recognized, check if it's a valid IP string
- else:
- try:
- v = netaddr.IPNetwork(value)
-
- # value is a valid IP string, check if user specified
- # CIDR prefix or just an IP address, this will indicate default
- # output format
- try:
- address, prefix = value.split('/')
- vtype = 'network'
- except:
- vtype = 'address'
-
- # value hasn't been recognized, maybe it's a numerical CIDR?
- except:
- try:
- address, prefix = value.split('/')
- address.isdigit()
- address = int(address)
- prefix.isdigit()
- prefix = int(prefix)
-
- # It's not numerical CIDR, give up
- except:
- return False
-
- # It is something, so let's try and build a CIDR from the parts
- try:
- v = netaddr.IPNetwork('0.0.0.0/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv4 CIDR
- except:
- try:
- v = netaddr.IPNetwork('::/0')
- v.value = address
- v.prefixlen = prefix
-
- # It's not a valid IPv6 CIDR. Give up.
- except:
- return False
-
- # We have a valid CIDR, so let's write it in correct format
- value = str(v)
- vtype = 'network'
-
- # We have a query string but it's not in the known query types. Check if
- # that string is a valid subnet, if so, we can check later if given IP
- # address/network is inside that specific subnet
- try:
- ### ?? 6to4 and link-local were True here before. Should they still?
- if query and (query not in query_func_map or query == 'cidr_lookup') and ipaddr(query, 'network'):
- iplist = netaddr.IPSet([netaddr.IPNetwork(query)])
- query = 'cidr_lookup'
- except:
- pass
-
- # This code checks if value maches the IP version the user wants, ie. if
- # it's any version ("ipaddr()"), IPv4 ("ipv4()") or IPv6 ("ipv6()")
- # If version does not match, return False
- if version and v.version != version:
- return False
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- try:
- float(query)
- if v.size == 1:
- if vtype == 'address':
- return str(v.ip)
- elif vtype == 'network':
- return str(v)
-
- elif v.size > 1:
- try:
- return str(v[query]) + '/' + str(v.prefixlen)
- except:
- return False
-
- else:
- return value
-
- except:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-
-def ipwrap(value, query = ''):
- try:
- if isinstance(value, (list, tuple)):
- _ret = []
- for element in value:
- if ipaddr(element, query, version = False, alias = 'ipwrap'):
- _ret.append(ipaddr(element, 'wrap'))
- else:
- _ret.append(element)
-
- return _ret
- else:
- _ret = ipaddr(value, query, version = False, alias = 'ipwrap')
- if _ret:
- return ipaddr(_ret, 'wrap')
- else:
- return value
-
- except:
- return value
-
-
-def ipv4(value, query = ''):
- return ipaddr(value, query, version = 4, alias = 'ipv4')
-
-
-def ipv6(value, query = ''):
- return ipaddr(value, query, version = 6, alias = 'ipv6')
-
-
-# Split given subnet into smaller subnets or find out the biggest subnet of
-# a given IP address with given CIDR prefix
-# Usage:
-#
-# - address or address/prefix | ipsubnet
-# returns CIDR subnet of a given input
-#
-# - address/prefix | ipsubnet(cidr)
-# returns number of possible subnets for given CIDR prefix
-#
-# - address/prefix | ipsubnet(cidr, index)
-# returns new subnet with given CIDR prefix
-#
-# - address | ipsubnet(cidr)
-# returns biggest subnet with given CIDR prefix that address belongs to
-#
-# - address | ipsubnet(cidr, index)
-# returns next indexed subnet which contains given address
-def ipsubnet(value, query = '', index = 'x'):
- ''' Manipulate IPv4/IPv6 subnets '''
-
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return str(value)
-
- elif str(query).isdigit():
- vsize = ipaddr(v, 'size')
- query = int(query)
-
- try:
- float(index)
- index = int(index)
-
- if vsize > 1:
- try:
- return str(list(value.subnet(query))[index])
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[index])
- except:
- return False
-
- except:
- if vsize > 1:
- try:
- return str(len(list(value.subnet(query))))
- except:
- return False
-
- elif vsize == 1:
- try:
- return str(value.supernet(query)[0])
- except:
- return False
-
- return False
-
-# Returns the nth host within a network described by value.
-# Usage:
-#
-# - address or address/prefix | nthhost(nth)
-# returns the nth host within the given network
-def nthhost(value, query=''):
- ''' Get the nth host within a given network '''
- try:
- vtype = ipaddr(value, 'type')
- if vtype == 'address':
- v = ipaddr(value, 'cidr')
- elif vtype == 'network':
- v = ipaddr(value, 'subnet')
-
- value = netaddr.IPNetwork(v)
- except:
- return False
-
- if not query:
- return False
-
- try:
- vsize = ipaddr(v, 'size')
- nth = int(query)
- if value.size > nth:
- return value[nth]
-
- except ValueError:
- return False
-
- return False
-
-
-# ---- HWaddr / MAC address filters ----
-
-def hwaddr(value, query = '', alias = 'hwaddr'):
- ''' Check if string is a HW/MAC address and filter it '''
-
- query_func_extra_args = {
- '': ('value',),
- }
- query_func_map = {
- '': _empty_hwaddr_query,
- 'bare': _bare_query,
- 'bool': _bool_hwaddr_query,
- 'cisco': _cisco_query,
- 'eui48': _win_query,
- 'linux': _linux_query,
- 'pgsql': _postgresql_query,
- 'postgresql': _postgresql_query,
- 'psql': _postgresql_query,
- 'unix': _unix_query,
- 'win': _win_query,
- }
-
- try:
- v = netaddr.EUI(value)
- except:
- if query and query != 'bool':
- raise errors.AnsibleFilterError(alias + ': not a hardware address: %s' % value)
-
- extras = []
- for arg in query_func_extra_args.get(query, tuple()):
- extras.append(locals()[arg])
- try:
- return query_func_map[query](v, *extras)
- except KeyError:
- raise errors.AnsibleFilterError(alias + ': unknown filter type: %s' % query)
-
- return False
-
-def macaddr(value, query = ''):
- return hwaddr(value, query, alias = 'macaddr')
-
-def _need_netaddr(f_name, *args, **kwargs):
- raise errors.AnsibleFilterError('The {0} filter requires python-netaddr be'
- ' installed on the ansible controller'.format(f_name))
-
-# ---- Ansible filters ----
-
-class FilterModule(object):
- ''' IP address and network manipulation filters '''
- filter_map = {
- # IP addresses and networks
- 'ipaddr': ipaddr,
- 'ipwrap': ipwrap,
- 'ipv4': ipv4,
- 'ipv6': ipv6,
- 'ipsubnet': ipsubnet,
- 'nthhost': nthhost,
-
- # MAC / HW addresses
- 'hwaddr': hwaddr,
- 'macaddr': macaddr
- }
-
- def filters(self):
- if netaddr:
- return self.filter_map
- else:
- # Need to install python-netaddr for these filters to work
- return dict((f, partial(_need_netaddr, f)) for f in self.filter_map)
diff --git a/v1/ansible/runner/filter_plugins/mathstuff.py b/v1/ansible/runner/filter_plugins/mathstuff.py
deleted file mode 100644
index c6a49485a4..0000000000
--- a/v1/ansible/runner/filter_plugins/mathstuff.py
+++ /dev/null
@@ -1,126 +0,0 @@
-# (c) 2014, Brian Coca <bcoca@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from __future__ import absolute_import
-
-import math
-import collections
-from ansible import errors
-
-def unique(a):
- if isinstance(a,collections.Hashable):
- c = set(a)
- else:
- c = []
- for x in a:
- if x not in c:
- c.append(x)
- return c
-
-def intersect(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) & set(b)
- else:
- c = unique(filter(lambda x: x in b, a))
- return c
-
-def difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) - set(b)
- else:
- c = unique(filter(lambda x: x not in b, a))
- return c
-
-def symmetric_difference(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) ^ set(b)
- else:
- c = unique(filter(lambda x: x not in intersect(a,b), union(a,b)))
- return c
-
-def union(a, b):
- if isinstance(a,collections.Hashable) and isinstance(b,collections.Hashable):
- c = set(a) | set(b)
- else:
- c = unique(a + b)
- return c
-
-def min(a):
- _min = __builtins__.get('min')
- return _min(a);
-
-def max(a):
- _max = __builtins__.get('max')
- return _max(a);
-
-def isnotanumber(x):
- try:
- return math.isnan(x)
- except TypeError:
- return False
-
-
-def logarithm(x, base=math.e):
- try:
- if base == 10:
- return math.log10(x)
- else:
- return math.log(x, base)
- except TypeError, e:
- raise errors.AnsibleFilterError('log() can only be used on numbers: %s' % str(e))
-
-
-def power(x, y):
- try:
- return math.pow(x, y)
- except TypeError, e:
- raise errors.AnsibleFilterError('pow() can only be used on numbers: %s' % str(e))
-
-
-def inversepower(x, base=2):
- try:
- if base == 2:
- return math.sqrt(x)
- else:
- return math.pow(x, 1.0/float(base))
- except TypeError, e:
- raise errors.AnsibleFilterError('root() can only be used on numbers: %s' % str(e))
-
-
-class FilterModule(object):
- ''' Ansible math jinja2 filters '''
-
- def filters(self):
- return {
- # general math
- 'isnan': isnotanumber,
- 'min' : min,
- 'max' : max,
-
- # exponents and logarithms
- 'log': logarithm,
- 'pow': power,
- 'root': inversepower,
-
- # set theory
- 'unique' : unique,
- 'intersect': intersect,
- 'difference': difference,
- 'symmetric_difference': symmetric_difference,
- 'union': union,
-
- }
diff --git a/v1/ansible/runner/lookup_plugins/__init__.py b/v1/ansible/runner/lookup_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/lookup_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/lookup_plugins/cartesian.py b/v1/ansible/runner/lookup_plugins/cartesian.py
deleted file mode 100644
index ab7bba0f0f..0000000000
--- a/v1/ansible/runner/lookup_plugins/cartesian.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2013, Bradley Young <young.bradley@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-from itertools import product
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
- """
- Create the cartesian product of lists
- [1, 2, 3], [a, b] -> [1, a], [1, b], [2, a], [2, b], [3, a], [3, b]
- """
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- if len(my_list) == 0:
- raise errors.AnsibleError("with_cartesian requires at least one element in each list")
- return [flatten(x) for x in product(*my_list)]
-
-
diff --git a/v1/ansible/runner/lookup_plugins/consul_kv.py b/v1/ansible/runner/lookup_plugins/consul_kv.py
deleted file mode 100755
index 522fa8deb7..0000000000
--- a/v1/ansible/runner/lookup_plugins/consul_kv.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# (c) 2015, Steve Gargan <steve.gargan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-'''
-Lookup plugin to grab metadata from a consul key value store.
-============================================================
-
-Plugin will lookup metadata for a playbook from the key value store in a
-consul cluster. Values can be easily set in the kv store with simple rest
-commands e.g.
-
-curl -X PUT -d 'some-value' http://localhost:8500/v1/kv/ansible/somedata
-
-this can then be looked up in a playbook as follows
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
- - 'key/to/retrieve'
-
-
-Parameters can be provided after the key be more specific about what to retrieve e.g.
-
-- debug: msg='key contains {{item}}'
- with_consul_kv:
- - 'key/to recurse=true token=E6C060A9-26FB-407A-B83E-12DDAFCB4D98')}}'
-
-recurse: if true, will retrieve all the values that have the given key as prefix
-index: if the key has a value with the specified index then this is returned
- allowing access to historical values.
-token: acl token to allow access to restricted values.
-
-By default this will lookup keys via the consul agent running on http://localhost:8500
-this can be changed by setting the env variable 'ANSIBLE_CONSUL_URL' to point to the url
-of the kv store you'd like to use.
-
-'''
-
-######################################################################
-
-import os
-import sys
-from urlparse import urlparse
-from ansible import utils, errors
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-try:
- import consul
-except ImportError, e:
- print "failed=True msg='python-consul required for this module. "\
- "see http://python-consul.readthedocs.org/en/latest/#installation'"
- sys.exit(1)
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
-
- self.basedir = basedir
- self.agent_url = 'http://localhost:8500'
- if os.getenv('ANSIBLE_CONSUL_URL') is not None:
- self.agent_url = os.environ['ANSIBLE_CONSUL_URL']
-
- def run(self, terms, inject=None, **kwargs):
-
- u = urlparse(self.agent_url)
- consul_api = consul.Consul(host=u.hostname, port=u.port)
-
- values = []
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- try:
- for term in terms:
- params = self.parse_params(term)
- results = consul_api.kv.get(params['key'],
- token=params['token'],
- index=params['index'],
- recurse=params['recurse'])
- if results[1]:
- # responds with a single or list of result maps
- if isinstance(results[1], list):
- for r in results[1]:
- values.append(r['Value'])
- else:
- values.append(results[1]['Value'])
- except Exception, e:
- raise errors.AnsibleError(
- "Error locating '%s' in kv store. Error was %s" % (term, e))
-
- return values
-
- def parse_params(self, term):
- params = term.split(' ')
-
- paramvals = {
- 'key': params[0],
- 'token': None,
- 'recurse': False,
- 'index': None
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
- if param and len(param) > 0:
- name, value = param.split('=')
- assert name in paramvals, "% not a valid consul lookup parameter" % name
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- return paramvals
diff --git a/v1/ansible/runner/lookup_plugins/csvfile.py b/v1/ansible/runner/lookup_plugins/csvfile.py
deleted file mode 100644
index a9ea8ed90c..0000000000
--- a/v1/ansible/runner/lookup_plugins/csvfile.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import codecs
-import csv
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def read_csv(self, filename, key, delimiter, dflt=None, col=1):
-
- try:
- f = codecs.open(filename, 'r', encoding='utf-8')
- creader = csv.reader(f, delimiter=delimiter)
-
- for row in creader:
- if row[0] == key:
- return row[int(col)]
- except Exception, e:
- raise errors.AnsibleError("csvfile: %s" % str(e))
-
- return dflt
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- params = term.split()
- key = params[0]
-
- paramvals = {
- 'file' : 'ansible.csv',
- 'default' : None,
- 'delimiter' : "TAB",
- 'col' : "1", # column to return
- }
-
- # parameters specified?
- try:
- for param in params[1:]:
- name, value = param.split('=')
- assert(name in paramvals)
- if name == 'delimiter':
- paramvals[name] = str(value)
- else:
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- if paramvals['delimiter'] == 'TAB':
- paramvals['delimiter'] = "\t"
-
- path = utils.path_dwim(self.basedir, paramvals['file'])
-
- var = self.read_csv(path, key, paramvals['delimiter'], paramvals['default'], paramvals['col'])
- if var is not None:
- if type(var) is list:
- for v in var:
- ret.append(v)
- else:
- ret.append(var)
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/dict.py b/v1/ansible/runner/lookup_plugins/dict.py
deleted file mode 100644
index cda1546598..0000000000
--- a/v1/ansible/runner/lookup_plugins/dict.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (c) 2014, Kent R. Spillner <kspillner@acm.org>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten_hash_to_list(terms):
- ret = []
- for key in terms:
- ret.append({'key': key, 'value': terms[key]})
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, dict):
- raise errors.AnsibleError("with_dict expects a dict")
-
- return flatten_hash_to_list(terms)
diff --git a/v1/ansible/runner/lookup_plugins/dig.py b/v1/ansible/runner/lookup_plugins/dig.py
deleted file mode 100644
index a549a4a157..0000000000
--- a/v1/ansible/runner/lookup_plugins/dig.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import socket
-HAVE_DNS=False
-try:
- import dns.resolver
- import dns.reversename
- from dns.rdatatype import *
- from dns.exception import DNSException
- HAVE_DNS=True
-except ImportError:
- pass
-
-def make_rdata_dict(rdata):
- ''' While the 'dig' lookup plugin supports anything which dnspython supports
- out of the box, the following supported_types list describes which
- DNS query types we can convert to a dict.
-
- Note: adding support for RRSIG is hard work. :)
- '''
- supported_types = {
- A : ['address'],
- AAAA : ['address'],
- CNAME : ['target'],
- DNAME : ['target'],
- DLV : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- DNSKEY : ['flags', 'algorithm', 'protocol', 'key'],
- DS : ['algorithm', 'digest_type', 'key_tag', 'digest'],
- HINFO : ['cpu', 'os'],
- LOC : ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'],
- MX : ['preference', 'exchange'],
- NAPTR : ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'],
- NS : ['target'],
- NSEC3PARAM : ['algorithm', 'flags', 'iterations', 'salt'],
- PTR : ['target'],
- RP : ['mbox', 'txt'],
- # RRSIG : ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'],
- SOA : ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'],
- SPF : ['strings'],
- SRV : ['priority', 'weight', 'port', 'target'],
- SSHFP : ['algorithm', 'fp_type', 'fingerprint'],
- TLSA : ['usage', 'selector', 'mtype', 'cert'],
- TXT : ['strings'],
- }
-
- rd = {}
-
- if rdata.rdtype in supported_types:
- fields = supported_types[rdata.rdtype]
- for f in fields:
- val = rdata.__getattribute__(f)
-
- if type(val) == dns.name.Name:
- val = dns.name.Name.to_text(val)
-
- if rdata.rdtype == DLV and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DS and f == 'digest':
- val = dns.rdata._hexify(rdata.digest).replace(' ', '')
- if rdata.rdtype == DNSKEY and f == 'key':
- val = dns.rdata._base64ify(rdata.key).replace(' ', '')
- if rdata.rdtype == NSEC3PARAM and f == 'salt':
- val = dns.rdata._hexify(rdata.salt).replace(' ', '')
- if rdata.rdtype == SSHFP and f == 'fingerprint':
- val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '')
- if rdata.rdtype == TLSA and f == 'cert':
- val = dns.rdata._hexify(rdata.cert).replace(' ', '')
-
-
- rd[f] = val
-
- return rd
-
-# ==============================================================
-# dig: Lookup DNS records
-#
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dig): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- '''
- terms contains a string with things to `dig' for. We support the
- following formats:
- example.com # A record
- example.com qtype=A # same
- example.com/TXT # specific qtype
- example.com qtype=txt # same
- 192.168.1.2/PTR # reverse PTR
- ^^ shortcut for 2.1.168.192.in-addr.arpa/PTR
- example.net/AAAA @nameserver # query specified server
- ^^^ can be comma-sep list of names/addresses
-
- ... flat=0 # returns a dict; default is 1 == string
- '''
- terms = terms.split()
-
- # Create Resolver object so that we can set NS if necessary
- myres = dns.resolver.Resolver()
- edns_size = 4096
- myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)
-
- domain = None
- qtype = 'A'
- flat = True
-
- for t in terms:
- if t.startswith('@'): # e.g. "@10.0.1.2,192.168.1.1" is ok.
- nsset = t[1:].split(',')
- nameservers = []
- for ns in nsset:
- # Check if we have a valid IP address. If so, use that, otherwise
- # try to resolve name to address using system's resolver. If that
- # fails we bail out.
- try:
- socket.inet_aton(ns)
- nameservers.append(ns)
- except:
- try:
- nsaddr = dns.resolver.query(ns)[0].address
- nameservers.append(nsaddr)
- except Exception, e:
- raise errors.AnsibleError("dns lookup NS: ", str(e))
- myres.nameservers = nameservers
- continue
- if '=' in t:
- try:
- opt, arg = t.split('=')
- except:
- pass
-
- if opt == 'qtype':
- qtype = arg.upper()
- elif opt == 'flat':
- flat = int(arg)
-
- continue
-
- if '/' in t:
- try:
- domain, qtype = t.split('/')
- except:
- domain = t
- else:
- domain = t
-
- # print "--- domain = {0} qtype={1}".format(domain, qtype)
-
- ret = []
-
- if qtype.upper() == 'PTR':
- try:
- n = dns.reversename.from_address(domain)
- domain = n.to_text()
- except dns.exception.SyntaxError:
- pass
- except Exception, e:
- raise errors.AnsibleError("dns.reversename unhandled exception", str(e))
-
- try:
- answers = myres.query(domain, qtype)
- for rdata in answers:
- s = rdata.to_text()
- if qtype.upper() == 'TXT':
- s = s[1:-1] # Strip outside quotes on TXT rdata
-
- if flat:
- ret.append(s)
- else:
- try:
- rd = make_rdata_dict(rdata)
- rd['owner'] = answers.canonical_name.to_text()
- rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
- rd['ttl'] = answers.rrset.ttl
-
- ret.append(rd)
- except Exception, e:
- ret.append(str(e))
-
- except dns.resolver.NXDOMAIN:
- ret.append('NXDOMAIN')
- except dns.resolver.NoAnswer:
- ret.append("")
- except dns.resolver.Timeout:
- ret.append('')
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
-
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/dnstxt.py b/v1/ansible/runner/lookup_plugins/dnstxt.py
deleted file mode 100644
index 4fa47bf4ee..0000000000
--- a/v1/ansible/runner/lookup_plugins/dnstxt.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-HAVE_DNS=False
-try:
- import dns.resolver
- from dns.exception import DNSException
- HAVE_DNS=True
-except ImportError:
- pass
-
-# ==============================================================
-# DNSTXT: DNS TXT records
-#
-# key=domainname
-# TODO: configurable resolver IPs
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_DNS == False:
- raise errors.AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- domain = term.split()[0]
- string = []
- try:
- answers = dns.resolver.query(domain, 'TXT')
- for rdata in answers:
- s = rdata.to_text()
- string.append(s[1:-1]) # Strip outside quotes on TXT rdata
-
- except dns.resolver.NXDOMAIN:
- string = 'NXDOMAIN'
- except dns.resolver.Timeout:
- string = ''
- except dns.exception.DNSException, e:
- raise errors.AnsibleError("dns.resolver unhandled exception", e)
-
- ret.append(''.join(string))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/env.py b/v1/ansible/runner/lookup_plugins/env.py
deleted file mode 100644
index d4f85356ed..0000000000
--- a/v1/ansible/runner/lookup_plugins/env.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-from ansible.utils import template
-import os
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- try:
- terms = template.template(self.basedir, terms, inject)
- except Exception, e:
- pass
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- var = term.split()[0]
- ret.append(os.getenv(var, ''))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/etcd.py b/v1/ansible/runner/lookup_plugins/etcd.py
deleted file mode 100644
index a758a2fb0b..0000000000
--- a/v1/ansible/runner/lookup_plugins/etcd.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-import os
-import urllib2
-try:
- import json
-except ImportError:
- import simplejson as json
-
-# this can be made configurable, not should not use ansible.cfg
-ANSIBLE_ETCD_URL = 'http://127.0.0.1:4001'
-if os.getenv('ANSIBLE_ETCD_URL') is not None:
- ANSIBLE_ETCD_URL = os.environ['ANSIBLE_ETCD_URL']
-
-class etcd():
- def __init__(self, url=ANSIBLE_ETCD_URL):
- self.url = url
- self.baseurl = '%s/v1/keys' % (self.url)
-
- def get(self, key):
- url = "%s/%s" % (self.baseurl, key)
-
- data = None
- value = ""
- try:
- r = urllib2.urlopen(url)
- data = r.read()
- except:
- return value
-
- try:
- # {"action":"get","key":"/name","value":"Jane Jolie","index":5}
- item = json.loads(data)
- if 'value' in item:
- value = item['value']
- if 'errorCode' in item:
- value = "ENOENT"
- except:
- raise
- pass
-
- return value
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- self.etcd = etcd()
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- key = term.split()[0]
- value = self.etcd.get(key)
- ret.append(value)
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/file.py b/v1/ansible/runner/lookup_plugins/file.py
deleted file mode 100644
index 70bae6653a..0000000000
--- a/v1/ansible/runner/lookup_plugins/file.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import codecs
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- ret = []
-
- # this can happen if the variable contains a string, strictly not desired for lookup
- # plugins, but users may try it, so make it work.
- if not isinstance(terms, list):
- terms = [ terms ]
-
- for term in terms:
- basedir_path = utils.path_dwim(self.basedir, term)
- relative_path = None
- playbook_path = None
-
- # Special handling of the file lookup, used primarily when the
- # lookup is done from a role. If the file isn't found in the
- # basedir of the current file, use dwim_relative to look in the
- # role/files/ directory, and finally the playbook directory
- # itself (which will be relative to the current working dir)
- if '_original_file' in inject:
- relative_path = utils.path_dwim_relative(inject['_original_file'], 'files', term, self.basedir, check=False)
- if 'playbook_dir' in inject:
- playbook_path = os.path.join(inject['playbook_dir'], term)
-
- for path in (basedir_path, relative_path, playbook_path):
- if path and os.path.exists(path):
- ret.append(codecs.open(path, encoding="utf8").read().rstrip())
- break
- else:
- raise errors.AnsibleError("could not locate file in lookup: %s" % term)
-
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/fileglob.py b/v1/ansible/runner/lookup_plugins/fileglob.py
deleted file mode 100644
index 7d3cbb92be..0000000000
--- a/v1/ansible/runner/lookup_plugins/fileglob.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import glob
-from ansible import utils
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
-
- for term in terms:
-
- dwimmed = utils.path_dwim(self.basedir, term)
- globbed = glob.glob(dwimmed)
- ret.extend(g for g in globbed if os.path.isfile(g))
-
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/first_found.py b/v1/ansible/runner/lookup_plugins/first_found.py
deleted file mode 100644
index a48b56a3c2..0000000000
--- a/v1/ansible/runner/lookup_plugins/first_found.py
+++ /dev/null
@@ -1,194 +0,0 @@
-# (c) 2013, seth vidal <skvidal@fedoraproject.org> red hat, inc
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-
-# take a list of files and (optionally) a list of paths
-# return the first existing file found in the paths
-# [file1, file2, file3], [path1, path2, path3]
-# search order is:
-# path1/file1
-# path1/file2
-# path1/file3
-# path2/file1
-# path2/file2
-# path2/file3
-# path3/file1
-# path3/file2
-# path3/file3
-
-# first file found with os.path.exists() is returned
-# no file matches raises ansibleerror
-# EXAMPLES
-# - name: copy first existing file found to /some/file
-# action: copy src=$item dest=/some/file
-# with_first_found:
-# - files: foo ${inventory_hostname} bar
-# paths: /tmp/production /tmp/staging
-
-# that will look for files in this order:
-# /tmp/production/foo
-# ${inventory_hostname}
-# bar
-# /tmp/staging/foo
-# ${inventory_hostname}
-# bar
-
-# - name: copy first existing file found to /some/file
-# action: copy src=$item dest=/some/file
-# with_first_found:
-# - files: /some/place/foo ${inventory_hostname} /some/place/else
-
-# that will look for files in this order:
-# /some/place/foo
-# $relative_path/${inventory_hostname}
-# /some/place/else
-
-# example - including tasks:
-# tasks:
-# - include: $item
-# with_first_found:
-# - files: generic
-# paths: tasks/staging tasks/production
-# this will include the tasks in the file generic where it is found first (staging or production)
-
-# example simple file lists
-#tasks:
-#- name: first found file
-# action: copy src=$item dest=/etc/file.cfg
-# with_first_found:
-# - files: foo.${inventory_hostname} foo
-
-
-# example skipping if no matched files
-# First_found also offers the ability to control whether or not failing
-# to find a file returns an error or not
-#
-#- name: first found file - or skip
-# action: copy src=$item dest=/etc/file.cfg
-# with_first_found:
-# - files: foo.${inventory_hostname}
-# skip: true
-
-# example a role with default configuration and configuration per host
-# you can set multiple terms with their own files and paths to look through.
-# consider a role that sets some configuration per host falling back on a default config.
-#
-#- name: some configuration template
-# template: src={{ item }} dest=/etc/file.cfg mode=0444 owner=root group=root
-# with_first_found:
-# - files:
-# - ${inventory_hostname}/etc/file.cfg
-# paths:
-# - ../../../templates.overwrites
-# - ../../../templates
-# - files:
-# - etc/file.cfg
-# paths:
-# - templates
-
-# the above will return an empty list if the files cannot be found at all
-# if skip is unspecificed or if it is set to false then it will return a list
-# error which can be caught bye ignore_errors: true for that action.
-
-# finally - if you want you can use it, in place to replace first_available_file:
-# you simply cannot use the - files, path or skip options. simply replace
-# first_available_file with with_first_found and leave the file listing in place
-#
-#
-# - name: with_first_found like first_available_file
-# action: copy src=$item dest=/tmp/faftest
-# with_first_found:
-# - ../files/foo
-# - ../files/bar
-# - ../files/baz
-# ignore_errors: true
-
-
-from ansible import utils, errors
-import os
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- result = None
- anydict = False
- skip = False
-
- for term in terms:
- if isinstance(term, dict):
- anydict = True
-
- total_search = []
- if anydict:
- for term in terms:
- if isinstance(term, dict):
- files = term.get('files', [])
- paths = term.get('paths', [])
- skip = utils.boolean(term.get('skip', False))
-
- filelist = files
- if isinstance(files, basestring):
- files = files.replace(',', ' ')
- files = files.replace(';', ' ')
- filelist = files.split(' ')
-
- pathlist = paths
- if paths:
- if isinstance(paths, basestring):
- paths = paths.replace(',', ' ')
- paths = paths.replace(':', ' ')
- paths = paths.replace(';', ' ')
- pathlist = paths.split(' ')
-
- if not pathlist:
- total_search = filelist
- else:
- for path in pathlist:
- for fn in filelist:
- f = os.path.join(path, fn)
- total_search.append(f)
- else:
- total_search.append(term)
- else:
- total_search = terms
-
- for fn in total_search:
- if inject and '_original_file' in inject:
- # check the templates and vars directories too,
- # if they exist
- for roledir in ('templates', 'vars'):
- path = utils.path_dwim(os.path.join(self.basedir, '..', roledir), fn)
- if os.path.exists(path):
- return [path]
- # if none of the above were found, just check the
- # current filename against the basedir (this will already
- # have ../files from runner, if it's a role task
- path = utils.path_dwim(self.basedir, fn)
- if os.path.exists(path):
- return [path]
- else:
- if skip:
- return []
- else:
- return [None]
-
diff --git a/v1/ansible/runner/lookup_plugins/flattened.py b/v1/ansible/runner/lookup_plugins/flattened.py
deleted file mode 100644
index 6d9dd613be..0000000000
--- a/v1/ansible/runner/lookup_plugins/flattened.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-
-
-def check_list_of_one_list(term):
- # make sure term is not a list of one (list of one..) item
- # return the final non list item if so
-
- if isinstance(term,list) and len(term) == 1:
- term = term[0]
- if isinstance(term,list):
- term = check_list_of_one_list(term)
-
- return term
-
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def flatten(self, terms, inject):
-
- ret = []
- for term in terms:
- term = check_list_of_one_list(term)
-
- if term == 'None' or term == 'null':
- # ignore undefined items
- break
-
- if isinstance(term, basestring):
- # convert a variable to a list
- term2 = utils.listify_lookup_plugin_terms(term, self.basedir, inject)
- # but avoid converting a plain string to a list of one string
- if term2 != [ term ]:
- term = term2
-
- if isinstance(term, list):
- # if it's a list, check recursively for items that are a list
- term = self.flatten(term, inject)
- ret.extend(term)
- else:
- ret.append(term)
-
- return ret
-
-
- def run(self, terms, inject=None, **kwargs):
-
- # see if the string represents a list and convert to list if so
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_flattened expects a list")
-
- ret = self.flatten(terms, inject)
- return ret
-
diff --git a/v1/ansible/runner/lookup_plugins/indexed_items.py b/v1/ansible/runner/lookup_plugins/indexed_items.py
deleted file mode 100644
index c1db1fdee2..0000000000
--- a/v1/ansible/runner/lookup_plugins/indexed_items.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_indexed_items expects a list")
-
- items = flatten(terms)
- return zip(range(len(items)), items)
-
diff --git a/v1/ansible/runner/lookup_plugins/inventory_hostnames.py b/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
deleted file mode 100644
index 98523e1398..0000000000
--- a/v1/ansible/runner/lookup_plugins/inventory_hostnames.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-# (c) 2013, Steven Dossett <sdossett@panath.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-import ansible.inventory as inventory
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
- if 'runner' in kwargs:
- self.host_list = kwargs['runner'].inventory.host_list
- else:
- raise errors.AnsibleError("inventory_hostnames must be used as a loop. Example: \"with_inventory_hostnames: \'all\'\"")
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list):
- raise errors.AnsibleError("with_inventory_hostnames expects a list")
- return flatten(inventory.Inventory(self.host_list).list_hosts(terms))
-
diff --git a/v1/ansible/runner/lookup_plugins/items.py b/v1/ansible/runner/lookup_plugins/items.py
deleted file mode 100644
index 85e77d5380..0000000000
--- a/v1/ansible/runner/lookup_plugins/items.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import safe_eval
-import ansible.utils as utils
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if not isinstance(terms, list) and not isinstance(terms,set):
- raise errors.AnsibleError("with_items expects a list or a set")
-
- return flatten(terms)
-
-
diff --git a/v1/ansible/runner/lookup_plugins/lines.py b/v1/ansible/runner/lookup_plugins/lines.py
deleted file mode 100644
index 5d4b70a857..0000000000
--- a/v1/ansible/runner/lookup_plugins/lines.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-from ansible import utils, errors
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- if p.returncode == 0:
- ret.extend(stdout.splitlines())
- else:
- raise errors.AnsibleError("lookup_plugin.lines(%s) returned %d" % (term, p.returncode))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/nested.py b/v1/ansible/runner/lookup_plugins/nested.py
deleted file mode 100644
index 29c4a7d21c..0000000000
--- a/v1/ansible/runner/lookup_plugins/nested.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-def combine(a,b):
- results = []
- for x in a:
- for y in b:
- results.append(flatten([x,y]))
- return results
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
- # this code is common with 'items.py' consider moving to utils if we need it again
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- my_list.reverse()
- result = []
- if len(my_list) == 0:
- raise errors.AnsibleError("with_nested requires at least one element in the nested list")
- result = my_list.pop()
- while len(my_list) > 0:
- result2 = combine(result, my_list.pop())
- result = result2
- new_result = []
- for x in result:
- new_result.append(flatten(x))
- return new_result
-
-
diff --git a/v1/ansible/runner/lookup_plugins/password.py b/v1/ansible/runner/lookup_plugins/password.py
deleted file mode 100644
index a066887e2c..0000000000
--- a/v1/ansible/runner/lookup_plugins/password.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-# (c) 2013, Javier Candeira <javier@candeira.com>
-# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-import errno
-from string import ascii_letters, digits
-import string
-import random
-
-
-class LookupModule(object):
-
- LENGTH = 20
-
- def __init__(self, length=None, encrypt=None, basedir=None, **kwargs):
- self.basedir = basedir
-
- def random_salt(self):
- salt_chars = ascii_letters + digits + './'
- return utils.random_password(length=8, chars=salt_chars)
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
-
- for term in terms:
- # you can't have escaped spaces in yor pathname
- params = term.split()
- relpath = params[0]
-
- paramvals = {
- 'length': LookupModule.LENGTH,
- 'encrypt': None,
- 'chars': ['ascii_letters','digits',".,:-_"],
- }
-
- # get non-default parameters if specified
- try:
- for param in params[1:]:
- name, value = param.split('=')
- assert(name in paramvals)
- if name == 'length':
- paramvals[name] = int(value)
- elif name == 'chars':
- use_chars=[]
- if ",," in value:
- use_chars.append(',')
- use_chars.extend(value.replace(',,',',').split(','))
- paramvals['chars'] = use_chars
- else:
- paramvals[name] = value
- except (ValueError, AssertionError), e:
- raise errors.AnsibleError(e)
-
- length = paramvals['length']
- encrypt = paramvals['encrypt']
- use_chars = paramvals['chars']
-
- # get password or create it if file doesn't exist
- path = utils.path_dwim(self.basedir, relpath)
- if not os.path.exists(path):
- pathdir = os.path.dirname(path)
- if not os.path.isdir(pathdir):
- try:
- os.makedirs(pathdir, mode=0700)
- except OSError, e:
- raise errors.AnsibleError("cannot create the path for the password lookup: %s (error was %s)" % (pathdir, str(e)))
-
- chars = "".join([getattr(string,c,c) for c in use_chars]).replace('"','').replace("'",'')
- password = ''.join(random.choice(chars) for _ in range(length))
-
- if encrypt is not None:
- salt = self.random_salt()
- content = '%s salt=%s' % (password, salt)
- else:
- content = password
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(content + '\n')
- else:
- content = open(path).read().rstrip()
- sep = content.find(' ')
-
- if sep >= 0:
- password = content[:sep]
- salt = content[sep+1:].split('=')[1]
- else:
- password = content
- salt = None
-
- # crypt requested, add salt if missing
- if (encrypt is not None and not salt):
- salt = self.random_salt()
- content = '%s salt=%s' % (password, salt)
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(content + '\n')
- # crypt not requested, remove salt if present
- elif (encrypt is None and salt):
- with open(path, 'w') as f:
- os.chmod(path, 0600)
- f.write(password + '\n')
-
- if encrypt:
- password = utils.do_encrypt(password, encrypt, salt=salt)
-
- ret.append(password)
-
- return ret
-
diff --git a/v1/ansible/runner/lookup_plugins/pipe.py b/v1/ansible/runner/lookup_plugins/pipe.py
deleted file mode 100644
index 0cd9e1cda5..0000000000
--- a/v1/ansible/runner/lookup_plugins/pipe.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-from ansible import utils, errors
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- '''
- http://docs.python.org/2/library/subprocess.html#popen-constructor
-
- The shell argument (which defaults to False) specifies whether to use the
- shell as the program to execute. If shell is True, it is recommended to pass
- args as a string rather than as a sequence
-
- https://github.com/ansible/ansible/issues/6550
- '''
- term = str(term)
-
- p = subprocess.Popen(term, cwd=self.basedir, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- (stdout, stderr) = p.communicate()
- if p.returncode == 0:
- ret.append(stdout.decode("utf-8").rstrip())
- else:
- raise errors.AnsibleError("lookup_plugin.pipe(%s) returned %d" % (term, p.returncode))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/random_choice.py b/v1/ansible/runner/lookup_plugins/random_choice.py
deleted file mode 100644
index 9b32c2f119..0000000000
--- a/v1/ansible/runner/lookup_plugins/random_choice.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# (c) 2013, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import random
-from ansible import utils
-
-# useful for introducing chaos ... or just somewhat reasonably fair selection
-# amongst available mirrors
-#
-# tasks:
-# - debug: msg=$item
-# with_random_choice:
-# - one
-# - two
-# - three
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- return [ random.choice(terms) ]
-
diff --git a/v1/ansible/runner/lookup_plugins/redis_kv.py b/v1/ansible/runner/lookup_plugins/redis_kv.py
deleted file mode 100644
index 22c5c3754f..0000000000
--- a/v1/ansible/runner/lookup_plugins/redis_kv.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils, errors
-import os
-HAVE_REDIS=False
-try:
- import redis # https://github.com/andymccurdy/redis-py/
- HAVE_REDIS=True
-except ImportError:
- pass
-import re
-
-# ==============================================================
-# REDISGET: Obtain value from a GET on a Redis key. Terms
-# expected: 0 = URL, 1 = Key
-# URL may be empty, in which case redis://localhost:6379 assumed
-# --------------------------------------------------------------
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- if HAVE_REDIS == False:
- raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- (url,key) = term.split(',')
- if url == "":
- url = 'redis://localhost:6379'
-
- # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
- # Redis' from_url() doesn't work here.
-
- p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
-
- try:
- m = re.search(p, url)
- host = m.group('host')
- port = int(m.group('port'))
- except AttributeError:
- raise errors.AnsibleError("Bad URI in redis lookup")
-
- try:
- conn = redis.Redis(host=host, port=port)
- res = conn.get(key)
- if res is None:
- res = ""
- ret.append(res)
- except:
- ret.append("") # connection failed or key not found
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/sequence.py b/v1/ansible/runner/lookup_plugins/sequence.py
deleted file mode 100644
index 68b0bbec90..0000000000
--- a/v1/ansible/runner/lookup_plugins/sequence.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.errors import AnsibleError
-import ansible.utils as utils
-from re import compile as re_compile, IGNORECASE
-
-# shortcut format
-NUM = "(0?x?[0-9a-f]+)"
-SHORTCUT = re_compile(
- "^(" + # Group 0
- NUM + # Group 1: Start
- "-)?" +
- NUM + # Group 2: End
- "(/" + # Group 3
- NUM + # Group 4: Stride
- ")?" +
- "(:(.+))?$", # Group 5, Group 6: Format String
- IGNORECASE
-)
-
-
-class LookupModule(object):
- """
- sequence lookup module
-
- Used to generate some sequence of items. Takes arguments in two forms.
-
- The simple / shortcut form is:
-
- [start-]end[/stride][:format]
-
- As indicated by the brackets: start, stride, and format string are all
- optional. The format string is in the style of printf. This can be used
- to pad with zeros, format in hexadecimal, etc. All of the numerical values
- can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
- Negative numbers are not supported.
-
- Some examples:
-
- 5 -> ["1","2","3","4","5"]
- 5-8 -> ["5", "6", "7", "8"]
- 2-10/2 -> ["2", "4", "6", "8", "10"]
- 4:host%02d -> ["host01","host02","host03","host04"]
-
- The standard Ansible key-value form is accepted as well. For example:
-
- start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]
-
- This format takes an alternate form of "end" called "count", which counts
- some number from the starting value. For example:
-
- count=5 -> ["1", "2", "3", "4", "5"]
- start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
- start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
- start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
-
- The count option is mostly useful for avoiding off-by-one errors and errors
- calculating the number of entries in a sequence when a stride is specified.
- """
-
- def __init__(self, basedir, **kwargs):
- """absorb any keyword args"""
- self.basedir = basedir
-
- def reset(self):
- """set sensible defaults"""
- self.start = 1
- self.count = None
- self.end = None
- self.stride = 1
- self.format = "%d"
-
- def parse_kv_args(self, args):
- """parse key-value style arguments"""
- for arg in ["start", "end", "count", "stride"]:
- try:
- arg_raw = args.pop(arg, None)
- if arg_raw is None:
- continue
- arg_cooked = int(arg_raw, 0)
- setattr(self, arg, arg_cooked)
- except ValueError:
- raise AnsibleError(
- "can't parse arg %s=%r as integer"
- % (arg, arg_raw)
- )
- if 'format' in args:
- self.format = args.pop("format")
- if args:
- raise AnsibleError(
- "unrecognized arguments to with_sequence: %r"
- % args.keys()
- )
-
- def parse_simple_args(self, term):
- """parse the shortcut forms, return True/False"""
- match = SHORTCUT.match(term)
- if not match:
- return False
-
- _, start, end, _, stride, _, format = match.groups()
-
- if start is not None:
- try:
- start = int(start, 0)
- except ValueError:
- raise AnsibleError("can't parse start=%s as integer" % start)
- if end is not None:
- try:
- end = int(end, 0)
- except ValueError:
- raise AnsibleError("can't parse end=%s as integer" % end)
- if stride is not None:
- try:
- stride = int(stride, 0)
- except ValueError:
- raise AnsibleError("can't parse stride=%s as integer" % stride)
-
- if start is not None:
- self.start = start
- if end is not None:
- self.end = end
- if stride is not None:
- self.stride = stride
- if format is not None:
- self.format = format
-
- def sanity_check(self):
- if self.count is None and self.end is None:
- raise AnsibleError(
- "must specify count or end in with_sequence"
- )
- elif self.count is not None and self.end is not None:
- raise AnsibleError(
- "can't specify both count and end in with_sequence"
- )
- elif self.count is not None:
- # convert count to end
- if self.count != 0:
- self.end = self.start + self.count * self.stride - 1
- else:
- self.start = 0
- self.end = 0
- self.stride = 0
- del self.count
- if self.stride > 0 and self.end < self.start:
- raise AnsibleError("to count backwards make stride negative")
- if self.stride < 0 and self.end > self.start:
- raise AnsibleError("to count forward don't make stride negative")
- if self.format.count('%') != 1:
- raise AnsibleError("bad formatting string: %s" % self.format)
-
- def generate_sequence(self):
- if self.stride > 0:
- adjust = 1
- else:
- adjust = -1
- numbers = xrange(self.start, self.end + adjust, self.stride)
-
- for i in numbers:
- try:
- formatted = self.format % i
- yield formatted
- except (ValueError, TypeError):
- raise AnsibleError(
- "problem formatting %r with %r" % self.format
- )
-
- def run(self, terms, inject=None, **kwargs):
- results = []
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- for term in terms:
- try:
- self.reset() # clear out things for this iteration
-
- try:
- if not self.parse_simple_args(term):
- self.parse_kv_args(utils.parse_kv(term))
- except Exception:
- raise AnsibleError(
- "unknown error parsing with_sequence arguments: %r"
- % term
- )
-
- self.sanity_check()
-
- if self.start != self.end:
- results.extend(self.generate_sequence())
- except AnsibleError:
- raise
- except Exception, e:
- raise AnsibleError(
- "unknown error generating sequence: %s" % str(e)
- )
-
- return results
diff --git a/v1/ansible/runner/lookup_plugins/subelements.py b/v1/ansible/runner/lookup_plugins/subelements.py
deleted file mode 100644
index f33aae717d..0000000000
--- a/v1/ansible/runner/lookup_plugins/subelements.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# (c) 2013, Serge van Ginderachter <serge@vanginderachter.be>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-import ansible.errors as errors
-
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
-
- def run(self, terms, inject=None, **kwargs):
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject)
-
- if not isinstance(terms, list) or not len(terms) == 2:
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
- terms[0] = utils.listify_lookup_plugin_terms(terms[0], self.basedir, inject)
- if not isinstance(terms[0], (list, dict)) or not isinstance(terms[1], basestring):
- raise errors.AnsibleError(
- "subelements lookup expects a list of two items, first a dict or a list, and second a string")
-
- if isinstance(terms[0], dict): # convert to list:
- if terms[0].get('skipped',False) != False:
- # the registered result was completely skipped
- return []
- elementlist = []
- for key in terms[0].iterkeys():
- elementlist.append(terms[0][key])
- else:
- elementlist = terms[0]
- subelement = terms[1]
-
- ret = []
- for item0 in elementlist:
- if not isinstance(item0, dict):
- raise errors.AnsibleError("subelements lookup expects a dictionary, got '%s'" %item0)
- if item0.get('skipped',False) != False:
- # this particular item is to be skipped
- continue
- if not subelement in item0:
- raise errors.AnsibleError("could not find '%s' key in iterated item '%s'" % (subelement, item0))
- if not isinstance(item0[subelement], list):
- raise errors.AnsibleError("the key %s should point to a list, got '%s'" % (subelement, item0[subelement]))
- sublist = item0.pop(subelement, [])
- for item1 in sublist:
- ret.append((item0, item1))
-
- return ret
-
diff --git a/v1/ansible/runner/lookup_plugins/template.py b/v1/ansible/runner/lookup_plugins/template.py
deleted file mode 100644
index e009b6b76b..0000000000
--- a/v1/ansible/runner/lookup_plugins/template.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.utils import template
-import ansible.utils as utils
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- ret = []
- for term in terms:
- ret.append(template.template_from_file(self.basedir, term, inject))
- return ret
diff --git a/v1/ansible/runner/lookup_plugins/together.py b/v1/ansible/runner/lookup_plugins/together.py
deleted file mode 100644
index 07332c9fb9..0000000000
--- a/v1/ansible/runner/lookup_plugins/together.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# (c) 2013, Bradley Young <young.bradley@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import ansible.utils as utils
-from ansible.utils import safe_eval
-import ansible.errors as errors
-from itertools import izip_longest
-
-def flatten(terms):
- ret = []
- for term in terms:
- if isinstance(term, list):
- ret.extend(term)
- elif isinstance(term, tuple):
- ret.extend(term)
- else:
- ret.append(term)
- return ret
-
-class LookupModule(object):
- """
- Transpose a list of arrays:
- [1, 2, 3], [4, 5, 6] -> [1, 4], [2, 5], [3, 6]
- Replace any empty spots in 2nd array with None:
- [1, 2], [3] -> [1, 3], [2, None]
- """
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def __lookup_injects(self, terms, inject):
- results = []
- for x in terms:
- intermediate = utils.listify_lookup_plugin_terms(x, self.basedir, inject)
- results.append(intermediate)
- return results
-
- def run(self, terms, inject=None, **kwargs):
-
- # this code is common with 'items.py' consider moving to utils if we need it again
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
- terms = self.__lookup_injects(terms, inject)
-
- my_list = terms[:]
- if len(my_list) == 0:
- raise errors.AnsibleError("with_together requires at least one element in each list")
- return [flatten(x) for x in izip_longest(*my_list, fillvalue=None)]
-
-
diff --git a/v1/ansible/runner/lookup_plugins/url.py b/v1/ansible/runner/lookup_plugins/url.py
deleted file mode 100644
index b42b3b14da..0000000000
--- a/v1/ansible/runner/lookup_plugins/url.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# (c) 2015, Brian Coca <bcoca@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-import urllib2
-
-class LookupModule(object):
-
- def __init__(self, basedir=None, **kwargs):
- self.basedir = basedir
-
- def run(self, terms, inject=None, **kwargs):
-
- terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- ret = []
- for term in terms:
- try:
- r = urllib2.Request(term)
- response = urllib2.urlopen(r)
- except URLError, e:
- utils.warnings("Failed lookup url for %s : %s" % (term, str(e)))
- continue
- except HTTPError, e:
- utils.warnings("Received HTTP error for %s : %s" % (term, str(e)))
- continue
-
- for line in response.read().splitlines():
- ret.append(line)
-
- return ret
diff --git a/v1/ansible/runner/poller.py b/v1/ansible/runner/poller.py
deleted file mode 100644
index 0218481415..0000000000
--- a/v1/ansible/runner/poller.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import time
-
-from ansible import errors
-
-class AsyncPoller(object):
- """ Manage asynchronous jobs. """
-
- def __init__(self, results, runner):
- self.runner = runner
-
- self.results = { 'contacted': {}, 'dark': {}}
- self.hosts_to_poll = []
- self.completed = False
-
- # flag to determine if at least one host was contacted
- self.active = False
- # True to work with the `and` below
- skipped = True
- jid = None
- for (host, res) in results['contacted'].iteritems():
- if res.get('started', False):
- self.hosts_to_poll.append(host)
- jid = res.get('ansible_job_id', None)
- self.runner.vars_cache[host]['ansible_job_id'] = jid
- self.active = True
- else:
- skipped = skipped and res.get('skipped', False)
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['contacted'][host] = res
- for (host, res) in results['dark'].iteritems():
- self.runner.vars_cache[host]['ansible_job_id'] = ''
- self.results['dark'][host] = res
-
- if not skipped:
- if jid is None:
- raise errors.AnsibleError("unexpected error: unable to determine jid")
- if len(self.hosts_to_poll)==0:
- raise errors.AnsibleError("unexpected error: no hosts to poll")
-
- def poll(self):
- """ Poll the job status.
-
- Returns the changes in this iteration."""
- self.runner.module_name = 'async_status'
- self.runner.module_args = "jid={{ansible_job_id}}"
- self.runner.pattern = "*"
- self.runner.background = 0
- self.runner.complex_args = None
-
- self.runner.inventory.restrict_to(self.hosts_to_poll)
- results = self.runner.run()
- self.runner.inventory.lift_restriction()
-
- hosts = []
- poll_results = { 'contacted': {}, 'dark': {}, 'polled': {}}
- for (host, res) in results['contacted'].iteritems():
- if res.get('started',False):
- hosts.append(host)
- poll_results['polled'][host] = res
- else:
- self.results['contacted'][host] = res
- poll_results['contacted'][host] = res
- if res.get('failed', False) or res.get('rc', 0) != 0:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- else:
- self.runner.callbacks.on_async_ok(host, res, self.runner.vars_cache[host]['ansible_job_id'])
- for (host, res) in results['dark'].iteritems():
- self.results['dark'][host] = res
- poll_results['dark'][host] = res
- if host in self.hosts_to_poll:
- self.runner.callbacks.on_async_failed(host, res, self.runner.vars_cache[host].get('ansible_job_id','XX'))
-
- self.hosts_to_poll = hosts
- if len(hosts)==0:
- self.completed = True
-
- return poll_results
-
- def wait(self, seconds, poll_interval):
- """ Wait a certain time for job completion, check status every poll_interval. """
- # jid is None when all hosts were skipped
- if not self.active:
- return self.results
-
- clock = seconds - poll_interval
- while (clock >= 0 and not self.completed):
- time.sleep(poll_interval)
-
- poll_results = self.poll()
-
- for (host, res) in poll_results['polled'].iteritems():
- if res.get('started'):
- self.runner.callbacks.on_async_poll(host, res, self.runner.vars_cache[host]['ansible_job_id'], clock)
-
- clock = clock - poll_interval
-
- return self.results
diff --git a/v1/ansible/runner/return_data.py b/v1/ansible/runner/return_data.py
deleted file mode 100644
index 8cee506fde..0000000000
--- a/v1/ansible/runner/return_data.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible import utils
-
-class ReturnData(object):
- ''' internal return class for runner execute methods, not part of public API signature '''
-
- __slots__ = [ 'result', 'comm_ok', 'host', 'diff' ]
-
- def __init__(self, conn=None, host=None, result=None,
- comm_ok=True, diff=dict()):
-
- # which host is this ReturnData about?
- if conn is not None:
- self.host = conn.host
- delegate = getattr(conn, 'delegate', None)
- if delegate is not None:
- self.host = delegate
-
- else:
- self.host = host
-
- self.result = result
- self.comm_ok = comm_ok
-
- # if these values are set and used with --diff we can show
- # changes made to particular files
- self.diff = diff
-
- if type(self.result) in [ str, unicode ]:
- self.result = utils.parse_json(self.result, from_remote=True, no_exceptions=True)
-
- if self.host is None:
- raise Exception("host not set")
- if type(self.result) != dict:
- raise Exception("dictionary result expected")
-
- def communicated_ok(self):
- return self.comm_ok
-
- def is_successful(self):
- return self.comm_ok and (self.result.get('failed', False) == False) and ('failed_when_result' in self.result and [not self.result['failed_when_result']] or [self.result.get('rc',0) == 0])[0]
-
diff --git a/v1/ansible/runner/shell_plugins/__init__.py b/v1/ansible/runner/shell_plugins/__init__.py
deleted file mode 100644
index e69de29bb2..0000000000
--- a/v1/ansible/runner/shell_plugins/__init__.py
+++ /dev/null
diff --git a/v1/ansible/runner/shell_plugins/csh.py b/v1/ansible/runner/shell_plugins/csh.py
deleted file mode 100644
index 4e9f8c8af7..0000000000
--- a/v1/ansible/runner/shell_plugins/csh.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
-
-class ShellModule(ShModule):
-
- # How to end lines in a python script one-liner
- _SHELL_EMBEDDED_PY_EOL = '\\\n'
-
- def env_prefix(self, **kwargs):
- return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/v1/ansible/runner/shell_plugins/fish.py b/v1/ansible/runner/shell_plugins/fish.py
deleted file mode 100644
index 137c013c12..0000000000
--- a/v1/ansible/runner/shell_plugins/fish.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-from ansible.runner.shell_plugins.sh import ShellModule as ShModule
-
-class ShellModule(ShModule):
-
- def env_prefix(self, **kwargs):
- return 'env %s' % super(ShellModule, self).env_prefix(**kwargs)
diff --git a/v1/ansible/runner/shell_plugins/powershell.py b/v1/ansible/runner/shell_plugins/powershell.py
deleted file mode 100644
index 850b380edd..0000000000
--- a/v1/ansible/runner/shell_plugins/powershell.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import base64
-import os
-import re
-import random
-import shlex
-import time
-
-_common_args = ['PowerShell', '-NoProfile', '-NonInteractive']
-
-# Primarily for testing, allow explicitly specifying PowerShell version via
-# an environment variable.
-_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
-if _powershell_version:
- _common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
-
-def _escape(value, include_vars=False):
- '''Return value escaped for use in PowerShell command.'''
- # http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
- # http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
- subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
- ('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
- ('\'', '`\''), ('`', '``'), ('\x00', '`0')]
- if include_vars:
- subs.append(('$', '`$'))
- pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
- substs = [s for p, s in subs]
- replace = lambda m: substs[m.lastindex - 1]
- return re.sub(pattern, replace, value)
-
-def _encode_script(script, as_list=False):
- '''Convert a PowerShell script to a single base64-encoded command.'''
- script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
- encoded_script = base64.b64encode(script.encode('utf-16-le'))
- cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
- if as_list:
- return cmd_parts
- return ' '.join(cmd_parts)
-
-def _build_file_cmd(cmd_parts, quote_args=True):
- '''Build command line to run a file, given list of file name plus args.'''
- if quote_args:
- cmd_parts = ['"%s"' % x for x in cmd_parts]
- return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + cmd_parts)
-
-class ShellModule(object):
-
- def env_prefix(self, **kwargs):
- return ''
-
- def join_path(self, *args):
- return os.path.join(*args).replace('/', '\\')
-
- def path_has_trailing_slash(self, path):
- # Allow Windows paths to be specified using either slash.
- return path.endswith('/') or path.endswith('\\')
-
- def chmod(self, mode, path):
- return ''
-
- def remove(self, path, recurse=False):
- path = _escape(path)
- if recurse:
- return _encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
- else:
- return _encode_script('''Remove-Item "%s" -Force;''' % path)
-
- def mkdtemp(self, basefile, system=False, mode=None):
- basefile = _escape(basefile)
- # FIXME: Support system temp path!
- return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
-
- def expand_user(self, user_home_path):
- # PowerShell only supports "~" (not "~username"). Resolve-Path ~ does
- # not seem to work remotely, though by default we are always starting
- # in the user's home directory.
- if user_home_path == '~':
- script = 'Write-Host (Get-Location).Path'
- elif user_home_path.startswith('~\\'):
- script = 'Write-Host ((Get-Location).Path + "%s")' % _escape(user_home_path[1:])
- else:
- script = 'Write-Host "%s"' % _escape(user_home_path)
- return _encode_script(script)
-
- def checksum(self, path, python_interp):
- path = _escape(path)
- script = '''
- If (Test-Path -PathType Leaf "%(path)s")
- {
- $sp = new-object -TypeName System.Security.Cryptography.SHA1CryptoServiceProvider;
- $fp = [System.IO.File]::Open("%(path)s", [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
- [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
- $fp.Dispose();
- }
- ElseIf (Test-Path -PathType Container "%(path)s")
- {
- Write-Host "3";
- }
- Else
- {
- Write-Host "1";
- }
- ''' % dict(path=path)
- return _encode_script(script)
-
- def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
- cmd = cmd.encode('utf-8')
- cmd_parts = shlex.split(cmd, posix=False)
- if not cmd_parts[0].lower().endswith('.ps1'):
- cmd_parts[0] = '%s.ps1' % cmd_parts[0]
- script = _build_file_cmd(cmd_parts, quote_args=False)
- if rm_tmp:
- rm_tmp = _escape(rm_tmp)
- script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
- return _encode_script(script)
diff --git a/v1/ansible/runner/shell_plugins/sh.py b/v1/ansible/runner/shell_plugins/sh.py
deleted file mode 100644
index 81810bcf8f..0000000000
--- a/v1/ansible/runner/shell_plugins/sh.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# (c) 2014, Chris Church <chris@ninemoreminutes.com>
-#
-# This file is part of Ansible.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import pipes
-import ansible.constants as C
-
-_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
-
-class ShellModule(object):
-
- # How to end lines in a python script one-liner
- _SHELL_EMBEDDED_PY_EOL = '\n'
-
- def env_prefix(self, **kwargs):
- '''Build command prefix with environment variables.'''
- env = dict(
- LANG = C.DEFAULT_MODULE_LANG,
- LC_CTYPE = C.DEFAULT_MODULE_LANG,
- LC_MESSAGES = C.DEFAULT_MODULE_LANG,
- )
- env.update(kwargs)
- return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
-
- def join_path(self, *args):
- return os.path.join(*args)
-
- def path_has_trailing_slash(self, path):
- return path.endswith('/')
-
- def chmod(self, mode, path):
- path = pipes.quote(path)
- return 'chmod %s %s' % (mode, path)
-
- def remove(self, path, recurse=False):
- path = pipes.quote(path)
- if recurse:
- return "rm -rf %s >/dev/null 2>&1" % path
- else:
- return "rm -f %s >/dev/null 2>&1" % path
-
- def mkdtemp(self, basefile=None, system=False, mode=None):
- if not basefile:
- basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
- basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
- if system and basetmp.startswith('$HOME'):
- basetmp = self.join_path('/tmp', basefile)
- cmd = 'mkdir -p %s' % basetmp
- if mode:
- cmd += ' && chmod %s %s' % (mode, basetmp)
- cmd += ' && echo %s' % basetmp
- return cmd
-
- def expand_user(self, user_home_path):
- ''' Return a command to expand tildes in a path
-
- It can be either "~" or "~username". We use the POSIX definition of
- a username:
- http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
- http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
- '''
-
- # Check that the user_path to expand is safe
- if user_home_path != '~':
- if not _USER_HOME_PATH_RE.match(user_home_path):
- # pipes.quote will make the shell return the string verbatim
- user_home_path = pipes.quote(user_home_path)
- return 'echo %s' % user_home_path
-
- def checksum(self, path, python_interp):
- # The following test needs to be SH-compliant. BASH-isms will
- # not work if /bin/sh points to a non-BASH shell.
- #
- # In the following test, each condition is a check and logical
- # comparison (|| or &&) that sets the rc value. Every check is run so
- # the last check in the series to fail will be the rc that is
- # returned.
- #
- # If a check fails we error before invoking the hash functions because
- # hash functions may successfully take the hash of a directory on BSDs
- # (UFS filesystem?) which is not what the rest of the ansible code
- # expects
- #
- # If all of the available hashing methods fail we fail with an rc of
- # 0. This logic is added to the end of the cmd at the bottom of this
- # function.
-
- # Return codes:
- # checksum: success!
- # 0: Unknown error
- # 1: Remote file does not exist
- # 2: No read permissions on the file
- # 3: File is a directory
- # 4: No python interpreter
-
- # Quoting gets complex here. We're writing a python string that's
- # used by a variety of shells on the remote host to invoke a python
- # "one-liner".
- shell_escaped_path = pipes.quote(path)
- test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
- csums = [
- "({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
- "({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
- ]
-
- cmd = " || ".join(csums)
- cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
- return cmd
-
- def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
- cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
- new_cmd = " ".join(cmd_parts)
- if rm_tmp:
- new_cmd = '%s; rm -rf %s >/dev/null 2>&1' % (new_cmd, rm_tmp)
- return new_cmd
diff --git a/v1/ansible/utils/__init__.py b/v1/ansible/utils/__init__.py
deleted file mode 100644
index eb6fa2a712..0000000000
--- a/v1/ansible/utils/__init__.py
+++ /dev/null
@@ -1,1662 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import errno
-import sys
-import re
-import os
-import shlex
-import yaml
-import copy
-import optparse
-import operator
-from ansible import errors
-from ansible import __version__
-from ansible.utils.display_functions import *
-from ansible.utils.plugins import *
-from ansible.utils.su_prompts import *
-from ansible.utils.hashing import secure_hash, secure_hash_s, checksum, checksum_s, md5, md5s
-from ansible.callbacks import display
-from ansible.module_utils.splitter import split_args, unquote
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils.unicode import to_bytes, to_unicode
-import ansible.constants as C
-import ast
-import time
-import StringIO
-import stat
-import termios
-import tty
-import pipes
-import random
-import difflib
-import warnings
-import traceback
-import getpass
-import sys
-import subprocess
-import contextlib
-
-from vault import VaultLib
-
-VERBOSITY=0
-
-MAX_FILE_SIZE_FOR_DIFF=1*1024*1024
-
-# caching the compilation of the regex used
-# to check for lookup calls within data
-LOOKUP_REGEX = re.compile(r'lookup\s*\(')
-PRINT_CODE_REGEX = re.compile(r'(?:{[{%]|[%}]})')
-CODE_REGEX = re.compile(r'(?:{%|%})')
-
-
-try:
- # simplejson can be much faster if it's available
- import simplejson as json
-except ImportError:
- import json
-
-try:
- from yaml import CSafeLoader as Loader
-except ImportError:
- from yaml import SafeLoader as Loader
-
-PASSLIB_AVAILABLE = False
-try:
- import passlib.hash
- PASSLIB_AVAILABLE = True
-except:
- pass
-
-try:
- import builtin
-except ImportError:
- import __builtin__ as builtin
-
-KEYCZAR_AVAILABLE=False
-try:
- try:
- # some versions of pycrypto may not have this?
- from Crypto.pct_warnings import PowmInsecureWarning
- except ImportError:
- PowmInsecureWarning = RuntimeWarning
-
- with warnings.catch_warnings(record=True) as warning_handler:
- warnings.simplefilter("error", PowmInsecureWarning)
- try:
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- except PowmInsecureWarning:
- system_warning(
- "The version of gmp you have installed has a known issue regarding " + \
- "timing vulnerabilities when used with pycrypto. " + \
- "If possible, you should update it (i.e. yum update gmp)."
- )
- warnings.resetwarnings()
- warnings.simplefilter("ignore")
- import keyczar.errors as key_errors
- from keyczar.keys import AesKey
- KEYCZAR_AVAILABLE=True
-except ImportError:
- pass
-
-
-###############################################################
-# Abstractions around keyczar
-###############################################################
-
-def key_for_hostname(hostname):
- # fireball mode is an implementation of ansible firing up zeromq via SSH
- # to use no persistent daemons or key management
-
- if not KEYCZAR_AVAILABLE:
- raise errors.AnsibleError("python-keyczar must be installed on the control machine to use accelerated modes")
-
- key_path = os.path.expanduser(C.ACCELERATE_KEYS_DIR)
- if not os.path.exists(key_path):
- os.makedirs(key_path, mode=0700)
- os.chmod(key_path, int(C.ACCELERATE_KEYS_DIR_PERMS, 8))
- elif not os.path.isdir(key_path):
- raise errors.AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
-
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
-
- key_path = os.path.join(key_path, hostname)
-
- # use new AES keys every 2 hours, which means fireball must not allow running for longer either
- if not os.path.exists(key_path) or (time.time() - os.path.getmtime(key_path) > 60*60*2):
- key = AesKey.Generate()
- fd = os.open(key_path, os.O_WRONLY | os.O_CREAT, int(C.ACCELERATE_KEYS_FILE_PERMS, 8))
- fh = os.fdopen(fd, 'w')
- fh.write(str(key))
- fh.close()
- return key
- else:
- if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise errors.AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
- fh = open(key_path)
- key = AesKey.Read(fh.read())
- fh.close()
- return key
-
-def encrypt(key, msg):
- return key.Encrypt(msg)
-
-def decrypt(key, msg):
- try:
- return key.Decrypt(msg)
- except key_errors.InvalidSignatureError:
- raise errors.AnsibleError("decryption failed")
-
-###############################################################
-# UTILITY FUNCTIONS FOR COMMAND LINE TOOLS
-###############################################################
-
-def read_vault_file(vault_password_file):
- """Read a vault password from a file or if executable, execute the script and
- retrieve password from STDOUT
- """
- if vault_password_file:
- this_path = os.path.realpath(os.path.expanduser(vault_password_file))
- if is_executable(this_path):
- try:
- # STDERR not captured to make it easier for users to prompt for input in their scripts
- p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
- except OSError, e:
- raise errors.AnsibleError("problem running %s (%s)" % (' '.join(this_path), e))
- stdout, stderr = p.communicate()
- vault_pass = stdout.strip('\r\n')
- else:
- try:
- f = open(this_path, "rb")
- vault_pass=f.read().strip()
- f.close()
- except (OSError, IOError), e:
- raise errors.AnsibleError("Could not read %s: %s" % (this_path, e))
-
- return vault_pass
- else:
- return None
-
-def err(msg):
- ''' print an error message to stderr '''
-
- print >> sys.stderr, msg
-
-def exit(msg, rc=1):
- ''' quit with an error to stdout and a failure code '''
-
- err(msg)
- sys.exit(rc)
-
-def jsonify(result, format=False):
- ''' format JSON output (uncompressed or uncompressed) '''
-
- if result is None:
- return "{}"
- result2 = result.copy()
- for key, value in result2.items():
- if type(value) is str:
- result2[key] = value.decode('utf-8', 'ignore')
-
- indent = None
- if format:
- indent = 4
-
- try:
- return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
- except UnicodeDecodeError:
- return json.dumps(result2, sort_keys=True, indent=indent)
-
-def write_tree_file(tree, hostname, buf):
- ''' write something into treedir/hostname '''
-
- # TODO: might be nice to append playbook runs per host in a similar way
- # in which case, we'd want append mode.
- path = os.path.join(tree, hostname)
- fd = open(path, "w+")
- fd.write(buf)
- fd.close()
-
-def is_failed(result):
- ''' is a given JSON result a failed result? '''
-
- return ((result.get('rc', 0) != 0) or (result.get('failed', False) in [ True, 'True', 'true']))
-
-def is_changed(result):
- ''' is a given JSON result a changed result? '''
-
- return (result.get('changed', False) in [ True, 'True', 'true'])
-
-def check_conditional(conditional, basedir, inject, fail_on_undefined=False):
- from ansible.utils import template
-
- if conditional is None or conditional == '':
- return True
-
- if isinstance(conditional, list):
- for x in conditional:
- if not check_conditional(x, basedir, inject, fail_on_undefined=fail_on_undefined):
- return False
- return True
-
- if not isinstance(conditional, basestring):
- return conditional
-
- conditional = conditional.replace("jinja2_compare ","")
- # allow variable names
- if conditional in inject and '-' not in to_unicode(inject[conditional], nonstring='simplerepr'):
- conditional = to_unicode(inject[conditional], nonstring='simplerepr')
- conditional = template.template(basedir, conditional, inject, fail_on_undefined=fail_on_undefined)
- original = to_unicode(conditional, nonstring='simplerepr').replace("jinja2_compare ","")
- # a Jinja2 evaluation that results in something Python can eval!
- presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
- conditional = template.template(basedir, presented, inject)
- val = conditional.strip()
- if val == presented:
- # the templating failed, meaning most likely a
- # variable was undefined. If we happened to be
- # looking for an undefined variable, return True,
- # otherwise fail
- if "is undefined" in conditional:
- return True
- elif "is defined" in conditional:
- return False
- else:
- raise errors.AnsibleError("error while evaluating conditional: %s" % original)
- elif val == "True":
- return True
- elif val == "False":
- return False
- else:
- raise errors.AnsibleError("unable to evaluate conditional: %s" % original)
-
-def is_executable(path):
- '''is the given path executable?'''
- return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE]
- or stat.S_IXGRP & os.stat(path)[stat.ST_MODE]
- or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
-
-def unfrackpath(path):
- '''
- returns a path that is free of symlinks, environment
- variables, relative path traversals and symbols (~)
- example:
- '$HOME/../../var/mail' becomes '/var/spool/mail'
- '''
- return os.path.normpath(os.path.realpath(os.path.expandvars(os.path.expanduser(path))))
-
-def prepare_writeable_dir(tree,mode=0777):
- ''' make sure a directory exists and is writeable '''
-
- # modify the mode to ensure the owner at least
- # has read/write access to this directory
- mode |= 0700
-
- # make sure the tree path is always expanded
- # and normalized and free of symlinks
- tree = unfrackpath(tree)
-
- if not os.path.exists(tree):
- try:
- os.makedirs(tree, mode)
- except (IOError, OSError), e:
- raise errors.AnsibleError("Could not make dir %s: %s" % (tree, e))
- if not os.access(tree, os.W_OK):
- raise errors.AnsibleError("Cannot write to path %s" % tree)
- return tree
-
-def path_dwim(basedir, given):
- '''
- make relative paths work like folks expect.
- '''
-
- if given.startswith("'"):
- given = given[1:-1]
-
- if given.startswith("/"):
- return os.path.abspath(given)
- elif given.startswith("~"):
- return os.path.abspath(os.path.expanduser(given))
- else:
- if basedir is None:
- basedir = "."
- return os.path.abspath(os.path.join(basedir, given))
-
-def path_dwim_relative(original, dirname, source, playbook_base, check=True):
- ''' find one file in a directory one level up in a dir named dirname relative to current '''
- # (used by roles code)
-
- from ansible.utils import template
-
-
- basedir = os.path.dirname(original)
- if os.path.islink(basedir):
- basedir = unfrackpath(basedir)
- template2 = os.path.join(basedir, dirname, source)
- else:
- template2 = os.path.join(basedir, '..', dirname, source)
- source2 = path_dwim(basedir, template2)
- if os.path.exists(source2):
- return source2
- obvious_local_path = path_dwim(playbook_base, source)
- if os.path.exists(obvious_local_path):
- return obvious_local_path
- if check:
- raise errors.AnsibleError("input file not found at %s or %s" % (source2, obvious_local_path))
- return source2 # which does not exist
-
-def repo_url_to_role_name(repo_url):
- # gets the role name out of a repo like
- # http://git.example.com/repos/repo.git" => "repo"
-
- if '://' not in repo_url and '@' not in repo_url:
- return repo_url
- trailing_path = repo_url.split('/')[-1]
- if trailing_path.endswith('.git'):
- trailing_path = trailing_path[:-4]
- if trailing_path.endswith('.tar.gz'):
- trailing_path = trailing_path[:-7]
- if ',' in trailing_path:
- trailing_path = trailing_path.split(',')[0]
- return trailing_path
-
-
-def role_spec_parse(role_spec):
- # takes a repo and a version like
- # git+http://git.example.com/repos/repo.git,v1.0
- # and returns a list of properties such as:
- # {
- # 'scm': 'git',
- # 'src': 'http://git.example.com/repos/repo.git',
- # 'version': 'v1.0',
- # 'name': 'repo'
- # }
-
- role_spec = role_spec.strip()
- role_version = ''
- default_role_versions = dict(git='master', hg='tip')
- if role_spec == "" or role_spec.startswith("#"):
- return (None, None, None, None)
-
- tokens = [s.strip() for s in role_spec.split(',')]
-
- # assume https://github.com URLs are git+https:// URLs and not
- # tarballs unless they end in '.zip'
- if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
- tokens[0] = 'git+' + tokens[0]
-
- if '+' in tokens[0]:
- (scm, role_url) = tokens[0].split('+')
- else:
- scm = None
- role_url = tokens[0]
- if len(tokens) >= 2:
- role_version = tokens[1]
- if len(tokens) == 3:
- role_name = tokens[2]
- else:
- role_name = repo_url_to_role_name(tokens[0])
- if scm and not role_version:
- role_version = default_role_versions.get(scm, '')
- return dict(scm=scm, src=role_url, version=role_version, name=role_name)
-
-
-def role_yaml_parse(role):
- if 'role' in role:
- # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
- role_info = role_spec_parse(role['role'])
- if isinstance(role_info, dict):
- # Warning: Slight change in behaviour here. name may be being
- # overloaded. Previously, name was only a parameter to the role.
- # Now it is both a parameter to the role and the name that
- # ansible-galaxy will install under on the local system.
- if 'name' in role and 'name' in role_info:
- del role_info['name']
- role.update(role_info)
- else:
- # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
- if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
- role["src"] = "git+" + role["src"]
-
- if '+' in role["src"]:
- (scm, src) = role["src"].split('+')
- role["scm"] = scm
- role["src"] = src
-
- if 'name' not in role:
- role["name"] = repo_url_to_role_name(role["src"])
-
- if 'version' not in role:
- role['version'] = ''
-
- if 'scm' not in role:
- role['scm'] = None
-
- return role
-
-
-def json_loads(data):
- ''' parse a JSON string and return a data structure '''
- try:
- loaded = json.loads(data)
- except ValueError,e:
- raise errors.AnsibleError("Unable to read provided data as JSON: %s" % str(e))
-
- return loaded
-
-def _clean_data(orig_data, from_remote=False, from_inventory=False):
- ''' remove jinja2 template tags from a string '''
-
- if not isinstance(orig_data, basestring):
- return orig_data
-
- # when the data is marked as having come from a remote, we always
- # replace any print blocks (ie. {{var}}), however when marked as coming
- # from inventory we only replace print blocks that contain a call to
- # a lookup plugin (ie. {{lookup('foo','bar'))}})
- replace_prints = from_remote or (from_inventory and '{{' in orig_data and LOOKUP_REGEX.search(orig_data) is not None)
-
- regex = PRINT_CODE_REGEX if replace_prints else CODE_REGEX
-
- with contextlib.closing(StringIO.StringIO(orig_data)) as data:
- # these variables keep track of opening block locations, as we only
- # want to replace matched pairs of print/block tags
- print_openings = []
- block_openings = []
- for mo in regex.finditer(orig_data):
- token = mo.group(0)
- token_start = mo.start(0)
-
- if token[0] == '{':
- if token == '{%':
- block_openings.append(token_start)
- elif token == '{{':
- print_openings.append(token_start)
-
- elif token[1] == '}':
- prev_idx = None
- if token == '%}' and block_openings:
- prev_idx = block_openings.pop()
- elif token == '}}' and print_openings:
- prev_idx = print_openings.pop()
-
- if prev_idx is not None:
- # replace the opening
- data.seek(prev_idx, os.SEEK_SET)
- data.write('{#')
- # replace the closing
- data.seek(token_start, os.SEEK_SET)
- data.write('#}')
-
- else:
- assert False, 'Unhandled regex match'
-
- return data.getvalue()
-
-def _clean_data_struct(orig_data, from_remote=False, from_inventory=False):
- '''
- walk a complex data structure, and use _clean_data() to
- remove any template tags that may exist
- '''
- if not from_remote and not from_inventory:
- raise errors.AnsibleErrors("when cleaning data, you must specify either from_remote or from_inventory")
- if isinstance(orig_data, dict):
- data = orig_data.copy()
- for key in data:
- new_key = _clean_data_struct(key, from_remote, from_inventory)
- new_val = _clean_data_struct(data[key], from_remote, from_inventory)
- if key != new_key:
- del data[key]
- data[new_key] = new_val
- elif isinstance(orig_data, list):
- data = orig_data[:]
- for i in range(0, len(data)):
- data[i] = _clean_data_struct(data[i], from_remote, from_inventory)
- elif isinstance(orig_data, basestring):
- data = _clean_data(orig_data, from_remote, from_inventory)
- else:
- data = orig_data
- return data
-
-def parse_json(raw_data, from_remote=False, from_inventory=False, no_exceptions=False):
- ''' this version for module return data only '''
-
- orig_data = raw_data
-
- # ignore stuff like tcgetattr spewage or other warnings
- data = filter_leading_non_json_lines(raw_data)
-
- try:
- results = json.loads(data)
- except:
- if no_exceptions:
- return dict(failed=True, parsed=False, msg=raw_data)
- else:
- raise
-
- if from_remote:
- results = _clean_data_struct(results, from_remote, from_inventory)
-
- return results
-
-def serialize_args(args):
- '''
- Flattens a dictionary args to a k=v string
- '''
- module_args = ""
- for (k,v) in args.iteritems():
- if isinstance(v, basestring):
- module_args = "%s=%s %s" % (k, pipes.quote(v), module_args)
- elif isinstance(v, bool):
- module_args = "%s=%s %s" % (k, str(v), module_args)
- return module_args.strip()
-
-def merge_module_args(current_args, new_args):
- '''
- merges either a dictionary or string of k=v pairs with another string of k=v pairs,
- and returns a new k=v string without duplicates.
- '''
- if not isinstance(current_args, basestring):
- raise errors.AnsibleError("expected current_args to be a basestring")
- # we use parse_kv to split up the current args into a dictionary
- final_args = parse_kv(current_args)
- if isinstance(new_args, dict):
- final_args.update(new_args)
- elif isinstance(new_args, basestring):
- new_args_kv = parse_kv(new_args)
- final_args.update(new_args_kv)
- return serialize_args(final_args)
-
-def parse_yaml(data, path_hint=None):
- ''' convert a yaml string to a data structure. Also supports JSON, ssssssh!!!'''
-
- stripped_data = data.lstrip()
- loaded = None
- if stripped_data.startswith("{") or stripped_data.startswith("["):
- # since the line starts with { or [ we can infer this is a JSON document.
- try:
- loaded = json.loads(data)
- except ValueError, ve:
- if path_hint:
- raise errors.AnsibleError(path_hint + ": " + str(ve))
- else:
- raise errors.AnsibleError(str(ve))
- else:
- # else this is pretty sure to be a YAML document
- loaded = yaml.load(data, Loader=Loader)
-
- return loaded
-
-def process_common_errors(msg, probline, column):
- replaced = probline.replace(" ","")
-
- if ":{{" in replaced and "}}" in replaced:
- msg = msg + """
-This one looks easy to fix. YAML thought it was looking for the start of a
-hash/dictionary and was confused to see a second "{". Most likely this was
-meant to be an ansible template evaluation instead, so we have to give the
-parser a small hint that we wanted a string instead. The solution here is to
-just quote the entire value.
-
-For instance, if the original line was:
-
- app_path: {{ base_path }}/foo
-
-It should be written as:
-
- app_path: "{{ base_path }}/foo"
-"""
- return msg
-
- elif len(probline) and len(probline) > 1 and len(probline) > column and probline[column] == ":" and probline.count(':') > 1:
- msg = msg + """
-This one looks easy to fix. There seems to be an extra unquoted colon in the line
-and this is confusing the parser. It was only expecting to find one free
-colon. The solution is just add some quotes around the colon, or quote the
-entire line after the first colon.
-
-For instance, if the original line was:
-
- copy: src=file.txt dest=/path/filename:with_colon.txt
-
-It can be written as:
-
- copy: src=file.txt dest='/path/filename:with_colon.txt'
-
-Or:
-
- copy: 'src=file.txt dest=/path/filename:with_colon.txt'
-
-
-"""
- return msg
- else:
- parts = probline.split(":")
- if len(parts) > 1:
- middle = parts[1].strip()
- match = False
- unbalanced = False
- if middle.startswith("'") and not middle.endswith("'"):
- match = True
- elif middle.startswith('"') and not middle.endswith('"'):
- match = True
- if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and probline.count("'") > 2 or probline.count('"') > 2:
- unbalanced = True
- if match:
- msg = msg + """
-This one looks easy to fix. It seems that there is a value started
-with a quote, and the YAML parser is expecting to see the line ended
-with the same kind of quote. For instance:
-
- when: "ok" in result.stdout
-
-Could be written as:
-
- when: '"ok" in result.stdout'
-
-or equivalently:
-
- when: "'ok' in result.stdout"
-
-"""
- return msg
-
- if unbalanced:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-unbalanced quotes. If starting a value with a quote, make sure the
-line ends with the same set of quotes. For instance this arbitrary
-example:
-
- foo: "bad" "wolf"
-
-Could be written as:
-
- foo: '"bad" "wolf"'
-
-"""
- return msg
-
- return msg
-
-def process_yaml_error(exc, data, path=None, show_content=True):
- if hasattr(exc, 'problem_mark'):
- mark = exc.problem_mark
- if show_content:
- if mark.line -1 >= 0:
- before_probline = data.split("\n")[mark.line-1]
- else:
- before_probline = ''
- probline = data.split("\n")[mark.line]
- arrow = " " * mark.column + "^"
- msg = """Syntax Error while loading YAML script, %s
-Note: The error may actually appear before this position: line %s, column %s
-
-%s
-%s
-%s""" % (path, mark.line + 1, mark.column + 1, before_probline, probline, arrow)
-
- unquoted_var = None
- if '{{' in probline and '}}' in probline:
- if '"{{' not in probline or "'{{" not in probline:
- unquoted_var = True
-
- if not unquoted_var:
- msg = process_common_errors(msg, probline, mark.column)
- else:
- msg = msg + """
-We could be wrong, but this one looks like it might be an issue with
-missing quotes. Always quote template expression brackets when they
-start a value. For instance:
-
- with_items:
- - {{ foo }}
-
-Should be written as:
-
- with_items:
- - "{{ foo }}"
-
-"""
- else:
- # most likely displaying a file with sensitive content,
- # so don't show any of the actual lines of yaml just the
- # line number itself
- msg = """Syntax error while loading YAML script, %s
-The error appears to have been on line %s, column %s, but may actually
-be before there depending on the exact syntax problem.
-""" % (path, mark.line + 1, mark.column + 1)
-
- else:
- # No problem markers means we have to throw a generic
- # "stuff messed up" type message. Sry bud.
- if path:
- msg = "Could not parse YAML. Check over %s again." % path
- else:
- msg = "Could not parse YAML."
- raise errors.AnsibleYAMLValidationFailed(msg)
-
-
-def parse_yaml_from_file(path, vault_password=None):
- ''' convert a yaml file to a data structure '''
-
- data = None
- show_content = True
-
- try:
- data = open(path).read()
- except IOError:
- raise errors.AnsibleError("file could not read: %s" % path)
-
- vault = VaultLib(password=vault_password)
- if vault.is_encrypted(data):
- # if the file is encrypted and no password was specified,
- # the decrypt call would throw an error, but we check first
- # since the decrypt function doesn't know the file name
- if vault_password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt %s" % path)
- data = vault.decrypt(data)
- show_content = False
-
- try:
- return parse_yaml(data, path_hint=path)
- except yaml.YAMLError, exc:
- process_yaml_error(exc, data, path, show_content)
-
-def parse_kv(args):
- ''' convert a string of key/value items to a dict '''
- options = {}
- if args is not None:
- try:
- vargs = split_args(args)
- except ValueError, ve:
- if 'no closing quotation' in str(ve).lower():
- raise errors.AnsibleError("error parsing argument string, try quoting the entire line.")
- else:
- raise
- for x in vargs:
- if "=" in x:
- k, v = x.split("=",1)
- options[k.strip()] = unquote(v.strip())
- return options
-
-def _validate_both_dicts(a, b):
-
- if not (isinstance(a, dict) and isinstance(b, dict)):
- raise errors.AnsibleError(
- "failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__)
- )
-
-def merge_hash(a, b):
- ''' recursively merges hash b into a
- keys from b take precedence over keys from a '''
-
- result = {}
-
- # we check here as well as in combine_vars() since this
- # function can work recursively with nested dicts
- _validate_both_dicts(a, b)
-
- for dicts in a, b:
- # next, iterate over b keys and values
- for k, v in dicts.iteritems():
- # if there's already such key in a
- # and that key contains dict
- if k in result and isinstance(result[k], dict):
- # merge those dicts recursively
- result[k] = merge_hash(a[k], v)
- else:
- # otherwise, just copy a value from b to a
- result[k] = v
-
- return result
-
-def default(value, function):
- ''' syntactic sugar around lazy evaluation of defaults '''
- if value is None:
- return function()
- return value
-
-
-def _git_repo_info(repo_path):
- ''' returns a string containing git branch, commit id and commit date '''
- result = None
- if os.path.exists(repo_path):
- # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
- if os.path.isfile(repo_path):
- try:
- gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
- # There is a possibility the .git file to have an absolute path.
- if os.path.isabs(gitdir):
- repo_path = gitdir
- else:
- repo_path = os.path.join(repo_path[:-4], gitdir)
- except (IOError, AttributeError):
- return ''
- f = open(os.path.join(repo_path, "HEAD"))
- branch = f.readline().split('/')[-1].rstrip("\n")
- f.close()
- branch_path = os.path.join(repo_path, "refs", "heads", branch)
- if os.path.exists(branch_path):
- f = open(branch_path)
- commit = f.readline()[:10]
- f.close()
- else:
- # detached HEAD
- commit = branch[:10]
- branch = 'detached HEAD'
- branch_path = os.path.join(repo_path, "HEAD")
-
- date = time.localtime(os.stat(branch_path).st_mtime)
- if time.daylight == 0:
- offset = time.timezone
- else:
- offset = time.altzone
- result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
- time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
- else:
- result = ''
- return result
-
-
-def _gitinfo():
- basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
- repo_path = os.path.join(basedir, '.git')
- result = _git_repo_info(repo_path)
- submodules = os.path.join(basedir, '.gitmodules')
- if not os.path.exists(submodules):
- return result
- f = open(submodules)
- for line in f:
- tokens = line.strip().split(' ')
- if tokens[0] == 'path':
- submodule_path = tokens[2]
- submodule_info =_git_repo_info(os.path.join(basedir, submodule_path, '.git'))
- if not submodule_info:
- submodule_info = ' not found - use git submodule update --init ' + submodule_path
- result += "\n {0}: {1}".format(submodule_path, submodule_info)
- f.close()
- return result
-
-
-def version(prog):
- result = "{0} {1}".format(prog, __version__)
- gitinfo = _gitinfo()
- if gitinfo:
- result = result + " {0}".format(gitinfo)
- result = result + "\n configured module search path = %s" % C.DEFAULT_MODULE_PATH
- return result
-
-def version_info(gitinfo=False):
- if gitinfo:
- # expensive call, user with care
- ansible_version_string = version('')
- else:
- ansible_version_string = __version__
- ansible_version = ansible_version_string.split()[0]
- ansible_versions = ansible_version.split('.')
- for counter in range(len(ansible_versions)):
- if ansible_versions[counter] == "":
- ansible_versions[counter] = 0
- try:
- ansible_versions[counter] = int(ansible_versions[counter])
- except:
- pass
- if len(ansible_versions) < 3:
- for counter in range(len(ansible_versions), 3):
- ansible_versions.append(0)
- return {'string': ansible_version_string.strip(),
- 'full': ansible_version,
- 'major': ansible_versions[0],
- 'minor': ansible_versions[1],
- 'revision': ansible_versions[2]}
-
-def getch():
- ''' read in a single character '''
- fd = sys.stdin.fileno()
- old_settings = termios.tcgetattr(fd)
- try:
- tty.setraw(sys.stdin.fileno())
- ch = sys.stdin.read(1)
- finally:
- termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
- return ch
-
-def sanitize_output(arg_string):
- ''' strips private info out of a string '''
-
- private_keys = ('password', 'login_password')
-
- output = []
- for part in arg_string.split():
- try:
- (k, v) = part.split('=', 1)
- except ValueError:
- v = heuristic_log_sanitize(part)
- output.append(v)
- continue
-
- if k in private_keys:
- v = 'VALUE_HIDDEN'
- else:
- v = heuristic_log_sanitize(v)
- output.append('%s=%s' % (k, v))
-
- output = ' '.join(output)
- return output
-
-
-####################################################################
-# option handling code for /usr/bin/ansible and ansible-playbook
-# below this line
-
-class SortedOptParser(optparse.OptionParser):
- '''Optparser which sorts the options by opt before outputting --help'''
-
- def format_help(self, formatter=None):
- self.option_list.sort(key=operator.methodcaller('get_opt_string'))
- return optparse.OptionParser.format_help(self, formatter=None)
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def base_parser(constants=C, usage="", output_opts=False, runas_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, diff_opts=False):
- ''' create an options parser for any ansible script '''
-
- parser = SortedOptParser(usage, version=version("%prog"))
- parser.add_option('-v','--verbose', default=False, action="callback",
- callback=increment_debug, help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
-
- parser.add_option('-f','--forks', dest='forks', default=constants.DEFAULT_FORKS, type='int',
- help="specify number of parallel processes to use (default=%s)" % constants.DEFAULT_FORKS)
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="specify inventory host file (default=%s)" % constants.DEFAULT_HOST_LIST,
- default=constants.DEFAULT_HOST_LIST)
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-u', '--user', default=constants.DEFAULT_REMOTE_USER, dest='remote_user',
- help='connect as this user (default=%s)' % constants.DEFAULT_REMOTE_USER)
- parser.add_option('-k', '--ask-pass', default=False, dest='ask_pass', action='store_true',
- help='ask for SSH password')
- parser.add_option('--private-key', default=constants.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
- help='use this file to authenticate the connection')
- parser.add_option('--ask-vault-pass', default=False, dest='ask_vault_pass', action='store_true',
- help='ask for vault password')
- parser.add_option('--vault-password-file', default=constants.DEFAULT_VAULT_PASSWORD_FILE,
- dest='vault_password_file', help="vault password file")
- parser.add_option('--list-hosts', dest='listhosts', action='store_true',
- help='outputs a list of matching hosts; does not execute anything else')
- parser.add_option('-M', '--module-path', dest='module_path',
- help="specify path(s) to module library (default=%s)" % constants.DEFAULT_MODULE_PATH,
- default=None)
-
- if subset_opts:
- parser.add_option('-l', '--limit', default=constants.DEFAULT_SUBSET, dest='subset',
- help='further limit selected hosts to an additional pattern')
-
- parser.add_option('-T', '--timeout', default=constants.DEFAULT_TIMEOUT, type='int',
- dest='timeout',
- help="override the SSH timeout in seconds (default=%s)" % constants.DEFAULT_TIMEOUT)
-
- if output_opts:
- parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
- help='condense output')
- parser.add_option('-t', '--tree', dest='tree', default=None,
- help='log output to this directory')
-
- if runas_opts:
- # priv user defaults to root later on to enable detecting when this option was given here
- parser.add_option('-K', '--ask-sudo-pass', default=constants.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password (deprecated, use become)')
- parser.add_option('--ask-su-pass', default=constants.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
- help='ask for su password (deprecated, use become)')
- parser.add_option("-s", "--sudo", default=constants.DEFAULT_SUDO, action="store_true", dest='sudo',
- help="run operations with sudo (nopasswd) (deprecated, use become)")
- parser.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
- help='desired sudo user (default=root) (deprecated, use become)')
- parser.add_option('-S', '--su', default=constants.DEFAULT_SU, action='store_true',
- help='run operations with su (deprecated, use become)')
- parser.add_option('-R', '--su-user', default=None,
- help='run operations with su as this user (default=%s) (deprecated, use become)' % constants.DEFAULT_SU_USER)
-
- # consolidated privilege escalation (become)
- parser.add_option("-b", "--become", default=constants.DEFAULT_BECOME, action="store_true", dest='become',
- help="run operations with become (nopasswd implied)")
- parser.add_option('--become-method', dest='become_method', default=constants.DEFAULT_BECOME_METHOD, type='string',
- help="privilege escalation method to use (default=%s), valid choices: [ %s ]" % (constants.DEFAULT_BECOME_METHOD, ' | '.join(constants.BECOME_METHODS)))
- parser.add_option('--become-user', default=None, dest='become_user', type='string',
- help='run operations as this user (default=%s)' % constants.DEFAULT_BECOME_USER)
- parser.add_option('--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
- help='ask for privilege escalation password')
-
-
- if connect_opts:
- parser.add_option('-c', '--connection', dest='connection',
- default=constants.DEFAULT_TRANSPORT,
- help="connection type to use (default=%s)" % constants.DEFAULT_TRANSPORT)
-
- if async_opts:
- parser.add_option('-P', '--poll', default=constants.DEFAULT_POLL_INTERVAL, type='int',
- dest='poll_interval',
- help="set the poll interval if using -B (default=%s)" % constants.DEFAULT_POLL_INTERVAL)
- parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
- help='run asynchronously, failing after X seconds (default=N/A)')
-
- if check_opts:
- parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
- help="don't make any changes; instead, try to predict some of the changes that may occur"
- )
-
- if diff_opts:
- parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
- help="when changing (small) files and templates, show the differences in those files; works great with --check"
- )
-
- return parser
-
-def parse_extra_vars(extra_vars_opts, vault_pass):
- extra_vars = {}
- for extra_vars_opt in extra_vars_opts:
- extra_vars_opt = to_unicode(extra_vars_opt)
- if extra_vars_opt.startswith(u"@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- extra_vars = combine_vars(extra_vars, parse_yaml_from_file(extra_vars_opt[1:], vault_password=vault_pass))
- elif extra_vars_opt and extra_vars_opt[0] in u'[{':
- # Arguments as YAML
- extra_vars = combine_vars(extra_vars, parse_yaml(extra_vars_opt))
- else:
- # Arguments as Key-value
- extra_vars = combine_vars(extra_vars, parse_kv(extra_vars_opt))
- return extra_vars
-
-def ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=False, confirm_vault=False, confirm_new=False):
-
- vault_pass = None
- new_vault_pass = None
-
- if ask_vault_pass:
- vault_pass = getpass.getpass(prompt="Vault password: ")
-
- if ask_vault_pass and confirm_vault:
- vault_pass2 = getpass.getpass(prompt="Confirm Vault password: ")
- if vault_pass != vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- if ask_new_vault_pass:
- new_vault_pass = getpass.getpass(prompt="New Vault password: ")
-
- if ask_new_vault_pass and confirm_new:
- new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
- if new_vault_pass != new_vault_pass2:
- raise errors.AnsibleError("Passwords do not match")
-
- # enforce no newline chars at the end of passwords
- if vault_pass:
- vault_pass = to_bytes(vault_pass, errors='strict', nonstring='simplerepr').strip()
- if new_vault_pass:
- new_vault_pass = to_bytes(new_vault_pass, errors='strict', nonstring='simplerepr').strip()
-
- return vault_pass, new_vault_pass
-
-def ask_passwords(ask_pass=False, become_ask_pass=False, ask_vault_pass=False, become_method=C.DEFAULT_BECOME_METHOD):
- sshpass = None
- becomepass = None
- vaultpass = None
- become_prompt = ''
-
- if ask_pass:
- sshpass = getpass.getpass(prompt="SSH password: ")
- become_prompt = "%s password[defaults to SSH password]: " % become_method.upper()
- if sshpass:
- sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
- else:
- become_prompt = "%s password: " % become_method.upper()
-
- if become_ask_pass:
- becomepass = getpass.getpass(prompt=become_prompt)
- if ask_pass and becomepass == '':
- becomepass = sshpass
- if becomepass:
- becomepass = to_bytes(becomepass)
-
- if ask_vault_pass:
- vaultpass = getpass.getpass(prompt="Vault password: ")
- if vaultpass:
- vaultpass = to_bytes(vaultpass, errors='strict', nonstring='simplerepr').strip()
-
- return (sshpass, becomepass, vaultpass)
-
-
-def choose_pass_prompt(options):
-
- if options.ask_su_pass:
- return 'su'
- elif options.ask_sudo_pass:
- return 'sudo'
-
- return options.become_method
-
-def normalize_become_options(options):
-
- options.become_ask_pass = options.become_ask_pass or options.ask_sudo_pass or options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
- options.become_user = options.become_user or options.sudo_user or options.su_user or C.DEFAULT_BECOME_USER
-
- if options.become:
- pass
- elif options.sudo:
- options.become = True
- options.become_method = 'sudo'
- elif options.su:
- options.become = True
- options.become_method = 'su'
-
-
-def do_encrypt(result, encrypt, salt_size=None, salt=None):
- if PASSLIB_AVAILABLE:
- try:
- crypt = getattr(passlib.hash, encrypt)
- except:
- raise errors.AnsibleError("passlib does not support '%s' algorithm" % encrypt)
-
- if salt_size:
- result = crypt.encrypt(result, salt_size=salt_size)
- elif salt:
- result = crypt.encrypt(result, salt=salt)
- else:
- result = crypt.encrypt(result)
- else:
- raise errors.AnsibleError("passlib must be installed to encrypt vars_prompt values")
-
- return result
-
-def last_non_blank_line(buf):
-
- all_lines = buf.splitlines()
- all_lines.reverse()
- for line in all_lines:
- if (len(line) > 0):
- return line
- # shouldn't occur unless there's no output
- return ""
-
-def filter_leading_non_json_lines(buf):
- '''
- used to avoid random output from SSH at the top of JSON output, like messages from
- tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).
-
- need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
- filter only leading lines since multiline JSON is valid.
- '''
-
- filtered_lines = StringIO.StringIO()
- stop_filtering = False
- for line in buf.splitlines():
- if stop_filtering or line.startswith('{') or line.startswith('['):
- stop_filtering = True
- filtered_lines.write(line + '\n')
- return filtered_lines.getvalue()
-
-def boolean(value):
- val = str(value)
- if val.lower() in [ "true", "t", "y", "1", "yes" ]:
- return True
- else:
- return False
-
-def make_become_cmd(cmd, user, shell, method, flags=None, exe=None):
- """
- helper function for connection plugins to create privilege escalation commands
- """
-
- randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32))
- success_key = 'BECOME-SUCCESS-%s' % randbits
- prompt = None
- becomecmd = None
-
- shell = shell or '$SHELL'
-
- if method == 'sudo':
- # Rather than detect if sudo wants a password this time, -k makes sudo always ask for
- # a password if one is required. Passing a quoted compound command to sudo (or sudo -s)
- # directly doesn't work, so we shellquote it with pipes.quote() and pass the quoted
- # string to the user's shell. We loop reading output until we see the randomly-generated
- # sudo prompt set with the -p option.
- prompt = '[sudo via ansible, key=%s] password: ' % randbits
- exe = exe or C.DEFAULT_SUDO_EXE
- becomecmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % \
- (exe, exe, flags or C.DEFAULT_SUDO_FLAGS, prompt, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'su':
- exe = exe or C.DEFAULT_SU_EXE
- flags = flags or C.DEFAULT_SU_FLAGS
- becomecmd = '%s %s %s -c "%s -c %s"' % (exe, flags, user, shell, pipes.quote('echo %s; %s' % (success_key, cmd)))
-
- elif method == 'pbrun':
- prompt = 'assword:'
- exe = exe or 'pbrun'
- flags = flags or ''
- becomecmd = '%s -b -l %s -u %s "%s"' % (exe, flags, user, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- elif method == 'pfexec':
- exe = exe or 'pfexec'
- flags = flags or ''
- # No user as it uses it's own exec_attr to figure it out
- becomecmd = '%s %s "%s"' % (exe, flags, pipes.quote('echo %s; %s' % (success_key,cmd)))
-
- if becomecmd is None:
- raise errors.AnsibleError("Privilege escalation method not found: %s" % method)
-
- return (('%s -c ' % shell) + pipes.quote(becomecmd), prompt, success_key)
-
-
-def make_sudo_cmd(sudo_exe, sudo_user, executable, cmd):
- """
- helper function for connection plugins to create sudo commands
- """
- return make_become_cmd(cmd, sudo_user, executable, 'sudo', C.DEFAULT_SUDO_FLAGS, sudo_exe)
-
-
-def make_su_cmd(su_user, executable, cmd):
- """
- Helper function for connection plugins to create direct su commands
- """
- return make_become_cmd(cmd, su_user, executable, 'su', C.DEFAULT_SU_FLAGS, C.DEFAULT_SU_EXE)
-
-def get_diff(diff):
- # called by --diff usage in playbook and runner via callbacks
- # include names in diffs 'before' and 'after' and do diff -U 10
-
- try:
- with warnings.catch_warnings():
- warnings.simplefilter('ignore')
- ret = []
- if 'dst_binary' in diff:
- ret.append("diff skipped: destination file appears to be binary\n")
- if 'src_binary' in diff:
- ret.append("diff skipped: source file appears to be binary\n")
- if 'dst_larger' in diff:
- ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
- if 'src_larger' in diff:
- ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
- if 'before' in diff and 'after' in diff:
- if 'before_header' in diff:
- before_header = "before: %s" % diff['before_header']
- else:
- before_header = 'before'
- if 'after_header' in diff:
- after_header = "after: %s" % diff['after_header']
- else:
- after_header = 'after'
- differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
- for line in list(differ):
- ret.append(line)
- return u"".join(ret)
- except UnicodeDecodeError:
- return ">> the files are different, but the diff library cannot compare unicode strings"
-
-def is_list_of_strings(items):
- for x in items:
- if not isinstance(x, basestring):
- return False
- return True
-
-def list_union(a, b):
- result = []
- for x in a:
- if x not in result:
- result.append(x)
- for x in b:
- if x not in result:
- result.append(x)
- return result
-
-def list_intersection(a, b):
- result = []
- for x in a:
- if x in b and x not in result:
- result.append(x)
- return result
-
-def list_difference(a, b):
- result = []
- for x in a:
- if x not in b and x not in result:
- result.append(x)
- for x in b:
- if x not in a and x not in result:
- result.append(x)
- return result
-
-def contains_vars(data):
- '''
- returns True if the data contains a variable pattern
- '''
- return "$" in data or "{{" in data
-
-def safe_eval(expr, locals={}, include_exceptions=False):
- '''
- This is intended for allowing things like:
- with_items: a_list_variable
-
- Where Jinja2 would return a string but we do not want to allow it to
- call functions (outside of Jinja2, where the env is constrained). If
- the input data to this function came from an untrusted (remote) source,
- it should first be run through _clean_data_struct() to ensure the data
- is further sanitized prior to evaluation.
-
- Based on:
- http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
- '''
-
- # this is the whitelist of AST nodes we are going to
- # allow in the evaluation. Any node type other than
- # those listed here will raise an exception in our custom
- # visitor class defined below.
- SAFE_NODES = set(
- (
- ast.Add,
- ast.BinOp,
- ast.Call,
- ast.Compare,
- ast.Dict,
- ast.Div,
- ast.Expression,
- ast.List,
- ast.Load,
- ast.Mult,
- ast.Num,
- ast.Name,
- ast.Str,
- ast.Sub,
- ast.Tuple,
- ast.UnaryOp,
- )
- )
-
- # AST node types were expanded after 2.6
- if not sys.version.startswith('2.6'):
- SAFE_NODES.union(
- set(
- (ast.Set,)
- )
- )
-
- filter_list = []
- for filter in filter_loader.all():
- filter_list.extend(filter.filters().keys())
-
- CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list
-
- class CleansingNodeVisitor(ast.NodeVisitor):
- def generic_visit(self, node, inside_call=False):
- if type(node) not in SAFE_NODES:
- raise Exception("invalid expression (%s)" % expr)
- elif isinstance(node, ast.Call):
- inside_call = True
- elif isinstance(node, ast.Name) and inside_call:
- if hasattr(builtin, node.id) and node.id not in CALL_WHITELIST:
- raise Exception("invalid function: %s" % node.id)
- # iterate over all child nodes
- for child_node in ast.iter_child_nodes(node):
- self.generic_visit(child_node, inside_call)
-
- if not isinstance(expr, basestring):
- # already templated to a datastructure, perhaps?
- if include_exceptions:
- return (expr, None)
- return expr
-
- cnv = CleansingNodeVisitor()
- try:
- parsed_tree = ast.parse(expr, mode='eval')
- cnv.visit(parsed_tree)
- compiled = compile(parsed_tree, expr, 'eval')
- result = eval(compiled, {}, locals)
-
- if include_exceptions:
- return (result, None)
- else:
- return result
- except SyntaxError, e:
- # special handling for syntax errors, we just return
- # the expression string back as-is
- if include_exceptions:
- return (expr, None)
- return expr
- except Exception, e:
- if include_exceptions:
- return (expr, e)
- return expr
-
-
-def listify_lookup_plugin_terms(terms, basedir, inject):
-
- from ansible.utils import template
-
- if isinstance(terms, basestring):
- # someone did:
- # with_items: alist
- # OR
- # with_items: {{ alist }}
-
- stripped = terms.strip()
- if not (stripped.startswith('{') or stripped.startswith('[')) and \
- not stripped.startswith("/") and \
- not stripped.startswith('set([') and \
- not LOOKUP_REGEX.search(terms):
- # if not already a list, get ready to evaluate with Jinja2
- # not sure why the "/" is in above code :)
- try:
- new_terms = template.template(basedir, "{{ %s }}" % terms, inject)
- if isinstance(new_terms, basestring) and "{{" in new_terms:
- pass
- else:
- terms = new_terms
- except:
- pass
-
- if '{' in terms or '[' in terms:
- # Jinja2 already evaluated a variable to a list.
- # Jinja2-ified list needs to be converted back to a real type
- # TODO: something a bit less heavy than eval
- return safe_eval(terms)
-
- if isinstance(terms, basestring):
- terms = [ terms ]
-
- return terms
-
-def combine_vars(a, b):
-
- _validate_both_dicts(a, b)
-
- if C.DEFAULT_HASH_BEHAVIOUR == "merge":
- return merge_hash(a, b)
- else:
- return dict(a.items() + b.items())
-
-def random_password(length=20, chars=C.DEFAULT_PASSWORD_CHARS):
- '''Return a random password string of length containing only chars.'''
-
- password = []
- while len(password) < length:
- new_char = os.urandom(1)
- if new_char in chars:
- password.append(new_char)
-
- return ''.join(password)
-
-def before_comment(msg):
- ''' what's the part of a string before a comment? '''
- msg = msg.replace("\#","**NOT_A_COMMENT**")
- msg = msg.split("#")[0]
- msg = msg.replace("**NOT_A_COMMENT**","#")
- return msg
-
-def load_vars(basepath, results, vault_password=None):
- """
- Load variables from any potential yaml filename combinations of basepath,
- returning result.
- """
-
- paths_to_check = [ "".join([basepath, ext])
- for ext in C.YAML_FILENAME_EXTENSIONS ]
-
- found_paths = []
-
- for path in paths_to_check:
- found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- if found:
- found_paths.append(path)
-
-
- # disallow the potentially confusing situation that there are multiple
- # variable files for the same name. For example if both group_vars/all.yml
- # and group_vars/all.yaml
- if len(found_paths) > 1:
- raise errors.AnsibleError("Multiple variable files found. "
- "There should only be one. %s" % ( found_paths, ))
-
- return results
-
-## load variables from yaml files/dirs
-# e.g. host/group_vars
-#
-def _load_vars_from_path(path, results, vault_password=None):
- """
- Robustly access the file at path and load variables, carefully reporting
- errors in a friendly/informative way.
-
- Return the tuple (found, new_results, )
- """
-
- try:
- # in the case of a symbolic link, we want the stat of the link itself,
- # not its target
- pathstat = os.lstat(path)
- except os.error, err:
- # most common case is that nothing exists at that path.
- if err.errno == errno.ENOENT:
- return False, results
- # otherwise this is a condition we should report to the user
- raise errors.AnsibleError(
- "%s is not accessible: %s."
- " Please check its permissions." % ( path, err.strerror))
-
- # symbolic link
- if stat.S_ISLNK(pathstat.st_mode):
- try:
- target = os.path.realpath(path)
- except os.error, err2:
- raise errors.AnsibleError("The symbolic link at %s "
- "is not readable: %s. Please check its permissions."
- % (path, err2.strerror, ))
- # follow symbolic link chains by recursing, so we repeat the same
- # permissions checks above and provide useful errors.
- return _load_vars_from_path(target, results, vault_password)
-
- # directory
- if stat.S_ISDIR(pathstat.st_mode):
-
- # support organizing variables across multiple files in a directory
- return True, _load_vars_from_folder(path, results, vault_password=vault_password)
-
- # regular file
- elif stat.S_ISREG(pathstat.st_mode):
- data = parse_yaml_from_file(path, vault_password=vault_password)
- if data and type(data) != dict:
- raise errors.AnsibleError(
- "%s must be stored as a dictionary/hash" % path)
- elif data is None:
- data = {}
-
- # combine vars overrides by default but can be configured to do a
- # hash merge in settings
- results = combine_vars(results, data)
- return True, results
-
- # something else? could be a fifo, socket, device, etc.
- else:
- raise errors.AnsibleError("Expected a variable file or directory "
- "but found a non-file object at path %s" % (path, ))
-
-def _load_vars_from_folder(folder_path, results, vault_password=None):
- """
- Load all variables within a folder recursively.
- """
-
- # this function and _load_vars_from_path are mutually recursive
-
- try:
- names = os.listdir(folder_path)
- except os.error, err:
- raise errors.AnsibleError(
- "This folder cannot be listed: %s: %s."
- % ( folder_path, err.strerror))
-
- # evaluate files in a stable order rather than whatever order the
- # filesystem lists them.
- names.sort()
-
- # do not parse hidden files or dirs, e.g. .svn/
- paths = [os.path.join(folder_path, name) for name in names
- if not name.startswith('.')
- and os.path.splitext(name)[1] in C.YAML_FILENAME_EXTENSIONS]
- for path in paths:
- _found, results = _load_vars_from_path(path, results, vault_password=vault_password)
- return results
-
-def update_hash(hash, key, new_value):
- ''' used to avoid nested .update calls on the parent '''
-
- value = hash.get(key, {})
- value.update(new_value)
- hash[key] = value
-
-def censor_unlogged_data(data):
- '''
- used when the no_log: True attribute is passed to a task to keep data from a callback.
- NOT intended to prevent variable registration, but only things from showing up on
- screen
- '''
- new_data = {}
- for (x,y) in data.iteritems():
- if x in [ 'skipped', 'changed', 'failed', 'rc' ]:
- new_data[x] = y
- new_data['censored'] = 'results hidden due to no_log parameter'
- return new_data
-
-def check_mutually_exclusive_privilege(options, parser):
-
- # privilege escalation command line arguments need to be mutually exclusive
- if (options.su or options.su_user or options.ask_su_pass) and \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) or \
- (options.su or options.su_user or options.ask_su_pass) and \
- (options.become or options.become_user or options.become_ask_pass) or \
- (options.sudo or options.sudo_user or options.ask_sudo_pass) and \
- (options.become or options.become_user or options.become_ask_pass):
-
- parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') "
- "and su arguments ('-su', '--su-user', and '--ask-su-pass') "
- "and become arguments ('--become', '--become-user', and '--ask-become-pass')"
- " are exclusive of each other")
-
-
diff --git a/v1/ansible/utils/cmd_functions.py b/v1/ansible/utils/cmd_functions.py
deleted file mode 100644
index 6525260f10..0000000000
--- a/v1/ansible/utils/cmd_functions.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import sys
-import shlex
-import subprocess
-import select
-
-def run_cmd(cmd, live=False, readsize=10):
-
- #readsize = 10
-
- cmdargs = shlex.split(cmd)
- p = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-
- stdout = ''
- stderr = ''
- rpipes = [p.stdout, p.stderr]
- while True:
- rfd, wfd, efd = select.select(rpipes, [], rpipes, 1)
-
- if p.stdout in rfd:
- dat = os.read(p.stdout.fileno(), readsize)
- if live:
- sys.stdout.write(dat)
- stdout += dat
- if dat == '':
- rpipes.remove(p.stdout)
- if p.stderr in rfd:
- dat = os.read(p.stderr.fileno(), readsize)
- stderr += dat
- if live:
- sys.stdout.write(dat)
- if dat == '':
- rpipes.remove(p.stderr)
- # only break out if we've emptied the pipes, or there is nothing to
- # read from and the process has finished.
- if (not rpipes or not rfd) and p.poll() is not None:
- break
- # Calling wait while there are still pipes to read can cause a lock
- elif not rpipes and p.poll() == None:
- p.wait()
-
- return p.returncode, stdout, stderr
diff --git a/v1/ansible/utils/display_functions.py b/v1/ansible/utils/display_functions.py
deleted file mode 100644
index 2233c81657..0000000000
--- a/v1/ansible/utils/display_functions.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import textwrap
-
-from ansible import constants as C
-from ansible import errors
-from ansible.callbacks import display
-
-__all__ = ['deprecated', 'warning', 'system_warning']
-
-# list of all deprecation messages to prevent duplicate display
-deprecations = {}
-warns = {}
-
-def deprecated(msg, version, removed=False):
- ''' used to print out a deprecation message.'''
-
- if not removed and not C.DEPRECATION_WARNINGS:
- return
-
- if not removed:
- if version:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in version %s." % (msg, version)
- else:
- new_msg = "\n[DEPRECATION WARNING]: %s. This feature will be removed in a future release." % (msg)
- new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
- else:
- raise errors.AnsibleError("[DEPRECATED]: %s. Please update your playbooks." % msg)
-
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
-
- if new_msg not in deprecations:
- display(new_msg, color='purple', stderr=True)
- deprecations[new_msg] = 1
-
-def warning(msg):
- new_msg = "\n[WARNING]: %s" % msg
- wrapped = textwrap.wrap(new_msg, 79)
- new_msg = "\n".join(wrapped) + "\n"
- if new_msg not in warns:
- display(new_msg, color='bright purple', stderr=True)
- warns[new_msg] = 1
-
-def system_warning(msg):
- if C.SYSTEM_WARNINGS:
- warning(msg)
-
diff --git a/v1/ansible/utils/hashing.py b/v1/ansible/utils/hashing.py
deleted file mode 100644
index a7d142e5bd..0000000000
--- a/v1/ansible/utils/hashing.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-
-# Note, sha1 is the only hash algorithm compatible with python2.4 and with
-# FIPS-140 mode (as of 11-2014)
-try:
- from hashlib import sha1 as sha1
-except ImportError:
- from sha import sha as sha1
-
-# Backwards compat only
-try:
- from hashlib import md5 as _md5
-except ImportError:
- try:
- from md5 import md5 as _md5
- except ImportError:
- # Assume we're running in FIPS mode here
- _md5 = None
-
-def secure_hash_s(data, hash_func=sha1):
- ''' Return a secure hash hex digest of data. '''
-
- digest = hash_func()
- try:
- digest.update(data)
- except UnicodeEncodeError:
- digest.update(data.encode('utf-8'))
- return digest.hexdigest()
-
-def secure_hash(filename, hash_func=sha1):
- ''' Return a secure hash hex digest of local file, None if file is not present or a directory. '''
-
- if not os.path.exists(filename) or os.path.isdir(filename):
- return None
- digest = hash_func()
- blocksize = 64 * 1024
- try:
- infile = open(filename, 'rb')
- block = infile.read(blocksize)
- while block:
- digest.update(block)
- block = infile.read(blocksize)
- infile.close()
- except IOError, e:
- raise errors.AnsibleError("error while accessing the file %s, error was: %s" % (filename, e))
- return digest.hexdigest()
-
-# The checksum algorithm must match with the algorithm in ShellModule.checksum() method
-checksum = secure_hash
-checksum_s = secure_hash_s
-
-# Backwards compat functions. Some modules include md5s in their return values
-# Continue to support that for now. As of ansible-1.8, all of those modules
-# should also return "checksum" (sha1 for now)
-# Do not use md5 unless it is needed for:
-# 1) Optional backwards compatibility
-# 2) Compliance with a third party protocol
-#
-# MD5 will not work on systems which are FIPS-140-2 compliant.
-
-def md5s(data):
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return secure_hash_s(data, _md5)
-
-def md5(filename):
- if not _md5:
- raise ValueError('MD5 not available. Possibly running in FIPS mode')
- return secure_hash(filename, _md5)
-
diff --git a/v1/ansible/utils/module_docs.py b/v1/ansible/utils/module_docs.py
deleted file mode 100644
index c692057172..0000000000
--- a/v1/ansible/utils/module_docs.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import sys
-import ast
-import yaml
-import traceback
-
-from collections import MutableMapping, MutableSet, MutableSequence
-
-from ansible import utils
-
-# modules that are ok that they do not have documentation strings
-BLACKLIST_MODULES = [
- 'async_wrapper', 'accelerate', 'async_status'
-]
-
-def get_docstring(filename, verbose=False):
- """
- Search for assignment of the DOCUMENTATION and EXAMPLES variables
- in the given file.
- Parse DOCUMENTATION from YAML and return the YAML doc or None
- together with EXAMPLES, as plain text.
-
- DOCUMENTATION can be extended using documentation fragments
- loaded by the PluginLoader from the module_docs_fragments
- directory.
- """
-
- doc = None
- plainexamples = None
- returndocs = None
-
- try:
- # Thank you, Habbie, for this bit of code :-)
- M = ast.parse(''.join(open(filename)))
- for child in M.body:
- if isinstance(child, ast.Assign):
- if 'DOCUMENTATION' in (t.id for t in child.targets):
- doc = yaml.safe_load(child.value.s)
- fragment_slug = doc.get('extends_documentation_fragment',
- 'doesnotexist').lower()
-
- # Allow the module to specify a var other than DOCUMENTATION
- # to pull the fragment from, using dot notation as a separator
- if '.' in fragment_slug:
- fragment_name, fragment_var = fragment_slug.split('.', 1)
- fragment_var = fragment_var.upper()
- else:
- fragment_name, fragment_var = fragment_slug, 'DOCUMENTATION'
-
-
- if fragment_slug != 'doesnotexist':
- fragment_class = utils.plugins.fragment_loader.get(fragment_name)
- assert fragment_class is not None
-
- fragment_yaml = getattr(fragment_class, fragment_var, '{}')
- fragment = yaml.safe_load(fragment_yaml)
-
- if fragment.has_key('notes'):
- notes = fragment.pop('notes')
- if notes:
- if not doc.has_key('notes'):
- doc['notes'] = []
- doc['notes'].extend(notes)
-
- if 'options' not in fragment.keys():
- raise Exception("missing options in fragment, possibly misformatted?")
-
- for key, value in fragment.items():
- if not doc.has_key(key):
- doc[key] = value
- else:
- if isinstance(doc[key], MutableMapping):
- doc[key].update(value)
- elif isinstance(doc[key], MutableSet):
- doc[key].add(value)
- elif isinstance(doc[key], MutableSequence):
- doc[key] = sorted(frozenset(doc[key] + value))
- else:
- raise Exception("Attempt to extend a documentation fragement of unknown type")
-
- if 'EXAMPLES' in (t.id for t in child.targets):
- plainexamples = child.value.s[1:] # Skip first empty line
-
- if 'RETURN' in (t.id for t in child.targets):
- returndocs = child.value.s[1:]
- except:
- traceback.print_exc() # temp
- if verbose == True:
- traceback.print_exc()
- print "unable to parse %s" % filename
- return doc, plainexamples, returndocs
-
diff --git a/v1/ansible/utils/module_docs_fragments b/v1/ansible/utils/module_docs_fragments
deleted file mode 120000
index 83aef9ec19..0000000000
--- a/v1/ansible/utils/module_docs_fragments
+++ /dev/null
@@ -1 +0,0 @@
-../../../lib/ansible/utils/module_docs_fragments \ No newline at end of file
diff --git a/v1/ansible/utils/plugins.py b/v1/ansible/utils/plugins.py
deleted file mode 100644
index 14953d8f44..0000000000
--- a/v1/ansible/utils/plugins.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import sys
-import glob
-import imp
-from ansible import constants as C
-from ansible import errors
-
-MODULE_CACHE = {}
-PATH_CACHE = {}
-PLUGIN_PATH_CACHE = {}
-_basedirs = []
-
-def push_basedir(basedir):
- # avoid pushing the same absolute dir more than once
- basedir = os.path.realpath(basedir)
- if basedir not in _basedirs:
- _basedirs.insert(0, basedir)
-
-class PluginLoader(object):
-
- '''
- PluginLoader loads plugins from the configured plugin directories.
-
- It searches for plugins by iterating through the combined list of
- play basedirs, configured paths, and the python path.
- The first match is used.
- '''
-
- def __init__(self, class_name, package, config, subdir, aliases={}):
-
- self.class_name = class_name
- self.package = package
- self.config = config
- self.subdir = subdir
- self.aliases = aliases
-
- if not class_name in MODULE_CACHE:
- MODULE_CACHE[class_name] = {}
- if not class_name in PATH_CACHE:
- PATH_CACHE[class_name] = None
- if not class_name in PLUGIN_PATH_CACHE:
- PLUGIN_PATH_CACHE[class_name] = {}
-
- self._module_cache = MODULE_CACHE[class_name]
- self._paths = PATH_CACHE[class_name]
- self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
-
- self._extra_dirs = []
- self._searched_paths = set()
-
- def print_paths(self):
- ''' Returns a string suitable for printing of the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in self._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- def _all_directories(self, dir):
- results = []
- results.append(dir)
- for root, subdirs, files in os.walk(dir):
- if '__init__.py' in files:
- for x in subdirs:
- results.append(os.path.join(root,x))
- return results
-
- def _get_package_paths(self):
- ''' Gets the path of a Python package '''
-
- paths = []
- if not self.package:
- return []
- if not hasattr(self, 'package_path'):
- m = __import__(self.package)
- parts = self.package.split('.')[1:]
- self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
- paths.extend(self._all_directories(self.package_path))
- return paths
-
- def _get_paths(self):
- ''' Return a list of paths to search for plugins in '''
-
- if self._paths is not None:
- return self._paths
-
- ret = self._extra_dirs[:]
- for basedir in _basedirs:
- fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
- if os.path.isdir(fullpath):
-
- files = glob.glob("%s/*" % fullpath)
-
- # allow directories to be two levels deep
- files2 = glob.glob("%s/*/*" % fullpath)
-
- if files2 is not None:
- files.extend(files2)
-
- for file in files:
- if os.path.isdir(file) and file not in ret:
- ret.append(file)
- if fullpath not in ret:
- ret.append(fullpath)
-
- # look in any configured plugin paths, allow one level deep for subcategories
- if self.config is not None:
- configured_paths = self.config.split(os.pathsep)
- for path in configured_paths:
- path = os.path.realpath(os.path.expanduser(path))
- contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
- for c in contents:
- if os.path.isdir(c) and c not in ret:
- ret.append(c)
- if path not in ret:
- ret.append(path)
-
- # look for any plugins installed in the package subtree
- ret.extend(self._get_package_paths())
-
- # cache and return the result
- self._paths = ret
- return ret
-
-
- def add_directory(self, directory, with_subdir=False):
- ''' Adds an additional directory to the search path '''
-
- directory = os.path.realpath(directory)
-
- if directory is not None:
- if with_subdir:
- directory = os.path.join(directory, self.subdir)
- if directory not in self._extra_dirs:
- # append the directory and invalidate the path cache
- self._extra_dirs.append(directory)
- self._paths = None
-
- def find_plugin(self, name, suffixes=None):
- ''' Find a plugin named name '''
-
- if not suffixes:
- if self.class_name:
- suffixes = ['.py']
- else:
- suffixes = ['.py', '']
-
- potential_names = frozenset('%s%s' % (name, s) for s in suffixes)
- for full_name in potential_names:
- if full_name in self._plugin_path_cache:
- return self._plugin_path_cache[full_name]
-
- found = None
- for path in [p for p in self._get_paths() if p not in self._searched_paths]:
- if os.path.isdir(path):
- full_paths = (os.path.join(path, f) for f in os.listdir(path))
- for full_path in (f for f in full_paths if os.path.isfile(f)):
- for suffix in suffixes:
- if full_path.endswith(suffix):
- full_name = os.path.basename(full_path)
- break
- else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
- continue
-
- if full_name not in self._plugin_path_cache:
- self._plugin_path_cache[full_name] = full_path
-
- self._searched_paths.add(path)
- for full_name in potential_names:
- if full_name in self._plugin_path_cache:
- return self._plugin_path_cache[full_name]
-
- # if nothing is found, try finding alias/deprecated
- if not name.startswith('_'):
- for alias_name in ('_%s' % n for n in potential_names):
- # We've already cached all the paths at this point
- if alias_name in self._plugin_path_cache:
- return self._plugin_path_cache[alias_name]
-
- return None
-
- def has_plugin(self, name):
- ''' Checks if a plugin named name exists '''
-
- return self.find_plugin(name) is not None
-
- __contains__ = has_plugin
-
- def get(self, name, *args, **kwargs):
- ''' instantiates a plugin of the given name using arguments '''
-
- if name in self.aliases:
- name = self.aliases[name]
- path = self.find_plugin(name)
- if path is None:
- return None
- if path not in self._module_cache:
- self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- return getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
-
- def all(self, *args, **kwargs):
- ''' instantiates all plugins with the same arguments '''
-
- for i in self._get_paths():
- matches = glob.glob(os.path.join(i, "*.py"))
- matches.sort()
- for path in matches:
- name, ext = os.path.splitext(os.path.basename(path))
- if name.startswith("_"):
- continue
- if path not in self._module_cache:
- self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
- yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
-
-action_loader = PluginLoader(
- 'ActionModule',
- 'ansible.runner.action_plugins',
- C.DEFAULT_ACTION_PLUGIN_PATH,
- 'action_plugins'
-)
-
-cache_loader = PluginLoader(
- 'CacheModule',
- 'ansible.cache',
- C.DEFAULT_CACHE_PLUGIN_PATH,
- 'cache_plugins'
-)
-
-callback_loader = PluginLoader(
- 'CallbackModule',
- 'ansible.callback_plugins',
- C.DEFAULT_CALLBACK_PLUGIN_PATH,
- 'callback_plugins'
-)
-
-connection_loader = PluginLoader(
- 'Connection',
- 'ansible.runner.connection_plugins',
- C.DEFAULT_CONNECTION_PLUGIN_PATH,
- 'connection_plugins',
- aliases={'paramiko': 'paramiko_ssh'}
-)
-
-shell_loader = PluginLoader(
- 'ShellModule',
- 'ansible.runner.shell_plugins',
- 'shell_plugins',
- 'shell_plugins',
-)
-
-module_finder = PluginLoader(
- '',
- 'ansible.modules',
- C.DEFAULT_MODULE_PATH,
- 'library'
-)
-
-lookup_loader = PluginLoader(
- 'LookupModule',
- 'ansible.runner.lookup_plugins',
- C.DEFAULT_LOOKUP_PLUGIN_PATH,
- 'lookup_plugins'
-)
-
-vars_loader = PluginLoader(
- 'VarsModule',
- 'ansible.inventory.vars_plugins',
- C.DEFAULT_VARS_PLUGIN_PATH,
- 'vars_plugins'
-)
-
-filter_loader = PluginLoader(
- 'FilterModule',
- 'ansible.runner.filter_plugins',
- C.DEFAULT_FILTER_PLUGIN_PATH,
- 'filter_plugins'
-)
-
-fragment_loader = PluginLoader(
- 'ModuleDocFragment',
- 'ansible.utils.module_docs_fragments',
- os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
- '',
-)
diff --git a/v1/ansible/utils/string_functions.py b/v1/ansible/utils/string_functions.py
deleted file mode 100644
index 3b452718f7..0000000000
--- a/v1/ansible/utils/string_functions.py
+++ /dev/null
@@ -1,18 +0,0 @@
-def isprintable(instring):
- if isinstance(instring, str):
- #http://stackoverflow.com/a/3637294
- import string
- printset = set(string.printable)
- isprintable = set(instring).issubset(printset)
- return isprintable
- else:
- return True
-
-def count_newlines_from_end(str):
- i = len(str)
- while i > 0:
- if str[i-1] != '\n':
- break
- i -= 1
- return len(str) - i
-
diff --git a/v1/ansible/utils/su_prompts.py b/v1/ansible/utils/su_prompts.py
deleted file mode 100644
index 04e98e1c45..0000000000
--- a/v1/ansible/utils/su_prompts.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-SU_PROMPT_LOCALIZATIONS = [
- 'Password',
- '암호',
- 'パスワード',
- 'Adgangskode',
- 'Contraseña',
- 'Contrasenya',
- 'Hasło',
- 'Heslo',
- 'Jelszó',
- 'Lösenord',
- 'Mật khẩu',
- 'Mot de passe',
- 'Parola',
- 'Parool',
- 'Pasahitza',
- 'Passord',
- 'Passwort',
- 'Salasana',
- 'Sandi',
- 'Senha',
- 'Wachtwoord',
- 'ססמה',
- 'Лозинка',
- 'Парола',
- 'Пароль',
- 'गुप्तशब्द',
- 'शब्दकूट',
- 'సంకేతపదము',
- 'හස්පදය',
- '密码',
- '密碼',
-]
-
-SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join(['(\w+\'s )?' + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
-
-def check_su_prompt(data):
- return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
-
diff --git a/v1/ansible/utils/template.py b/v1/ansible/utils/template.py
deleted file mode 100644
index 368b2067c3..0000000000
--- a/v1/ansible/utils/template.py
+++ /dev/null
@@ -1,405 +0,0 @@
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import re
-import codecs
-import jinja2
-from jinja2.runtime import StrictUndefined
-from jinja2.exceptions import TemplateSyntaxError
-import yaml
-import json
-from ansible import errors
-import ansible.constants as C
-import time
-import subprocess
-import datetime
-import pwd
-import ast
-import traceback
-from numbers import Number
-from types import NoneType
-
-from ansible.utils.string_functions import count_newlines_from_end
-from ansible.utils import to_bytes, to_unicode
-
-class Globals(object):
-
- FILTERS = None
-
- def __init__(self):
- pass
-
-def _get_filters():
- ''' return filter plugin instances '''
-
- if Globals.FILTERS is not None:
- return Globals.FILTERS
-
- from ansible import utils
- plugins = [ x for x in utils.plugins.filter_loader.all()]
- filters = {}
- for fp in plugins:
- filters.update(fp.filters())
- Globals.FILTERS = filters
-
- return Globals.FILTERS
-
-def _get_extensions():
- ''' return jinja2 extensions to load '''
-
- '''
- if some extensions are set via jinja_extensions in ansible.cfg, we try
- to load them with the jinja environment
- '''
- jinja_exts = []
- if C.DEFAULT_JINJA2_EXTENSIONS:
- '''
- Let's make sure the configuration directive doesn't contain spaces
- and split extensions in an array
- '''
- jinja_exts = C.DEFAULT_JINJA2_EXTENSIONS.replace(" ", "").split(',')
-
- return jinja_exts
-
-class Flags:
- LEGACY_TEMPLATE_WARNING = False
-
-# TODO: refactor this file
-
-FILTER_PLUGINS = None
-_LISTRE = re.compile(r"(\w+)\[(\d+)\]")
-
-# A regex for checking to see if a variable we're trying to
-# expand is just a single variable name.
-SINGLE_VAR = re.compile(r"^{{\s*(\w*)\s*}}$")
-
-JINJA2_OVERRIDE = '#jinja2:'
-JINJA2_ALLOWED_OVERRIDES = ['trim_blocks', 'lstrip_blocks', 'newline_sequence', 'keep_trailing_newline']
-
-def lookup(name, *args, **kwargs):
- from ansible import utils
- instance = utils.plugins.lookup_loader.get(name.lower(), basedir=kwargs.get('basedir',None))
- tvars = kwargs.get('vars', None)
-
- wantlist = kwargs.pop('wantlist', False)
-
- if instance is not None:
- try:
- ran = instance.run(*args, inject=tvars, **kwargs)
- except errors.AnsibleError:
- raise
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except Exception, e:
- raise errors.AnsibleError('Unexpected error in during lookup: %s' % e)
- if ran and not wantlist:
- ran = ",".join(ran)
- return ran
- else:
- raise errors.AnsibleError("lookup plugin (%s) not found" % name)
-
-def template(basedir, varname, templatevars, lookup_fatal=True, depth=0, expand_lists=True, convert_bare=False, fail_on_undefined=False, filter_fatal=True):
- ''' templates a data structure by traversing it and substituting for other data structures '''
- from ansible import utils
- try:
- if convert_bare and isinstance(varname, basestring):
- first_part = varname.split(".")[0].split("[")[0]
- if first_part in templatevars and '{{' not in varname and '$' not in varname:
- varname = "{{%s}}" % varname
-
- if isinstance(varname, basestring):
- if '{{' in varname or '{%' in varname:
- try:
- varname = template_from_string(basedir, varname, templatevars, fail_on_undefined)
- except errors.AnsibleError, e:
- raise errors.AnsibleError("Failed to template %s: %s" % (varname, str(e)))
-
- # template_from_string may return non strings for the case where the var is just
- # a reference to a single variable, so we should re_check before we do further evals
- if isinstance(varname, basestring):
- if (varname.startswith("{") and not varname.startswith("{{")) or varname.startswith("["):
- eval_results = utils.safe_eval(varname, locals=templatevars, include_exceptions=True)
- if eval_results[1] is None:
- varname = eval_results[0]
-
- return varname
-
- elif isinstance(varname, (list, tuple)):
- return [template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal) for v in varname]
- elif isinstance(varname, dict):
- d = {}
- for (k, v) in varname.iteritems():
- d[k] = template(basedir, v, templatevars, lookup_fatal, depth, expand_lists, convert_bare, fail_on_undefined, filter_fatal)
- return d
- else:
- return varname
- except errors.AnsibleFilterError:
- if filter_fatal:
- raise
- else:
- return varname
-
-
-class _jinja2_vars(object):
- '''
- Helper class to template all variable content before jinja2 sees it.
- This is done by hijacking the variable storage that jinja2 uses, and
- overriding __contains__ and __getitem__ to look like a dict. Added bonus
- is avoiding duplicating the large hashes that inject tends to be.
- To facilitate using builtin jinja2 things like range, globals are handled
- here.
- extras is a list of locals to also search for variables.
- '''
-
- def __init__(self, basedir, vars, globals, fail_on_undefined, *extras):
- self.basedir = basedir
- self.vars = vars
- self.globals = globals
- self.fail_on_undefined = fail_on_undefined
- self.extras = extras
-
- def __contains__(self, k):
- if k in self.vars:
- return True
- for i in self.extras:
- if k in i:
- return True
- if k in self.globals:
- return True
- return False
-
- def __getitem__(self, varname):
- from ansible.runner import HostVars
- if varname not in self.vars:
- for i in self.extras:
- if varname in i:
- return i[varname]
- if varname in self.globals:
- return self.globals[varname]
- else:
- raise KeyError("undefined variable: %s" % varname)
- var = self.vars[varname]
- # HostVars is special, return it as-is, as is the special variable
- # 'vars', which contains the vars structure
- var = to_unicode(var, nonstring="passthru")
- if isinstance(var, dict) and varname == "vars" or isinstance(var, HostVars):
- return var
- else:
- return template(self.basedir, var, self.vars, fail_on_undefined=self.fail_on_undefined)
-
- def add_locals(self, locals):
- '''
- If locals are provided, create a copy of self containing those
- locals in addition to what is already in this variable proxy.
- '''
- if locals is None:
- return self
- return _jinja2_vars(self.basedir, self.vars, self.globals, self.fail_on_undefined, locals, *self.extras)
-
-class J2Template(jinja2.environment.Template):
- '''
- This class prevents Jinja2 from running _jinja2_vars through dict()
- Without this, {% include %} and similar will create new contexts unlike
- the special one created in template_from_file. This ensures they are all
- alike, except for potential locals.
- '''
- def new_context(self, vars=None, shared=False, locals=None):
- return jinja2.runtime.Context(self.environment, vars.add_locals(locals), self.name, self.blocks)
-
-def template_from_file(basedir, path, vars, vault_password=None):
- ''' run a file through the templating engine '''
-
- fail_on_undefined = C.DEFAULT_UNDEFINED_VAR_BEHAVIOR
-
- from ansible import utils
- realpath = utils.path_dwim(basedir, path)
- loader=jinja2.FileSystemLoader([basedir,os.path.dirname(realpath)])
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(loader=loader, trim_blocks=True, extensions=_get_extensions())
- environment.filters.update(_get_filters())
- environment.globals['lookup'] = my_lookup
- environment.globals['finalize'] = my_finalize
- if fail_on_undefined:
- environment.undefined = StrictUndefined
-
- try:
- data = codecs.open(realpath, encoding="utf8").read()
- except UnicodeDecodeError:
- raise errors.AnsibleError("unable to process as utf-8: %s" % realpath)
- except:
- raise errors.AnsibleError("unable to read %s" % realpath)
-
- # Get jinja env overrides from template
- if data.startswith(JINJA2_OVERRIDE):
- eol = data.find('\n')
- line = data[len(JINJA2_OVERRIDE):eol]
- data = data[eol+1:]
- for pair in line.split(','):
- (key,val) = pair.split(':')
- key = key.strip()
- if key in JINJA2_ALLOWED_OVERRIDES:
- setattr(environment, key, ast.literal_eval(val.strip()))
-
-
- environment.template_class = J2Template
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- # Throw an exception which includes a more user friendly error message
- values = {'name': realpath, 'lineno': e.lineno, 'error': str(e)}
- msg = 'file: %(name)s, line number: %(lineno)s, error: %(error)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
- vars = vars.copy()
- try:
- template_uid = pwd.getpwuid(os.stat(realpath).st_uid).pw_name
- except:
- template_uid = os.stat(realpath).st_uid
- vars['template_host'] = os.uname()[1]
- vars['template_path'] = realpath
- vars['template_mtime'] = datetime.datetime.fromtimestamp(os.path.getmtime(realpath))
- vars['template_uid'] = template_uid
- vars['template_fullpath'] = os.path.abspath(realpath)
- vars['template_run_date'] = datetime.datetime.now()
-
- managed_default = C.DEFAULT_MANAGED_STR
- managed_str = managed_default.format(
- host = vars['template_host'],
- uid = vars['template_uid'],
- file = to_bytes(vars['template_path'])
- )
- vars['ansible_managed'] = time.strftime(
- managed_str,
- time.localtime(os.path.getmtime(realpath))
- )
-
- # This line performs deep Jinja2 magic that uses the _jinja2_vars object for vars
- # Ideally, this could use some API where setting shared=True and the object won't get
- # passed through dict(o), but I have not found that yet.
- try:
- res = jinja2.utils.concat(t.root_render_func(t.new_context(_jinja2_vars(basedir, vars, t.globals, fail_on_undefined), shared=True)))
- except jinja2.exceptions.UndefinedError, e:
- raise errors.AnsibleUndefinedVariable("One or more undefined variables: %s" % str(e))
- except jinja2.exceptions.TemplateNotFound, e:
- # Throw an exception which includes a more user friendly error message
- # This likely will happen for included sub-template. Not that besides
- # pure "file not found" it may happen due to Jinja2's "security"
- # checks on path.
- values = {'name': realpath, 'subname': str(e)}
- msg = 'file: %(name)s, error: Cannot find/not allowed to load (include) template %(subname)s' % \
- values
- error = errors.AnsibleError(msg)
- raise error
-
- # The low level calls above do not preserve the newline
- # characters at the end of the input data, so we use the
- # calculate the difference in newlines and append them
- # to the resulting output for parity
- res_newlines = count_newlines_from_end(res)
- data_newlines = count_newlines_from_end(data)
- if data_newlines > res_newlines:
- res += '\n' * (data_newlines - res_newlines)
-
- if isinstance(res, unicode):
- # do not try to re-template a unicode string
- result = res
- else:
- result = template(basedir, res, vars)
-
- return result
-
-def template_from_string(basedir, data, vars, fail_on_undefined=False):
- ''' run a string through the (Jinja2) templating engine '''
- try:
- if type(data) == str:
- data = unicode(data, 'utf-8')
-
- # Check to see if the string we are trying to render is just referencing a single
- # var. In this case we don't want to accidentally change the type of the variable
- # to a string by using the jinja template renderer. We just want to pass it.
- only_one = SINGLE_VAR.match(data)
- if only_one:
- var_name = only_one.group(1)
- if var_name in vars:
- resolved_val = vars[var_name]
- if isinstance(resolved_val, (bool, Number, NoneType)):
- return resolved_val
-
- def my_finalize(thing):
- return thing if thing is not None else ''
-
- environment = jinja2.Environment(trim_blocks=True, undefined=StrictUndefined, extensions=_get_extensions(), finalize=my_finalize)
- environment.filters.update(_get_filters())
- environment.template_class = J2Template
-
- if '_original_file' in vars:
- basedir = os.path.dirname(vars['_original_file'])
- filesdir = os.path.abspath(os.path.join(basedir, '..', 'files'))
- if os.path.exists(filesdir):
- basedir = filesdir
-
- # 6227
- if isinstance(data, unicode):
- try:
- data = data.decode('utf-8')
- except UnicodeEncodeError, e:
- pass
-
- try:
- t = environment.from_string(data)
- except TemplateSyntaxError, e:
- raise errors.AnsibleError("template error while templating string: %s" % str(e))
- except Exception, e:
- if 'recursion' in str(e):
- raise errors.AnsibleError("recursive loop detected in template string: %s" % data)
- else:
- return data
-
- def my_lookup(*args, **kwargs):
- kwargs['vars'] = vars
- return lookup(*args, basedir=basedir, **kwargs)
-
- t.globals['lookup'] = my_lookup
- t.globals['finalize'] = my_finalize
- jvars =_jinja2_vars(basedir, vars, t.globals, fail_on_undefined)
- new_context = t.new_context(jvars, shared=True)
- rf = t.root_render_func(new_context)
- try:
- res = jinja2.utils.concat(rf)
- except TypeError, te:
- if 'StrictUndefined' in str(te):
- raise errors.AnsibleUndefinedVariable(
- "Unable to look up a name or access an attribute in template string. " + \
- "Make sure your variable name does not contain invalid characters like '-'."
- )
- else:
- raise errors.AnsibleError("an unexpected type error occurred. Error was %s" % te)
- return res
- except (jinja2.exceptions.UndefinedError, errors.AnsibleUndefinedVariable):
- if fail_on_undefined:
- raise
- else:
- return data
-
diff --git a/v1/ansible/utils/unicode.py b/v1/ansible/utils/unicode.py
deleted file mode 100644
index 7bd035c007..0000000000
--- a/v1/ansible/utils/unicode.py
+++ /dev/null
@@ -1,248 +0,0 @@
-# (c) 2012-2014, Toshio Kuraotmi <a.badger@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-# to_bytes and to_unicode were written by Toshio Kuratomi for the
-# python-kitchen library https://pypi.python.org/pypi/kitchen
-# They are licensed in kitchen under the terms of the GPLv2+
-# They were copied and modified for use in ansible by Toshio in Jan 2015
-# (simply removing the deprecated features)
-
-#: Aliases for the utf-8 codec
-_UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8',
- 'utf', 'UTF', 'u8', 'U8'))
-#: Aliases for the latin-1 codec
-_LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
- 'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1',
- 'ISO8859-1', 'iso-8859-1', 'ISO-8859-1'))
-
-# EXCEPTION_CONVERTERS is defined below due to using to_unicode
-
-def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
- '''Convert an object into a :class:`unicode` string
-
- :arg obj: Object to convert to a :class:`unicode` string. This should
- normally be a byte :class:`str`
- :kwarg encoding: What encoding to try converting the byte :class:`str` as.
- Defaults to :term:`utf-8`
- :kwarg errors: If errors are found while decoding, perform this action.
- Defaults to ``replace`` which replaces the invalid bytes with
- a character that means the bytes were unable to be decoded. Other
- values are the same as the error handling schemes in the `codec base
- classes
- <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
- For instance ``strict`` which raises an exception and ``ignore`` which
- simply omits the non-decodable characters.
- :kwarg nonstring: How to treat nonstring values. Possible values are:
-
- :simplerepr: Attempt to call the object's "simple representation"
- method and return that value. Python-2.3+ has two methods that
- try to return a simple representation: :meth:`object.__unicode__`
- and :meth:`object.__str__`. We first try to get a usable value
- from :meth:`object.__unicode__`. If that fails we try the same
- with :meth:`object.__str__`.
- :empty: Return an empty :class:`unicode` string
- :strict: Raise a :exc:`TypeError`
- :passthru: Return the object unchanged
- :repr: Attempt to return a :class:`unicode` string of the repr of the
- object
-
- Default is ``simplerepr``
-
- :raises TypeError: if :attr:`nonstring` is ``strict`` and
- a non-:class:`basestring` object is passed in or if :attr:`nonstring`
- is set to an unknown value
- :raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
- :attr:`obj` is not decodable using the given encoding
- :returns: :class:`unicode` string or the original object depending on the
- value of :attr:`nonstring`.
-
- Usually this should be used on a byte :class:`str` but it can take both
- byte :class:`str` and :class:`unicode` strings intelligently. Nonstring
- objects are handled in different ways depending on the setting of the
- :attr:`nonstring` parameter.
-
- The default values of this function are set so as to always return
- a :class:`unicode` string and never raise an error when converting from
- a byte :class:`str` to a :class:`unicode` string. However, when you do
- not pass validly encoded text (or a nonstring object), you may end up with
- output that you don't expect. Be sure you understand the requirements of
- your data, not just ignore errors by passing it through this function.
- '''
- # Could use isbasestring/isunicode here but we want this code to be as
- # fast as possible
- if isinstance(obj, basestring):
- if isinstance(obj, unicode):
- return obj
- if encoding in _UTF8_ALIASES:
- return unicode(obj, 'utf-8', errors)
- if encoding in _LATIN1_ALIASES:
- return unicode(obj, 'latin-1', errors)
- return obj.decode(encoding, errors)
-
- if not nonstring:
- nonstring = 'simplerepr'
- if nonstring == 'empty':
- return u''
- elif nonstring == 'passthru':
- return obj
- elif nonstring == 'simplerepr':
- try:
- simple = obj.__unicode__()
- except (AttributeError, UnicodeError):
- simple = None
- if not simple:
- try:
- simple = str(obj)
- except UnicodeError:
- try:
- simple = obj.__str__()
- except (UnicodeError, AttributeError):
- simple = u''
- if isinstance(simple, str):
- return unicode(simple, encoding, errors)
- return simple
- elif nonstring in ('repr', 'strict'):
- obj_repr = repr(obj)
- if isinstance(obj_repr, str):
- obj_repr = unicode(obj_repr, encoding, errors)
- if nonstring == 'repr':
- return obj_repr
- raise TypeError('to_unicode was given "%(obj)s" which is neither'
- ' a byte string (str) or a unicode string' %
- {'obj': obj_repr.encode(encoding, 'replace')})
-
- raise TypeError('nonstring value, %(param)s, is not set to a valid'
- ' action' % {'param': nonstring})
-
-def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
- '''Convert an object into a byte :class:`str`
-
- :arg obj: Object to convert to a byte :class:`str`. This should normally
- be a :class:`unicode` string.
- :kwarg encoding: Encoding to use to convert the :class:`unicode` string
- into a byte :class:`str`. Defaults to :term:`utf-8`.
- :kwarg errors: If errors are found while encoding, perform this action.
- Defaults to ``replace`` which replaces the invalid bytes with
- a character that means the bytes were unable to be encoded. Other
- values are the same as the error handling schemes in the `codec base
- classes
- <http://docs.python.org/library/codecs.html#codec-base-classes>`_.
- For instance ``strict`` which raises an exception and ``ignore`` which
- simply omits the non-encodable characters.
- :kwarg nonstring: How to treat nonstring values. Possible values are:
-
- :simplerepr: Attempt to call the object's "simple representation"
- method and return that value. Python-2.3+ has two methods that
- try to return a simple representation: :meth:`object.__unicode__`
- and :meth:`object.__str__`. We first try to get a usable value
- from :meth:`object.__str__`. If that fails we try the same
- with :meth:`object.__unicode__`.
- :empty: Return an empty byte :class:`str`
- :strict: Raise a :exc:`TypeError`
- :passthru: Return the object unchanged
- :repr: Attempt to return a byte :class:`str` of the :func:`repr` of the
- object
-
- Default is ``simplerepr``.
-
- :raises TypeError: if :attr:`nonstring` is ``strict`` and
- a non-:class:`basestring` object is passed in or if :attr:`nonstring`
- is set to an unknown value.
- :raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the
- bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`.
- :returns: byte :class:`str` or the original object depending on the value
- of :attr:`nonstring`.
-
- .. warning::
-
- If you pass a byte :class:`str` into this function the byte
- :class:`str` is returned unmodified. It is **not** re-encoded with
- the specified :attr:`encoding`. The easiest way to achieve that is::
-
- to_bytes(to_unicode(text), encoding='utf-8')
-
- The initial :func:`to_unicode` call will ensure text is
- a :class:`unicode` string. Then, :func:`to_bytes` will turn that into
- a byte :class:`str` with the specified encoding.
-
- Usually, this should be used on a :class:`unicode` string but it can take
- either a byte :class:`str` or a :class:`unicode` string intelligently.
- Nonstring objects are handled in different ways depending on the setting
- of the :attr:`nonstring` parameter.
-
- The default values of this function are set so as to always return a byte
- :class:`str` and never raise an error when converting from unicode to
- bytes. However, when you do not pass an encoding that can validly encode
- the object (or a non-string object), you may end up with output that you
- don't expect. Be sure you understand the requirements of your data, not
- just ignore errors by passing it through this function.
- '''
- # Could use isbasestring, isbytestring here but we want this to be as fast
- # as possible
- if isinstance(obj, basestring):
- if isinstance(obj, str):
- return obj
- return obj.encode(encoding, errors)
- if not nonstring:
- nonstring = 'simplerepr'
-
- if nonstring == 'empty':
- return ''
- elif nonstring == 'passthru':
- return obj
- elif nonstring == 'simplerepr':
- try:
- simple = str(obj)
- except UnicodeError:
- try:
- simple = obj.__str__()
- except (AttributeError, UnicodeError):
- simple = None
- if not simple:
- try:
- simple = obj.__unicode__()
- except (AttributeError, UnicodeError):
- simple = ''
- if isinstance(simple, unicode):
- simple = simple.encode(encoding, 'replace')
- return simple
- elif nonstring in ('repr', 'strict'):
- try:
- obj_repr = obj.__repr__()
- except (AttributeError, UnicodeError):
- obj_repr = ''
- if isinstance(obj_repr, unicode):
- obj_repr = obj_repr.encode(encoding, errors)
- else:
- obj_repr = str(obj_repr)
- if nonstring == 'repr':
- return obj_repr
- raise TypeError('to_bytes was given "%(obj)s" which is neither'
- ' a unicode string or a byte string (str)' % {'obj': obj_repr})
-
- raise TypeError('nonstring value, %(param)s, is not set to a valid'
- ' action' % {'param': nonstring})
-
-
-# force the return value of a function to be unicode. Use with partial to
-# ensure that a filter will return unicode values.
-def unicode_wrap(func, *args, **kwargs):
- return to_unicode(func(*args, **kwargs), nonstring='passthru')
diff --git a/v1/ansible/utils/vault.py b/v1/ansible/utils/vault.py
deleted file mode 100644
index 842688a2c1..0000000000
--- a/v1/ansible/utils/vault.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# (c) 2014, James Tanner <tanner.jc@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-
-import os
-import shlex
-import shutil
-import tempfile
-from io import BytesIO
-from subprocess import call
-from ansible import errors
-from hashlib import sha256
-
-# Note: Only used for loading obsolete VaultAES files. All files are written
-# using the newer VaultAES256 which does not require md5
-try:
- from hashlib import md5
-except ImportError:
- try:
- from md5 import md5
- except ImportError:
- # MD5 unavailable. Possibly FIPS mode
- md5 = None
-
-from binascii import hexlify
-from binascii import unhexlify
-from ansible import constants as C
-
-try:
- from Crypto.Hash import SHA256, HMAC
- HAS_HASH = True
-except ImportError:
- HAS_HASH = False
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-CRYPTO_UPGRADE = "ansible-vault requires a newer version of pycrypto than the one installed on your platform. You may fix this with OS-specific commands such as: yum install python-devel; rpm -e --nodeps python-crypto; pip install pycrypto"
-
-HEADER='$ANSIBLE_VAULT'
-CIPHER_WHITELIST=['AES', 'AES256']
-
-class VaultLib(object):
-
- def __init__(self, password):
- self.password = password
- self.cipher_name = None
- self.version = '1.1'
-
- def is_encrypted(self, data):
- if data.startswith(HEADER):
- return True
- else:
- return False
-
- def encrypt(self, data):
-
- if self.is_encrypted(data):
- raise errors.AnsibleError("data is already encrypted")
-
- if not self.cipher_name:
- self.cipher_name = "AES256"
- #raise errors.AnsibleError("the cipher must be set before encrypting data")
-
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- """
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
- """
-
- # encrypt sha + data
- enc_data = this_cipher.encrypt(data, self.password)
-
- # add header
- tmp_data = self._add_header(enc_data)
- return tmp_data
-
- def decrypt(self, data):
- if self.password is None:
- raise errors.AnsibleError("A vault password must be specified to decrypt data")
-
- if not self.is_encrypted(data):
- raise errors.AnsibleError("data is not encrypted")
-
- # clean out header
- data = self._split_header(data)
-
- # create the cipher object
- if 'Vault' + self.cipher_name in globals() and self.cipher_name in CIPHER_WHITELIST:
- cipher = globals()['Vault' + self.cipher_name]
- this_cipher = cipher()
- else:
- raise errors.AnsibleError("%s cipher could not be found" % self.cipher_name)
-
- # try to unencrypt data
- data = this_cipher.decrypt(data, self.password)
- if data is None:
- raise errors.AnsibleError("Decryption failed")
-
- return data
-
- def _add_header(self, data):
- # combine header and encrypted data in 80 char columns
-
- #tmpdata = hexlify(data)
- tmpdata = [data[i:i+80] for i in range(0, len(data), 80)]
-
- if not self.cipher_name:
- raise errors.AnsibleError("the cipher must be set before adding a header")
-
- dirty_data = HEADER + ";" + str(self.version) + ";" + self.cipher_name + "\n"
-
- for l in tmpdata:
- dirty_data += l + '\n'
-
- return dirty_data
-
-
- def _split_header(self, data):
- # used by decrypt
-
- tmpdata = data.split('\n')
- tmpheader = tmpdata[0].strip().split(';')
-
- self.version = str(tmpheader[1].strip())
- self.cipher_name = str(tmpheader[2].strip())
- clean_data = '\n'.join(tmpdata[1:])
-
- """
- # strip out newline, join, unhex
- clean_data = [ x.strip() for x in clean_data ]
- clean_data = unhexlify(''.join(clean_data))
- """
-
- return clean_data
-
- def __enter__(self):
- return self
-
- def __exit__(self, *err):
- pass
-
-class VaultEditor(object):
- # uses helper methods for write_file(self, filename, data)
- # to write a file so that code isn't duplicated for simple
- # file I/O, ditto read_file(self, filename) and launch_editor(self, filename)
- # ... "Don't Repeat Yourself", etc.
-
- def __init__(self, cipher_name, password, filename):
- # instantiates a member variable for VaultLib
- self.cipher_name = cipher_name
- self.password = password
- self.filename = filename
-
- def _edit_file_helper(self, existing_data=None, cipher=None):
- # make sure the umask is set to a sane value
- old_umask = os.umask(0o077)
-
- # Create a tempfile
- _, tmp_path = tempfile.mkstemp()
-
- if existing_data:
- self.write_data(existing_data, tmp_path)
-
- # drop the user into an editor on the tmp file
- try:
- call(self._editor_shell_command(tmp_path))
- except OSError, e:
- raise Exception("Failed to open editor (%s): %s" % (self._editor_shell_command(tmp_path)[0],str(e)))
- tmpdata = self.read_data(tmp_path)
-
- # create new vault
- this_vault = VaultLib(self.password)
- if cipher:
- this_vault.cipher_name = cipher
-
- # encrypt new data and write out to tmp
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, tmp_path)
-
- # shuffle tmp file into place
- self.shuffle_files(tmp_path, self.filename)
-
- # and restore umask
- os.umask(old_umask)
-
- def create_file(self):
- """ create a new encrypted file """
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if os.path.isfile(self.filename):
- raise errors.AnsibleError("%s exists, please use 'edit' instead" % self.filename)
-
- # Let the user specify contents and save file
- self._edit_file_helper(cipher=self.cipher_name)
-
- def decrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- if this_vault.is_encrypted(tmpdata):
- dec_data = this_vault.decrypt(tmpdata)
- if dec_data is None:
- raise errors.AnsibleError("Decryption failed")
- else:
- self.write_data(dec_data, self.filename)
- else:
- raise errors.AnsibleError("%s is not encrypted" % self.filename)
-
- def edit_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # let the user edit the data and save
- self._edit_file_helper(existing_data=dec_data)
- ###we want the cipher to default to AES256 (get rid of files
- # encrypted with the AES cipher)
- #self._edit_file_helper(existing_data=dec_data, cipher=this_vault.cipher_name)
-
-
- def view_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt to tmpfile
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
- old_umask = os.umask(0o077)
- _, tmp_path = tempfile.mkstemp()
- self.write_data(dec_data, tmp_path)
- os.umask(old_umask)
-
- # drop the user into pager on the tmp file
- call(self._pager_shell_command(tmp_path))
- os.remove(tmp_path)
-
- def encrypt_file(self):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- if not os.path.isfile(self.filename):
- raise errors.AnsibleError("%s does not exist" % self.filename)
-
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- this_vault.cipher_name = self.cipher_name
- if not this_vault.is_encrypted(tmpdata):
- enc_data = this_vault.encrypt(tmpdata)
- self.write_data(enc_data, self.filename)
- else:
- raise errors.AnsibleError("%s is already encrypted" % self.filename)
-
- def rekey_file(self, new_password):
-
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2 or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- # decrypt
- tmpdata = self.read_data(self.filename)
- this_vault = VaultLib(self.password)
- dec_data = this_vault.decrypt(tmpdata)
-
- # create new vault
- new_vault = VaultLib(new_password)
-
- # we want to force cipher to the default
- #new_vault.cipher_name = this_vault.cipher_name
-
- # re-encrypt data and re-write file
- enc_data = new_vault.encrypt(dec_data)
- self.write_data(enc_data, self.filename)
-
- def read_data(self, filename):
- f = open(filename, "rb")
- tmpdata = f.read()
- f.close()
- return tmpdata
-
- def write_data(self, data, filename):
- if os.path.isfile(filename):
- os.remove(filename)
- f = open(filename, "wb")
- f.write(data)
- f.close()
-
- def shuffle_files(self, src, dest):
- # overwrite dest with src
- if os.path.isfile(dest):
- os.remove(dest)
- shutil.move(src, dest)
-
- def _editor_shell_command(self, filename):
- EDITOR = os.environ.get('EDITOR','vim')
- editor = shlex.split(EDITOR)
- editor.append(filename)
-
- return editor
-
- def _pager_shell_command(self, filename):
- PAGER = os.environ.get('PAGER','less')
- pager = shlex.split(PAGER)
- pager.append(filename)
-
- return pager
-
-########################################
-# CIPHERS #
-########################################
-
-class VaultAES(object):
-
- # this version has been obsoleted by the VaultAES256 class
- # which uses encrypt-then-mac (fixing order) and also improving the KDF used
- # code remains for upgrade purposes only
- # http://stackoverflow.com/a/16761459
-
- def __init__(self):
- if not md5:
- raise errors.AnsibleError('md5 hash is unavailable (Could be due to FIPS mode). Legacy VaultAES format is unavailable.')
- if not HAS_AES:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def aes_derive_key_and_iv(self, password, salt, key_length, iv_length):
-
- """ Create a key and an initialization vector """
-
- d = d_i = ''
- while len(d) < key_length + iv_length:
- d_i = md5(d_i + password + salt).digest()
- d += d_i
-
- key = d[:key_length]
- iv = d[key_length:key_length+iv_length]
-
- return key, iv
-
- def encrypt(self, data, password, key_length=32):
-
- """ Read plaintext data from in_file and write encrypted to out_file """
-
-
- # combine sha + data
- this_sha = sha256(data).hexdigest()
- tmp_data = this_sha + "\n" + data
-
- in_file = BytesIO(tmp_data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
-
- # Get a block of random data. EL does not have Crypto.Random.new()
- # so os.urandom is used for cross platform purposes
- salt = os.urandom(bs - len('Salted__'))
-
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- out_file.write('Salted__' + salt)
- finished = False
- while not finished:
- chunk = in_file.read(1024 * bs)
- if len(chunk) == 0 or len(chunk) % bs != 0:
- padding_length = (bs - len(chunk) % bs) or bs
- chunk += padding_length * chr(padding_length)
- finished = True
- out_file.write(cipher.encrypt(chunk))
-
- out_file.seek(0)
- enc_data = out_file.read()
- tmp_data = hexlify(enc_data)
-
- return tmp_data
-
-
- def decrypt(self, data, password, key_length=32):
-
- """ Read encrypted data from in_file and write decrypted to out_file """
-
- # http://stackoverflow.com/a/14989032
-
- data = ''.join(data.split('\n'))
- data = unhexlify(data)
-
- in_file = BytesIO(data)
- in_file.seek(0)
- out_file = BytesIO()
-
- bs = AES.block_size
- salt = in_file.read(bs)[len('Salted__'):]
- key, iv = self.aes_derive_key_and_iv(password, salt, key_length, bs)
- cipher = AES.new(key, AES.MODE_CBC, iv)
- next_chunk = ''
- finished = False
-
- while not finished:
- chunk, next_chunk = next_chunk, cipher.decrypt(in_file.read(1024 * bs))
- if len(next_chunk) == 0:
- padding_length = ord(chunk[-1])
- chunk = chunk[:-padding_length]
- finished = True
- out_file.write(chunk)
-
- # reset the stream pointer to the beginning
- out_file.seek(0)
- new_data = out_file.read()
-
- # split out sha and verify decryption
- split_data = new_data.split("\n")
- this_sha = split_data[0]
- this_data = '\n'.join(split_data[1:])
- test_sha = sha256(this_data).hexdigest()
-
- if this_sha != test_sha:
- raise errors.AnsibleError("Decryption failed")
-
- #return out_file.read()
- return this_data
-
-
-class VaultAES256(object):
-
- """
- Vault implementation using AES-CTR with an HMAC-SHA256 authentication code.
- Keys are derived using PBKDF2
- """
-
- # http://www.daemonology.net/blog/2009-06-11-cryptographic-right-answers.html
-
- def __init__(self):
-
- if not HAS_PBKDF2 or not HAS_COUNTER or not HAS_HASH:
- raise errors.AnsibleError(CRYPTO_UPGRADE)
-
- def gen_key_initctr(self, password, salt):
- # 16 for AES 128, 32 for AES256
- keylength = 32
-
- # match the size used for counter.new to avoid extra work
- ivlength = 16
-
- hash_function = SHA256
-
- # make two keys and one iv
- pbkdf2_prf = lambda p, s: HMAC.new(p, s, hash_function).digest()
-
-
- derivedkey = PBKDF2(password, salt, dkLen=(2 * keylength) + ivlength,
- count=10000, prf=pbkdf2_prf)
-
- key1 = derivedkey[:keylength]
- key2 = derivedkey[keylength:(keylength * 2)]
- iv = derivedkey[(keylength * 2):(keylength * 2) + ivlength]
-
- return key1, key2, hexlify(iv)
-
-
- def encrypt(self, data, password):
-
- salt = os.urandom(32)
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # PKCS#7 PAD DATA http://tools.ietf.org/html/rfc5652#section-6.3
- bs = AES.block_size
- padding_length = (bs - len(data) % bs) or bs
- data += padding_length * chr(padding_length)
-
- # COUNTER.new PARAMETERS
- # 1) nbits (integer) - Length of the counter, in bits.
- # 2) initial_value (integer) - initial value of the counter. "iv" from gen_key_initctr
-
- ctr = Counter.new(128, initial_value=long(iv, 16))
-
- # AES.new PARAMETERS
- # 1) AES key, must be either 16, 24, or 32 bytes long -- "key" from gen_key_initctr
- # 2) MODE_CTR, is the recommended mode
- # 3) counter=<CounterObject>
-
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # ENCRYPT PADDED DATA
- cryptedData = cipher.encrypt(data)
-
- # COMBINE SALT, DIGEST AND DATA
- hmac = HMAC.new(key2, cryptedData, SHA256)
- message = "%s\n%s\n%s" % ( hexlify(salt), hmac.hexdigest(), hexlify(cryptedData) )
- message = hexlify(message)
- return message
-
- def decrypt(self, data, password):
-
- # SPLIT SALT, DIGEST, AND DATA
- data = ''.join(data.split("\n"))
- data = unhexlify(data)
- salt, cryptedHmac, cryptedData = data.split("\n", 2)
- salt = unhexlify(salt)
- cryptedData = unhexlify(cryptedData)
-
- key1, key2, iv = self.gen_key_initctr(password, salt)
-
- # EXIT EARLY IF DIGEST DOESN'T MATCH
- hmacDecrypt = HMAC.new(key2, cryptedData, SHA256)
- if not self.is_equal(cryptedHmac, hmacDecrypt.hexdigest()):
- return None
-
- # SET THE COUNTER AND THE CIPHER
- ctr = Counter.new(128, initial_value=long(iv, 16))
- cipher = AES.new(key1, AES.MODE_CTR, counter=ctr)
-
- # DECRYPT PADDED DATA
- decryptedData = cipher.decrypt(cryptedData)
-
- # UNPAD DATA
- padding_length = ord(decryptedData[-1])
- decryptedData = decryptedData[:-padding_length]
-
- return decryptedData
-
- def is_equal(self, a, b):
- # http://codahale.com/a-lesson-in-timing-attacks/
- if len(a) != len(b):
- return False
-
- result = 0
- for x, y in zip(a, b):
- result |= ord(x) ^ ord(y)
- return result == 0
-
-
diff --git a/v1/bin/ansible b/v1/bin/ansible
deleted file mode 100755
index 7fec34ec81..0000000000
--- a/v1/bin/ansible
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-########################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-
-from ansible.runner import Runner
-import ansible.constants as C
-from ansible import utils
-from ansible import errors
-from ansible import callbacks
-from ansible import inventory
-########################################################
-
-class Cli(object):
- ''' code behind bin/ansible '''
-
- # ----------------------------------------------
-
- def __init__(self):
- self.stats = callbacks.AggregateStats()
- self.callbacks = callbacks.CliRunnerCallbacks()
- if C.DEFAULT_LOAD_CALLBACK_PLUGINS:
- callbacks.load_callback_plugins()
-
- # ----------------------------------------------
-
- def parse(self):
- ''' create an options parser for bin/ansible '''
-
- parser = utils.base_parser(
- constants=C,
- runas_opts=True,
- subset_opts=True,
- async_opts=True,
- output_opts=True,
- connect_opts=True,
- check_opts=True,
- diff_opts=False,
- usage='%prog <host-pattern> [options]'
- )
-
- parser.add_option('-a', '--args', dest='module_args',
- help="module arguments", default=C.DEFAULT_MODULE_ARGS)
- parser.add_option('-m', '--module-name', dest='module_name',
- help="module name to execute (default=%s)" % C.DEFAULT_MODULE_NAME,
- default=C.DEFAULT_MODULE_NAME)
-
- options, args = parser.parse_args()
- self.callbacks.options = options
-
- if len(args) == 0 or len(args) > 1:
- parser.print_help()
- sys.exit(1)
-
- # privlege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- return (options, args)
-
- # ----------------------------------------------
-
- def run(self, options, args):
- ''' use Runner lib to do SSH things '''
-
- pattern = args[0]
-
- sshpass = becomepass = vault_pass = become_method = None
-
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- # become
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass, become_ask_pass=options.become_ask_pass, ask_vault_pass=options.ask_vault_pass, become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- inventory_manager = inventory.Inventory(options.inventory, vault_password=vault_pass)
- if options.subset:
- inventory_manager.subset(options.subset)
- hosts = inventory_manager.list_hosts(pattern)
-
- if len(hosts) == 0:
- callbacks.display("No hosts matched", stderr=True)
- sys.exit(0)
-
- if options.listhosts:
- for host in hosts:
- callbacks.display(' %s' % host)
- sys.exit(0)
-
- if options.module_name in ['command','shell'] and not options.module_args:
- callbacks.display("No argument passed to %s module" % options.module_name, color='red', stderr=True)
- sys.exit(1)
-
- if options.tree:
- utils.prepare_writeable_dir(options.tree)
-
- runner = Runner(
- module_name=options.module_name,
- module_path=options.module_path,
- module_args=options.module_args,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- inventory=inventory_manager,
- timeout=options.timeout,
- private_key_file=options.private_key_file,
- forks=options.forks,
- pattern=pattern,
- callbacks=self.callbacks,
- transport=options.connection,
- subset=options.subset,
- check=options.check,
- diff=options.check,
- vault_pass=vault_pass,
- become=options.become,
- become_method=options.become_method,
- become_pass=becomepass,
- become_user=options.become_user,
- extra_vars=extra_vars,
- )
-
- if options.seconds:
- callbacks.display("background launch...\n\n", color='cyan')
- results, poller = runner.run_async(options.seconds)
- results = self.poll_while_needed(poller, options)
- else:
- results = runner.run()
-
- return (runner, results)
-
- # ----------------------------------------------
-
- def poll_while_needed(self, poller, options):
- ''' summarize results from Runner '''
-
- # BACKGROUND POLL LOGIC when -B and -P are specified
- if options.seconds and options.poll_interval > 0:
- poller.wait(options.seconds, options.poll_interval)
-
- return poller.results
-
-
-########################################################
-
-if __name__ == '__main__':
- callbacks.display("", log_only=True)
- callbacks.display(" ".join(sys.argv), log_only=True)
- callbacks.display("", log_only=True)
-
- cli = Cli()
- (options, args) = cli.parse()
- try:
- (runner, results) = cli.run(options, args)
- for result in results['contacted'].values():
- if 'failed' in result or result.get('rc', 0) != 0:
- sys.exit(2)
- if results['dark']:
- sys.exit(3)
- except errors.AnsibleError, e:
- # Generic handler for ansible specific errors
- callbacks.display("ERROR: %s" % str(e), stderr=True, color='red')
- sys.exit(1)
-
diff --git a/v1/bin/ansible-doc b/v1/bin/ansible-doc
deleted file mode 100755
index dff7cecce7..0000000000
--- a/v1/bin/ansible-doc
+++ /dev/null
@@ -1,337 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import sys
-import textwrap
-import re
-import optparse
-import datetime
-import subprocess
-import fcntl
-import termios
-import struct
-
-from ansible import utils
-from ansible.utils import module_docs
-import ansible.constants as C
-from ansible.utils import version
-import traceback
-
-MODULEDIR = C.DEFAULT_MODULE_PATH
-
-BLACKLIST_EXTS = ('.pyc', '.swp', '.bak', '~', '.rpm')
-IGNORE_FILES = [ "COPYING", "CONTRIBUTING", "LICENSE", "README", "VERSION"]
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-PAGER = 'less'
-LESS_OPTS = 'FRSX' # -F (quit-if-one-screen) -R (allow raw ansi control chars)
- # -S (chop long lines) -X (disable termcap init and de-init)
-
-def pager_print(text):
- ''' just print text '''
- print text
-
-def pager_pipe(text, cmd):
- ''' pipe text through a pager '''
- if 'LESS' not in os.environ:
- os.environ['LESS'] = LESS_OPTS
- try:
- cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
- cmd.communicate(input=text)
- except IOError:
- pass
- except KeyboardInterrupt:
- pass
-
-def pager(text):
- ''' find reasonable way to display text '''
- # this is a much simpler form of what is in pydoc.py
- if not sys.stdout.isatty():
- pager_print(text)
- elif 'PAGER' in os.environ:
- if sys.platform == 'win32':
- pager_print(text)
- else:
- pager_pipe(text, os.environ['PAGER'])
- elif subprocess.call('(less --version) 2> /dev/null', shell = True) == 0:
- pager_pipe(text, 'less')
- else:
- pager_print(text)
-
-def tty_ify(text):
-
- t = _ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
- t = _BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
- t = _MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
- t = _URL.sub(r"\1", t) # U(word) => word
- t = _CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
-
- return t
-
-def get_man_text(doc):
-
- opt_indent=" "
- text = []
- text.append("> %s\n" % doc['module'].upper())
-
- desc = " ".join(doc['description'])
-
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=" ", subsequent_indent=" "))
-
- if 'option_keys' in doc and len(doc['option_keys']) > 0:
- text.append("Options (= is mandatory):\n")
-
- for o in sorted(doc['option_keys']):
- opt = doc['options'][o]
-
- if opt.get('required', False):
- opt_leadin = "="
- else:
- opt_leadin = "-"
-
- text.append("%s %s" % (opt_leadin, o))
-
- desc = " ".join(opt['description'])
-
- if 'choices' in opt:
- choices = ", ".join(str(i) for i in opt['choices'])
- desc = desc + " (Choices: " + choices + ")"
- if 'default' in opt:
- default = str(opt['default'])
- desc = desc + " [Default: " + default + "]"
- text.append("%s\n" % textwrap.fill(tty_ify(desc), initial_indent=opt_indent,
- subsequent_indent=opt_indent))
-
- if 'notes' in doc and len(doc['notes']) > 0:
- notes = " ".join(doc['notes'])
- text.append("Notes:%s\n" % textwrap.fill(tty_ify(notes), initial_indent=" ",
- subsequent_indent=opt_indent))
-
-
- if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
- req = ", ".join(doc['requirements'])
- text.append("Requirements:%s\n" % textwrap.fill(tty_ify(req), initial_indent=" ",
- subsequent_indent=opt_indent))
-
- if 'examples' in doc and len(doc['examples']) > 0:
- text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
- for ex in doc['examples']:
- text.append("%s\n" % (ex['code']))
-
- if 'plainexamples' in doc and doc['plainexamples'] is not None:
- text.append("EXAMPLES:")
- text.append(doc['plainexamples'])
- if 'returndocs' in doc and doc['returndocs'] is not None:
- text.append("RETURN VALUES:")
- text.append(doc['returndocs'])
- text.append('')
-
- return "\n".join(text)
-
-
-def get_snippet_text(doc):
-
- text = []
- desc = tty_ify(" ".join(doc['short_description']))
- text.append("- name: %s" % (desc))
- text.append(" action: %s" % (doc['module']))
-
- for o in sorted(doc['options'].keys()):
- opt = doc['options'][o]
- desc = tty_ify(" ".join(opt['description']))
-
- if opt.get('required', False):
- s = o + "="
- else:
- s = o
-
- text.append(" %-20s # %s" % (s, desc))
- text.append('')
-
- return "\n".join(text)
-
-def get_module_list_text(module_list):
- tty_size = 0
- if os.isatty(0):
- tty_size = struct.unpack('HHHH',
- fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))[1]
- columns = max(60, tty_size)
- displace = max(len(x) for x in module_list)
- linelimit = columns - displace - 5
- text = []
- deprecated = []
- for module in sorted(set(module_list)):
-
- if module in module_docs.BLACKLIST_MODULES:
- continue
-
- filename = utils.plugins.module_finder.find_plugin(module)
-
- if filename is None:
- continue
- if filename.endswith(".ps1"):
- continue
- if os.path.isdir(filename):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- desc = tty_ify(doc.get('short_description', '?')).strip()
- if len(desc) > linelimit:
- desc = desc[:linelimit] + '...'
-
- if module.startswith('_'): # Handle deprecated
- deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
- else:
- text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
-
- if len(deprecated) > 0:
- text.append("\nDEPRECATED:")
- text.extend(deprecated)
- return "\n".join(text)
-
-def find_modules(path, module_list):
-
- if os.path.isdir(path):
- for module in os.listdir(path):
- if module.startswith('.'):
- continue
- elif os.path.isdir(module):
- find_modules(module, module_list)
- elif any(module.endswith(x) for x in BLACKLIST_EXTS):
- continue
- elif module.startswith('__'):
- continue
- elif module in IGNORE_FILES:
- continue
- elif module.startswith('_'):
- fullpath = '/'.join([path,module])
- if os.path.islink(fullpath): # avoids aliases
- continue
-
- module = os.path.splitext(module)[0] # removes the extension
- module_list.append(module)
-
-def main():
-
- p = optparse.OptionParser(
- version=version("%prog"),
- usage='usage: %prog [options] [module...]',
- description='Show Ansible module documentation',
- )
-
- p.add_option("-M", "--module-path",
- action="store",
- dest="module_path",
- default=MODULEDIR,
- help="Ansible modules/ directory")
- p.add_option("-l", "--list",
- action="store_true",
- default=False,
- dest='list_dir',
- help='List available modules')
- p.add_option("-s", "--snippet",
- action="store_true",
- default=False,
- dest='show_snippet',
- help='Show playbook snippet for specified module(s)')
- p.add_option('-v', action='version', help='Show version number and exit')
-
- (options, args) = p.parse_args()
-
- if options.module_path is not None:
- for i in options.module_path.split(os.pathsep):
- utils.plugins.module_finder.add_directory(i)
-
- if options.list_dir:
- # list modules
- paths = utils.plugins.module_finder._get_paths()
- module_list = []
- for path in paths:
- find_modules(path, module_list)
-
- pager(get_module_list_text(module_list))
- sys.exit()
-
- if len(args) == 0:
- p.print_help()
-
- def print_paths(finder):
- ''' Returns a string suitable for printing of the search path '''
-
- # Uses a list to get the order right
- ret = []
- for i in finder._get_paths():
- if i not in ret:
- ret.append(i)
- return os.pathsep.join(ret)
-
- text = ''
- for module in args:
-
- filename = utils.plugins.module_finder.find_plugin(module)
- if filename is None:
- sys.stderr.write("module %s not found in %s\n" % (module, print_paths(utils.plugins.module_finder)))
- continue
-
- if any(filename.endswith(x) for x in BLACKLIST_EXTS):
- continue
-
- try:
- doc, plainexamples, returndocs = module_docs.get_docstring(filename)
- except:
- traceback.print_exc()
- sys.stderr.write("ERROR: module %s has a documentation error formatting or is missing documentation\n" % module)
- continue
-
- if doc is not None:
-
- all_keys = []
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
- all_keys = sorted(all_keys)
- doc['option_keys'] = all_keys
-
- doc['filename'] = filename
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['plainexamples'] = plainexamples
- doc['returndocs'] = returndocs
-
- if options.show_snippet:
- text += get_snippet_text(doc)
- else:
- text += get_man_text(doc)
- else:
- # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
- # probably a quoting issue.
- sys.stderr.write("ERROR: module %s missing documentation (or could not parse documentation)\n" % module)
- pager(text)
-
-if __name__ == '__main__':
- main()
diff --git a/v1/bin/ansible-galaxy b/v1/bin/ansible-galaxy
deleted file mode 100755
index a6d625671e..0000000000
--- a/v1/bin/ansible-galaxy
+++ /dev/null
@@ -1,957 +0,0 @@
-#!/usr/bin/env python
-
-########################################################################
-#
-# (C) 2013, James Cammarata <jcammarata@ansible.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-########################################################################
-
-import datetime
-import json
-import os
-import os.path
-import shutil
-import subprocess
-import sys
-import tarfile
-import tempfile
-import urllib
-import urllib2
-import yaml
-
-from collections import defaultdict
-from distutils.version import LooseVersion
-from jinja2 import Environment
-from optparse import OptionParser
-
-import ansible.constants as C
-import ansible.utils
-from ansible.errors import AnsibleError
-
# Jinja2 template rendered by execute_init() into a new role's meta/main.yml.
# Context keys used below: author, description, company, issue_tracker_url,
# license, min_ansible_version, platforms (dict name -> list of versions,
# iterated with .iteritems(), so Python 2 only), categories, dependencies.
default_meta_template = """---
galaxy_info:
  author: {{ author }}
  description: {{description}}
  company: {{ company }}
  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: {{ issue_tracker_url }}
  # Some suggested licenses:
  # - BSD (default)
  # - MIT
  # - GPLv2
  # - GPLv3
  # - Apache
  # - CC-BY
  license: {{ license }}
  min_ansible_version: {{ min_ansible_version }}
  #
  # Below are all platforms currently available. Just uncomment
  # the ones that apply to your role. If you don't see your
  # platform on this list, let us know and we'll get it added!
  #
  #platforms:
  {%- for platform,versions in platforms.iteritems() %}
  #- name: {{ platform }}
  #  versions:
  #  - all
  {%- for version in versions %}
  #  - {{ version }}
  {%- endfor %}
  {%- endfor %}
  #
  # Below are all categories currently available. Just as with
  # the platforms above, uncomment those that apply to your role.
  #
  #categories:
  {%- for category in categories %}
  #- {{ category.name }}
  {%- endfor %}
dependencies: []
  # List your role dependencies here, one per line.
  # Be sure to remove the '[]' above if you add dependencies
  # to this list.
  {% for dependency in dependencies %}
  #- {{ dependency }}
  {% endfor %}

"""
-
# Markdown skeleton written verbatim to README.md by execute_init().
# Not a Jinja2 template -- no substitutions are performed on it.
default_readme_template = """Role Name
=========

A brief description of the role goes here.

Requirements
------------

Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.

Role Variables
--------------

A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.

Dependencies
------------

A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.

Example Playbook
----------------

Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:

    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).
"""
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
# The sub-commands this CLI understands, and the role-info keys that are
# too noisy to show in 'info' output.
VALID_ACTIONS = ("init", "info", "install", "list", "remove")
SKIP_INFO_KEYS = ("platforms","readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )

def get_action(args):
    """
    Scan *args* (typically sys.argv) for the first recognized action
    name, remove it from the list in place, and return it.

    Returns None -- leaving args untouched -- when no valid action
    is present.
    """
    for idx, candidate in enumerate(args):
        if candidate in VALID_ACTIONS:
            del args[idx]
            return candidate
    return None
-
def build_option_parser(action):
    """
    Builds an option parser object based on the action
    the user wants to execute.

    Exits (after printing help) when no action was recognized.
    """

    usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(VALID_ACTIONS)
    epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
    # optparse re-wraps the epilog by default, destroying the embedded
    # newlines; patching the class method makes it print verbatim.
    # NOTE(review): this mutates OptionParser globally, not just this parser.
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage=usage, epilog=epilog)

    if not action:
        parser.print_help()
        sys.exit()

    # options for all actions
    # - none yet

    # options specific to actions
    if action == "info":
        parser.set_usage("usage: %prog info [options] role_name[,version]")
    elif action == "init":
        parser.set_usage("usage: %prog init [options] role_name")
        parser.add_option(
            '-p', '--init-path', dest='init_path', default="./",
            help='The path in which the skeleton role will be created. '
                 'The default is the current working directory.')
        parser.add_option(
            '--offline', dest='offline', default=False, action='store_true',
            help="Don't query the galaxy API when creating roles")
    elif action == "install":
        parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
        parser.add_option(
            '-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
            help='Ignore errors and continue with the next specified role.')
        parser.add_option(
            '-n', '--no-deps', dest='no_deps', action='store_true', default=False,
            help='Don\'t download roles listed as dependencies')
        parser.add_option(
            '-r', '--role-file', dest='role_file',
            help='A file containing a list of roles to be imported')
    elif action == "remove":
        parser.set_usage("usage: %prog remove role1 role2 ...")
    elif action == "list":
        parser.set_usage("usage: %prog list [role_name]")

    # options that apply to more than one action
    if action != "init":
        parser.add_option(
            '-p', '--roles-path', dest='roles_path', default=C.DEFAULT_ROLES_PATH,
            help='The path to the directory containing your roles. '
                 'The default is the roles_path configured in your '
                 'ansible.cfg file (/etc/ansible/roles if not configured)')

    if action in ("info","init","install"):
        parser.add_option(
            '-s', '--server', dest='api_server', default="galaxy.ansible.com",
            help='The API server destination')

    if action in ("init","install"):
        parser.add_option(
            '-f', '--force', dest='force', action='store_true', default=False,
            help='Force overwriting an existing role')
    # done, return the parser
    return parser
-
def get_opt(options, k, defval=""):
    """
    Return option *k* from an optparse Values instance, or *defval*
    when the option is not present.

    For 'roles_path', only the first entry of a pathsep-separated
    list is returned (the v1 CLI supports a single roles directory).
    """
    try:
        data = getattr(options, k)
    except AttributeError:
        # narrowed from a bare 'except:' -- getattr on a missing
        # attribute is the only failure mode expected here
        return defval
    if k == "roles_path":
        if os.pathsep in data:
            data = data.split(os.pathsep)[0]
    return data
-
-def exit_without_ignore(options, rc=1):
- """
- Exits with the specified return code unless the
- option --ignore-errors was specified
- """
-
- if not get_opt(options, "ignore_errors", False):
- print '- you can use --ignore-errors to skip failed roles.'
- sys.exit(rc)
-
-
-#-------------------------------------------------------------------------------------
-# Galaxy API functions
-#-------------------------------------------------------------------------------------
-
def api_get_config(api_server):
    """
    Fetches the Galaxy API current version to ensure
    the API server is up and reachable.

    Returns the decoded JSON config dict, or None when the server is
    unreachable or its response lacks a 'current_version' field.
    """

    try:
        url = 'https://%s/api/' % api_server
        data = json.load(urllib2.urlopen(url))
        if not data.get("current_version",None):
            return None
        else:
            return data
    except:
        # best-effort reachability probe: any network or JSON failure
        # is deliberately collapsed into a "server is down" result
        return None
-
def api_lookup_role_by_name(api_server, role_name, notify=True):
    """
    Uses the Galaxy API to do a lookup on the role owner/name.

    role_name is expected as 'username.rolename'; returns the first
    matching role dict from the API, or None when not found or on
    any network/JSON error.
    """

    role_name = urllib.quote(role_name)

    try:
        # split 'user.role' -- everything before the last dot is the owner
        parts = role_name.split(".")
        user_name = ".".join(parts[0:-1])
        role_name = parts[-1]
        if notify:
            print "- downloading role '%s', owned by %s" % (role_name, user_name)
    except:
        # NOTE(review): 'parser' is not defined in this scope, so if this
        # branch ever executed it would raise NameError instead of printing
        # help; in practice str.split/join cannot raise here.
        parser.print_help()
        print "- invalid role name (%s). Specify role as format: username.rolename" % role_name
        sys.exit(1)

    url = 'https://%s/api/v1/roles/?owner__username=%s&name=%s' % (api_server,user_name,role_name)
    try:
        data = json.load(urllib2.urlopen(url))
        if len(data["results"]) == 0:
            return None
        else:
            return data["results"][0]
    except:
        return None
-
def api_fetch_role_related(api_server, related, role_id):
    """
    Uses the Galaxy API to fetch the list of related items for
    the given role. The url comes from the 'related' field of
    the role.

    Follows the API's paginated 'next' links and returns the combined
    result list, or None on any error.
    """

    try:
        url = 'https://%s/api/v1/roles/%d/%s/?page_size=50' % (api_server, int(role_id), related)
        data = json.load(urllib2.urlopen(url))
        results = data['results']
        done = (data.get('next', None) == None)
        while not done:
            # 'next' is a server-relative path, so re-prefix the host
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        return None
-
def api_get_list(api_server, what):
    """
    Uses the Galaxy API to fetch the list of items specified.

    Follows pagination like api_fetch_role_related(); returns the
    combined list, or None after printing an error.
    """

    try:
        # NOTE(review): '?page_size' has no value here (sibling function
        # uses '?page_size=50') -- likely an oversight, left as-is.
        url = 'https://%s/api/v1/%s/?page_size' % (api_server, what)
        data = json.load(urllib2.urlopen(url))
        if "results" in data:
            results = data['results']
        else:
            # some endpoints return a bare list instead of a paginated dict
            results = data
        done = True
        if "next" in data:
            done = (data.get('next', None) == None)
        while not done:
            url = 'https://%s%s' % (api_server, data['next'])
            print url
            data = json.load(urllib2.urlopen(url))
            results += data['results']
            done = (data.get('next', None) == None)
        return results
    except:
        print "- failed to download the %s list" % what
        return None
-
-#-------------------------------------------------------------------------------------
-# scm repo utility functions
-#-------------------------------------------------------------------------------------
-
def scm_archive_role(scm, role_url, role_version, role_name):
    """
    Clone the role from a git or hg URL into a temp directory and pack
    it into a tar archive named '<role_name>/...'.

    Returns the path of the tar file on success, False on failure.
    """
    if scm not in ['hg', 'git']:
        print "- scm %s is not currently supported" % scm
        return False
    tempdir = tempfile.mkdtemp()
    clone_cmd = [scm, 'clone', role_url, role_name]
    with open('/dev/null', 'w') as devnull:
        try:
            print "- executing: %s" % " ".join(clone_cmd)
            popen = subprocess.Popen(clone_cmd, cwd=tempdir, stdout=devnull, stderr=devnull)
        except:
            raise AnsibleError("error executing: %s" % " ".join(clone_cmd))
        rc = popen.wait()
    if rc != 0:
        # NOTE(review): tempdir is leaked on this early-return path
        print "- command %s failed" % ' '.join(clone_cmd)
        print "  in directory %s" % tempdir
        return False

    # delete=False so the archive survives this function; the caller is
    # responsible for unlinking it (see execute_install)
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar')
    if scm == 'hg':
        archive_cmd = ['hg', 'archive', '--prefix', "%s/" % role_name]
        if role_version:
            archive_cmd.extend(['-r', role_version])
        archive_cmd.append(temp_file.name)
    if scm == 'git':
        archive_cmd = ['git', 'archive', '--prefix=%s/' % role_name, '--output=%s' % temp_file.name]
        if role_version:
            archive_cmd.append(role_version)
        else:
            archive_cmd.append('HEAD')

    with open('/dev/null', 'w') as devnull:
        print "- executing: %s" % " ".join(archive_cmd)
        popen = subprocess.Popen(archive_cmd, cwd=os.path.join(tempdir, role_name),
                                 stderr=devnull, stdout=devnull)
        rc = popen.wait()
    if rc != 0:
        print "- command %s failed" % ' '.join(archive_cmd)
        print "  in directory %s" % tempdir
        return False

    shutil.rmtree(tempdir, ignore_errors=True)

    return temp_file.name
-
-
-#-------------------------------------------------------------------------------------
-# Role utility functions
-#-------------------------------------------------------------------------------------
-
def get_role_path(role_name, options):
    """
    Return the user-expanded path to *role_name* under the configured
    roles_path option.
    """
    base = get_opt(options, 'roles_path')
    return os.path.expanduser(os.path.join(base, role_name))
-
def get_role_metadata(role_name, options):
    """
    Returns the metadata as YAML, if the file 'meta/main.yml'
    exists in the specified role_path; None when the file is missing
    or unreadable/unparseable.
    """
    role_path = os.path.join(get_role_path(role_name, options), 'meta/main.yml')
    try:
        if os.path.isfile(role_path):
            # 'with' guarantees the handle is closed even if YAML
            # parsing raises (the old open/close pair leaked it)
            with open(role_path, 'r') as f:
                return yaml.safe_load(f)
        return None
    except Exception:
        # narrowed from a bare 'except:' so Ctrl-C still propagates
        return None
-
def get_galaxy_install_info(role_name, options):
    """
    Returns the YAML data contained in 'meta/.galaxy_install_info',
    if it exists; None when missing or unreadable.
    """

    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        if os.path.isfile(info_path):
            # 'with' guarantees the handle is closed even if YAML
            # parsing raises (the old open/close pair leaked it)
            with open(info_path, 'r') as f:
                return yaml.safe_load(f)
        return None
    except Exception:
        # narrowed from a bare 'except:' so Ctrl-C still propagates
        return None
-
def write_galaxy_install_info(role_name, role_version, options):
    """
    Writes a YAML-formatted file to the role's meta/ directory
    (named .galaxy_install_info) which contains some information
    we can use later for commands like 'list' and 'info'.

    Returns True on success, False on any failure.
    """

    info = dict(
        version = role_version,
        install_date = datetime.datetime.utcnow().strftime("%c"),
    )
    try:
        info_path = os.path.join(get_role_path(role_name, options), 'meta/.galaxy_install_info')
        # 'with' guarantees the handle is closed even if the dump raises
        # (the old open/close pair leaked it); safe_dump returns None,
        # so the previous 'info_data =' binding was dropped as unused
        with open(info_path, 'w+') as f:
            yaml.safe_dump(info, f)
    except Exception:
        # narrowed from a bare 'except:' so Ctrl-C still propagates
        return False
    return True
-
-
def remove_role(role_name, options):
    """
    Remove the named role from the roles path.  A meta/main.yml must be
    present (sanity check) so arbitrary directories are never deleted.
    Returns True when the role was removed, False otherwise.
    """
    if not get_role_metadata(role_name, options):
        return False
    shutil.rmtree(get_role_path(role_name, options))
    return True
-
def fetch_role(role_name, target, role_data, options):
    """
    Downloads the archived role from github to a temp location, extracts
    it, and then copies the extracted role to the role library path.

    Returns the temp file path on success, False on failure.
    """

    # first grab the file and save it to a temp location
    if '://' in role_name:
        # a full URL was passed in place of a role name; fetch it verbatim
        archive_url = role_name
    else:
        archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], target)
    print "- downloading role from %s" % archive_url

    try:
        url_file = urllib2.urlopen(archive_url)
        # delete=False: the caller unlinks the file after installing
        temp_file = tempfile.NamedTemporaryFile(delete=False)
        # copy until read() signals EOF with an empty string
        data = url_file.read()
        while data:
            temp_file.write(data)
            data = url_file.read()
        temp_file.close()
        return temp_file.name
    except Exception, e:
        # TODO: better urllib2 error handling for error
        # messages that are more exact
        print "- error: failed to download the file."
        return False
-
def install_role(role_name, role_version, role_filename, options):
    """
    Extract the downloaded role tarball into the roles path under
    role_name, then record the install info.

    Returns the parsed meta/main.yml data on success, False on failure.
    """
    # the file is a tar, so open it that way and extract it
    # to the specified (or default) roles directory

    if not tarfile.is_tarfile(role_filename):
        print "- error: the file downloaded was not a tar.gz"
        return False
    else:
        if role_filename.endswith('.gz'):
            role_tar_file = tarfile.open(role_filename, "r:gz")
        else:
            role_tar_file = tarfile.open(role_filename, "r")
        # verify the role's meta file
        meta_file = None
        members = role_tar_file.getmembers()
        # next find the metadata file
        for member in members:
            if "/meta/main.yml" in member.name:
                meta_file = member
                break
        if not meta_file:
            print "- error: this role does not appear to have a meta/main.yml file."
            return False
        else:
            try:
                meta_file_data = yaml.safe_load(role_tar_file.extractfile(meta_file))
            except:
                print "- error: this role does not appear to have a valid meta/main.yml file."
                return False

        # we strip off the top-level directory for all of the files contained within
        # the tar file here, since the default is 'github_repo-target', and change it
        # to the specified role's name
        role_path = os.path.join(get_opt(options, 'roles_path'), role_name)
        role_path = os.path.expanduser(role_path)
        print "- extracting %s to %s" % (role_name, role_path)
        try:
            if os.path.exists(role_path):
                if not os.path.isdir(role_path):
                    print "- error: the specified roles path exists and is not a directory."
                    return False
                elif not get_opt(options, "force", False):
                    print "- error: the specified role %s appears to already exist. Use --force to replace it." % role_name
                    return False
                else:
                    # using --force, remove the old path
                    if not remove_role(role_name, options):
                        print "- error: %s doesn't appear to contain a role." % role_path
                        print "  please remove this directory manually if you really want to put the role here."
                        return False
            else:
                os.makedirs(role_path)

            # now we do the actual extraction to the role_path
            for member in members:
                # we only extract files, and remove any relative path
                # bits that might be in the file for security purposes
                # and drop the leading directory, as mentioned above
                if member.isreg() or member.issym():
                    parts = member.name.split("/")[1:]
                    final_parts = []
                    for part in parts:
                        # filter path components that could escape role_path
                        # or expand unexpectedly ('..', '~', '$')
                        if part != '..' and '~' not in part and '$' not in part:
                            final_parts.append(part)
                    member.name = os.path.join(*final_parts)
                    role_tar_file.extract(member, role_path)

            # write out the install info file for later use
            write_galaxy_install_info(role_name, role_version, options)
        except OSError, e:
            print "- error: you do not have permission to modify files in %s" % role_path
            return False

        # return the parsed yaml metadata
        print "- %s was installed successfully" % role_name
        return meta_file_data
-
-#-------------------------------------------------------------------------------------
-# Action functions
-#-------------------------------------------------------------------------------------
-
-def execute_init(args, options, parser):
- """
- Executes the init action, which creates the skeleton framework
- of a role that complies with the galaxy metadata format.
- """
-
- init_path = get_opt(options, 'init_path', './')
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- force = get_opt(options, 'force', False)
- offline = get_opt(options, 'offline', False)
-
- if not offline:
- api_config = api_get_config(api_server)
- if not api_config:
- print "- the API server (%s) is not responding, please try again later." % api_server
- sys.exit(1)
-
- try:
- role_name = args.pop(0).strip()
- if role_name == "":
- raise Exception("")
- role_path = os.path.join(init_path, role_name)
- if os.path.exists(role_path):
- if os.path.isfile(role_path):
- print "- the path %s already exists, but is a file - aborting" % role_path
- sys.exit(1)
- elif not force:
- print "- the directory %s already exists." % role_path
- print " you can use --force to re-initialize this directory,\n" + \
- " however it will reset any main.yml files that may have\n" + \
- " been modified there already."
- sys.exit(1)
- except Exception, e:
- parser.print_help()
- print "- no role name specified for init"
- sys.exit(1)
-
- ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars')
-
- # create the default README.md
- if not os.path.exists(role_path):
- os.makedirs(role_path)
- readme_path = os.path.join(role_path, "README.md")
- f = open(readme_path, "wb")
- f.write(default_readme_template)
- f.close
-
- for dir in ROLE_DIRS:
- dir_path = os.path.join(init_path, role_name, dir)
- main_yml_path = os.path.join(dir_path, 'main.yml')
- # create the directory if it doesn't exist already
- if not os.path.exists(dir_path):
- os.makedirs(dir_path)
-
- # now create the main.yml file for that directory
- if dir == "meta":
- # create a skeleton meta/main.yml with a valid galaxy_info
- # datastructure in place, plus with all of the available
- # tags/platforms included (but commented out) and the
- # dependencies section
- platforms = []
- if not offline:
- platforms = api_get_list(api_server, "platforms") or []
- categories = []
- if not offline:
- categories = api_get_list(api_server, "categories") or []
-
- # group the list of platforms from the api based
- # on their names, with the release field being
- # appended to a list of versions
- platform_groups = defaultdict(list)
- for platform in platforms:
- platform_groups[platform['name']].append(platform['release'])
- platform_groups[platform['name']].sort()
-
- inject = dict(
- author = 'your name',
- company = 'your company (optional)',
- license = 'license (GPLv2, CC-BY, etc)',
- issue_tracker_url = 'http://example.com/issue/tracker',
- min_ansible_version = '1.2',
- platforms = platform_groups,
- categories = categories,
- )
- rendered_meta = Environment().from_string(default_meta_template).render(inject)
- f = open(main_yml_path, 'w')
- f.write(rendered_meta)
- f.close()
- pass
- elif dir not in ('files','templates'):
- # just write a (mostly) empty YAML file for main.yml
- f = open(main_yml_path, 'w')
- f.write('---\n# %s file for %s\n' % (dir,role_name))
- f.close()
- print "- %s was created successfully" % role_name
-
-def execute_info(args, options, parser):
- """
- Executes the info action. This action prints out detailed
- information about an installed role as well as info available
- from the galaxy API.
- """
-
- if len(args) == 0:
- # the user needs to specify a role
- parser.print_help()
- print "- you must specify a user/role name"
- sys.exit(1)
-
- api_server = get_opt(options, "api_server", "galaxy.ansible.com")
- api_config = api_get_config(api_server)
- roles_path = get_opt(options, "roles_path")
-
- for role in args:
-
- role_info = {}
-
- install_info = get_galaxy_install_info(role, options)
- if install_info:
- if 'version' in install_info:
- install_info['intalled_version'] = install_info['version']
- del install_info['version']
- role_info.update(install_info)
-
- remote_data = api_lookup_role_by_name(api_server, role, False)
- if remote_data:
- role_info.update(remote_data)
-
- metadata = get_role_metadata(role, options)
- if metadata:
- role_info.update(metadata)
-
- role_spec = ansible.utils.role_spec_parse(role)
- if role_spec:
- role_info.update(role_spec)
-
- if role_info:
- print "- %s:" % (role)
- for k in sorted(role_info.keys()):
-
- if k in SKIP_INFO_KEYS:
- continue
-
- if isinstance(role_info[k], dict):
- print "\t%s: " % (k)
- for key in sorted(role_info[k].keys()):
- if key in SKIP_INFO_KEYS:
- continue
- print "\t\t%s: %s" % (key, role_info[k][key])
- else:
- print "\t%s: %s" % (k, role_info[k])
- else:
- print "- the role %s was not found" % role
-
def execute_install(args, options, parser):
    """
    Executes the installation action. The args list contains the
    roles to be installed, unless -f was specified. The list of roles
    can be a name (which will be downloaded via the galaxy API and github),
    or it can be a local .tar.gz file.
    """

    role_file = get_opt(options, "role_file", None)

    if len(args) == 0 and role_file is None:
        # the user needs to specify one of either --role-file
        # or specify a single user/role name
        parser.print_help()
        print "- you must specify a user/role name or a roles file"
        sys.exit()
    elif len(args) == 1 and not role_file is None:
        # using a role file is mutually exclusive of specifying
        # the role name on the command line
        parser.print_help()
        print "- please specify a user/role name, or a roles file, but not both"
        sys.exit(1)

    api_server = get_opt(options, "api_server", "galaxy.ansible.com")
    no_deps = get_opt(options, "no_deps", False)
    roles_path = get_opt(options, "roles_path")

    # NOTE(review): roles_done is never appended to or read below
    roles_done = []
    if role_file:
        f = open(role_file, 'r')
        if role_file.endswith('.yaml') or role_file.endswith('.yml'):
            roles_left = map(ansible.utils.role_yaml_parse, yaml.safe_load(f))
        else:
            # roles listed in a file, one per line
            roles_left = map(ansible.utils.role_spec_parse, f.readlines())
        f.close()
    else:
        # roles were specified directly, so we'll just go out grab them
        # (and their dependencies, unless the user doesn't want us to).
        roles_left = map(ansible.utils.role_spec_parse, args)

    # work queue: dependencies discovered during installation are
    # appended to roles_left, so this loop drains transitively
    while len(roles_left) > 0:
        # query the galaxy API for the role data
        role_data = None
        role = roles_left.pop(0)
        role_src = role.get("src")
        role_scm = role.get("scm")
        role_path = role.get("path")

        if role_path:
            # a per-role install path overrides the global roles_path
            options.roles_path = role_path
        else:
            options.roles_path = roles_path

        if os.path.isfile(role_src):
            # installing a local tar.gz
            tmp_file = role_src
        else:
            if role_scm:
                # create tar file from scm url
                tmp_file = scm_archive_role(role_scm, role_src, role.get("version"), role.get("name"))
            elif '://' in role_src:
                # just download a URL - version will probably be in the URL
                tmp_file = fetch_role(role_src, None, None, options)
            else:
                # installing from galaxy
                api_config = api_get_config(api_server)
                if not api_config:
                    print "- the API server (%s) is not responding, please try again later." % api_server
                    sys.exit(1)

                role_data = api_lookup_role_by_name(api_server, role_src)
                if not role_data:
                    print "- sorry, %s was not found on %s." % (role_src, api_server)
                    exit_without_ignore(options)
                    continue

                role_versions = api_fetch_role_related(api_server, 'versions', role_data['id'])
                if "version" not in role or role['version'] == '':
                    # convert the version names to LooseVersion objects
                    # and sort them to get the latest version. If there
                    # are no versions in the list, we'll grab the head
                    # of the master branch
                    if len(role_versions) > 0:
                        loose_versions = [LooseVersion(a.get('name',None)) for a in role_versions]
                        loose_versions.sort()
                        role["version"] = str(loose_versions[-1])
                    else:
                        role["version"] = 'master'
                elif role['version'] != 'master':
                    if role_versions and role["version"] not in [a.get('name', None) for a in role_versions]:
                        print 'role is %s' % role
                        print "- the specified version (%s) was not found in the list of available versions (%s)." % (role['version'], role_versions)
                        exit_without_ignore(options)
                        continue

                # download the role. if --no-deps was specified, we stop here,
                # otherwise we recursively grab roles and all of their deps.
                tmp_file = fetch_role(role_src, role["version"], role_data, options)
        installed = False
        if tmp_file:
            installed = install_role(role.get("name"), role.get("version"), tmp_file, options)
            # we're done with the temp file, clean it up
            if tmp_file != role_src:
                os.unlink(tmp_file)
            # install dependencies, if we want them
            if not no_deps and installed:
                if not role_data:
                    # role came from a tarball/URL/scm: read deps from the
                    # freshly installed meta/main.yml instead of the API
                    role_data = get_role_metadata(role.get("name"), options)
                    role_dependencies = role_data['dependencies']
                else:
                    role_dependencies = role_data['summary_fields']['dependencies'] # api_fetch_role_related(api_server, 'dependencies', role_data['id'])
                for dep in role_dependencies:
                    if isinstance(dep, basestring):
                        dep = ansible.utils.role_spec_parse(dep)
                    else:
                        dep = ansible.utils.role_yaml_parse(dep)
                    if not get_role_metadata(dep["name"], options):
                        if dep not in roles_left:
                            print '- adding dependency: %s' % dep["name"]
                            roles_left.append(dep)
                        else:
                            print '- dependency %s already pending installation.' % dep["name"]
                    else:
                        print '- dependency %s is already installed, skipping.' % dep["name"]
        if not tmp_file or not installed:
            print "- %s was NOT installed successfully." % role.get("name")
            exit_without_ignore(options)
    sys.exit(0)
-
-def execute_remove(args, options, parser):
- """
- Executes the remove action. The args list contains the list
- of roles to be removed. This list can contain more than one role.
- """
-
- if len(args) == 0:
- parser.print_help()
- print '- you must specify at least one role to remove.'
- sys.exit()
-
- for role in args:
- if get_role_metadata(role, options):
- if remove_role(role, options):
- print '- successfully removed %s' % role
- else:
- print "- failed to remove role: %s" % role
- else:
- print '- %s is not installed, skipping.' % role
- sys.exit(0)
-
def execute_list(args, options, parser):
    """
    Executes the list action. The args list can contain zero
    or one role. If one is specified, only that role will be
    shown, otherwise all roles in the specified directory will
    be shown.
    """

    if len(args) > 1:
        print "- please specify only one role to list, or specify no roles to see a full list"
        sys.exit(1)

    if len(args) == 1:
        # show only the request role, if it exists
        role_name = args[0]
        metadata = get_role_metadata(role_name, options)
        if metadata:
            install_info = get_galaxy_install_info(role_name, options)
            version = None
            if install_info:
                version = install_info.get("version", None)
            if not version:
                # installed without a recorded version (e.g. local tarball)
                version = "(unknown version)"
            # show some more info about single roles here
            print "- %s, %s" % (role_name, version)
        else:
            print "- the role %s was not found" % role_name
    else:
        # show all valid roles in the roles_path directory
        roles_path = get_opt(options, 'roles_path')
        roles_path = os.path.expanduser(roles_path)
        if not os.path.exists(roles_path):
            parser.print_help()
            print "- the path %s does not exist. Please specify a valid path with --roles-path" % roles_path
            sys.exit(1)
        elif not os.path.isdir(roles_path):
            print "- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % roles_path
            parser.print_help()
            sys.exit(1)
        # every directory containing a meta/main.yml counts as a role
        path_files = os.listdir(roles_path)
        for path_file in path_files:
            if get_role_metadata(path_file, options):
                install_info = get_galaxy_install_info(path_file, options)
                version = None
                if install_info:
                    version = install_info.get("version", None)
                if not version:
                    version = "(unknown version)"
                print "- %s, %s" % (path_file, version)
    sys.exit(0)
-
-#-------------------------------------------------------------------------------------
-# The main entry point
-#-------------------------------------------------------------------------------------
-
-def main():
- # parse the CLI options
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- # execute the desired action
- if 1: #try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- #except KeyError, e:
- # print "- error: %s is not a valid action. Valid actions are: %s" % (action, ", ".join(VALID_ACTIONS))
- # sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/v1/bin/ansible-playbook b/v1/bin/ansible-playbook
deleted file mode 100755
index 3d6e1f9f40..0000000000
--- a/v1/bin/ansible-playbook
+++ /dev/null
@@ -1,330 +0,0 @@
-#!/usr/bin/env python
-# (C) 2012, Michael DeHaan, <michael.dehaan@gmail.com>
-
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-#######################################################
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import sys
-import os
-import stat
-
-# Augment PYTHONPATH to find Python modules relative to this file path
-# This is so that we can find the modules when running from a local checkout
-# installed as editable with `pip install -e ...` or `python setup.py develop`
-local_module_path = os.path.abspath(
- os.path.join(os.path.dirname(__file__), '..', 'lib')
-)
-sys.path.append(local_module_path)
-
-import ansible.playbook
-import ansible.constants as C
-import ansible.utils.template
-from ansible import errors
-from ansible import callbacks
-from ansible import utils
-from ansible.color import ANSIBLE_COLOR, stringc
-from ansible.callbacks import display
-
-def colorize(lead, num, color):
- """ Print 'lead' = 'num' in 'color' """
- if num != 0 and ANSIBLE_COLOR and color is not None:
- return "%s%s%-15s" % (stringc(lead, color), stringc("=", color), stringc(str(num), color))
- else:
- return "%s=%-4s" % (lead, str(num))
-
-def hostcolor(host, stats, color=True):
- if ANSIBLE_COLOR and color:
- if stats['failures'] != 0 or stats['unreachable'] != 0:
- return "%-37s" % stringc(host, 'red')
- elif stats['changed'] != 0:
- return "%-37s" % stringc(host, 'yellow')
- else:
- return "%-37s" % stringc(host, 'green')
- return "%-26s" % host
-
-
-def main(args):
- ''' run ansible-playbook operations '''
-
- # create parser for CLI options
- parser = utils.base_parser(
- constants=C,
- usage = "%prog playbook.yml",
- connect_opts=True,
- runas_opts=True,
- subset_opts=True,
- check_opts=True,
- diff_opts=True
- )
- #parser.add_option('--vault-password', dest="vault_password",
- # help="password for vault encrypted files")
- parser.add_option('-t', '--tags', dest='tags', default='all',
- help="only run plays and tasks tagged with these values")
- parser.add_option('--skip-tags', dest='skip_tags',
- help="only run plays and tasks whose tags do not match these values")
- parser.add_option('--syntax-check', dest='syntax', action='store_true',
- help="perform a syntax check on the playbook, but do not execute it")
- parser.add_option('--list-tasks', dest='listtasks', action='store_true',
- help="list all tasks that would be executed")
- parser.add_option('--list-tags', dest='listtags', action='store_true',
- help="list all available tags")
- parser.add_option('--step', dest='step', action='store_true',
- help="one-step-at-a-time: confirm each task before running")
- parser.add_option('--start-at-task', dest='start_at',
- help="start the playbook at the task matching this name")
- parser.add_option('--force-handlers', dest='force_handlers',
- default=C.DEFAULT_FORCE_HANDLERS, action='store_true',
- help="run handlers even if a task fails")
- parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
- help="clear the fact cache")
-
- options, args = parser.parse_args(args)
-
- if len(args) == 0:
- parser.print_help(file=sys.stderr)
- return 1
-
- # privlege escalation command line arguments need to be mutually exclusive
- utils.check_mutually_exclusive_privilege(options, parser)
-
- if (options.ask_vault_pass and options.vault_password_file):
- parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
-
- sshpass = None
- becomepass = None
- vault_pass = None
-
- options.ask_vault_pass = options.ask_vault_pass or C.DEFAULT_ASK_VAULT_PASS
-
- if options.listhosts or options.syntax or options.listtasks or options.listtags:
- (_, _, vault_pass) = utils.ask_passwords(ask_vault_pass=options.ask_vault_pass)
- else:
- options.ask_pass = options.ask_pass or C.DEFAULT_ASK_PASS
- # Never ask for an SSH password when we run with local connection
- if options.connection == "local":
- options.ask_pass = False
-
- # set pe options
- utils.normalize_become_options(options)
- prompt_method = utils.choose_pass_prompt(options)
- (sshpass, becomepass, vault_pass) = utils.ask_passwords(ask_pass=options.ask_pass,
- become_ask_pass=options.become_ask_pass,
- ask_vault_pass=options.ask_vault_pass,
- become_method=prompt_method)
-
- # read vault_pass from a file
- if not options.ask_vault_pass and options.vault_password_file:
- vault_pass = utils.read_vault_file(options.vault_password_file)
-
- extra_vars = utils.parse_extra_vars(options.extra_vars, vault_pass)
-
- only_tags = options.tags.split(",")
- skip_tags = options.skip_tags
- if options.skip_tags is not None:
- skip_tags = options.skip_tags.split(",")
-
- for playbook in args:
- if not os.path.exists(playbook):
- raise errors.AnsibleError("the playbook: %s could not be found" % playbook)
- if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
- raise errors.AnsibleError("the playbook: %s does not appear to be a file" % playbook)
-
- inventory = ansible.inventory.Inventory(options.inventory, vault_password=vault_pass)
-
- # Note: slightly wrong, this is written so that implicit localhost
- # (which is not returned in list_hosts()) is taken into account for
- # warning if inventory is empty. But it can't be taken into account for
- # checking if limit doesn't match any hosts. Instead we don't worry about
- # limit if only implicit localhost was in inventory to start with.
- #
- # Fix this in v2
- no_hosts = False
- if len(inventory.list_hosts()) == 0:
- # Empty inventory
- utils.warning("provided hosts list is empty, only localhost is available")
- no_hosts = True
- inventory.subset(options.subset)
- if len(inventory.list_hosts()) == 0 and no_hosts is False:
- # Invalid limit
- raise errors.AnsibleError("Specified --limit does not match any hosts")
-
- # run all playbooks specified on the command line
- for playbook in args:
-
- stats = callbacks.AggregateStats()
- playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
- if options.step:
- playbook_cb.step = options.step
- if options.start_at:
- playbook_cb.start_at = options.start_at
- runner_cb = callbacks.PlaybookRunnerCallbacks(stats, verbose=utils.VERBOSITY)
-
- pb = ansible.playbook.PlayBook(
- playbook=playbook,
- module_path=options.module_path,
- inventory=inventory,
- forks=options.forks,
- remote_user=options.remote_user,
- remote_pass=sshpass,
- callbacks=playbook_cb,
- runner_callbacks=runner_cb,
- stats=stats,
- timeout=options.timeout,
- transport=options.connection,
- become=options.become,
- become_method=options.become_method,
- become_user=options.become_user,
- become_pass=becomepass,
- extra_vars=extra_vars,
- private_key_file=options.private_key_file,
- only_tags=only_tags,
- skip_tags=skip_tags,
- check=options.check,
- diff=options.diff,
- vault_password=vault_pass,
- force_handlers=options.force_handlers,
- )
-
- if options.flush_cache:
- display(callbacks.banner("FLUSHING FACT CACHE"))
- pb.SETUP_CACHE.flush()
-
- if options.listhosts or options.listtasks or options.syntax or options.listtags:
- print ''
- print 'playbook: %s' % playbook
- print ''
- playnum = 0
- for (play_ds, play_basedir) in zip(pb.playbook, pb.play_basedirs):
- playnum += 1
- play = ansible.playbook.Play(pb, play_ds, play_basedir,
- vault_password=pb.vault_password)
- label = play.name
- hosts = pb.inventory.list_hosts(play.hosts)
-
- if options.listhosts:
- print ' play #%d (%s): host count=%d' % (playnum, label, len(hosts))
- for host in hosts:
- print ' %s' % host
-
- if options.listtags or options.listtasks:
- print ' play #%d (%s):\tTAGS: [%s]' % (playnum, label,','.join(sorted(set(play.tags))))
-
- if options.listtags:
- tags = []
- for task in pb.tasks_to_run_in_play(play):
- tags.extend(task.tags)
- print ' TASK TAGS: [%s]' % (', '.join(sorted(set(tags).difference(['untagged']))))
-
- if options.listtasks:
-
- for task in pb.tasks_to_run_in_play(play):
- if getattr(task, 'name', None) is not None:
- # meta tasks have no names
- print ' %s\tTAGS: [%s]' % (task.name, ', '.join(sorted(set(task.tags).difference(['untagged']))))
-
- if options.listhosts or options.listtasks or options.listtags:
- print ''
- continue
-
- if options.syntax:
- # if we've not exited by now then we are fine.
- print 'Playbook Syntax is fine'
- return 0
-
- failed_hosts = []
- unreachable_hosts = []
-
- try:
-
- pb.run()
-
- hosts = sorted(pb.stats.processed.keys())
- display(callbacks.banner("PLAY RECAP"))
- playbook_cb.on_stats(pb.stats)
-
- for h in hosts:
- t = pb.stats.summarize(h)
- if t['failures'] > 0:
- failed_hosts.append(h)
- if t['unreachable'] > 0:
- unreachable_hosts.append(h)
-
- retries = failed_hosts + unreachable_hosts
-
- if C.RETRY_FILES_ENABLED and len(retries) > 0:
- filename = pb.generate_retry_inventory(retries)
- if filename:
- display(" to retry, use: --limit @%s\n" % filename)
-
- for h in hosts:
- t = pb.stats.summarize(h)
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t),
- colorize('ok', t['ok'], 'green'),
- colorize('changed', t['changed'], 'yellow'),
- colorize('unreachable', t['unreachable'], 'red'),
- colorize('failed', t['failures'], 'red')),
- screen_only=True
- )
-
- display("%s : %s %s %s %s" % (
- hostcolor(h, t, False),
- colorize('ok', t['ok'], None),
- colorize('changed', t['changed'], None),
- colorize('unreachable', t['unreachable'], None),
- colorize('failed', t['failures'], None)),
- log_only=True
- )
-
-
- print ""
- if len(failed_hosts) > 0:
- return 2
- if len(unreachable_hosts) > 0:
- return 3
-
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red')
- return 1
-
- return 0
-
-
-if __name__ == "__main__":
- display(" ", log_only=True)
- display(" ".join(sys.argv), log_only=True)
- display(" ", log_only=True)
- try:
- sys.exit(main(sys.argv[1:]))
- except errors.AnsibleError, e:
- display("ERROR: %s" % e, color='red', stderr=True)
- sys.exit(1)
- except KeyboardInterrupt, ke:
- display("ERROR: interrupted", color='red', stderr=True)
- sys.exit(1)
diff --git a/v1/bin/ansible-pull b/v1/bin/ansible-pull
deleted file mode 100755
index d4887631e0..0000000000
--- a/v1/bin/ansible-pull
+++ /dev/null
@@ -1,257 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Stephen Fromm <sfromm@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-pull is a script that runs ansible in local mode
-# after checking out a playbooks directory from source repo. There is an
-# example playbook to bootstrap this script in the examples/ dir which
-# installs ansible and sets it up to run on cron.
-
-# usage:
-# ansible-pull -d /var/lib/ansible \
-# -U http://example.net/content.git [-C production] \
-# [path/playbook.yml]
-#
-# the -d and -U arguments are required; the -C argument is optional.
-#
-# ansible-pull accepts an optional argument to specify a playbook
-# location underneath the workdir and then searches the source repo
-# for playbooks in the following order, stopping at the first match:
-#
-# 1. $workdir/path/playbook.yml, if specified
-# 2. $workdir/$fqdn.yml
-# 3. $workdir/$hostname.yml
-# 4. $workdir/local.yml
-#
-# the source repo must contain at least one of these playbooks.
-
-import os
-import shutil
-import sys
-import datetime
-import socket
-import random
-import time
-from ansible import utils
-from ansible.utils import cmd_functions
-from ansible import errors
-from ansible import inventory
-
-DEFAULT_REPO_TYPE = 'git'
-DEFAULT_PLAYBOOK = 'local.yml'
-PLAYBOOK_ERRORS = {1: 'File does not exist',
- 2: 'File is not readable'}
-
-VERBOSITY=0
-
-def increment_debug(option, opt, value, parser):
- global VERBOSITY
- VERBOSITY += 1
-
-def try_playbook(path):
- if not os.path.exists(path):
- return 1
- if not os.access(path, os.R_OK):
- return 2
- return 0
-
-
-def select_playbook(path, args):
- playbook = None
- if len(args) > 0 and args[0] is not None:
- playbook = "%s/%s" % (path, args[0])
- rc = try_playbook(playbook)
- if rc != 0:
- print >>sys.stderr, "%s: %s" % (playbook, PLAYBOOK_ERRORS[rc])
- return None
- return playbook
- else:
- fqdn = socket.getfqdn()
- hostpb = "%s/%s.yml" % (path, fqdn)
- shorthostpb = "%s/%s.yml" % (path, fqdn.split('.')[0])
- localpb = "%s/%s" % (path, DEFAULT_PLAYBOOK)
- errors = []
- for pb in [hostpb, shorthostpb, localpb]:
- rc = try_playbook(pb)
- if rc == 0:
- playbook = pb
- break
- else:
- errors.append("%s: %s" % (pb, PLAYBOOK_ERRORS[rc]))
- if playbook is None:
- print >>sys.stderr, "\n".join(errors)
- return playbook
-
-
-def main(args):
- """ Set up and run a local playbook """
- usage = "%prog [options] [playbook.yml]"
- parser = utils.SortedOptParser(usage=usage)
- parser.add_option('--purge', default=False, action='store_true',
- help='purge checkout after playbook run')
- parser.add_option('-o', '--only-if-changed', dest='ifchanged', default=False, action='store_true',
- help='only run the playbook if the repository has been updated')
- parser.add_option('-s', '--sleep', dest='sleep', default=None,
- help='sleep for random interval (between 0 and n number of seconds) before starting. this is a useful way to disperse git requests')
- parser.add_option('-f', '--force', dest='force', default=False,
- action='store_true',
- help='run the playbook even if the repository could '
- 'not be updated')
- parser.add_option('-d', '--directory', dest='dest', default=None,
- help='directory to checkout repository to')
- #parser.add_option('-l', '--live', default=True, action='store_live',
- # help='Print the ansible-playbook output while running')
- parser.add_option('-U', '--url', dest='url', default=None,
- help='URL of the playbook repository')
- parser.add_option('-C', '--checkout', dest='checkout',
- help='branch/tag/commit to checkout. '
- 'Defaults to behavior of repository module.')
- parser.add_option('-i', '--inventory-file', dest='inventory',
- help="location of the inventory host file")
- parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
- help="set additional variables as key=value or YAML/JSON", default=[])
- parser.add_option('-v', '--verbose', default=False, action="callback",
- callback=increment_debug,
- help='Pass -vvvv to ansible-playbook')
- parser.add_option('-m', '--module-name', dest='module_name',
- default=DEFAULT_REPO_TYPE,
- help='Module name used to check out repository. '
- 'Default is %s.' % DEFAULT_REPO_TYPE)
- parser.add_option('--vault-password-file', dest='vault_password_file',
- help="vault password file")
- parser.add_option('-K', '--ask-sudo-pass', default=False, dest='ask_sudo_pass', action='store_true',
- help='ask for sudo password')
- parser.add_option('-t', '--tags', dest='tags', default=False,
- help='only run plays and tasks tagged with these values')
- parser.add_option('--accept-host-key', default=False, dest='accept_host_key', action='store_true',
- help='adds the hostkey for the repo url if not already added')
- parser.add_option('--key-file', dest='key_file',
- help="Pass '-i <key_file>' to the SSH arguments used by git.")
- options, args = parser.parse_args(args)
-
- hostname = socket.getfqdn()
- if not options.dest:
- # use a hostname dependent directory, in case of $HOME on nfs
- options.dest = utils.prepare_writeable_dir('~/.ansible/pull/%s' % hostname)
-
- options.dest = os.path.abspath(options.dest)
-
- if not options.url:
- parser.error("URL for repository not specified, use -h for help")
- return 1
-
- now = datetime.datetime.now()
- print now.strftime("Starting ansible-pull at %F %T")
-
- # Attempt to use the inventory passed in as an argument
- # It might not yet have been downloaded so use localhost if note
- if not options.inventory or not os.path.exists(options.inventory):
- inv_opts = 'localhost,'
- else:
- inv_opts = options.inventory
- limit_opts = 'localhost:%s:127.0.0.1' % hostname
- repo_opts = "name=%s dest=%s" % (options.url, options.dest)
-
- if VERBOSITY == 0:
- base_opts = '-c local --limit "%s"' % limit_opts
- elif VERBOSITY > 0:
- debug_level = ''.join([ "v" for x in range(0, VERBOSITY) ])
- base_opts = '-%s -c local --limit "%s"' % (debug_level, limit_opts)
-
- if options.checkout:
- repo_opts += ' version=%s' % options.checkout
-
- # Only git module is supported
- if options.module_name == DEFAULT_REPO_TYPE:
- if options.accept_host_key:
- repo_opts += ' accept_hostkey=yes'
-
- if options.key_file:
- repo_opts += ' key_file=%s' % options.key_file
-
- path = utils.plugins.module_finder.find_plugin(options.module_name)
- if path is None:
- sys.stderr.write("module '%s' not found.\n" % options.module_name)
- return 1
-
- bin_path = os.path.dirname(os.path.abspath(__file__))
- cmd = '%s/ansible localhost -i "%s" %s -m %s -a "%s"' % (
- bin_path, inv_opts, base_opts, options.module_name, repo_opts
- )
-
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
-
- if options.sleep:
- try:
- secs = random.randint(0,int(options.sleep));
- except ValueError:
- parser.error("%s is not a number." % options.sleep)
- return 1
-
- print >>sys.stderr, "Sleeping for %d seconds..." % secs
- time.sleep(secs);
-
-
- # RUN THe CHECKOUT COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if rc != 0:
- if options.force:
- print >>sys.stderr, "Unable to update repository. Continuing with (forced) run of playbook."
- else:
- return rc
- elif options.ifchanged and '"changed": true' not in out:
- print "Repository has not changed, quitting."
- return 0
-
- playbook = select_playbook(options.dest, args)
-
- if playbook is None:
- print >>sys.stderr, "Could not find a playbook to run."
- return 1
-
- cmd = '%s/ansible-playbook %s %s' % (bin_path, base_opts, playbook)
- if options.vault_password_file:
- cmd += " --vault-password-file=%s" % options.vault_password_file
- if options.inventory:
- cmd += ' -i "%s"' % options.inventory
- for ev in options.extra_vars:
- cmd += ' -e "%s"' % ev
- if options.ask_sudo_pass:
- cmd += ' -K'
- if options.tags:
- cmd += ' -t "%s"' % options.tags
- os.chdir(options.dest)
-
- # RUN THE PLAYBOOK COMMAND
- rc, out, err = cmd_functions.run_cmd(cmd, live=True)
-
- if options.purge:
- os.chdir('/')
- try:
- shutil.rmtree(options.dest)
- except Exception, e:
- print >>sys.stderr, "Failed to remove %s: %s" % (options.dest, str(e))
-
- return rc
-
-if __name__ == '__main__':
- try:
- sys.exit(main(sys.argv[1:]))
- except KeyboardInterrupt, e:
- print >>sys.stderr, "Exit on user request.\n"
- sys.exit(1)
diff --git a/v1/bin/ansible-vault b/v1/bin/ansible-vault
deleted file mode 100755
index 22cfc0e148..0000000000
--- a/v1/bin/ansible-vault
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, James Tanner <tanner.jc@gmail.com>
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-# ansible-vault is a script that encrypts/decrypts YAML files. See
-# http://docs.ansible.com/playbooks_vault.html for more details.
-
-__requires__ = ['ansible']
-try:
- import pkg_resources
-except Exception:
- # Use pkg_resources to find the correct versions of libraries and set
- # sys.path appropriately when there are multiversion installs. But we
- # have code that better expresses the errors in the places where the code
- # is actually used (the deps are optional for many code paths) so we don't
- # want to fail here.
- pass
-
-import os
-import sys
-import traceback
-
-import ansible.constants as C
-
-from ansible import utils
-from ansible import errors
-from ansible.utils.vault import VaultEditor
-
-from optparse import OptionParser
-
-#-------------------------------------------------------------------------------------
-# Utility functions for parsing actions/options
-#-------------------------------------------------------------------------------------
-
-VALID_ACTIONS = ("create", "decrypt", "edit", "encrypt", "rekey", "view")
-
-def build_option_parser(action):
- """
- Builds an option parser object based on the action
- the user wants to execute.
- """
-
- usage = "usage: %%prog [%s] [--help] [options] file_name" % "|".join(VALID_ACTIONS)
- epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
- OptionParser.format_epilog = lambda self, formatter: self.epilog
- parser = OptionParser(usage=usage, epilog=epilog)
-
- if not action:
- parser.print_help()
- sys.exit()
-
- # options for all actions
- #parser.add_option('-c', '--cipher', dest='cipher', default="AES256", help="cipher to use")
- parser.add_option('--debug', dest='debug', action="store_true", help="debug")
- parser.add_option('--vault-password-file', dest='password_file',
- help="vault password file", default=C.DEFAULT_VAULT_PASSWORD_FILE)
-
- # options specific to actions
- if action == "create":
- parser.set_usage("usage: %prog create [options] file_name")
- elif action == "decrypt":
- parser.set_usage("usage: %prog decrypt [options] file_name")
- elif action == "edit":
- parser.set_usage("usage: %prog edit [options] file_name")
- elif action == "view":
- parser.set_usage("usage: %prog view [options] file_name")
- elif action == "encrypt":
- parser.set_usage("usage: %prog encrypt [options] file_name")
- elif action == "rekey":
- parser.set_usage("usage: %prog rekey [options] file_name")
-
- # done, return the parser
- return parser
-
-def get_action(args):
- """
- Get the action the user wants to execute from the
- sys argv list.
- """
- for i in range(0,len(args)):
- arg = args[i]
- if arg in VALID_ACTIONS:
- del args[i]
- return arg
- return None
-
-def get_opt(options, k, defval=""):
- """
- Returns an option from an Optparse values instance.
- """
- try:
- data = getattr(options, k)
- except:
- return defval
- if k == "roles_path":
- if os.pathsep in data:
- data = data.split(os.pathsep)[0]
- return data
-
-#-------------------------------------------------------------------------------------
-# Command functions
-#-------------------------------------------------------------------------------------
-
-def execute_create(args, options, parser):
- if len(args) > 1:
- raise errors.AnsibleError("'create' does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- this_editor = VaultEditor(cipher, password, args[0])
- this_editor.create_file()
-
-def execute_decrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.decrypt_file()
-
- print "Decryption successful"
-
-def execute_edit(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("edit does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.edit_file()
-
-def execute_view(args, options, parser):
-
- if len(args) > 1:
- raise errors.AnsibleError("view does not accept more than one filename")
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = None
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.view_file()
-
-def execute_encrypt(args, options, parser):
-
- if not options.password_file:
- password, new_password = utils.ask_vault_passwords(ask_vault_pass=True, confirm_vault=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- cipher = 'AES256'
- if hasattr(options, 'cipher'):
- cipher = options.cipher
-
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.encrypt_file()
-
- print "Encryption successful"
-
-def execute_rekey(args, options, parser):
-
- if not options.password_file:
- password, __ = utils.ask_vault_passwords(ask_vault_pass=True)
- else:
- password = utils.read_vault_file(options.password_file)
-
- __, new_password = utils.ask_vault_passwords(ask_vault_pass=False, ask_new_vault_pass=True, confirm_new=True)
-
- cipher = None
- for f in args:
- this_editor = VaultEditor(cipher, password, f)
- this_editor.rekey_file(new_password)
-
- print "Rekey successful"
-
-#-------------------------------------------------------------------------------------
-# MAIN
-#-------------------------------------------------------------------------------------
-
-def main():
-
- action = get_action(sys.argv)
- parser = build_option_parser(action)
- (options, args) = parser.parse_args()
-
- if not len(args):
- raise errors.AnsibleError(
- "The '%s' command requires a filename as the first argument" % action
- )
-
- # execute the desired action
- try:
- fn = globals()["execute_%s" % action]
- fn(args, options, parser)
- except Exception, err:
- if options.debug:
- print traceback.format_exc()
- print "ERROR:",err
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/v1/hacking/README.md b/v1/hacking/README.md
deleted file mode 100644
index ae8db7e3a9..0000000000
--- a/v1/hacking/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-'Hacking' directory tools
-=========================
-
-Env-setup
----------
-
-The 'env-setup' script modifies your environment to allow you to run
-ansible from a git checkout using python 2.6+. (You may not use
-python 3 at this time).
-
-First, set up your environment to run from the checkout:
-
- $ source ./hacking/env-setup
-
-You will need some basic prerequisites installed. If you do not already have them
-and do not wish to install them from your operating system package manager, you
-can install them from pip
-
- $ easy_install pip # if pip is not already available
- $ pip install pyyaml jinja2 nose passlib pycrypto
-
-From there, follow ansible instructions on docs.ansible.com as normal.
-
-Test-module
------------
-
-'test-module' is a simple program that allows module developers (or testers) to run
-a module outside of the ansible program, locally, on the current machine.
-
-Example:
-
- $ ./hacking/test-module -m lib/ansible/modules/core/commands/shell -a "echo hi"
-
-This is a good way to insert a breakpoint into a module, for instance.
-
-Module-formatter
-----------------
-
-The module formatter is a script used to generate manpages and online
-module documentation. This is used by the system makefiles and rarely
-needs to be run directly.
-
-Authors
--------
-'authors' is a simple script that generates a list of everyone who has
-contributed code to the ansible repository.
-
-
diff --git a/v1/hacking/authors.sh b/v1/hacking/authors.sh
deleted file mode 100755
index 7c97840b2f..0000000000
--- a/v1/hacking/authors.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-# script from http://stackoverflow.com/questions/12133583
-set -e
-
-# Get a list of authors ordered by number of commits
-# and remove the commit count column
-AUTHORS=$(git --no-pager shortlog -nse | cut -f 2- | sort -f)
-if [ -z "$AUTHORS" ] ; then
- echo "Authors list was empty"
- exit 1
-fi
-
-# Display the authors list and write it to the file
-echo "$AUTHORS" | tee "$(git rev-parse --show-toplevel)/AUTHORS.TXT"
diff --git a/v1/hacking/env-setup b/v1/hacking/env-setup
deleted file mode 100644
index 29f4828410..0000000000
--- a/v1/hacking/env-setup
+++ /dev/null
@@ -1,78 +0,0 @@
-# usage: source hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-
-# Default values for shell variables we use
-PYTHONPATH=${PYTHONPATH-""}
-PATH=${PATH-""}
-MANPATH=${MANPATH-""}
-verbosity=${1-info} # Defaults to `info' if unspecified
-
-if [ "$verbosity" = -q ]; then
- verbosity=silent
-fi
-
-# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE
-if [ -n "$BASH_SOURCE" ] ; then
- HACKING_DIR=$(dirname "$BASH_SOURCE")
-elif [ $(basename -- "$0") = "env-setup" ]; then
- HACKING_DIR=$(dirname "$0")
-# Works with ksh93 but not pdksh
-elif [ -n "$KSH_VERSION" ] && echo $KSH_VERSION | grep -qv '^@(#)PD KSH'; then
- HACKING_DIR=$(dirname "${.sh.file}")
-else
- HACKING_DIR="$PWD/hacking"
-fi
-# The below is an alternative to readlink -fn which doesn't exist on OS X
-# Source: http://stackoverflow.com/a/1678636
-FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-ANSIBLE_HOME=$(dirname "$FULL_PATH")
-
-PREFIX_PYTHONPATH="$ANSIBLE_HOME"
-PREFIX_PATH="$ANSIBLE_HOME/bin"
-PREFIX_MANPATH="$ANSIBLE_HOME/docs/man"
-
-expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH"
-expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH"
-expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH"
-
-#
-# Generate egg_info so that pkg_resources works
-#
-
-# Do the work in a function so we don't repeat ourselves later
-gen_egg_info()
-{
- if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
- rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
- fi
- python setup.py egg_info
-}
-
-if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
- current_dir="$PWD"
-else
- current_dir="$ANSIBLE_HOME"
-fi
-cd "$ANSIBLE_HOME"
-if [ "$verbosity" = silent ] ; then
- gen_egg_info > /dev/null 2>&1
-else
- gen_egg_info
-fi
-cd "$current_dir"
-
-if [ "$verbosity" != silent ] ; then
- cat <<- EOF
-
- Setting up Ansible to run out of checkout...
-
- PATH=$PATH
- PYTHONPATH=$PYTHONPATH
- MANPATH=$MANPATH
-
- Remember, you may wish to specify your host file with -i
-
- Done!
-
- EOF
-fi
diff --git a/v1/hacking/env-setup.fish b/v1/hacking/env-setup.fish
deleted file mode 100644
index 9deffb4e3d..0000000000
--- a/v1/hacking/env-setup.fish
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env fish
-# usage: . ./hacking/env-setup [-q]
-# modifies environment for running Ansible from checkout
-set HACKING_DIR (dirname (status -f))
-set FULL_PATH (python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
-set ANSIBLE_HOME (dirname $FULL_PATH)
-set PREFIX_PYTHONPATH $ANSIBLE_HOME/
-set PREFIX_PATH $ANSIBLE_HOME/bin
-set PREFIX_MANPATH $ANSIBLE_HOME/docs/man
-
-# Set PYTHONPATH
-if not set -q PYTHONPATH
- set -gx PYTHONPATH $PREFIX_PYTHONPATH
-else
- switch PYTHONPATH
- case "$PREFIX_PYTHONPATH*"
- case "*"
- echo "Appending PYTHONPATH"
- set -gx PYTHONPATH "$PREFIX_PYTHONPATH:$PYTHONPATH"
- end
-end
-
-# Set PATH
-if not contains $PREFIX_PATH $PATH
- set -gx PATH $PREFIX_PATH $PATH
-end
-
-# Set MANPATH
-if not contains $PREFIX_MANPATH $MANPATH
- if not set -q MANPATH
- set -gx MANPATH $PREFIX_MANPATH
- else
- set -gx MANPATH $PREFIX_MANPATH $MANPATH
- end
-end
-
-set -gx ANSIBLE_LIBRARY $ANSIBLE_HOME/library
-
-# Generate egg_info so that pkg_resources works
-pushd $ANSIBLE_HOME
-python setup.py egg_info
-if test -e $PREFIX_PYTHONPATH/ansible*.egg-info
- rm -r $PREFIX_PYTHONPATH/ansible*.egg-info
-end
-mv ansible*egg-info $PREFIX_PYTHONPATH
-popd
-
-
-if set -q argv
- switch $argv
- case '-q' '--quiet'
- case '*'
- echo ""
- echo "Setting up Ansible to run out of checkout..."
- echo ""
- echo "PATH=$PATH"
- echo "PYTHONPATH=$PYTHONPATH"
- echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
- echo "MANPATH=$MANPATH"
- echo ""
-
- echo "Remember, you may wish to specify your host file with -i"
- echo ""
- echo "Done!"
- echo ""
- end
-end
diff --git a/v1/hacking/get_library.py b/v1/hacking/get_library.py
deleted file mode 100755
index 571183b688..0000000000
--- a/v1/hacking/get_library.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2014, Will Thames <will@thames.id.au>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import ansible.constants as C
-import sys
-
-def main():
- print C.DEFAULT_MODULE_PATH
- return 0
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/v1/hacking/module_formatter.py b/v1/hacking/module_formatter.py
deleted file mode 100755
index acddd70093..0000000000
--- a/v1/hacking/module_formatter.py
+++ /dev/null
@@ -1,447 +0,0 @@
-#!/usr/bin/env python
-# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
-# (c) 2012-2014, Michael DeHaan <michael@ansible.com> and others
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import os
-import glob
-import sys
-import yaml
-import codecs
-import json
-import ast
-import re
-import optparse
-import time
-import datetime
-import subprocess
-import cgi
-from jinja2 import Environment, FileSystemLoader
-
-from ansible.utils import module_docs
-from ansible.utils.vars import merge_hash
-
-#####################################################################################
-# constants and paths
-
-# if a module is added in a version of Ansible older than this, don't print the version added information
-# in the module documentation because everyone is assumed to be running something newer than this already.
-TO_OLD_TO_BE_NOTABLE = 1.0
-
-# Get parent directory of the directory this script lives in
-MODULEDIR=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
-))
-
-# The name of the DOCUMENTATION template
-EXAMPLE_YAML=os.path.abspath(os.path.join(
- os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
-))
-
-_ITALIC = re.compile(r"I\(([^)]+)\)")
-_BOLD = re.compile(r"B\(([^)]+)\)")
-_MODULE = re.compile(r"M\(([^)]+)\)")
-_URL = re.compile(r"U\(([^)]+)\)")
-_CONST = re.compile(r"C\(([^)]+)\)")
-
-DEPRECATED = " (D)"
-NOTCORE = " (E)"
-#####################################################################################
-
-def rst_ify(text):
- ''' convert symbols like I(this is in italics) to valid restructured text '''
-
- t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
- t = _BOLD.sub(r'**' + r"\1" + r"**", t)
- t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
- t = _URL.sub(r"\1", t)
- t = _CONST.sub(r'``' + r"\1" + r"``", t)
-
- return t
-
-#####################################################################################
-
-def html_ify(text):
- ''' convert symbols like I(this is in italics) to valid HTML '''
-
- t = cgi.escape(text)
- t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
- t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
- t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
- t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
- t = _CONST.sub("<code>" + r"\1" + "</code>", t)
-
- return t
-
-
-#####################################################################################
-
-def rst_fmt(text, fmt):
- ''' helper for Jinja2 to do format strings '''
-
- return fmt % (text)
-
-#####################################################################################
-
-def rst_xline(width, char="="):
- ''' return a restructured text line of a given length '''
-
- return char * width
-
-#####################################################################################
-
-def write_data(text, options, outputname, module):
- ''' dumps module output to a file or the screen, as requested '''
-
- if options.output_dir is not None:
- fname = os.path.join(options.output_dir, outputname % module)
- fname = fname.replace(".py","")
- f = open(fname, 'w')
- f.write(text.encode('utf-8'))
- f.close()
- else:
- print text
-
-#####################################################################################
-
-
-def list_modules(module_dir, depth=0):
- ''' returns a hash of categories, each category being a hash of module names to file paths '''
-
- categories = dict(all=dict(),_aliases=dict())
- if depth <= 3: # limit # of subdirs
-
- files = glob.glob("%s/*" % module_dir)
- for d in files:
-
- category = os.path.splitext(os.path.basename(d))[0]
- if os.path.isdir(d):
-
- res = list_modules(d, depth + 1)
- for key in res.keys():
- if key in categories:
- categories[key] = merge_hash(categories[key], res[key])
- res.pop(key, None)
-
- if depth < 2:
- categories.update(res)
- else:
- category = module_dir.split("/")[-1]
- if not category in categories:
- categories[category] = res
- else:
- categories[category].update(res)
- else:
- module = category
- category = os.path.basename(module_dir)
- if not d.endswith(".py") or d.endswith('__init__.py'):
- # windows powershell modules have documentation stubs in python docstring
- # format (they are not executed) so skip the ps1 format files
- continue
- elif module.startswith("_") and os.path.islink(d):
- source = os.path.splitext(os.path.basename(os.path.realpath(d)))[0]
- module = module.replace("_","",1)
- if not d in categories['_aliases']:
- categories['_aliases'][source] = [module]
- else:
- categories['_aliases'][source].update(module)
- continue
-
- if not category in categories:
- categories[category] = {}
- categories[category][module] = d
- categories['all'][module] = d
-
- return categories
-
-#####################################################################################
-
-def generate_parser():
- ''' generate an optparse parser '''
-
- p = optparse.OptionParser(
- version='%prog 1.0',
- usage='usage: %prog [options] arg1 arg2',
- description='Generate module documentation from metadata',
- )
-
- p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
- p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
- p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
- p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
- p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
- p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
- p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
- p.add_option('-V', action='version', help='Show version number and exit')
- return p
-
-#####################################################################################
-
-def jinja2_environment(template_dir, typ):
-
- env = Environment(loader=FileSystemLoader(template_dir),
- variable_start_string="@{",
- variable_end_string="}@",
- trim_blocks=True,
- )
- env.globals['xline'] = rst_xline
-
- if typ == 'rst':
- env.filters['convert_symbols_to_format'] = rst_ify
- env.filters['html_ify'] = html_ify
- env.filters['fmt'] = rst_fmt
- env.filters['xline'] = rst_xline
- template = env.get_template('rst.j2')
- outputname = "%s_module.rst"
- else:
- raise Exception("unknown module format type: %s" % typ)
-
- return env, template, outputname
-
-#####################################################################################
-
-def process_module(module, options, env, template, outputname, module_map, aliases):
-
- fname = module_map[module]
- if isinstance(fname, dict):
- return "SKIPPED"
-
- basename = os.path.basename(fname)
- deprecated = False
-
- # ignore files with extensions
- if not basename.endswith(".py"):
- return
- elif module.startswith("_"):
- if os.path.islink(fname):
- return # ignore, its an alias
- deprecated = True
- module = module.replace("_","",1)
-
- print "rendering: %s" % module
-
- # use ansible core library to parse out doc metadata YAML and plaintext examples
- doc, examples, returndocs = module_docs.get_docstring(fname, verbose=options.verbose)
-
- # crash if module is missing documentation and not explicitly hidden from docs index
- if doc is None:
- if module in module_docs.BLACKLIST_MODULES:
- return "SKIPPED"
- else:
- sys.stderr.write("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if deprecated and 'deprecated' not in doc:
- sys.stderr.write("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
- sys.exit(1)
-
- if "/core/" in fname:
- doc['core'] = True
- else:
- doc['core'] = False
-
- if module in aliases:
- doc['aliases'] = aliases[module]
-
- all_keys = []
-
- if not 'version_added' in doc:
- sys.stderr.write("*** ERROR: missing version_added in: %s ***\n" % module)
- sys.exit(1)
-
- added = 0
- if doc['version_added'] == 'historical':
- del doc['version_added']
- else:
- added = doc['version_added']
-
- # don't show version added information if it's too old to be called out
- if added:
- added_tokens = str(added).split(".")
- added = added_tokens[0] + "." + added_tokens[1]
- added_float = float(added)
- if added and added_float < TO_OLD_TO_BE_NOTABLE:
- del doc['version_added']
-
- if 'options' in doc:
- for (k,v) in doc['options'].iteritems():
- all_keys.append(k)
-
- all_keys = sorted(all_keys)
-
- doc['option_keys'] = all_keys
- doc['filename'] = fname
- doc['docuri'] = doc['module'].replace('_', '-')
- doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
- doc['ansible_version'] = options.ansible_version
- doc['plainexamples'] = examples #plain text
- if returndocs:
- doc['returndocs'] = yaml.safe_load(returndocs)
- else:
- doc['returndocs'] = None
-
- # here is where we build the table of contents...
-
- text = template.render(doc)
- write_data(text, options, outputname, module)
- return doc['short_description']
-
-#####################################################################################
-
-def print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases):
- modstring = module
- modname = module
- if module in deprecated:
- modstring = modstring + DEPRECATED
- modname = "_" + module
- elif module not in core:
- modstring = modstring + NOTCORE
-
- result = process_module(modname, options, env, template, outputname, module_map, aliases)
-
- if result != "SKIPPED":
- category_file.write(" %s - %s <%s_module>\n" % (modstring, result, module))
-
-def process_category(category, categories, options, env, template, outputname):
-
- module_map = categories[category]
-
- aliases = {}
- if '_aliases' in categories:
- aliases = categories['_aliases']
-
- category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
- category_file = open(category_file_path, "w")
- print "*** recording category %s in %s ***" % (category, category_file_path)
-
- # TODO: start a new category file
-
- category = category.replace("_"," ")
- category = category.title()
-
- modules = []
- deprecated = []
- core = []
- for module in module_map.keys():
-
- if isinstance(module_map[module], dict):
- for mod in module_map[module].keys():
- if mod.startswith("_"):
- mod = mod.replace("_","",1)
- deprecated.append(mod)
- elif '/core/' in module_map[module][mod]:
- core.append(mod)
- else:
- if module.startswith("_"):
- module = module.replace("_","",1)
- deprecated.append(module)
- elif '/core/' in module_map[module]:
- core.append(module)
-
- modules.append(module)
-
- modules.sort()
-
- category_header = "%s Modules" % (category.title())
- underscores = "`" * len(category_header)
-
- category_file.write("""\
-%s
-%s
-
-.. toctree:: :maxdepth: 1
-
-""" % (category_header, underscores))
- sections = []
- for module in modules:
- if module in module_map and isinstance(module_map[module], dict):
- sections.append(module)
- continue
- else:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map, aliases)
-
- sections.sort()
- for section in sections:
- category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
- category_file.write(".. toctree:: :maxdepth: 1\n\n")
-
- section_modules = module_map[section].keys()
- section_modules.sort()
- #for module in module_map[section]:
- for module in section_modules:
- print_modules(module, category_file, deprecated, core, options, env, template, outputname, module_map[section], aliases)
-
- category_file.write("""\n\n
-.. note::
- - %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged. The module documentation details page may explain more about this rationale.
- - %s: This marks a module as 'extras', which means it ships with ansible but may be a newer module and possibly (but not necessarily) less actively maintained than 'core' modules.
- - Tickets filed on modules are filed to different repos than those on the main open source project. Core module tickets should be filed at `ansible/ansible-modules-core on GitHub <http://github.com/ansible/ansible-modules-core>`_, extras tickets to `ansible/ansible-modules-extras on GitHub <http://github.com/ansible/ansible-modules-extras>`_
-""" % (DEPRECATED, NOTCORE))
- category_file.close()
-
- # TODO: end a new category file
-
-#####################################################################################
-
-def validate_options(options):
- ''' validate option parser options '''
-
- if not options.module_dir:
- print >>sys.stderr, "--module-dir is required"
- sys.exit(1)
- if not os.path.exists(options.module_dir):
- print >>sys.stderr, "--module-dir does not exist: %s" % options.module_dir
- sys.exit(1)
- if not options.template_dir:
- print "--template-dir must be specified"
- sys.exit(1)
-
-#####################################################################################
-
-def main():
-
- p = generate_parser()
-
- (options, args) = p.parse_args()
- validate_options(options)
-
- env, template, outputname = jinja2_environment(options.template_dir, options.type)
-
- categories = list_modules(options.module_dir)
- last_category = None
- category_names = categories.keys()
- category_names.sort()
-
- category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
- category_list_file = open(category_list_path, "w")
- category_list_file.write("Module Index\n")
- category_list_file.write("============\n")
- category_list_file.write("\n\n")
- category_list_file.write(".. toctree::\n")
- category_list_file.write(" :maxdepth: 1\n\n")
-
- for category in category_names:
- if category.startswith("_"):
- continue
- category_list_file.write(" list_of_%s_modules\n" % category)
- process_category(category, categories, options, env, template, outputname)
-
- category_list_file.close()
-
-if __name__ == '__main__':
- main()
diff --git a/v1/hacking/templates/rst.j2 b/v1/hacking/templates/rst.j2
deleted file mode 100644
index f6f38e5910..0000000000
--- a/v1/hacking/templates/rst.j2
+++ /dev/null
@@ -1,211 +0,0 @@
-.. _@{ module }@:
-
-{% if short_description %}
-{% set title = module + ' - ' + short_description|convert_symbols_to_format %}
-{% else %}
-{% set title = module %}
-{% endif %}
-{% set title_len = title|length %}
-
-@{ title }@
-@{ '+' * title_len }@
-
-.. contents::
- :local:
- :depth: 1
-
-{# ------------------------------------------
- #
- # Please note: this looks like a core dump
- # but it isn't one.
- #
- --------------------------------------------#}
-
-{% if aliases is defined -%}
-Aliases: @{ ','.join(aliases) }@
-{% endif %}
-
-{% if deprecated is defined -%}
-DEPRECATED
-----------
-
-@{ deprecated }@
-{% endif %}
-
-Synopsis
---------
-
-{% if version_added is defined -%}
-.. versionadded:: @{ version_added }@
-{% endif %}
-
-{% for desc in description -%}
-@{ desc | convert_symbols_to_format }@
-{% endfor %}
-
-{% if options -%}
-Options
--------
-
-.. raw:: html
-
- <table border=1 cellpadding=4>
- <tr>
- <th class="head">parameter</th>
- <th class="head">required</th>
- <th class="head">default</th>
- <th class="head">choices</th>
- <th class="head">comments</th>
- </tr>
- {% for k in option_keys %}
- {% set v = options[k] %}
- <tr>
- <td>@{ k }@</td>
- <td>{% if v.get('required', False) %}yes{% else %}no{% endif %}</td>
- <td>{% if v['default'] %}@{ v['default'] }@{% endif %}</td>
- {% if v.get('type', 'not_bool') == 'bool' %}
- <td><ul><li>yes</li><li>no</li></ul></td>
- {% else %}
- <td><ul>{% for choice in v.get('choices',[]) -%}<li>@{ choice }@</li>{% endfor -%}</ul></td>
- {% endif %}
- <td>{% for desc in v.description -%}@{ desc | html_ify }@{% endfor -%}{% if v['version_added'] %} (added in Ansible @{v['version_added']}@){% endif %}</td>
- </tr>
- {% endfor %}
- </table>
-{% endif %}
-
-{% if requirements %}
-{% for req in requirements %}
-
-.. note:: Requires @{ req | convert_symbols_to_format }@
-
-{% endfor %}
-{% endif %}
-
-{% if examples or plainexamples %}
-Examples
---------
-
-.. raw:: html
-
-{% for example in examples %}
- {% if example['description'] %}<p>@{ example['description'] | html_ify }@</p>{% endif %}
- <p>
- <pre>
-@{ example['code'] | escape | indent(4, True) }@
- </pre>
- </p>
-{% endfor %}
- <br/>
-
-{% if plainexamples %}
-
-::
-
-@{ plainexamples | indent(4, True) }@
-{% endif %}
-{% endif %}
-
-
-{% if returndocs %}
-Return Values
--------------
-
-Common return values are documented here :doc:`common_return_values`, the following are the fields unique to this module:
-
-.. raw:: html
-
- <table border=1 cellpadding=4>
- <tr>
- <th class="head">name</th>
- <th class="head">description</th>
- <th class="head">returned</th>
- <th class="head">type</th>
- <th class="head">sample</th>
- </tr>
-
- {% for entry in returndocs %}
- <tr>
- <td> @{ entry }@ </td>
- <td> @{ returndocs[entry].description }@ </td>
- <td align=center> @{ returndocs[entry].returned }@ </td>
- <td align=center> @{ returndocs[entry].type }@ </td>
- <td align=center> @{ returndocs[entry].sample}@ </td>
- </tr>
- {% if returndocs[entry].type == 'dictionary' %}
- <tr><td>contains: </td>
- <td colspan=4>
- <table border=1 cellpadding=2>
- <tr>
- <th class="head">name</th>
- <th class="head">description</th>
- <th class="head">returned</th>
- <th class="head">type</th>
- <th class="head">sample</th>
- </tr>
-
- {% for sub in returndocs[entry].contains %}
- <tr>
- <td> @{ sub }@ </td>
- <td> @{ returndocs[entry].contains[sub].description }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].returned }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].type }@ </td>
- <td align=center> @{ returndocs[entry].contains[sub].sample}@ </td>
- </tr>
- {% endfor %}
-
- </table>
- </td></tr>
-
- {% endif %}
- {% endfor %}
-
- </table>
- </br></br>
-{% endif %}
-
-{% if notes %}
-{% for note in notes %}
-.. note:: @{ note | convert_symbols_to_format }@
-{% endfor %}
-{% endif %}
-
-
-{% if not deprecated %}
- {% if core %}
-
-This is a Core Module
----------------------
-
-The source of this module is hosted on GitHub in the `ansible-modules-core <http://github.com/ansible/ansible-modules-core>`_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-core <http://github.com/ansible/ansible-modules-core>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-This is a "core" ansible module, which means it will receive slightly higher priority for all requests than those in the "extras" repos.
-
- {% else %}
-
-This is an Extras Module
-------------------------
-
-This source of this module is hosted on GitHub in the `ansible-modules-extras <http://github.com/ansible/ansible-modules-extras>`_ repo.
-
-If you believe you have found a bug in this module, and are already running the latest stable or development version of Ansible, first look in the `issue tracker at github.com/ansible/ansible-modules-extras <http://github.com/ansible/ansible-modules-extras>`_ to see if a bug has already been filed. If not, we would be grateful if you would file one.
-
-Should you have a question rather than a bug report, inquries are welcome on the `ansible-project google group <https://groups.google.com/forum/#!forum/ansible-project>`_ or on Ansible's "#ansible" channel, located on irc.freenode.net. Development oriented topics should instead use the similar `ansible-devel google group <https://groups.google.com/forum/#!forum/ansible-devel>`_.
-
-Documentation updates for this module can also be edited directly by submitting a pull request to the module source code, just look for the "DOCUMENTATION" block in the source tree.
-
-Note that this module is designated a "extras" module. Non-core modules are still fully usable, but may receive slightly lower response rates for issues and pull requests.
-Popular "extras" modules may be promoted to core modules over time.
-
- {% endif %}
-{% endif %}
-
-For help in developing on modules, should you be so inclined, please read :doc:`community`, :doc:`developing_test_pr` and :doc:`developing_modules`.
-
-
diff --git a/v1/hacking/test-module b/v1/hacking/test-module
deleted file mode 100755
index c226f32e88..0000000000
--- a/v1/hacking/test-module
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-# this script is for testing modules without running through the
-# entire guts of ansible, and is very helpful for when developing
-# modules
-#
-# example:
-# test-module -m ../library/commands/command -a "/bin/sleep 3"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted"
-# test-module -m ../library/system/service -a "name=httpd ensure=restarted" --debugger /usr/bin/pdb
-# test-modulr -m ../library/file/lineinfile -a "dest=/etc/exports line='/srv/home hostname1(rw,sync)'" --check
-
-import sys
-import base64
-import os
-import subprocess
-import traceback
-import optparse
-import ansible.utils as utils
-import ansible.module_common as module_common
-import ansible.constants as C
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-def parse():
- """parse command line
-
- :return : (options, args)"""
- parser = optparse.OptionParser()
-
- parser.usage = "%prog -[options] (-h for help)"
-
- parser.add_option('-m', '--module-path', dest='module_path',
- help="REQUIRED: full path of module source to execute")
- parser.add_option('-a', '--args', dest='module_args', default="",
- help="module argument string")
- parser.add_option('-D', '--debugger', dest='debugger',
- help="path to python debugger (e.g. /usr/bin/pdb)")
- parser.add_option('-I', '--interpreter', dest='interpreter',
- help="path to interpreter to use for this module (e.g. ansible_python_interpreter=/usr/bin/python)",
- metavar='INTERPRETER_TYPE=INTERPRETER_PATH')
- parser.add_option('-c', '--check', dest='check', action='store_true',
- help="run the module in check mode")
- options, args = parser.parse_args()
- if not options.module_path:
- parser.print_help()
- sys.exit(1)
- else:
- return options, args
-
-def write_argsfile(argstring, json=False):
- """ Write args to a file for old-style module's use. """
- argspath = os.path.expanduser("~/.ansible_test_module_arguments")
- argsfile = open(argspath, 'w')
- if json:
- args = utils.parse_kv(argstring)
- argstring = utils.jsonify(args)
- argsfile.write(argstring)
- argsfile.close()
- return argspath
-
-def boilerplate_module(modfile, args, interpreter, check):
- """ simulate what ansible does with new style modules """
-
- #module_fh = open(modfile)
- #module_data = module_fh.read()
- #module_fh.close()
-
- replacer = module_common.ModuleReplacer()
-
- #included_boilerplate = module_data.find(module_common.REPLACER) != -1 or module_data.find("import ansible.module_utils") != -1
-
- complex_args = {}
- if args.startswith("@"):
- # Argument is a YAML file (JSON is a subset of YAML)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml_from_file(args[1:]))
- args=''
- elif args.startswith("{"):
- # Argument is a YAML document (not a file)
- complex_args = utils.combine_vars(complex_args, utils.parse_yaml(args))
- args=''
-
- inject = {}
- if interpreter:
- if '=' not in interpreter:
- print 'interpreter must by in the form of ansible_python_interpreter=/usr/bin/python'
- sys.exit(1)
- interpreter_type, interpreter_path = interpreter.split('=')
- if not interpreter_type.startswith('ansible_'):
- interpreter_type = 'ansible_%s' % interpreter_type
- if not interpreter_type.endswith('_interpreter'):
- interpreter_type = '%s_interpreter' % interpreter_type
- inject[interpreter_type] = interpreter_path
-
- if check:
- complex_args['CHECKMODE'] = True
-
- (module_data, module_style, shebang) = replacer.modify_module(
- modfile,
- complex_args,
- args,
- inject
- )
-
- modfile2_path = os.path.expanduser("~/.ansible_module_generated")
- print "* including generated source, if any, saving to: %s" % modfile2_path
- print "* this may offset any line numbers in tracebacks/debuggers!"
- modfile2 = open(modfile2_path, 'w')
- modfile2.write(module_data)
- modfile2.close()
- modfile = modfile2_path
-
- return (modfile2_path, module_style)
-
-def runtest( modfile, argspath):
- """Test run a module, piping it's output for reporting."""
-
- os.system("chmod +x %s" % modfile)
-
- invoke = "%s" % (modfile)
- if argspath is not None:
- invoke = "%s %s" % (modfile, argspath)
-
- cmd = subprocess.Popen(invoke, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- (out, err) = cmd.communicate()
-
- try:
- print "***********************************"
- print "RAW OUTPUT"
- print out
- print err
- results = utils.parse_json(out)
- except:
- print "***********************************"
- print "INVALID OUTPUT FORMAT"
- print out
- traceback.print_exc()
- sys.exit(1)
-
- print "***********************************"
- print "PARSED OUTPUT"
- print utils.jsonify(results,format=True)
-
-def rundebug(debugger, modfile, argspath):
- """Run interactively with console debugger."""
-
- if argspath is not None:
- subprocess.call("%s %s %s" % (debugger, modfile, argspath), shell=True)
- else:
- subprocess.call("%s %s" % (debugger, modfile), shell=True)
-
-def main():
-
- options, args = parse()
- (modfile, module_style) = boilerplate_module(options.module_path, options.module_args, options.interpreter, options.check)
-
- argspath=None
- if module_style != 'new':
- if module_style == 'non_native_want_json':
- argspath = write_argsfile(options.module_args, json=True)
- elif module_style == 'old':
- argspath = write_argsfile(options.module_args, json=False)
- else:
- raise Exception("internal error, unexpected module style: %s" % module_style)
- if options.debugger:
- rundebug(options.debugger, modfile, argspath)
- else:
- runtest(modfile, argspath)
-
-if __name__ == "__main__":
- main()
-
diff --git a/v1/hacking/update.sh b/v1/hacking/update.sh
deleted file mode 100755
index 5979dd0ab2..0000000000
--- a/v1/hacking/update.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-git pull --rebase
-git submodule update --init --recursive
diff --git a/v1/tests/README.md b/v1/tests/README.md
deleted file mode 100644
index d0b3dd5abd..0000000000
--- a/v1/tests/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-Unit tests
-==========
-
-Tests at code level. Should be concise and to the point, and organized by subject.
-
diff --git a/v1/tests/TestConstants.py b/v1/tests/TestConstants.py
deleted file mode 100644
index f3b96e8abc..0000000000
--- a/v1/tests/TestConstants.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-
-from ansible.constants import get_config
-import ConfigParser
-import random
-import string
-import os
-
-
-def random_string(length):
- return ''.join(random.choice(string.ascii_uppercase) for x in range(6))
-
-p = ConfigParser.ConfigParser()
-p.read(os.path.join(os.path.dirname(__file__), 'ansible.cfg'))
-
-class TestConstants(unittest.TestCase):
-
- #####################################
- ### get_config unit tests
-
-
- def test_configfile_and_env_both_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- os.environ[env_var] = r
-
- res = get_config(p, 'defaults', 'test_key', env_var, 'default')
- del os.environ[env_var]
-
- assert res == r
-
-
- def test_configfile_set_env_not_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- assert env_var not in os.environ
-
- res = get_config(p, 'defaults', 'test_key', env_var, 'default')
-
- print res
- assert res == 'test_value'
-
-
- def test_configfile_not_set_env_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- os.environ[env_var] = r
-
- res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
- del os.environ[env_var]
-
- assert res == r
-
-
- def test_configfile_not_set_env_not_set(self):
- r = random_string(6)
- env_var = 'ANSIBLE_TEST_%s' % r
- assert env_var not in os.environ
-
- res = get_config(p, 'defaults', 'doesnt_exist', env_var, 'default')
-
- assert res == 'default'
diff --git a/v1/tests/TestFilters.py b/v1/tests/TestFilters.py
deleted file mode 100644
index 3c7eb4506e..0000000000
--- a/v1/tests/TestFilters.py
+++ /dev/null
@@ -1,191 +0,0 @@
-'''
-Test bundled filters
-'''
-
-import os.path
-import unittest, tempfile, shutil
-from ansible import playbook, inventory, callbacks
-import ansible.runner.filter_plugins.core
-import ansible.runner.filter_plugins.mathstuff
-
-INVENTORY = inventory.Inventory(['localhost'])
-
-BOOK = '''
-- hosts: localhost
- vars:
- var: { a: [1,2,3] }
- tasks:
- - template: src=%s dest=%s
-'''
-
-SRC = '''
--
-{{ var|to_json }}
--
-{{ var|to_nice_json }}
--
-{{ var|to_yaml }}
--
-{{ var|to_nice_yaml }}
-'''
-
-DEST = '''
--
-{"a": [1, 2, 3]}
--
-{
- "a": [
- 1,
- 2,
- 3
- ]
-}
--
-a: [1, 2, 3]
-
--
-a:
-- 1
-- 2
-- 3
-'''
-
-class TestFilters(unittest.TestCase):
-
- def setUp(self):
- self.tmpdir = tempfile.mkdtemp(dir='/tmp')
-
- def tearDown(self):
- shutil.rmtree(self.tmpdir)
-
- def temp(self, name, data=''):
- '''write a temporary file and return the name'''
- name = self.tmpdir + '/' + name
- with open(name, 'w') as f:
- f.write(data)
- return name
-
- def test_bool_none(self):
- a = ansible.runner.filter_plugins.core.bool(None)
- assert a == None
-
- def test_bool_true(self):
- a = ansible.runner.filter_plugins.core.bool(True)
- assert a == True
-
- def test_bool_yes(self):
- a = ansible.runner.filter_plugins.core.bool('Yes')
- assert a == True
-
- def test_bool_no(self):
- a = ansible.runner.filter_plugins.core.bool('Foo')
- assert a == False
-
- def test_quotes(self):
- a = ansible.runner.filter_plugins.core.quote('ls | wc -l')
- assert a == "'ls | wc -l'"
-
- def test_fileglob(self):
- pathname = os.path.join(os.path.dirname(__file__), '*')
- a = ansible.runner.filter_plugins.core.fileglob(pathname)
- assert __file__ in a
-
- def test_regex(self):
- a = ansible.runner.filter_plugins.core.regex('ansible', 'ansible',
- match_type='findall')
- assert a == True
-
- def test_match_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.match('ansible', 'ansible')
- assert a == True
-
- def test_match_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.match('ANSIBLE', 'ansible',
- True)
- assert a == True
-
- def test_match_no_match(self):
- a = ansible.runner.filter_plugins.core.match(' ansible', 'ansible')
- assert a == False
-
- def test_search_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.search(' ansible ', 'ansible')
- assert a == True
-
- def test_search_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.search(' ANSIBLE ', 'ansible',
- True)
- assert a == True
-
- def test_regex_replace_case_sensitive(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^a.*i(.*)$',
- 'a\\1')
- assert a == 'able'
-
- def test_regex_replace_case_insensitive(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^A.*I(.*)$',
- 'a\\1', True)
- assert a == 'able'
-
- def test_regex_replace_no_match(self):
- a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^b.*i(.*)$',
- 'a\\1')
- assert a == 'ansible'
-
- def test_to_uuid(self):
- a = ansible.runner.filter_plugins.core.to_uuid('example.com')
-
- assert a == 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'
-
- #def test_filters(self):
-
- # this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
- #return
-
- #src = self.temp('src.j2', SRC)
- #dest = self.temp('dest.txt')
- #book = self.temp('book', BOOK % (src, dest))
-
- #playbook.PlayBook(
- # playbook = book,
- # inventory = INVENTORY,
- # transport = 'local',
- # callbacks = callbacks.PlaybookCallbacks(),
- # runner_callbacks = callbacks.DefaultRunnerCallbacks(),
- # stats = callbacks.AggregateStats(),
- #).run()
-
- #out = open(dest).read()
- #self.assertEqual(DEST, out)
-
- def test_version_compare(self):
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(0, 1.1, 'lt', False))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.2, '<'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '=='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, 'eq'))
-
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'gt'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '>'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'ne'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '!='))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '<>'))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'ge'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.1, '>='))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'le'))
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.0, 1.1, '<='))
-
- self.assertTrue(ansible.runner.filter_plugins.core.version_compare('12.04', 12, 'ge'))
-
- def test_min(self):
- a = ansible.runner.filter_plugins.mathstuff.min([3, 2, 5, 4])
- assert a == 2
-
- def test_max(self):
- a = ansible.runner.filter_plugins.mathstuff.max([3, 2, 5, 4])
- assert a == 5
diff --git a/v1/tests/TestInventory.py b/v1/tests/TestInventory.py
deleted file mode 100644
index b4bee4300e..0000000000
--- a/v1/tests/TestInventory.py
+++ /dev/null
@@ -1,510 +0,0 @@
-import os
-import unittest
-from nose.tools import raises
-
-from ansible import errors
-from ansible.inventory import Inventory
-
-class TestInventory(unittest.TestCase):
-
- def setUp(self):
-
- self.cwd = os.getcwd()
- self.test_dir = os.path.join(self.cwd, 'inventory_test_data')
-
- self.inventory_file = os.path.join(self.test_dir, 'simple_hosts')
- self.large_range_inventory_file = os.path.join(self.test_dir, 'large_range')
- self.complex_inventory_file = os.path.join(self.test_dir, 'complex_hosts')
- self.inventory_script = os.path.join(self.test_dir, 'inventory_api.py')
- self.inventory_dir = os.path.join(self.test_dir, 'inventory_dir')
-
- os.chmod(self.inventory_script, 0755)
-
- def tearDown(self):
- os.chmod(self.inventory_script, 0644)
-
- def compare(self, left, right, sort=True):
- if sort:
- left = sorted(left)
- right = sorted(right)
- print left
- print right
- assert left == right
-
- def empty_inventory(self):
- return Inventory(None)
-
- def simple_inventory(self):
- return Inventory(self.inventory_file)
-
- def large_range_inventory(self):
- return Inventory(self.large_range_inventory_file)
-
- def script_inventory(self):
- return Inventory(self.inventory_script)
-
- def complex_inventory(self):
- return Inventory(self.complex_inventory_file)
-
- def dir_inventory(self):
- return Inventory(self.inventory_dir)
-
- all_simple_hosts=['jupiter', 'saturn', 'zeus', 'hera',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99', 'cottus100',
- 'poseidon', 'thor', 'odin', 'loki',
- 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2',
- 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5',
- 'Hotep-a', 'Hotep-b', 'Hotep-c',
- 'BastC', 'BastD', 'neptun', 'goldorak', ]
-
- #####################################
- ### Empty inventory format tests
-
- def test_empty(self):
- inventory = self.empty_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(hosts, [])
-
- #####################################
- ### Simple inventory format tests
-
- def test_simple(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(self.all_simple_hosts))
-
- def test_simple_all(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts('all')
- self.assertEqual(sorted(hosts), sorted(self.all_simple_hosts))
-
- def test_get_hosts(self):
- inventory = Inventory('127.0.0.1,192.168.1.1')
- hosts = inventory.get_hosts('!10.0.0.1')
- hosts_all = inventory.get_hosts('all')
- self.assertEqual(sorted(hosts), sorted(hosts_all))
-
- def test_no_src(self):
- inventory = Inventory('127.0.0.1,')
- self.assertEqual(inventory.src(), None)
-
- def test_simple_norse(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("norse")
-
- expected_hosts=['thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_ungrouped(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("ungrouped")
-
- expected_hosts=['jupiter', 'saturn',
- 'thrudgelmir0', 'thrudgelmir1', 'thrudgelmir2',
- 'thrudgelmir3', 'thrudgelmir4', 'thrudgelmir5']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_combined(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("norse:greek")
-
- expected_hosts=['zeus', 'hera', 'poseidon',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99','cottus100',
- 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_restrict(self):
- inventory = self.simple_inventory()
-
- restricted_hosts = ['hera', 'poseidon', 'thor']
- expected_hosts=['zeus', 'hera', 'poseidon',
- 'cerberus001','cerberus002','cerberus003',
- 'cottus99', 'cottus100',
- 'thor', 'odin', 'loki']
-
- inventory.restrict_to(restricted_hosts)
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(restricted_hosts)
-
- inventory.lift_restriction()
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_simple_string_ipv4(self):
- inventory = Inventory('127.0.0.1,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1']))
-
- def test_simple_string_ipv4_port(self):
- inventory = Inventory('127.0.0.1:2222,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['127.0.0.1','192.168.1.1']))
-
- def test_simple_string_ipv4_vars(self):
- inventory = Inventory('127.0.0.1:2222,192.168.1.1')
- var = inventory.get_variables('127.0.0.1')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_string_ipv6(self):
- inventory = Inventory('FE80:EF45::12:1,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1']))
-
- def test_simple_string_ipv6_port(self):
- inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['FE80:EF45::12:1','192.168.1.1']))
-
- def test_simple_string_ipv6_vars(self):
- inventory = Inventory('[FE80:EF45::12:1]:2222,192.168.1.1')
- var = inventory.get_variables('FE80:EF45::12:1')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_string_fqdn(self):
- inventory = Inventory('foo.example.com,bar.example.com')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com']))
-
- def test_simple_string_fqdn_port(self):
- inventory = Inventory('foo.example.com:2222,bar.example.com')
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted(['foo.example.com','bar.example.com']))
-
- def test_simple_string_fqdn_vars(self):
- inventory = Inventory('foo.example.com:2222,bar.example.com')
- var = inventory.get_variables('foo.example.com')
- self.assertEqual(var['ansible_ssh_port'], 2222)
-
- def test_simple_vars(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('thor')
-
- assert vars == {'group_names': ['norse'],
- 'inventory_hostname': 'thor',
- 'inventory_hostname_short': 'thor'}
-
- def test_simple_port(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('hera')
-
- expected = { 'ansible_ssh_port': 3000,
- 'group_names': ['greek'],
- 'inventory_hostname': 'hera',
- 'inventory_hostname_short': 'hera' }
- assert vars == expected
-
- def test_large_range(self):
- inventory = self.large_range_inventory()
- hosts = inventory.list_hosts()
- self.assertEqual(sorted(hosts), sorted('bob%03i' %i for i in range(0, 143)))
-
- def test_subset(self):
- inventory = self.simple_inventory()
- inventory.subset('odin;thor,loki')
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin','loki']))
-
- def test_subset_range(self):
- inventory = self.simple_inventory()
- inventory.subset('greek[0-2];norse[0]')
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['zeus','hera','thor']))
-
- def test_subet_range_empty_group(self):
- inventory = self.simple_inventory()
- inventory.subset('missing[0]')
- self.assertEqual(sorted(inventory.list_hosts()), sorted([]))
-
- def test_subset_filename(self):
- inventory = self.simple_inventory()
- inventory.subset('@' + os.path.join(self.test_dir, 'restrict_pattern'))
- self.assertEqual(sorted(inventory.list_hosts()), sorted(['thor','odin']))
-
- def test_vars_yaml_extension(self):
- inventory = self.simple_inventory()
- vars = inventory.get_variables('goldorak')
- assert vars['YAML_FILENAME_EXTENSIONS_TEST']
-
- @raises(errors.AnsibleError)
- def testinvalid_entry(self):
- Inventory('1234')
-
- ###################################################
- ### INI file advanced tests
-
- def test_complex_vars(self):
- inventory = self.complex_inventory()
-
- vars = inventory.get_variables('rtp_a')
- print vars
-
- expected = dict(
- a=1, b=2, c=3, d=10002, e=10003, f='10004 != 10005',
- g=' g ', h=' h ', i="' i \"", j='" j',
- k=[ 'k1', 'k2' ],
- rga=1, rgb=2, rgc=3,
- inventory_hostname='rtp_a', inventory_hostname_short='rtp_a',
- group_names=[ 'eastcoast', 'nc', 'redundantgroup', 'redundantgroup2', 'redundantgroup3', 'rtp', 'us' ]
- )
- print vars
- print expected
- assert vars == expected
-
- def test_complex_group_names(self):
- inventory = self.complex_inventory()
- tests = {
- 'host1': [ 'role1', 'role3' ],
- 'host2': [ 'role1', 'role2' ],
- 'host3': [ 'role2', 'role3' ]
- }
- for host, roles in tests.iteritems():
- group_names = inventory.get_variables(host)['group_names']
- assert sorted(group_names) == sorted(roles)
-
- def test_complex_exclude(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc:florida:!triangle:!orlando")
- expected_hosts = ['miami', 'rtp_a', 'rtp_b', 'rtp_c']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_regex_exclude(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("~rtp_[ac]")
- expected_hosts = ['rtp_a', 'rtp_c']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_regex_grouping(self):
- inventory = self.simple_inventory()
- hosts = inventory.list_hosts("~(cer[a-z]|berc)(erus00[13])")
- expected_hosts = ['cerberus001', 'cerberus003']
- print "HOSTS=%s" % sorted(hosts)
- print "EXPECTED=%s" % sorted(expected_hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_complex_enumeration(self):
-
-
- expected1 = ['rtp_b']
- expected2 = ['rtp_a', 'rtp_b']
- expected3 = ['rtp_a', 'rtp_b', 'rtp_c', 'tri_a', 'tri_b', 'tri_c']
- expected4 = ['rtp_b', 'orlando' ]
- expected5 = ['blade-a-1']
-
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc[1]")
- self.compare(hosts, expected1, sort=False)
- hosts = inventory.list_hosts("nc[0-2]")
- self.compare(hosts, expected2, sort=False)
- hosts = inventory.list_hosts("nc[0-99999]")
- self.compare(hosts, expected3, sort=False)
- hosts = inventory.list_hosts("nc[1-2]:florida[0-1]")
- self.compare(hosts, expected4, sort=False)
- hosts = inventory.list_hosts("blade-a-1")
- self.compare(hosts, expected5, sort=False)
-
- def test_complex_intersect(self):
- inventory = self.complex_inventory()
- hosts = inventory.list_hosts("nc:&redundantgroup:!rtp_c")
- self.compare(hosts, ['rtp_a'])
- hosts = inventory.list_hosts("nc:&triangle:!tri_c")
- self.compare(hosts, ['tri_a', 'tri_b'])
-
- @raises(errors.AnsibleError)
- def test_invalid_range(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_range'))
-
- @raises(errors.AnsibleError)
- def test_missing_end(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_missing_end'))
-
- @raises(errors.AnsibleError)
- def test_incorrect_format(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_incorrect_format'))
-
- @raises(errors.AnsibleError)
- def test_alpha_end_before_beg(self):
- Inventory(os.path.join(self.test_dir, 'inventory','test_alpha_end_before_beg'))
-
- def test_combined_range(self):
- i = Inventory(os.path.join(self.test_dir, 'inventory','test_combined_range'))
- hosts = i.list_hosts('test')
- expected_hosts=['host1A','host2A','host1B','host2B']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_leading_range(self):
- i = Inventory(os.path.join(self.test_dir, 'inventory','test_leading_range'))
- hosts = i.list_hosts('test')
- expected_hosts=['1.host','2.host','A.host','B.host']
- assert sorted(hosts) == sorted(expected_hosts)
-
- hosts2 = i.list_hosts('test2')
- expected_hosts2=['1.host','2.host','3.host']
- assert sorted(hosts2) == sorted(expected_hosts2)
-
- ###################################################
- ### Inventory API tests
-
- def test_script(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts()
-
- expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
-
- print "Expected: %s"%(expected_hosts)
- print "Got : %s"%(hosts)
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_all(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts('all')
-
- expected_hosts=['jupiter', 'saturn', 'zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_norse(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts("norse")
-
- expected_hosts=['thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_combined(self):
- inventory = self.script_inventory()
- hosts = inventory.list_hosts("norse:greek")
-
- expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_restrict(self):
- inventory = self.script_inventory()
-
- restricted_hosts = ['hera', 'poseidon', 'thor']
- expected_hosts=['zeus', 'hera', 'poseidon', 'thor', 'odin', 'loki']
-
- inventory.restrict_to(restricted_hosts)
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(restricted_hosts)
-
- inventory.lift_restriction()
- hosts = inventory.list_hosts("norse:greek")
-
- assert sorted(hosts) == sorted(expected_hosts)
-
- def test_script_vars(self):
- inventory = self.script_inventory()
- vars = inventory.get_variables('thor')
-
- print "VARS=%s" % vars
-
- assert vars == {'hammer':True,
- 'group_names': ['norse'],
- 'inventory_hostname': 'thor',
- 'inventory_hostname_short': 'thor'}
-
- def test_hosts_list(self):
- # Test the case when playbook 'hosts' var is a list.
- inventory = self.script_inventory()
- host_names = sorted(['thor', 'loki', 'odin']) # Not sure if sorting is in the contract or not
- actual_hosts = inventory.get_hosts(host_names)
- actual_host_names = [host.name for host in actual_hosts]
- assert host_names == actual_host_names
-
- def test_script_multiple_groups(self):
- inventory = self.script_inventory()
- vars = inventory.get_variables('zeus')
-
- print "VARS=%s" % vars
-
- assert vars == {'inventory_hostname': 'zeus',
- 'inventory_hostname_short': 'zeus',
- 'group_names': ['greek', 'major-god']}
-
- def test_allows_equals_sign_in_var(self):
- inventory = self.simple_inventory()
- auth = inventory.get_variables('neptun')['auth']
- assert auth == 'YWRtaW46YWRtaW4='
-
- def test_dir_inventory(self):
- inventory = self.dir_inventory()
-
- host_vars = inventory.get_variables('zeus')
-
- expected_vars = {'inventory_hostname': 'zeus',
- 'inventory_hostname_short': 'zeus',
- 'group_names': ['greek', 'major-god'],
- 'var_a': '3#4'}
-
- print "HOST VARS=%s" % host_vars
- print "EXPECTED VARS=%s" % expected_vars
-
- assert host_vars == expected_vars
-
- def test_dir_inventory_multiple_groups(self):
- inventory = self.dir_inventory()
- group_greek = inventory.get_hosts('greek')
- actual_host_names = [host.name for host in group_greek]
- print "greek : %s " % actual_host_names
- assert actual_host_names == ['zeus', 'morpheus']
-
- def test_dir_inventory_skip_extension(self):
- inventory = self.dir_inventory()
- assert 'skipme' not in [h.name for h in inventory.get_hosts()]
-
- def test_dir_inventory_group_hosts(self):
- inventory = self.dir_inventory()
- expected_groups = {'all': ['morpheus', 'thor', 'zeus'],
- 'major-god': ['thor', 'zeus'],
- 'minor-god': ['morpheus'],
- 'norse': ['thor'],
- 'greek': ['morpheus', 'zeus'],
- 'ungrouped': []}
-
- actual_groups = {}
- for group in inventory.get_groups():
- actual_groups[group.name] = sorted([h.name for h in group.get_hosts()])
- print "INVENTORY groups[%s].hosts=%s" % (group.name, actual_groups[group.name])
- print "EXPECTED groups[%s].hosts=%s" % (group.name, expected_groups[group.name])
-
- assert actual_groups == expected_groups
-
- def test_dir_inventory_groups_for_host(self):
- inventory = self.dir_inventory()
- expected_groups_for_host = {'morpheus': ['all', 'greek', 'minor-god'],
- 'thor': ['all', 'major-god', 'norse'],
- 'zeus': ['all', 'greek', 'major-god']}
-
- actual_groups_for_host = {}
- for (host, expected) in expected_groups_for_host.iteritems():
- groups = inventory.groups_for_host(host)
- names = sorted([g.name for g in groups])
- actual_groups_for_host[host] = names
- print "INVENTORY groups_for_host(%s)=%s" % (host, names)
- print "EXPECTED groups_for_host(%s)=%s" % (host, expected)
-
- assert actual_groups_for_host == expected_groups_for_host
-
- def test_dir_inventory_groups_list(self):
- inventory = self.dir_inventory()
- inventory_groups = inventory.groups_list()
-
- expected_groups = {'all': ['morpheus', 'thor', 'zeus'],
- 'major-god': ['thor', 'zeus'],
- 'minor-god': ['morpheus'],
- 'norse': ['thor'],
- 'greek': ['morpheus', 'zeus'],
- 'ungrouped': []}
-
- for (name, expected_hosts) in expected_groups.iteritems():
- inventory_groups[name] = sorted(inventory_groups.get(name, []))
- print "INVENTORY groups_list['%s']=%s" % (name, inventory_groups[name])
- print "EXPECTED groups_list['%s']=%s" % (name, expected_hosts)
-
- assert inventory_groups == expected_groups
-
diff --git a/v1/tests/TestModuleUtilsBasic.py b/v1/tests/TestModuleUtilsBasic.py
deleted file mode 100644
index 5b8be28307..0000000000
--- a/v1/tests/TestModuleUtilsBasic.py
+++ /dev/null
@@ -1,334 +0,0 @@
-import os
-import tempfile
-
-import unittest
-from nose.tools import raises
-from nose.tools import timed
-
-from ansible import errors
-from ansible.module_common import ModuleReplacer
-from ansible.module_utils.basic import heuristic_log_sanitize
-from ansible.utils import checksum as utils_checksum
-
-TEST_MODULE_DATA = """
-from ansible.module_utils.basic import *
-
-def get_module():
- return AnsibleModule(
- argument_spec = dict(),
- supports_check_mode = True,
- no_log = True,
- )
-
-get_module()
-
-"""
-
-class TestModuleUtilsBasic(unittest.TestCase):
-
- def cleanup_temp_file(self, fd, path):
- try:
- os.close(fd)
- os.remove(path)
- except:
- pass
-
- def cleanup_temp_dir(self, path):
- try:
- os.rmdir(path)
- except:
- pass
-
- def setUp(self):
- # create a temporary file for the test module
- # we're about to generate
- self.tmp_fd, self.tmp_path = tempfile.mkstemp()
- os.write(self.tmp_fd, TEST_MODULE_DATA)
-
- # template the module code and eval it
- module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
-
- d = {}
- exec(module_data, d, d)
- self.module = d['get_module']()
-
- # module_utils/basic.py screws with CWD, let's save it and reset
- self.cwd = os.getcwd()
-
- def tearDown(self):
- self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
- # Reset CWD back to what it was before basic.py changed it
- os.chdir(self.cwd)
-
- #################################################################################
- # run_command() tests
-
- # test run_command with a string command
- def test_run_command_string(self):
- (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'")
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
- (rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with an array of args (with both use_unsafe_shell=True|False)
- def test_run_command_args(self):
- (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"])
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
- (rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with leading environment variables
- @raises(SystemExit)
- def test_run_command_string_with_env_variables(self):
- self.module.run_command('FOO=bar /bin/echo -n "foo bar"')
-
- @raises(SystemExit)
- def test_run_command_args_with_env_variables(self):
- self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar'])
-
- def test_run_command_string_unsafe_with_env_variables(self):
- (rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar')
-
- # test run_command with a command pipe (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_pipe(self):
- (rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with a shell redirect in (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_redirect_in(self):
- (rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with a shell redirect out (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_redirect_out(self):
- tmp_fd, tmp_path = tempfile.mkstemp()
- try:
- (rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- checksum = utils_checksum(tmp_path)
- self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
- except:
- raise
- finally:
- self.cleanup_temp_file(tmp_fd, tmp_path)
-
- # test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False)
- def test_run_command_string_unsafe_with_double_redirect_out(self):
- tmp_fd, tmp_path = tempfile.mkstemp()
- try:
- (rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- checksum = utils_checksum(tmp_path)
- self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
- except:
- raise
- finally:
- self.cleanup_temp_file(tmp_fd, tmp_path)
-
- # test run_command with data
- def test_run_command_string_with_data(self):
- (rc, out, err) = self.module.run_command('cat', data='foo bar')
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'foo bar\n')
-
- # test run_command with binary data
- def test_run_command_string_with_binary_data(self):
- (rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True)
- self.assertEqual(rc, 0)
- self.assertEqual(out, 'ABCD')
-
- # test run_command with a cwd set
- def test_run_command_string_with_cwd(self):
- tmp_path = tempfile.mkdtemp()
- try:
- (rc, out, err) = self.module.run_command('pwd', cwd=tmp_path)
- self.assertEqual(rc, 0)
- self.assertTrue(os.path.exists(tmp_path))
- self.assertEqual(out.strip(), os.path.realpath(tmp_path))
- except:
- raise
- finally:
- self.cleanup_temp_dir(tmp_path)
-
-
-class TestModuleUtilsBasicHelpers(unittest.TestCase):
- ''' Test some implementation details of AnsibleModule
-
- Some pieces of AnsibleModule are implementation details but they have
- potential cornercases that we need to check. Go ahead and test at
- this level that the functions are behaving even though their API may
- change and we'd have to rewrite these tests so that we know that we
- need to check for those problems in any rewrite.
-
- In the future we might want to restructure higher level code to be
- friendlier to unittests so that we can test at the level that the public
- is interacting with the APIs.
- '''
-
- MANY_RECORDS = 7000
- URL_SECRET = 'http://username:pas:word@foo.com/data'
- SSH_SECRET = 'username:pas:word@foo.com/data'
-
- def cleanup_temp_file(self, fd, path):
- try:
- os.close(fd)
- os.remove(path)
- except:
- pass
-
- def cleanup_temp_dir(self, path):
- try:
- os.rmdir(path)
- except:
- pass
-
- def _gen_data(self, records, per_rec, top_level, secret_text):
- hostvars = {'hostvars': {}}
- for i in range(1, records, 1):
- host_facts = {'host%s' % i:
- {'pstack':
- {'running': '875.1',
- 'symlinked': '880.0',
- 'tars': [],
- 'versions': ['885.0']},
- }}
-
- if per_rec:
- host_facts['host%s' % i]['secret'] = secret_text
- hostvars['hostvars'].update(host_facts)
- if top_level:
- hostvars['secret'] = secret_text
- return hostvars
-
- def setUp(self):
- self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True,
- self.URL_SECRET))
- self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True,
- self.SSH_SECRET))
- self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True,
- self.URL_SECRET))
- self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True,
- self.SSH_SECRET))
- self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False,
- False, ''))
- self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET))
- self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET))
-
- # create a temporary file for the test module
- # we're about to generate
- self.tmp_fd, self.tmp_path = tempfile.mkstemp()
- os.write(self.tmp_fd, TEST_MODULE_DATA)
-
- # template the module code and eval it
- module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
-
- d = {}
- exec(module_data, d, d)
- self.module = d['get_module']()
-
- # module_utils/basic.py screws with CWD, let's save it and reset
- self.cwd = os.getcwd()
-
- def tearDown(self):
- self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
- # Reset CWD back to what it was before basic.py changed it
- os.chdir(self.cwd)
-
-
- #################################################################################
-
- #
- # Speed tests
- #
-
- # Previously, we used regexes which had some pathologically slow cases for
- # parameters with large amounts of data with many ':' but no '@'. The
- # present function gets slower when there are many replacements so we may
- # want to explore regexes in the future (for the speed when substituting
- # or flexibility). These speed tests will hopefully tell us if we're
- # introducing code that has cases that are simply too slow.
- #
- # Some regex notes:
- # * re.sub() is faster than re.match() + str.join().
- # * We may be able to detect a large number of '@' symbols and then use
- # a regex else use the present function.
-
- @timed(5)
- def test_log_sanitize_speed_many_url(self):
- heuristic_log_sanitize(self.many_url)
-
- @timed(5)
- def test_log_sanitize_speed_many_ssh(self):
- heuristic_log_sanitize(self.many_ssh)
-
- @timed(5)
- def test_log_sanitize_speed_one_url(self):
- heuristic_log_sanitize(self.one_url)
-
- @timed(5)
- def test_log_sanitize_speed_one_ssh(self):
- heuristic_log_sanitize(self.one_ssh)
-
- @timed(5)
- def test_log_sanitize_speed_zero_secrets(self):
- heuristic_log_sanitize(self.zero_secrets)
-
- #
- # Test that the password obfuscation sanitizes somewhat cleanly.
- #
-
- def test_log_sanitize_correctness(self):
- url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
- ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
-
- url_output = heuristic_log_sanitize(url_data)
- ssh_output = heuristic_log_sanitize(ssh_data)
-
- # Basic functionality: Successfully hid the password
- try:
- self.assertNotIn('pas:word', url_output)
- self.assertNotIn('pas:word', ssh_output)
-
- # Slightly more advanced, we hid all of the password despite the ":"
- self.assertNotIn('pas', url_output)
- self.assertNotIn('pas', ssh_output)
- except AttributeError:
- # python2.6 or less's unittest
- self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output))
- self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output))
-
- self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output))
- self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output))
-
- # In this implementation we replace the password with 8 "*" which is
- # also the length of our password. The url fields should be able to
- # accurately detect where the password ends so the length should be
- # the same:
- self.assertEqual(len(url_output), len(url_data))
-
- # ssh checking is harder as the heuristic is overzealous in many
- # cases. Since the input will have at least one ":" present before
- # the password we can tell some things about the beginning and end of
- # the data, though:
- self.assertTrue(ssh_output.startswith("{'"))
- self.assertTrue(ssh_output.endswith("}"))
- try:
- self.assertIn(":********@foo.com/data'", ssh_output)
- except AttributeError:
- # python2.6 or less's unittest
- self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output))
-
- # The overzealous-ness here may lead to us changing the algorithm in
- # the future. We could make it consume less of the data (with the
- # possibility of leaving partial passwords exposed) and encourage
- # people to use no_log instead of relying on this obfuscation.
diff --git a/v1/tests/TestModuleUtilsDatabase.py b/v1/tests/TestModuleUtilsDatabase.py
deleted file mode 100644
index 67da0b60e0..0000000000
--- a/v1/tests/TestModuleUtilsDatabase.py
+++ /dev/null
@@ -1,118 +0,0 @@
-import collections
-import mock
-import os
-import re
-
-from nose.tools import eq_
-try:
- from nose.tools import assert_raises_regexp
-except ImportError:
- # Python < 2.7
- def assert_raises_regexp(expected, regexp, callable, *a, **kw):
- try:
- callable(*a, **kw)
- except expected as e:
- if isinstance(regexp, basestring):
- regexp = re.compile(regexp)
- if not regexp.search(str(e)):
- raise Exception('"%s" does not match "%s"' %
- (regexp.pattern, str(e)))
- else:
- if hasattr(expected,'__name__'): excName = expected.__name__
- else: excName = str(expected)
- raise AssertionError("%s not raised" % excName)
-
-from ansible.module_utils.database import (
- pg_quote_identifier,
- SQLParseError,
-)
-
-
-# Note: Using nose's generator test cases here so we can't inherit from
-# unittest.TestCase
-class TestQuotePgIdentifier(object):
-
- # These are all valid strings
- # The results are based on interpreting the identifier as a table name
- valid = {
- # User quoted
- '"public.table"': '"public.table"',
- '"public"."table"': '"public"."table"',
- '"schema test"."table test"': '"schema test"."table test"',
-
- # We quote part
- 'public.table': '"public"."table"',
- '"public".table': '"public"."table"',
- 'public."table"': '"public"."table"',
- 'schema test.table test': '"schema test"."table test"',
- '"schema test".table test': '"schema test"."table test"',
- 'schema test."table test"': '"schema test"."table test"',
-
- # Embedded double quotes
- 'table "test"': '"table ""test"""',
- 'public."table ""test"""': '"public"."table ""test"""',
- 'public.table "test"': '"public"."table ""test"""',
- 'schema "test".table': '"schema ""test"""."table"',
- '"schema ""test""".table': '"schema ""test"""."table"',
- '"""wat"""."""test"""': '"""wat"""."""test"""',
- # Sigh, handle these as well:
- '"no end quote': '"""no end quote"',
- 'schema."table': '"schema"."""table"',
- '"schema.table': '"""schema"."table"',
- 'schema."table.something': '"schema"."""table"."something"',
-
- # Embedded dots
- '"schema.test"."table.test"': '"schema.test"."table.test"',
- '"schema.".table': '"schema."."table"',
- '"schema."."table"': '"schema."."table"',
- 'schema.".table"': '"schema".".table"',
- '"schema".".table"': '"schema".".table"',
- '"schema.".".table"': '"schema.".".table"',
- # These are valid but maybe not what the user intended
- '."table"': '".""table"""',
- 'table.': '"table."',
- }
-
- invalid = {
- ('test.too.many.dots', 'table'): 'PostgreSQL does not support table with more than 3 dots',
- ('"test.too".many.dots', 'database'): 'PostgreSQL does not support database with more than 1 dots',
- ('test.too."many.dots"', 'database'): 'PostgreSQL does not support database with more than 1 dots',
- ('"test"."too"."many"."dots"', 'database'): "PostgreSQL does not support database with more than 1 dots",
- ('"test"."too"."many"."dots"', 'schema'): "PostgreSQL does not support schema with more than 2 dots",
- ('"test"."too"."many"."dots"', 'table'): "PostgreSQL does not support table with more than 3 dots",
- ('"test"."too"."many"."dots"."for"."column"', 'column'): "PostgreSQL does not support column with more than 4 dots",
- ('"table "invalid" double quote"', 'table'): 'User escaped identifiers must escape extra quotes',
- ('"schema "invalid"""."table "invalid"', 'table'): 'User escaped identifiers must escape extra quotes',
- ('"schema."table"','table'): 'User escaped identifiers must escape extra quotes',
- ('"schema".', 'table'): 'Identifier name unspecified or unquoted trailing dot',
- }
-
- def check_valid_quotes(self, identifier, quoted_identifier):
- eq_(pg_quote_identifier(identifier, 'table'), quoted_identifier)
-
- def test_valid_quotes(self):
- for identifier in self.valid:
- yield self.check_valid_quotes, identifier, self.valid[identifier]
-
- def check_invalid_quotes(self, identifier, id_type, msg):
- assert_raises_regexp(SQLParseError, msg, pg_quote_identifier, *(identifier, id_type))
-
- def test_invalid_quotes(self):
- for test in self.invalid:
- yield self.check_invalid_quotes, test[0], test[1], self.invalid[test]
-
- def test_how_many_dots(self):
- eq_(pg_quote_identifier('role', 'role'), '"role"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support role with more than 1 dots", pg_quote_identifier, *('role.more', 'role'))
-
- eq_(pg_quote_identifier('db', 'database'), '"db"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support database with more than 1 dots", pg_quote_identifier, *('db.more', 'database'))
-
- eq_(pg_quote_identifier('db.schema', 'schema'), '"db"."schema"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support schema with more than 2 dots", pg_quote_identifier, *('db.schema.more', 'schema'))
-
- eq_(pg_quote_identifier('db.schema.table', 'table'), '"db"."schema"."table"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support table with more than 3 dots", pg_quote_identifier, *('db.schema.table.more', 'table'))
-
- eq_(pg_quote_identifier('db.schema.table.column', 'column'), '"db"."schema"."table"."column"')
- assert_raises_regexp(SQLParseError, "PostgreSQL does not support column with more than 4 dots", pg_quote_identifier, *('db.schema.table.column.more', 'column'))
diff --git a/v1/tests/TestModules.py b/v1/tests/TestModules.py
deleted file mode 100644
index aef2e83ed6..0000000000
--- a/v1/tests/TestModules.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import os
-import ast
-import unittest
-from ansible import utils
-
-
-class TestModules(unittest.TestCase):
-
- def list_all_modules(self):
- paths = utils.plugins.module_finder._get_paths()
- paths = [x for x in paths if os.path.isdir(x)]
- module_list = []
- for path in paths:
- for (dirpath, dirnames, filenames) in os.walk(path):
- for filename in filenames:
- (path, ext) = os.path.splitext(filename)
- if ext == ".py":
- module_list.append(os.path.join(dirpath, filename))
- return module_list
-
- def test_ast_parse(self):
- module_list = self.list_all_modules()
- ERRORS = []
- # attempt to parse each module with ast
- for m in module_list:
- try:
- ast.parse(''.join(open(m)))
- except Exception, e:
- ERRORS.append((m, e))
- assert len(ERRORS) == 0, "get_docstring errors: %s" % ERRORS
diff --git a/v1/tests/TestPlayVarsFiles.py b/v1/tests/TestPlayVarsFiles.py
deleted file mode 100644
index 9d42b73e8b..0000000000
--- a/v1/tests/TestPlayVarsFiles.py
+++ /dev/null
@@ -1,390 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import shutil
-from tempfile import mkstemp
-from tempfile import mkdtemp
-from ansible.playbook.play import Play
-import ansible
-
-import unittest
-from nose.plugins.skip import SkipTest
-
-
-class FakeCallBacks(object):
- def __init__(self):
- pass
- def on_vars_prompt(self):
- pass
- def on_import_for_host(self, host, filename):
- pass
-
-class FakeInventory(object):
- def __init__(self):
- self.hosts = {}
- def basedir(self):
- return "."
- def src(self):
- return "fakeinventory"
- def get_variables(self, host, vault_password=None):
- if host in self.hosts:
- return self.hosts[host]
- else:
- return {}
-
-class FakePlayBook(object):
- def __init__(self):
- self.extra_vars = {}
- self.remote_user = None
- self.remote_port = None
- self.sudo = None
- self.sudo_user = None
- self.su = None
- self.su_user = None
- self.become = None
- self.become_method = None
- self.become_user = None
- self.transport = None
- self.only_tags = None
- self.skip_tags = None
- self.force_handlers = None
- self.VARS_CACHE = {}
- self.SETUP_CACHE = {}
- self.inventory = FakeInventory()
- self.callbacks = FakeCallBacks()
-
- self.VARS_CACHE['localhost'] = {}
-
-
-class TestMe(unittest.TestCase):
-
- ########################################
- # BASIC FILE LOADING BEHAVIOR TESTS
- ########################################
-
- def test_play_constructor(self):
- # __init__(self, playbook, ds, basedir, vault_password=None)
- playbook = FakePlayBook()
- ds = { "hosts": "localhost"}
- basedir = "."
- play = Play(playbook, ds, basedir)
-
- def test_vars_file(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # create a play with a vars_file
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
-
- # make sure the variable was loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
-
- def test_vars_file_nonlist_error(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # create a play with a string for vars_files
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": temp_path}
- basedir = "."
- error_hit = False
- try:
- play = Play(playbook, ds, basedir)
- except:
- error_hit = True
- os.remove(temp_path)
-
- assert error_hit == True, "no error was thrown when vars_files was not a list"
-
-
- def test_multiple_vars_files(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- f = open(temp_path2, "wb")
- f.write("baz: bang\n")
- f.close()
-
-
- # create a play with two vars_files
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path, temp_path2]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
- os.remove(temp_path2)
-
- # make sure the variables were loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
- assert 'baz' in play.vars_file_vars, "vars_file2 was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['baz'] == 'bang', "baz was not set to bang in play.vars_file_vars"
-
- def test_vars_files_first_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # get a random file path
- fd, temp_path2 = mkstemp()
- # make sure this file doesn't exist
- os.remove(temp_path2)
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [[temp_path2, temp_path]]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
-
- # make sure the variable was loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
-
- def test_vars_files_multiple_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- f = open(temp_path2, "wb")
- f.write("baz: bang\n")
- f.close()
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [[temp_path, temp_path2]]}
- basedir = "."
- play = Play(playbook, ds, basedir)
- os.remove(temp_path)
- os.remove(temp_path2)
-
- # make sure the variables were loaded
- assert 'foo' in play.vars_file_vars, "vars_file was not loaded into play.vars_file_vars"
- assert play.vars_file_vars['foo'] == 'bar', "foo was not set to bar in play.vars_file_vars"
- assert 'baz' not in play.vars_file_vars, "vars_file2 was loaded after vars_file1 was loaded"
-
- def test_vars_files_assert_all_found(self):
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # make a second vars file
- fd, temp_path2 = mkstemp()
- # make sure it doesn't exist
- os.remove(temp_path2)
-
- # create a play
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": [temp_path, temp_path2]}
- basedir = "."
-
- error_hit = False
- error_msg = None
-
- try:
- play = Play(playbook, ds, basedir)
- except ansible.errors.AnsibleError, e:
- error_hit = True
- error_msg = e
-
- os.remove(temp_path)
- assert error_hit == True, "no error was thrown for missing vars_file"
-
-
- ########################################
- # VARIABLE PRECEDENCE TESTS
- ########################################
-
- # On the first run vars_files are loaded into play.vars_file_vars by host == None
- # * only files with vars from host==None will work here
- # On the secondary run(s), a host is given and the vars_files are loaded into VARS_CACHE
- # * this only occurs if host is not None, filename2 has vars in the name, and filename3 does not
-
- # filename -- the original string
- # filename2 -- filename templated with play vars
- # filename3 -- filename2 template with inject (hostvars + setup_cache + vars_cache)
- # filename4 -- path_dwim(filename3)
-
- def test_vars_files_for_host(self):
-
- # host != None
- # vars in filename2
- # no vars in filename3
-
- # make a vars file
- fd, temp_path = mkstemp()
- f = open(temp_path, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars_files": ["{{ temp_path }}"]}
- basedir = "."
- playbook.VARS_CACHE['localhost']['temp_path'] = temp_path
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # the second run is started by calling update_vars_files
- play.update_vars_files(['localhost'])
- os.remove(temp_path)
-
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], "vars_file vars were not loaded into vars_cache"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', "foo does not equal bar"
-
-
- ########################################
- # COMPLEX FILENAME TEMPLATING TESTS
- ########################################
-
- def test_vars_files_two_vars_in_name(self):
-
- # self.vars_file_vars = ds['vars']
- # self.vars_file_vars += _get_vars() ... aka extra_vars
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file),
- "temp_file": os.path.basename(temp_file) },
- "vars_files": ["{{ temp_dir + '/' + temp_file }}"]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' in play.vars_file_vars, "double var templated vars_files filename not loaded"
-
- def test_vars_files_two_vars_different_scope(self):
-
- #
- # Use a play var and an inventory var to create the filename
- #
-
- # self.playbook.inventory.get_variables(host)
- # {'group_names': ['ungrouped'], 'inventory_hostname': 'localhost',
- # 'ansible_ssh_user': 'root', 'inventory_hostname_short': 'localhost'}
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)}
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file)},
- "vars_files": ["{{ temp_dir + '/' + inventory_hostname }}"]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # do the host run
- play.update_vars_files(['localhost'])
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' not in play.vars_file_vars, \
- "mixed scope vars_file loaded into play vars"
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], \
- "differently scoped templated vars_files filename not loaded"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \
- "foo is not bar"
-
- def test_vars_files_two_vars_different_scope_first_found(self):
-
- #
- # Use a play var and an inventory var to create the filename
- #
-
- # make a temp dir
- temp_dir = mkdtemp()
-
- # make a temp file
- fd, temp_file = mkstemp(dir=temp_dir)
- f = open(temp_file, "wb")
- f.write("foo: bar\n")
- f.close()
-
- # build play attributes
- playbook = FakePlayBook()
- playbook.inventory.hosts['localhost'] = {'inventory_hostname': os.path.basename(temp_file)}
- ds = { "hosts": "localhost",
- "vars": { "temp_dir": os.path.dirname(temp_file)},
- "vars_files": [["{{ temp_dir + '/' + inventory_hostname }}"]]}
- basedir = "."
-
- # create play and do first run
- play = Play(playbook, ds, basedir)
-
- # do the host run
- play.update_vars_files(['localhost'])
-
- # cleanup
- shutil.rmtree(temp_dir)
-
- assert 'foo' not in play.vars_file_vars, \
- "mixed scope vars_file loaded into play vars"
- assert 'foo' in play.playbook.VARS_CACHE['localhost'], \
- "differently scoped templated vars_files filename not loaded"
- assert play.playbook.VARS_CACHE['localhost']['foo'] == 'bar', \
- "foo is not bar"
-
-
diff --git a/v1/tests/TestSynchronize.py b/v1/tests/TestSynchronize.py
deleted file mode 100644
index cf28ea5d80..0000000000
--- a/v1/tests/TestSynchronize.py
+++ /dev/null
@@ -1,176 +0,0 @@
-
-import unittest
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from nose.plugins.skip import SkipTest
-
-from ansible.runner.action_plugins.synchronize import ActionModule as Synchronize
-
-class FakeRunner(object):
- def __init__(self):
- self.connection = None
- self.transport = None
- self.basedir = None
- self.sudo = None
- self.remote_user = None
- self.private_key_file = None
- self.check = False
- self.become = False
- self.become_method = 'sudo'
- self.become_user = False
-
- def _execute_module(self, conn, tmp, module_name, args,
- async_jid=None, async_module=None, async_limit=None, inject=None,
- persist_files=False, complex_args=None, delete_remote_tmp=True):
- self.executed_conn = conn
- self.executed_tmp = tmp
- self.executed_module_name = module_name
- self.executed_args = args
- self.executed_async_jid = async_jid
- self.executed_async_module = async_module
- self.executed_async_limit = async_limit
- self.executed_inject = inject
- self.executed_persist_files = persist_files
- self.executed_complex_args = complex_args
- self.executed_delete_remote_tmp = delete_remote_tmp
-
- def noop_on_check(self, inject):
- return self.check
-
-class FakeConn(object):
- def __init__(self):
- self.host = None
- self.delegate = None
-
-class TestSynchronize(unittest.TestCase):
-
-
- def test_synchronize_action_basic(self):
-
- """ verify the synchronize action plugin sets
- the delegate to 127.0.0.1 and remote path to user@host:/path """
-
- runner = FakeRunner()
- runner.remote_user = "root"
- runner.transport = "ssh"
- conn = FakeConn()
- inject = {
- 'inventory_hostname': "el6.lab.net",
- 'inventory_hostname_short': "el6",
- 'ansible_connection': None,
- 'ansible_ssh_user': 'root',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_complex_args == {"dest":"root@el6.lab.net:/tmp/bar", "src":"/tmp/foo"}, "wrong args used"
- assert runner.sudo == None, "sudo was not reset to None"
-
- def test_synchronize_action_sudo(self):
-
- """ verify the synchronize action plugin unsets and then sets sudo """
-
- runner = FakeRunner()
- runner.become = True
- runner.remote_user = "root"
- runner.transport = "ssh"
- conn = FakeConn()
- inject = {
- 'inventory_hostname': "el6.lab.net",
- 'inventory_hostname_short': "el6",
- 'ansible_connection': None,
- 'ansible_ssh_user': 'root',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_complex_args == {'dest':'root@el6.lab.net:/tmp/bar',
- 'src':'/tmp/foo',
- 'rsync_path':'"sudo rsync"'}, "wrong args used"
- assert runner.become == True, "sudo was not reset to True"
-
-
- def test_synchronize_action_local(self):
-
- """ verify the synchronize action plugin sets
- the delegate to 127.0.0.1 and does not alter the dest """
-
- runner = FakeRunner()
- runner.remote_user = "jtanner"
- runner.transport = "paramiko"
- conn = FakeConn()
- conn.host = "127.0.0.1"
- conn.delegate = "thishost"
- inject = {
- 'inventory_hostname': "thishost",
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_connection': 'local',
- 'delegate_to': None,
- 'playbook_dir': '.',
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.transport == "paramiko", "runner transport was changed"
- assert runner.remote_user == "jtanner", "runner remote_user was changed"
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert "dest_port" not in runner.executed_complex_args, "dest_port should not have been set"
- assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
- assert runner.executed_complex_args.get("dest") == "/tmp/bar", "dest was set incorrectly"
-
-
- def test_synchronize_action_vagrant(self):
-
- """ Verify the action plugin accommodates the common
- scenarios for vagrant boxes. """
-
- runner = FakeRunner()
- runner.remote_user = "jtanner"
- runner.transport = "ssh"
- conn = FakeConn()
- conn.host = "127.0.0.1"
- conn.delegate = "thishost"
- inject = {
- 'inventory_hostname': "thishost",
- 'ansible_ssh_user': 'vagrant',
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_ssh_port': '2222',
- 'delegate_to': None,
- 'playbook_dir': '.',
- 'hostvars': {
- 'thishost': {
- 'inventory_hostname': 'thishost',
- 'ansible_ssh_port': '2222',
- 'ansible_ssh_host': '127.0.0.1',
- 'ansible_ssh_user': 'vagrant'
- }
- }
- }
-
- x = Synchronize(runner)
- x.setup("synchronize", inject)
- x.run(conn, "/tmp", "synchronize", "src=/tmp/foo dest=/tmp/bar", inject)
-
- assert runner.transport == "ssh", "runner transport was changed"
- assert runner.remote_user == "jtanner", "runner remote_user was changed"
- assert runner.executed_inject['delegate_to'] == "127.0.0.1", "was not delegated to 127.0.0.1"
- assert runner.executed_inject['ansible_ssh_user'] == "vagrant", "runner user was changed"
- assert runner.executed_complex_args.get("dest_port") == "2222", "remote port was not set to 2222"
- assert runner.executed_complex_args.get("src") == "/tmp/foo", "source was set incorrectly"
- assert runner.executed_complex_args.get("dest") == "vagrant@127.0.0.1:/tmp/bar", "dest was set incorrectly"
-
diff --git a/v1/tests/TestUtils.py b/v1/tests/TestUtils.py
deleted file mode 100644
index c0ca9ba538..0000000000
--- a/v1/tests/TestUtils.py
+++ /dev/null
@@ -1,945 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import traceback
-import unittest
-import os
-import os.path
-import re
-import tempfile
-import yaml
-import passlib.hash
-import string
-import StringIO
-import copy
-import tempfile
-import shutil
-
-from nose.plugins.skip import SkipTest
-from mock import patch
-
-import ansible.utils
-import ansible.errors
-import ansible.constants as C
-import ansible.utils.template as template2
-from ansible.module_utils.splitter import split_args
-
-from ansible import __version__
-
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-class TestUtils(unittest.TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_before_comment(self):
- ''' see if we can detect the part of a string before a comment. Used by INI parser in inventory '''
-
- input = "before # comment"
- expected = "before "
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = "before \# not a comment"
- expected = "before # not a comment"
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = ""
- expected = ""
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- input = "#"
- expected = ""
- actual = ansible.utils.before_comment(input)
- self.assertEqual(expected, actual)
-
- #####################################
- ### check_conditional tests
-
- def test_check_conditional_jinja2_literals(self):
- # see http://jinja.pocoo.org/docs/templates/#literals
-
- # none
- self.assertEqual(ansible.utils.check_conditional(
- None, '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '', '/', {}), True)
-
- # list
- self.assertEqual(ansible.utils.check_conditional(
- ['true'], '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- ['false'], '/', {}), False)
-
- # non basestring or list
- self.assertEqual(ansible.utils.check_conditional(
- {}, '/', {}), {})
-
- # boolean
- self.assertEqual(ansible.utils.check_conditional(
- 'true', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'false', '/', {}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'True', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'False', '/', {}), False)
-
- # integer
- self.assertEqual(ansible.utils.check_conditional(
- '1', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '0', '/', {}), False)
-
- # string, beware, a string is truthy unless empty
- self.assertEqual(ansible.utils.check_conditional(
- '"yes"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '"no"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- '""', '/', {}), False)
-
-
- def test_check_conditional_jinja2_variable_literals(self):
- # see http://jinja.pocoo.org/docs/templates/#literals
-
- # boolean
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'True'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'true'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'False'}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'false'}), False)
-
- # integer
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '1'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 1}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '0'}), False)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 0}), False)
-
- # string, beware, a string is truthy unless empty
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '"yes"'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '"no"'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '""'}), False)
-
- # Python boolean in Jinja2 expression
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': True}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': False}), False)
-
-
- def test_check_conditional_jinja2_expression(self):
- self.assertEqual(ansible.utils.check_conditional(
- '1 == 1', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'bar == 42', '/', {'bar': 42}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'bar != 42', '/', {'bar': 42}), False)
-
-
- def test_check_conditional_jinja2_expression_in_variable(self):
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': '1 == 1'}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'bar == 42', 'bar': 42}), True)
- self.assertEqual(ansible.utils.check_conditional(
- 'var', '/', {'var': 'bar != 42', 'bar': 42}), False)
-
- def test_check_conditional_jinja2_unicode(self):
- self.assertEqual(ansible.utils.check_conditional(
- u'"\u00df"', '/', {}), True)
- self.assertEqual(ansible.utils.check_conditional(
- u'var == "\u00df"', '/', {'var': u'\u00df'}), True)
-
-
- #####################################
- ### key-value parsing
-
- def test_parse_kv_basic(self):
- self.assertEqual(ansible.utils.parse_kv('a=simple b="with space" c="this=that"'),
- {'a': 'simple', 'b': 'with space', 'c': 'this=that'})
- self.assertEqual(ansible.utils.parse_kv('msg=АБВГД'),
- {'msg': 'АБВГД'})
-
-
- def test_jsonify(self):
- self.assertEqual(ansible.utils.jsonify(None), '{}')
- self.assertEqual(ansible.utils.jsonify(dict(foo='bar', baz=['qux'])), '{"baz": ["qux"], "foo": "bar"}')
- expected = u'{"baz":["qux"],"foo":"bar"}'
- self.assertEqual("".join(ansible.utils.jsonify(dict(foo='bar', baz=['qux']), format=True).split()), expected)
-
- def test_is_failed(self):
- self.assertEqual(ansible.utils.is_failed(dict(rc=0)), False)
- self.assertEqual(ansible.utils.is_failed(dict(rc=1)), True)
- self.assertEqual(ansible.utils.is_failed(dict()), False)
- self.assertEqual(ansible.utils.is_failed(dict(failed=False)), False)
- self.assertEqual(ansible.utils.is_failed(dict(failed=True)), True)
- self.assertEqual(ansible.utils.is_failed(dict(failed='True')), True)
- self.assertEqual(ansible.utils.is_failed(dict(failed='true')), True)
-
- def test_is_changed(self):
- self.assertEqual(ansible.utils.is_changed(dict()), False)
- self.assertEqual(ansible.utils.is_changed(dict(changed=False)), False)
- self.assertEqual(ansible.utils.is_changed(dict(changed=True)), True)
- self.assertEqual(ansible.utils.is_changed(dict(changed='True')), True)
- self.assertEqual(ansible.utils.is_changed(dict(changed='true')), True)
-
- def test_path_dwim(self):
- self.assertEqual(ansible.utils.path_dwim(None, __file__),
- __file__)
- self.assertEqual(ansible.utils.path_dwim(None, '~'),
- os.path.expanduser('~'))
- self.assertEqual(ansible.utils.path_dwim(None, 'TestUtils.py'),
- __file__.rstrip('c'))
-
- def test_path_dwim_relative(self):
- self.assertEqual(ansible.utils.path_dwim_relative(__file__, 'units', 'TestUtils.py',
- os.path.dirname(os.path.dirname(__file__))),
- __file__.rstrip('c'))
-
- def test_json_loads(self):
- self.assertEqual(ansible.utils.json_loads('{"foo": "bar"}'), dict(foo='bar'))
-
- def test_parse_json(self):
- # leading junk
- self.assertEqual(ansible.utils.parse_json('ansible\n{"foo": "bar"}'), dict(foo="bar"))
-
- # No closing quotation
- try:
- rc = ansible.utils.parse_json('foo=bar "')
- print rc
- except ValueError:
- pass
- else:
- traceback.print_exc()
- raise AssertionError('Incorrect exception, expected ValueError')
-
- # Failed to parse
- try:
- ansible.utils.parse_json('{')
- except ValueError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected ValueError')
-
- def test_parse_yaml(self):
- #json
- self.assertEqual(ansible.utils.parse_yaml('{"foo": "bar"}'), dict(foo='bar'))
-
- # broken json
- try:
- ansible.utils.parse_yaml('{')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError
-
- # broken json with path_hint
- try:
- ansible.utils.parse_yaml('{', path_hint='foo')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError
-
- # yaml with front-matter
- self.assertEqual(ansible.utils.parse_yaml("---\nfoo: bar"), dict(foo='bar'))
- # yaml no front-matter
- self.assertEqual(ansible.utils.parse_yaml('foo: bar'), dict(foo='bar'))
- # yaml indented first line (See #6348)
- self.assertEqual(ansible.utils.parse_yaml(' - foo: bar\n baz: qux'), [dict(foo='bar', baz='qux')])
-
- def test_process_common_errors(self):
- # no quote
- self.assertTrue('YAML thought it' in ansible.utils.process_common_errors('', 'foo: {{bar}}', 6))
-
- # extra colon
- self.assertTrue('an extra unquoted colon' in ansible.utils.process_common_errors('', 'foo: bar:', 8))
-
- # match
- self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', 'foo: "{{bar}}"baz', 6))
- self.assertTrue('same kind of quote' in ansible.utils.process_common_errors('', "foo: '{{bar}}'baz", 6))
-
- # unbalanced
- self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', 'foo: "bad" "wolf"', 6))
- self.assertTrue('We could be wrong' in ansible.utils.process_common_errors('', "foo: 'bad' 'wolf'", 6))
-
-
- def test_process_yaml_error(self):
- data = 'foo: bar\n baz: qux'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = 'foo: bar\n baz: {{qux}}'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = '\xFF'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, __file__)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Check over' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- data = '\xFF'
- try:
- ansible.utils.parse_yaml(data)
- except yaml.YAMLError, exc:
- try:
- ansible.utils.process_yaml_error(exc, data, None)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Could not parse YAML.' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- def test_parse_yaml_from_file(self):
- test = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'common_vars.yml')
- encrypted = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'encrypted.yml')
- broken = os.path.join(os.path.dirname(__file__), 'inventory_test_data',
- 'broken.yml')
-
- try:
- ansible.utils.parse_yaml_from_file(os.path.dirname(__file__))
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected AnsibleError')
-
- self.assertEqual(ansible.utils.parse_yaml_from_file(test), yaml.safe_load(open(test)))
-
- self.assertEqual(ansible.utils.parse_yaml_from_file(encrypted, 'ansible'), dict(foo='bar'))
-
- try:
- ansible.utils.parse_yaml_from_file(broken)
- except ansible.errors.AnsibleYAMLValidationFailed, e:
- self.assertTrue('Syntax Error while loading' in str(e))
- else:
- raise AssertionError('Incorrect exception, expected AnsibleYAMLValidationFailed')
-
- def test_merge_hash(self):
- self.assertEqual(ansible.utils.merge_hash(dict(foo='bar', baz='qux'), dict(foo='baz')),
- dict(foo='baz', baz='qux'))
- self.assertEqual(ansible.utils.merge_hash(dict(foo=dict(bar='baz')), dict(foo=dict(bar='qux'))),
- dict(foo=dict(bar='qux')))
-
- def test_md5s(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPs enabled systems')
- self.assertEqual(ansible.utils.md5s('ansible'), '640c8a5376aa12fa15cf02130ce239a6')
- # Need a test that causes UnicodeEncodeError See 4221
-
- def test_md5(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPs enabled systems')
- self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cfg')),
- 'fb7b5b90ea63f04bde33e804b6fad42c')
- self.assertEqual(ansible.utils.md5(os.path.join(os.path.dirname(__file__), 'ansible.cf')),
- None)
-
- def test_checksum_s(self):
- self.assertEqual(ansible.utils.checksum_s('ansible'), 'bef45157a43c9e5f469d188810814a4a8ab9f2ed')
- # Need a test that causes UnicodeEncodeError See 4221
-
- def test_checksum(self):
- self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cfg')),
- '658b67c8ac7595adde7048425ff1f9aba270721a')
- self.assertEqual(ansible.utils.checksum(os.path.join(os.path.dirname(__file__), 'ansible.cf')),
- None)
-
- def test_default(self):
- self.assertEqual(ansible.utils.default(None, lambda: {}), {})
- self.assertEqual(ansible.utils.default(dict(foo='bar'), lambda: {}), dict(foo='bar'))
-
- def test__gitinfo(self):
- # this fails if not run from git clone
- # self.assertEqual('last updated' in ansible.utils._gitinfo())
- # missing test for git submodule
- # missing test outside of git clone
- pass
-
- def test_version(self):
- version = ansible.utils.version('ansible')
- self.assertTrue(version.startswith('ansible %s' % __version__))
- # this fails if not run from git clone
- # self.assertEqual('last updated' in version)
-
- def test_getch(self):
- # figure out how to test this
- pass
-
- def test_sanitize_output(self):
- self.assertEqual(ansible.utils.sanitize_output('password=foo'), 'password=VALUE_HIDDEN')
- self.assertEqual(ansible.utils.sanitize_output('foo=user:pass@foo/whatever'),
- 'foo=user:********@foo/whatever')
- self.assertEqual(ansible.utils.sanitize_output('foo=http://username:pass@wherever/foo'),
- 'foo=http://username:********@wherever/foo')
- self.assertEqual(ansible.utils.sanitize_output('foo=http://wherever/foo'),
- 'foo=http://wherever/foo')
-
- def test_increment_debug(self):
- ansible.utils.VERBOSITY = 0
- ansible.utils.increment_debug(None, None, None, None)
- self.assertEqual(ansible.utils.VERBOSITY, 1)
-
- def test_base_parser(self):
- output = ansible.utils.base_parser(output_opts=True)
- self.assertTrue(output.has_option('--one-line') and output.has_option('--tree'))
-
- runas = ansible.utils.base_parser(runas_opts=True)
- for opt in ['--sudo', '--sudo-user', '--user', '--su', '--su-user']:
- self.assertTrue(runas.has_option(opt))
-
- async = ansible.utils.base_parser(async_opts=True)
- self.assertTrue(async.has_option('--poll') and async.has_option('--background'))
-
- connect = ansible.utils.base_parser(connect_opts=True)
- self.assertTrue(connect.has_option('--connection'))
-
- subset = ansible.utils.base_parser(subset_opts=True)
- self.assertTrue(subset.has_option('--limit'))
-
- check = ansible.utils.base_parser(check_opts=True)
- self.assertTrue(check.has_option('--check'))
-
- diff = ansible.utils.base_parser(diff_opts=True)
- self.assertTrue(diff.has_option('--diff'))
-
- def test_do_encrypt(self):
- salt_chars = string.ascii_letters + string.digits + './'
- salt = ansible.utils.random_password(length=8, chars=salt_chars)
- hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt', salt=salt)
- self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash))
-
- hash = ansible.utils.do_encrypt('ansible', 'sha256_crypt')
- self.assertTrue(passlib.hash.sha256_crypt.verify('ansible', hash))
-
- try:
- ansible.utils.do_encrypt('ansible', 'ansible')
- except ansible.errors.AnsibleError:
- pass
- else:
- raise AssertionError('Incorrect exception, expected AnsibleError')
-
- def test_do_encrypt_md5(self):
- if self._is_fips():
- raise SkipTest('MD5 unavailable on FIPS systems')
- hash = ansible.utils.do_encrypt('ansible', 'md5_crypt', salt_size=4)
- self.assertTrue(passlib.hash.md5_crypt.verify('ansible', hash))
-
- def test_last_non_blank_line(self):
- self.assertEqual(ansible.utils.last_non_blank_line('a\n\nb\n\nc'), 'c')
- self.assertEqual(ansible.utils.last_non_blank_line(''), '')
-
- def test_filter_leading_non_json_lines(self):
- self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n{"foo": "bar"}'),
- '{"foo": "bar"}\n')
- self.assertEqual(ansible.utils.filter_leading_non_json_lines('a\nb\nansible!\n["foo", "bar"]'),
- '["foo", "bar"]\n')
-
- def test_boolean(self):
- self.assertEqual(ansible.utils.boolean("true"), True)
- self.assertEqual(ansible.utils.boolean("True"), True)
- self.assertEqual(ansible.utils.boolean("TRUE"), True)
- self.assertEqual(ansible.utils.boolean("t"), True)
- self.assertEqual(ansible.utils.boolean("T"), True)
- self.assertEqual(ansible.utils.boolean("Y"), True)
- self.assertEqual(ansible.utils.boolean("y"), True)
- self.assertEqual(ansible.utils.boolean("1"), True)
- self.assertEqual(ansible.utils.boolean(1), True)
- self.assertEqual(ansible.utils.boolean("false"), False)
- self.assertEqual(ansible.utils.boolean("False"), False)
- self.assertEqual(ansible.utils.boolean("0"), False)
- self.assertEqual(ansible.utils.boolean(0), False)
- self.assertEqual(ansible.utils.boolean("foo"), False)
-
- def test_make_sudo_cmd(self):
- cmd = ansible.utils.make_sudo_cmd(C.DEFAULT_SUDO_EXE, 'root', '/bin/sh', '/bin/ls')
- self.assertTrue(isinstance(cmd, tuple))
- self.assertEqual(len(cmd), 3)
- self.assertTrue('-u root' in cmd[0])
- self.assertTrue('-p "[sudo via ansible, key=' in cmd[0] and cmd[1].startswith('[sudo via ansible, key'))
- self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
- self.assertTrue('sudo -k' in cmd[0])
-
- def test_make_su_cmd(self):
- cmd = ansible.utils.make_su_cmd('root', '/bin/sh', '/bin/ls')
- self.assertTrue(isinstance(cmd, tuple))
- self.assertEqual(len(cmd), 3)
- self.assertTrue('root -c "/bin/sh' in cmd[0] or ' root -c /bin/sh' in cmd[0])
- self.assertTrue('echo BECOME-SUCCESS-' in cmd[0] and cmd[2].startswith('BECOME-SUCCESS-'))
-
- def test_to_unicode(self):
- uni = ansible.utils.unicode.to_unicode(u'ansible')
- self.assertTrue(isinstance(uni, unicode))
- self.assertEqual(uni, u'ansible')
-
- none = ansible.utils.unicode.to_unicode(None, nonstring='passthru')
- self.assertTrue(isinstance(none, type(None)))
- self.assertTrue(none is None)
-
- utf8 = ansible.utils.unicode.to_unicode('ansible')
- self.assertTrue(isinstance(utf8, unicode))
- self.assertEqual(utf8, u'ansible')
-
- def test_is_list_of_strings(self):
- self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', u'baz']), True)
- self.assertEqual(ansible.utils.is_list_of_strings(['foo', 'bar', True]), False)
- self.assertEqual(ansible.utils.is_list_of_strings(['one', 2, 'three']), False)
-
- def test_contains_vars(self):
- self.assertTrue(ansible.utils.contains_vars('{{foo}}'))
- self.assertTrue(ansible.utils.contains_vars('$foo'))
- self.assertFalse(ansible.utils.contains_vars('foo'))
-
- def test_safe_eval(self):
- # Not basestring
- self.assertEqual(ansible.utils.safe_eval(len), len)
- self.assertEqual(ansible.utils.safe_eval(1), 1)
- self.assertEqual(ansible.utils.safe_eval(len, include_exceptions=True), (len, None))
- self.assertEqual(ansible.utils.safe_eval(1, include_exceptions=True), (1, None))
-
- # module
- self.assertEqual(ansible.utils.safe_eval('foo.bar('), 'foo.bar(')
- self.assertEqual(ansible.utils.safe_eval('foo.bar(', include_exceptions=True), ('foo.bar(', None))
-
- # import
- self.assertEqual(ansible.utils.safe_eval('import foo'), 'import foo')
- self.assertEqual(ansible.utils.safe_eval('import foo', include_exceptions=True), ('import foo', None))
-
- # valid simple eval
- self.assertEqual(ansible.utils.safe_eval('True'), True)
- self.assertEqual(ansible.utils.safe_eval('True', include_exceptions=True), (True, None))
-
- # valid eval with lookup
- self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2)), 3)
- self.assertEqual(ansible.utils.safe_eval('foo + bar', dict(foo=1, bar=2), include_exceptions=True), (3, None))
-
- # invalid eval
- self.assertEqual(ansible.utils.safe_eval('foo'), 'foo')
- nameerror = ansible.utils.safe_eval('foo', include_exceptions=True)
- self.assertTrue(isinstance(nameerror, tuple))
- self.assertEqual(nameerror[0], 'foo')
- self.assertTrue(isinstance(nameerror[1], NameError))
-
- def test_listify_lookup_plugin_terms(self):
- basedir = os.path.dirname(__file__)
- # Straight lookups
- #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=[])), [])
- #self.assertEqual(ansible.utils.listify_lookup_plugin_terms('things', basedir, dict(things=['one', 'two'])), ['one', 'two'])
-
- def test_deprecated(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.deprecated('Ack!', '0.0')
- out = sys.stderr.getvalue()
- self.assertTrue('0.0' in out)
- self.assertTrue('[DEPRECATION WARNING]' in out)
-
- sys.stderr = StringIO.StringIO()
- ansible.utils.deprecated('Ack!', None)
- out = sys.stderr.getvalue()
- self.assertTrue('0.0' not in out)
- self.assertTrue('[DEPRECATION WARNING]' in out)
-
- sys.stderr = StringIO.StringIO()
- warnings = C.DEPRECATION_WARNINGS
- C.DEPRECATION_WARNINGS = False
- ansible.utils.deprecated('Ack!', None)
- out = sys.stderr.getvalue()
- self.assertTrue(not out)
- C.DEPRECATION_WARNINGS = warnings
-
- sys.stderr = sys_stderr
-
- try:
- ansible.utils.deprecated('Ack!', '0.0', True)
- except ansible.errors.AnsibleError, e:
- self.assertTrue('0.0' not in str(e))
- self.assertTrue('[DEPRECATED]' in str(e))
- else:
- raise AssertionError("Incorrect exception, expected AnsibleError")
-
- def test_warning(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.warning('ANSIBLE')
- out = sys.stderr.getvalue()
- sys.stderr = sys_stderr
- self.assertTrue('[WARNING]: ANSIBLE' in out)
-
- def test_combine_vars(self):
- one = {'foo': {'bar': True}, 'baz': {'one': 'qux'}}
- two = {'baz': {'two': 'qux'}}
- replace = {'baz': {'two': 'qux'}, 'foo': {'bar': True}}
- merge = {'baz': {'two': 'qux', 'one': 'qux'}, 'foo': {'bar': True}}
-
- C.DEFAULT_HASH_BEHAVIOUR = 'replace'
- self.assertEqual(ansible.utils.combine_vars(one, two), replace)
-
- C.DEFAULT_HASH_BEHAVIOUR = 'merge'
- self.assertEqual(ansible.utils.combine_vars(one, two), merge)
-
- def test_err(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- ansible.utils.err('ANSIBLE')
- out = sys.stderr.getvalue()
- sys.stderr = sys_stderr
- self.assertEqual(out, 'ANSIBLE\n')
-
- def test_exit(self):
- sys_stderr = sys.stderr
- sys.stderr = StringIO.StringIO()
- try:
- ansible.utils.exit('ansible')
- except SystemExit, e:
- self.assertEqual(e.code, 1)
- self.assertEqual(sys.stderr.getvalue(), 'ansible\n')
- else:
- raise AssertionError('Incorrect exception, expected SystemExit')
- finally:
- sys.stderr = sys_stderr
-
- def test_unfrackpath(self):
- os.environ['TEST_ROOT'] = os.path.dirname(os.path.dirname(__file__))
- self.assertEqual(ansible.utils.unfrackpath('$TEST_ROOT/units/../units/TestUtils.py'), __file__.rstrip('c'))
-
- def test_is_executable(self):
- self.assertEqual(ansible.utils.is_executable(__file__), 0)
-
- bin_ansible = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
- 'bin', 'ansible')
- self.assertNotEqual(ansible.utils.is_executable(bin_ansible), 0)
-
- def test_get_diff(self):
- standard = dict(
- before_header='foo',
- after_header='bar',
- before='fooo',
- after='foo'
- )
-
- standard_expected = """--- before: foo
-+++ after: bar
-@@ -1 +1 @@
--fooo+foo"""
-
- # workaround py26 and py27 difflib differences
- standard_expected = """-fooo+foo"""
- diff = ansible.utils.get_diff(standard)
- diff = diff.split('\n')
- del diff[0]
- del diff[0]
- del diff[0]
- diff = '\n'.join(diff)
- self.assertEqual(diff, unicode(standard_expected))
-
- def test_split_args(self):
- # split_args is a smarter shlex.split for the needs of the way ansible uses it
-
- def _split_info(input, desired, actual):
- print "SENT: ", input
- print "WANT: ", desired
- print "GOT: ", actual
-
- def _test_combo(input, desired):
- actual = split_args(input)
- _split_info(input, desired, actual)
- assert actual == desired
-
- # trivial splitting
- _test_combo('a b=c d=f', ['a', 'b=c', 'd=f' ])
-
- # mixed quotes
- _test_combo('a b=\'c\' d="e" f=\'g\'', ['a', "b='c'", 'd="e"', "f='g'" ])
-
- # with spaces
- # FIXME: this fails, commenting out only for now
- # _test_combo('a "\'one two three\'"', ['a', "'one two three'" ])
-
- # TODO: ...
- # jinja2 preservation
- _test_combo('a {{ y }} z', ['a', '{{ y }}', 'z' ])
-
- # jinja2 preservation with spaces and filters and other hard things
- _test_combo(
- 'a {{ x | filter(\'moo\', \'param\') }} z {{ chicken }} "waffles"',
- ['a', "{{ x | filter('moo', 'param') }}", 'z', '{{ chicken }}', '"waffles"']
- )
-
- # invalid quote detection
- self.assertRaises(Exception, split_args, 'hey I started a quote"')
- self.assertRaises(Exception, split_args, 'hey I started a\' quote')
-
- # jinja2 loop blocks with lots of complexity
- _test_combo(
- # in memory of neighbors cat
- # we preserve line breaks unless a line continuation character precedes them
- 'a {% if x %} y {%else %} {{meow}} {% endif %} "cookie\nchip" \\\ndone\nand done',
- ['a', '{% if x %}', 'y', '{%else %}', '{{meow}}', '{% endif %}', '"cookie\nchip"', 'done\n', 'and', 'done']
- )
-
- # test space preservation within quotes
- _test_combo(
- 'content="1 2 3 4 " foo=bar',
- ['content="1 2 3 4 "', 'foo=bar']
- )
-
- # invalid jinja2 nesting detection
- # invalid quote nesting detection
-
- def test_clean_data(self):
- # clean data removes jinja2 tags from data
- self.assertEqual(
- ansible.utils._clean_data('this is a normal string', from_remote=True),
- 'this is a normal string'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has a {{variable}}', from_remote=True),
- 'this string has a {#variable#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string {{has}} two {{variables}} in it', from_remote=True),
- 'this string {#has#} two {#variables#} in it'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has a {{variable with a\nnewline}}', from_remote=True),
- 'this string has a {#variable with a\nnewline#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string is from inventory {{variable}}', from_inventory=True),
- 'this string is from inventory {{variable}}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string is from inventory too but uses lookup {{lookup("foo","bar")}}', from_inventory=True),
- 'this string is from inventory too but uses lookup {#lookup("foo","bar")#}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string has JSON in it: {"foo":{"bar":{"baz":"oops"}}}', from_remote=True),
- 'this string has JSON in it: {"foo":{"bar":{"baz":"oops"}}}'
- )
- self.assertEqual(
- ansible.utils._clean_data('this string contains unicode: ¢ £ ¤ ¥', from_remote=True),
- 'this string contains unicode: ¢ £ ¤ ¥'
- )
-
-
- def test_censor_unlogged_data(self):
- ''' used by the no_log attribute '''
- input = dict(
- password='sekrit',
- rc=12,
- failed=True,
- changed=False,
- skipped=True,
- msg='moo',
- )
- data = ansible.utils.censor_unlogged_data(input)
- assert 'password' not in data
- assert 'rc' in data
- assert 'failed' in data
- assert 'changed' in data
- assert 'skipped' in data
- assert 'msg' not in data
- assert data['censored'] == 'results hidden due to no_log parameter'
-
- def test_repo_url_to_role_name(self):
- tests = [("http://git.example.com/repos/repo.git", "repo"),
- ("ssh://git@git.example.com:repos/role-name", "role-name"),
- ("ssh://git@git.example.com:repos/role-name,v0.1", "role-name"),
- ("directory/role/is/installed/in", "directory/role/is/installed/in")]
- for (url, result) in tests:
- self.assertEqual(ansible.utils.repo_url_to_role_name(url), result)
-
- def test_role_spec_parse(self):
- tests = [
- (
- "git+http://git.example.com/repos/repo.git,v1.0",
- {
- 'scm': 'git',
- 'src': 'http://git.example.com/repos/repo.git',
- 'version': 'v1.0',
- 'name': 'repo'
- }
- ),
- (
- "http://repo.example.com/download/tarfile.tar.gz",
- {
- 'scm': None,
- 'src': 'http://repo.example.com/download/tarfile.tar.gz',
- 'version': '',
- 'name': 'tarfile'
- }
- ),
- (
- "http://repo.example.com/download/tarfile.tar.gz,,nicename",
- {
- 'scm': None,
- 'src': 'http://repo.example.com/download/tarfile.tar.gz',
- 'version': '',
- 'name': 'nicename'
- }
- ),
- (
- "git+http://git.example.com/repos/repo.git,v1.0,awesome",
- {
- 'scm': 'git',
- 'src': 'http://git.example.com/repos/repo.git',
- 'version': 'v1.0',
- 'name': 'awesome'
- }
- ),
- (
- # test that http://github URLs are assumed git+http:// unless they end in .tar.gz
- "http://github.com/ansible/fakerole/fake",
- {
- 'scm' : 'git',
- 'src' : 'http://github.com/ansible/fakerole/fake',
- 'version' : 'master',
- 'name' : 'fake'
- }
- ),
- (
- # test that http://github URLs are assumed git+http:// unless they end in .tar.gz
- "http://github.com/ansible/fakerole/fake/archive/master.tar.gz",
- {
- 'scm' : None,
- 'src' : 'http://github.com/ansible/fakerole/fake/archive/master.tar.gz',
- 'version' : '',
- 'name' : 'master'
- }
- )
- ]
- for (spec, result) in tests:
- self.assertEqual(ansible.utils.role_spec_parse(spec), result)
-
- def test_role_yaml_parse(self):
- tests = (
- (
- # Old style
- {
- 'role': 'debops.elasticsearch',
- 'name': 'elks'
- },
- {
- 'role': 'debops.elasticsearch',
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '',
- }
- ),
- (
- {
- 'role': 'debops.elasticsearch,1.0,elks',
- 'my_param': 'foo'
- },
- {
- 'role': 'debops.elasticsearch,1.0,elks',
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '1.0',
- 'my_param': 'foo',
- }
- ),
- (
- {
- 'role': 'debops.elasticsearch,1.0',
- 'my_param': 'foo'
- },
- {
- 'role': 'debops.elasticsearch,1.0',
- 'name': 'debops.elasticsearch',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '1.0',
- 'my_param': 'foo',
- }
- ),
- # New style
- (
- {
- 'src': 'debops.elasticsearch',
- 'name': 'elks',
- 'my_param': 'foo'
- },
- {
- 'name': 'elks',
- 'scm': None,
- 'src': 'debops.elasticsearch',
- 'version': '',
- 'my_param': 'foo'
- }
- ),
- )
-
- for (role, result) in tests:
- self.assertEqual(ansible.utils.role_yaml_parse(role), result)
-
- @patch('ansible.utils.plugins.module_finder._get_paths')
- def test_find_plugin(self, mock_get_paths):
-
- tmp_path = tempfile.mkdtemp()
- mock_get_paths.return_value = [tmp_path,]
- right_module_1 = 'module.py'
- right_module_2 = 'module_without_extension'
- wrong_module_1 = 'folder'
- wrong_module_2 = 'inexistent'
- path_right_module_1 = os.path.join(tmp_path, right_module_1)
- path_right_module_2 = os.path.join(tmp_path, right_module_2)
- path_wrong_module_1 = os.path.join(tmp_path, wrong_module_1)
- open(path_right_module_1, 'w').close()
- open(path_right_module_2, 'w').close()
- os.mkdir(path_wrong_module_1)
-
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(right_module_1),
- path_right_module_1)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(right_module_2),
- path_right_module_2)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(wrong_module_1),
- None)
- self.assertEqual(ansible.utils.plugins.module_finder.find_plugin(wrong_module_2),
- None)
-
- shutil.rmtree(tmp_path)
diff --git a/v1/tests/TestUtilsStringFunctions.py b/v1/tests/TestUtilsStringFunctions.py
deleted file mode 100644
index cccedf280d..0000000000
--- a/v1/tests/TestUtilsStringFunctions.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# -*- coding: utf-8 -*-
-
-import unittest
-import os
-import os.path
-import tempfile
-import yaml
-import passlib.hash
-import string
-import StringIO
-import copy
-
-from nose.plugins.skip import SkipTest
-
-from ansible.utils import string_functions
-import ansible.errors
-import ansible.constants as C
-import ansible.utils.template as template2
-
-from ansible import __version__
-
-import sys
-reload(sys)
-sys.setdefaultencoding("utf8")
-
-class TestUtilsStringFunctions(unittest.TestCase):
- def test_isprintable(self):
- self.assertFalse(string_functions.isprintable(chr(7)))
- self.assertTrue(string_functions.isprintable('hello'))
-
- def test_count_newlines_from_end(self):
- self.assertEqual(string_functions.count_newlines_from_end('foo\n\n\n\n'), 4)
- self.assertEqual(string_functions.count_newlines_from_end('\nfoo'), 0)
diff --git a/v1/tests/TestVault.py b/v1/tests/TestVault.py
deleted file mode 100644
index b720d72e84..0000000000
--- a/v1/tests/TestVault.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#!/usr/bin/env python
-
-from unittest import TestCase
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from binascii import unhexlify
-from binascii import hexlify
-from nose.plugins.skip import SkipTest
-
-from ansible import errors
-from ansible.utils.vault import VaultLib
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-class TestVaultLib(TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_methods_exist(self):
- v = VaultLib('ansible')
- slots = ['is_encrypted',
- 'encrypt',
- 'decrypt',
- '_add_header',
- '_split_header',]
- for slot in slots:
- assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
-
- def test_is_encrypted(self):
- v = VaultLib(None)
- assert not v.is_encrypted("foobar"), "encryption check on plaintext failed"
- data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
- assert v.is_encrypted(data), "encryption check on headered text failed"
-
- def test_add_header(self):
- v = VaultLib('ansible')
- v.cipher_name = "TEST"
- sensitive_data = "ansible"
- data = v._add_header(sensitive_data)
- lines = data.split('\n')
- assert len(lines) > 1, "failed to properly add header"
- header = lines[0]
- assert header.endswith(';TEST'), "header does end with cipher name"
- header_parts = header.split(';')
- assert len(header_parts) == 3, "header has the wrong number of parts"
- assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
- assert header_parts[1] == v.version, "header version is incorrect"
- assert header_parts[2] == 'TEST', "header does end with cipher name"
-
- def test_split_header(self):
- v = VaultLib('ansible')
- data = "$ANSIBLE_VAULT;9.9;TEST\nansible"
- rdata = v._split_header(data)
- lines = rdata.split('\n')
- assert lines[0] == "ansible"
- assert v.cipher_name == 'TEST', "cipher name was not set"
- assert v.version == "9.9"
-
- def test_encrypt_decrypt_aes(self):
- if self._is_fips():
- raise SkipTest('MD5 not available on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES'
- enc_data = v.encrypt("foobar")
- dec_data = v.decrypt(enc_data)
- assert enc_data != "foobar", "encryption failed"
- assert dec_data == "foobar", "decryption failed"
-
- def test_encrypt_decrypt_aes256(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES256'
- enc_data = v.encrypt("foobar")
- dec_data = v.decrypt(enc_data)
- assert enc_data != "foobar", "encryption failed"
- assert dec_data == "foobar", "decryption failed"
-
- def test_encrypt_encrypted(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- v.cipher_name = 'AES'
- data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify("ansible")
- error_hit = False
- try:
- enc_data = v.encrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert error_hit, "No error was thrown when trying to encrypt data with a header"
-
- def test_decrypt_decrypted(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- data = "ansible"
- error_hit = False
- try:
- dec_data = v.decrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert error_hit, "No error was thrown when trying to decrypt data without a header"
-
- def test_cipher_not_set(self):
- # not setting the cipher should default to AES256
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- v = VaultLib('ansible')
- data = "ansible"
- error_hit = False
- try:
- enc_data = v.encrypt(data)
- except errors.AnsibleError, e:
- error_hit = True
- assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
- assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
diff --git a/v1/tests/TestVaultEditor.py b/v1/tests/TestVaultEditor.py
deleted file mode 100644
index cfa5bc13e6..0000000000
--- a/v1/tests/TestVaultEditor.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env python
-
-from unittest import TestCase
-import getpass
-import os
-import shutil
-import time
-import tempfile
-from binascii import unhexlify
-from binascii import hexlify
-from nose.plugins.skip import SkipTest
-
-from ansible import errors
-from ansible.utils.vault import VaultLib
-from ansible.utils.vault import VaultEditor
-
-# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Util import Counter
- HAS_COUNTER = True
-except ImportError:
- HAS_COUNTER = False
-
-# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
-try:
- from Crypto.Protocol.KDF import PBKDF2
- HAS_PBKDF2 = True
-except ImportError:
- HAS_PBKDF2 = False
-
-# AES IMPORTS
-try:
- from Crypto.Cipher import AES as AES
- HAS_AES = True
-except ImportError:
- HAS_AES = False
-
-class TestVaultEditor(TestCase):
-
- def _is_fips(self):
- try:
- data = open('/proc/sys/crypto/fips_enabled').read().strip()
- except:
- return False
- if data != '1':
- return False
- return True
-
- def test_methods_exist(self):
- v = VaultEditor(None, None, None)
- slots = ['create_file',
- 'decrypt_file',
- 'edit_file',
- 'encrypt_file',
- 'rekey_file',
- 'read_data',
- 'write_data',
- 'shuffle_files']
- for slot in slots:
- assert hasattr(v, slot), "VaultLib is missing the %s method" % slot
-
- def test_decrypt_1_0(self):
- if self._is_fips():
- raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.0 file"
- assert fdata.strip() == "foo", "incorrect decryption of 1.0 file: %s" % fdata.strip()
-
- def test_decrypt_1_1_newline(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.1-ansible-newline-ansible.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible\nansible\n", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.1 file with newline in password"
- #assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
-
-
- def test_decrypt_1_1(self):
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.1.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.decrypt_file()
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error decrypting 1.1 file"
- assert fdata.strip() == "foo", "incorrect decryption of 1.1 file: %s" % fdata.strip()
-
-
- def test_rekey_migration(self):
- if self._is_fips():
- raise SkipTest('Vault-1.0 will not function on FIPS enabled systems')
- if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
- raise SkipTest
- dirpath = tempfile.mkdtemp()
- filename = os.path.join(dirpath, "foo-ansible-1.0.yml")
- shutil.rmtree(dirpath)
- shutil.copytree("vault_test_data", dirpath)
- ve = VaultEditor(None, "ansible", filename)
-
- # make sure the password functions for the cipher
- error_hit = False
- try:
- ve.rekey_file('ansible2')
- except errors.AnsibleError, e:
- error_hit = True
-
- # verify decrypted content
- f = open(filename, "rb")
- fdata = f.read()
- f.close()
-
- shutil.rmtree(dirpath)
- assert error_hit == False, "error rekeying 1.0 file to 1.1"
-
- # ensure filedata can be decrypted, is 1.1 and is AES256
- vl = VaultLib("ansible2")
- dec_data = None
- error_hit = False
- try:
- dec_data = vl.decrypt(fdata)
- except errors.AnsibleError, e:
- error_hit = True
-
- assert vl.cipher_name == "AES256", "wrong cipher name set after rekey: %s" % vl.cipher_name
- assert error_hit == False, "error decrypting migrated 1.0 file"
- assert dec_data.strip() == "foo", "incorrect decryption of rekeyed/migrated file: %s" % dec_data
-
-
diff --git a/v1/tests/ansible.cfg b/v1/tests/ansible.cfg
deleted file mode 100644
index dd99b8102d..0000000000
--- a/v1/tests/ansible.cfg
+++ /dev/null
@@ -1,3 +0,0 @@
-[defaults]
-
-test_key = test_value
diff --git a/v1/tests/inventory_test_data/ansible_hosts b/v1/tests/inventory_test_data/ansible_hosts
deleted file mode 100644
index 94074edc3c..0000000000
--- a/v1/tests/inventory_test_data/ansible_hosts
+++ /dev/null
@@ -1,2 +0,0 @@
-[somegroup]
-localhost
diff --git a/v1/tests/inventory_test_data/broken.yml b/v1/tests/inventory_test_data/broken.yml
deleted file mode 100644
index 0eccc1ba78..0000000000
--- a/v1/tests/inventory_test_data/broken.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-foo: bar
- baz: qux
diff --git a/v1/tests/inventory_test_data/common_vars.yml b/v1/tests/inventory_test_data/common_vars.yml
deleted file mode 100644
index c4c09b67f2..0000000000
--- a/v1/tests/inventory_test_data/common_vars.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-duck: quack
-cow: moo
-extguard: " '$favcolor' == 'blue' "
diff --git a/v1/tests/inventory_test_data/complex_hosts b/v1/tests/inventory_test_data/complex_hosts
deleted file mode 100644
index 34935c6330..0000000000
--- a/v1/tests/inventory_test_data/complex_hosts
+++ /dev/null
@@ -1,96 +0,0 @@
-# order of groups, children, and vars is not significant
-# so this example mixes them up for maximum testing
-
-[nc:children]
-rtp
-triangle
-
-[eastcoast:children]
-nc
-florida
-
-[us:children]
-eastcoast
-
-[redundantgroup]
-rtp_a
-
-[redundantgroup2]
-rtp_a
-
-[redundantgroup3:children]
-rtp
-
-[redundantgroup:vars]
-rga=1
-
-[redundantgroup2:vars]
-rgb=2
-
-[redundantgroup3:vars]
-rgc=3
-
-[nc:vars]
-b=10000
-c=10001
-d=10002
-e = 10003
- f = 10004 != 10005
- g = " g "
- h = ' h '
- i = ' i "
- j = " j
- k = ['k1', 'k2']
-
-[rtp]
-rtp_a
-rtp_b
-rtp_c
-
-[rtp:vars]
-a=1
-b=2
-c=3
-
-[triangle]
-tri_a
-tri_b
-tri_c
-
-[triangle:vars]
-a=11
-b=12
-c=13
-
-[florida]
-orlando
-miami
-
-[florida:vars]
-a=100
-b=101
-c=102
-
-
-[eastcoast:vars]
-b=100000
-c=100001
-d=100002
-
-[us:vars]
-c=1000000
-
-[role1]
-host[1:2]
-
-[role2]
-host[2:3]
-
-[role3]
-host[1:3:2]
-
-[role4]
-blade-[a:c]-[1:16]
-blade-[d:z]-[01:16].example.com
-blade-[1:10]-[1:16]
-host-e-[10:16].example.net:1234
diff --git a/v1/tests/inventory_test_data/encrypted.yml b/v1/tests/inventory_test_data/encrypted.yml
deleted file mode 100644
index ca33ab25cb..0000000000
--- a/v1/tests/inventory_test_data/encrypted.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-33343734386261666161626433386662623039356366656637303939306563376130623138626165
-6436333766346533353463636566313332623130383662340a393835656134633665333861393331
-37666233346464636263636530626332623035633135363732623332313534306438393366323966
-3135306561356164310a343937653834643433343734653137383339323330626437313562306630
-3035
diff --git a/v1/tests/inventory_test_data/hosts_list.yml b/v1/tests/inventory_test_data/hosts_list.yml
deleted file mode 100644
index 09c5ca7c17..0000000000
--- a/v1/tests/inventory_test_data/hosts_list.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-# Test that playbooks support YAML lists of hosts.
----
-- hosts: [host1, host2, host3]
- connection: local
- tasks:
- - action: command true
diff --git a/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg b/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
deleted file mode 100644
index 1b7a478d87..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_alpha_end_before_beg
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[Z:T]
diff --git a/v1/tests/inventory_test_data/inventory/test_combined_range b/v1/tests/inventory_test_data/inventory/test_combined_range
deleted file mode 100644
index cbcb41753e..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_combined_range
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:2][A:B]
diff --git a/v1/tests/inventory_test_data/inventory/test_incorrect_format b/v1/tests/inventory_test_data/inventory/test_incorrect_format
deleted file mode 100644
index 339bd59edf..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_incorrect_format
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[001:10]
diff --git a/v1/tests/inventory_test_data/inventory/test_incorrect_range b/v1/tests/inventory_test_data/inventory/test_incorrect_range
deleted file mode 100644
index 272ca7be71..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_incorrect_range
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:2:3:4]
diff --git a/v1/tests/inventory_test_data/inventory/test_leading_range b/v1/tests/inventory_test_data/inventory/test_leading_range
deleted file mode 100644
index bf390de42a..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_leading_range
+++ /dev/null
@@ -1,6 +0,0 @@
-[test]
-[1:2].host
-[A:B].host
-
-[test2] # comment
-[1:3].host
diff --git a/v1/tests/inventory_test_data/inventory/test_missing_end b/v1/tests/inventory_test_data/inventory/test_missing_end
deleted file mode 100644
index ff32042402..0000000000
--- a/v1/tests/inventory_test_data/inventory/test_missing_end
+++ /dev/null
@@ -1,2 +0,0 @@
-[test]
-host[1:]
diff --git a/v1/tests/inventory_test_data/inventory_api.py b/v1/tests/inventory_test_data/inventory_api.py
deleted file mode 100644
index 9bdca22ed3..0000000000
--- a/v1/tests/inventory_test_data/inventory_api.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-
-import json
-import sys
-
-from optparse import OptionParser
-
-parser = OptionParser()
-parser.add_option('-l', '--list', default=False, dest="list_hosts", action="store_true")
-parser.add_option('-H', '--host', default=None, dest="host")
-parser.add_option('-e', '--extra-vars', default=None, dest="extra")
-
-options, args = parser.parse_args()
-
-systems = {
- "ungrouped": [ "jupiter", "saturn" ],
- "greek": [ "zeus", "hera", "poseidon" ],
- "norse": [ "thor", "odin", "loki" ],
- "major-god": [ "zeus", "odin" ],
-}
-
-variables = {
- "thor": {
- "hammer": True
- },
- "zeus": {},
-}
-
-if options.list_hosts == True:
- print json.dumps(systems)
- sys.exit(0)
-
-if options.host is not None:
- if options.extra:
- k,v = options.extra.split("=")
- variables[options.host][k] = v
- if options.host in variables:
- print json.dumps(variables[options.host])
- else:
- print "{}"
- sys.exit(0)
-
-parser.print_help()
-sys.exit(1)
diff --git a/v1/tests/inventory_test_data/inventory_dir/0hosts b/v1/tests/inventory_test_data/inventory_dir/0hosts
deleted file mode 100644
index 6f78a33a22..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/0hosts
+++ /dev/null
@@ -1,3 +0,0 @@
-zeus var_a=0
-morpheus
-thor
diff --git a/v1/tests/inventory_test_data/inventory_dir/1mythology b/v1/tests/inventory_test_data/inventory_dir/1mythology
deleted file mode 100644
index 43fa181bd5..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/1mythology
+++ /dev/null
@@ -1,6 +0,0 @@
-[greek]
-zeus
-morpheus
-
-[norse]
-thor
diff --git a/v1/tests/inventory_test_data/inventory_dir/2levels b/v1/tests/inventory_test_data/inventory_dir/2levels
deleted file mode 100644
index 363294923e..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/2levels
+++ /dev/null
@@ -1,6 +0,0 @@
-[major-god]
-zeus var_a=2
-thor
-
-[minor-god]
-morpheus
diff --git a/v1/tests/inventory_test_data/inventory_dir/3comments b/v1/tests/inventory_test_data/inventory_dir/3comments
deleted file mode 100644
index e11b5e416b..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/3comments
+++ /dev/null
@@ -1,8 +0,0 @@
-[major-god] # group with inline comments
-zeus var_a="3\#4" # host with inline comments and "#" in the var string
-# A comment
-thor
-
-[minor-god] # group with inline comment and unbalanced quotes: ' "
-morpheus # host with inline comments and unbalanced quotes: ' "
-# A comment with unbalanced quotes: ' "
diff --git a/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini b/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
deleted file mode 100644
index a30afe5fcc..0000000000
--- a/v1/tests/inventory_test_data/inventory_dir/4skip_extensions.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[skip]
-skipme \ No newline at end of file
diff --git a/v1/tests/inventory_test_data/large_range b/v1/tests/inventory_test_data/large_range
deleted file mode 100644
index 18cfc22078..0000000000
--- a/v1/tests/inventory_test_data/large_range
+++ /dev/null
@@ -1 +0,0 @@
-bob[000:142]
diff --git a/v1/tests/inventory_test_data/restrict_pattern b/v1/tests/inventory_test_data/restrict_pattern
deleted file mode 100644
index fb16b4dda5..0000000000
--- a/v1/tests/inventory_test_data/restrict_pattern
+++ /dev/null
@@ -1,2 +0,0 @@
-odin
-thor
diff --git a/v1/tests/inventory_test_data/simple_hosts b/v1/tests/inventory_test_data/simple_hosts
deleted file mode 100644
index 08c62b4537..0000000000
--- a/v1/tests/inventory_test_data/simple_hosts
+++ /dev/null
@@ -1,28 +0,0 @@
-jupiter
-saturn
-thrudgelmir[:5]
-
-[greek]
-zeus
-hera:3000
-poseidon
-cerberus[001:003]
-cottus[99:100]
-
-[norse]
-thor
-odin
-loki
-
-[egyptian]
-Hotep-[a:c]
-Bast[C:D]
-
-[auth]
-neptun auth="YWRtaW46YWRtaW4="
-
-[parse:children]
-noparse
-
-[noparse]
-goldorak
diff --git a/v1/tests/module_tests/TestApt.py b/v1/tests/module_tests/TestApt.py
deleted file mode 100644
index e7f2dafc95..0000000000
--- a/v1/tests/module_tests/TestApt.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import collections
-import mock
-import os
-import unittest
-
-from ansible.modules.core.packaging.os.apt import (
- expand_pkgspec_from_fnmatches,
-)
-
-
-class AptExpandPkgspecTestCase(unittest.TestCase):
-
- def setUp(self):
- FakePackage = collections.namedtuple("Package", ("name",))
- self.fake_cache = [ FakePackage("apt"),
- FakePackage("apt-utils"),
- FakePackage("not-selected"),
- ]
-
- def test_trivial(self):
- foo = ["apt"]
- self.assertEqual(
- expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
-
- def test_version_wildcard(self):
- foo = ["apt=1.0*"]
- self.assertEqual(
- expand_pkgspec_from_fnmatches(None, foo, self.fake_cache), foo)
-
- def test_pkgname_wildcard_version_wildcard(self):
- foo = ["apt*=1.0*"]
- m_mock = mock.Mock()
- self.assertEqual(
- expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
- ['apt', 'apt-utils'])
-
- def test_pkgname_expands(self):
- foo = ["apt*"]
- m_mock = mock.Mock()
- self.assertEqual(
- expand_pkgspec_from_fnmatches(m_mock, foo, self.fake_cache),
- ["apt", "apt-utils"])
diff --git a/v1/tests/module_tests/TestDocker.py b/v1/tests/module_tests/TestDocker.py
deleted file mode 100644
index b8c8cf1e23..0000000000
--- a/v1/tests/module_tests/TestDocker.py
+++ /dev/null
@@ -1,19 +0,0 @@
-import collections
-import os
-import unittest
-
-from ansible.modules.core.cloud.docker.docker import get_split_image_tag
-
-class DockerSplitImageTagTestCase(unittest.TestCase):
-
- def test_trivial(self):
- self.assertEqual(get_split_image_tag('test'), ('test', 'latest'))
-
- def test_with_org_name(self):
- self.assertEqual(get_split_image_tag('ansible/centos7-ansible'), ('ansible/centos7-ansible', 'latest'))
-
- def test_with_tag(self):
- self.assertEqual(get_split_image_tag('test:devel'), ('test', 'devel'))
-
- def test_with_tag_and_org_name(self):
- self.assertEqual(get_split_image_tag('ansible/centos7-ansible:devel'), ('ansible/centos7-ansible', 'devel'))
diff --git a/v1/tests/vault_test_data/foo-ansible-1.0.yml b/v1/tests/vault_test_data/foo-ansible-1.0.yml
deleted file mode 100644
index f71ddf10ce..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.0.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-$ANSIBLE_VAULT;1.0;AES
-53616c7465645f5fd0026926a2d415a28a2622116273fbc90e377225c12a347e1daf4456d36a77f9
-9ad98d59f61d06a4b66718d855f16fb7bdfe54d1ec8aeaa4d06c2dc1fa630ae1846a029877f0eeb1
-83c62ffb04c2512995e815de4b4d29ed
diff --git a/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml b/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
deleted file mode 100644
index 6e025a1c40..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.1-ansible-newline-ansible.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-61333063333663376535373431643063613232393438623732643966613962363563383132363631
-3235363730623635323039623439343561313566313361630a313632643338613636303637623765
-64356531643630303636323064336439393335313836366235336464633635376339663830333232
-6338353337663139320a646632386131646431656165656338633535386535623236393265373634
-37656134633661333935346434363237613435323865356234323264663838643931
diff --git a/v1/tests/vault_test_data/foo-ansible-1.1.yml b/v1/tests/vault_test_data/foo-ansible-1.1.yml
deleted file mode 100644
index d9a4a448a6..0000000000
--- a/v1/tests/vault_test_data/foo-ansible-1.1.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-$ANSIBLE_VAULT;1.1;AES256
-62303130653266653331306264616235333735323636616539316433666463323964623162386137
-3961616263373033353631316333623566303532663065310a393036623466376263393961326530
-64336561613965383835646464623865663966323464653236343638373165343863623638316664
-3631633031323837340a396530313963373030343933616133393566366137363761373930663833
-3739