summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDoug Hellmann <doug@doughellmann.com>2014-12-11 14:50:26 -0500
committerDoug Hellmann <doug@doughellmann.com>2014-12-18 16:08:01 -0500
commitbdb739e249ceee8fd6a460eb0a110c9f60acd07a (patch)
tree13f31192b952e68dc7baa8016d3c941e6f7824d6
parent44aa91f133fecc47a964374a32a33f22ad085b56 (diff)
downloadoslo-rootwrap-bdb739e249ceee8fd6a460eb0a110c9f60acd07a.tar.gz
Move files out of the namespace package
Move the public API out of oslo.rootwrap to oslo_rootwrap. Retain the ability to import from the old namespace package for backwards compatibility for this release cycle. bp/drop-namespace-packages Change-Id: Ifed1a99e5ea6d999760731867c4294707698d41c
-rw-r--r--README.rst8
-rw-r--r--benchmark/benchmark.py6
-rw-r--r--oslo/rootwrap/__init__.py26
-rw-r--r--oslo/rootwrap/client.py133
-rw-r--r--oslo/rootwrap/cmd.py113
-rw-r--r--oslo/rootwrap/daemon.py140
-rw-r--r--oslo/rootwrap/filters.py339
-rw-r--r--oslo/rootwrap/jsonrpc.py197
-rw-r--r--oslo/rootwrap/wrapper.py196
-rw-r--r--oslo_rootwrap/__init__.py0
-rw-r--r--oslo_rootwrap/client.py144
-rw-r--r--oslo_rootwrap/cmd.py124
-rw-r--r--oslo_rootwrap/daemon.py151
-rw-r--r--oslo_rootwrap/filters.py350
-rw-r--r--oslo_rootwrap/jsonrpc.py208
-rw-r--r--oslo_rootwrap/tests/__init__.py0
-rw-r--r--oslo_rootwrap/tests/run_daemon.py57
-rw-r--r--oslo_rootwrap/tests/test_functional.py242
-rw-r--r--oslo_rootwrap/tests/test_functional_eventlet.py31
-rw-r--r--oslo_rootwrap/tests/test_rootwrap.py585
-rw-r--r--oslo_rootwrap/wrapper.py207
-rw-r--r--setup.cfg2
-rw-r--r--test-requirements-py3.txt3
-rw-r--r--test-requirements.txt2
-rw-r--r--tests/test_functional.py6
-rw-r--r--tests/test_rootwrap.py18
-rw-r--r--tests/test_warning.py61
27 files changed, 2213 insertions, 1136 deletions
diff --git a/README.rst b/README.rst
index 6a7fed7..cae0a3a 100644
--- a/README.rst
+++ b/README.rst
@@ -6,8 +6,8 @@ The Oslo Rootwrap allows fine filtering of shell commands to run as `root`
from OpenStack services.
Rootwrap should be used as a separate Python process calling the
-oslo.rootwrap.cmd:main function. You can set up a specific console_script
-calling into oslo.rootwrap.cmd:main, called for example `nova-rootwrap`.
+``oslo_rootwrap.cmd:main`` function. You can set up a specific console_script
+calling into ``oslo_rootwrap.cmd:main``, called for example `nova-rootwrap`.
To keep things simple, this document will consider that your console_script
is called `/usr/bin/nova-rootwrap`.
@@ -318,13 +318,13 @@ Daemon mode
Since 1.3.0 version ``oslo.rootwrap`` supports "daemon mode". In this mode
rootwrap would start, read config file and wait for commands to be run with
root priviledges. All communications with the daemon should go through
-``Client`` class that resides in ``oslo.rootwrap.client`` module.
+``Client`` class that resides in ``oslo_rootwrap.client`` module.
Its constructor expects one argument - a list that can be passed to ``Popen``
to create rootwrap daemon process. For ``root_helper`` above it will be
``["sudo", "nova-rootwrap-daemon", "/etc/neutron/rootwrap.conf"]``,
for example. Note that it uses a separate script that points to
-``oslo.rootwrap.cmd:daemon`` endpoint (instead of ``:main``).
+``oslo_rootwrap.cmd:daemon`` endpoint (instead of ``:main``).
The class provides one method ``execute`` with following arguments:
diff --git a/benchmark/benchmark.py b/benchmark/benchmark.py
index b661974..ef7417c 100644
--- a/benchmark/benchmark.py
+++ b/benchmark/benchmark.py
@@ -22,7 +22,7 @@ import subprocess
import sys
import timeit
-from oslo.rootwrap import client
+from oslo_rootwrap import client
config_path = "rootwrap.conf"
num_iterations = 100
@@ -44,12 +44,12 @@ def run_sudo(cmd):
def run_rootwrap(cmd):
return run_plain([
"sudo", sys.executable, "-c",
- "from oslo.rootwrap import cmd; cmd.main()", config_path] + cmd)
+ "from oslo_rootwrap import cmd; cmd.main()", config_path] + cmd)
run_daemon = client.Client([
"sudo", sys.executable, "-c",
- "from oslo.rootwrap import cmd; cmd.daemon()", config_path]).execute
+ "from oslo_rootwrap import cmd; cmd.daemon()", config_path]).execute
def run_one(runner, cmd):
diff --git a/oslo/rootwrap/__init__.py b/oslo/rootwrap/__init__.py
index e69de29..73e54f3 100644
--- a/oslo/rootwrap/__init__.py
+++ b/oslo/rootwrap/__init__.py
@@ -0,0 +1,26 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import warnings
+
+
+def deprecated():
+ new_name = __name__.replace('.', '_')
+ warnings.warn(
+ ('The oslo namespace package is deprecated. Please use %s instead.' %
+ new_name),
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+
+deprecated()
diff --git a/oslo/rootwrap/client.py b/oslo/rootwrap/client.py
index e9930a0..44d3338 100644
--- a/oslo/rootwrap/client.py
+++ b/oslo/rootwrap/client.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2014 Mirantis Inc.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,132 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-from multiprocessing import managers
-from multiprocessing import util as mp_util
-import os
-import subprocess
-import threading
-import weakref
-
-try:
- import eventlet.patcher
-except ImportError:
- patched_socket = False
-else:
- # In tests patching happens later, so we'll rely on environment variable
- patched_socket = (eventlet.patcher.is_monkey_patched('socket') or
- os.environ.get('TEST_EVENTLET', False))
-
-from oslo.rootwrap import daemon
-from oslo.rootwrap import jsonrpc
-
-if patched_socket:
- # We have to use slow version of recvall with eventlet because of a bug in
- # GreenSocket.recv_into:
- # https://bitbucket.org/eventlet/eventlet/pull-request/41
- # This check happens here instead of jsonrpc to avoid importing eventlet
- # from daemon code that is run with root priviledges.
- jsonrpc.JsonConnection.recvall = jsonrpc.JsonConnection._recvall_slow
-
-try:
- finalize = weakref.finalize
-except AttributeError:
- def finalize(obj, func, *args, **kwargs):
- return mp_util.Finalize(obj, func, args=args, kwargs=kwargs,
- exitpriority=0)
-
-ClientManager = daemon.get_manager_class()
-LOG = logging.getLogger(__name__)
-
-
-class Client(object):
- def __init__(self, rootwrap_daemon_cmd):
- self._start_command = rootwrap_daemon_cmd
- self._initialized = False
- self._mutex = threading.Lock()
- self._manager = None
- self._proxy = None
- self._process = None
- self._finalize = None
-
- def _initialize(self):
- if self._process is not None and self._process.poll() is not None:
- LOG.warning("Leaving behind already spawned process with pid %d, "
- "root should kill it if it's still there (I can't)",
- self._process.pid)
-
- process_obj = subprocess.Popen(self._start_command,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- LOG.info("Spawned new rootwrap daemon process with pid=%d",
- process_obj.pid)
-
- self._process = process_obj
- socket_path = process_obj.stdout.readline()[:-1]
- # For Python 3 we need to convert bytes to str here
- if not isinstance(socket_path, str):
- socket_path = socket_path.decode('utf-8')
- authkey = process_obj.stdout.read(32)
- if process_obj.poll() is not None:
- stderr = process_obj.stderr.read()
- # NOTE(yorik-sar): don't expose stdout here
- raise Exception("Failed to spawn rootwrap process.\nstderr:\n%s" %
- (stderr,))
- self._manager = ClientManager(socket_path, authkey)
- self._manager.connect()
- self._proxy = self._manager.rootwrap()
- self._finalize = finalize(self, self._shutdown, self._process,
- self._manager)
- self._initialized = True
-
- @staticmethod
- def _shutdown(process, manager, JsonClient=jsonrpc.JsonClient):
- # Storing JsonClient in arguments because globals are set to None
- # before executing atexit routines in Python 2.x
- if process.poll() is None:
- LOG.info('Stopping rootwrap daemon process with pid=%s',
- process.pid)
- try:
- manager.rootwrap().shutdown()
- except (EOFError, IOError):
- pass # assume it is dead already
- # We might want to wait for process to exit or kill it, but we
- # can't provide sane timeout on 2.x and we most likely don't have
- # permisions to do so
- # Invalidate manager's state so that proxy won't try to do decref
- manager._state.value = managers.State.SHUTDOWN
-
- def _ensure_initialized(self):
- with self._mutex:
- if not self._initialized:
- self._initialize()
-
- def _restart(self, proxy):
- with self._mutex:
- assert self._initialized
- # Verify if someone has already restarted this.
- if self._proxy is proxy:
- self._finalize()
- self._manager = None
- self._proxy = None
- self._initialized = False
- self._initialize()
- return self._proxy
-
- def execute(self, cmd, env=None, stdin=None):
- self._ensure_initialized()
- proxy = self._proxy
- retry = False
- try:
- res = proxy.run_one_command(cmd, env, stdin)
- except (EOFError, IOError):
- retry = True
- # res can be None if we received final None sent by dying server thread
- # instead of response to our request. Process is most likely to be dead
- # at this point.
- if retry or res is None:
- proxy = self._restart(proxy)
- res = proxy.run_one_command(cmd, env, stdin)
- return res
+from oslo_rootwrap.client import * # noqa
diff --git a/oslo/rootwrap/cmd.py b/oslo/rootwrap/cmd.py
index fc0ff8f..fd7ed80 100644
--- a/oslo/rootwrap/cmd.py
+++ b/oslo/rootwrap/cmd.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,112 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Root wrapper for OpenStack services
-
- Filters which commands a service is allowed to run as another user.
-
- To use this with oslo, you should set the following in
- oslo.conf:
- rootwrap_config=/etc/oslo/rootwrap.conf
-
- You also need to let the oslo user run oslo-rootwrap
- as root in sudoers:
- oslo ALL = (root) NOPASSWD: /usr/bin/oslo-rootwrap
- /etc/oslo/rootwrap.conf *
-
- Service packaging should deploy .filters files only on nodes where
- they are needed, to avoid allowing more than is necessary.
-"""
-
-from __future__ import print_function
-
-import logging
-import sys
-
-from six import moves
-
-from oslo.rootwrap import daemon as daemon_mod
-from oslo.rootwrap import wrapper
-
-RC_UNAUTHORIZED = 99
-RC_NOCOMMAND = 98
-RC_BADCONFIG = 97
-RC_NOEXECFOUND = 96
-SIGNAL_BASE = 128
-
-
-def _exit_error(execname, message, errorcode, log=True):
- print("%s: %s" % (execname, message), file=sys.stderr)
- if log:
- logging.error(message)
- sys.exit(errorcode)
-
-
-def daemon():
- return main(run_daemon=True)
-
-
-def main(run_daemon=False):
- # Split arguments, require at least a command
- execname = sys.argv.pop(0)
- if run_daemon:
- if len(sys.argv) != 1:
- _exit_error(execname, "Extra arguments to daemon", RC_NOCOMMAND,
- log=False)
- else:
- if len(sys.argv) < 2:
- _exit_error(execname, "No command specified", RC_NOCOMMAND,
- log=False)
-
- configfile = sys.argv.pop(0)
-
- # Load configuration
- try:
- rawconfig = moves.configparser.RawConfigParser()
- rawconfig.read(configfile)
- config = wrapper.RootwrapConfig(rawconfig)
- except ValueError as exc:
- msg = "Incorrect value in %s: %s" % (configfile, exc.message)
- _exit_error(execname, msg, RC_BADCONFIG, log=False)
- except moves.configparser.Error:
- _exit_error(execname, "Incorrect configuration file: %s" % configfile,
- RC_BADCONFIG, log=False)
-
- if config.use_syslog:
- wrapper.setup_syslog(execname,
- config.syslog_log_facility,
- config.syslog_log_level)
-
- filters = wrapper.load_filters(config.filters_path)
-
- if run_daemon:
- daemon_mod.daemon_start(config, filters)
- else:
- run_one_command(execname, config, filters, sys.argv)
-
-
-def run_one_command(execname, config, filters, userargs):
- # Execute command if it matches any of the loaded filters
- try:
- obj = wrapper.start_subprocess(
- filters, userargs,
- exec_dirs=config.exec_dirs,
- log=config.use_syslog,
- stdin=sys.stdin,
- stdout=sys.stdout,
- stderr=sys.stderr)
- returncode = obj.wait()
- # Fix returncode of Popen
- if returncode < 0:
- returncode = SIGNAL_BASE - returncode
- sys.exit(returncode)
-
- except wrapper.FilterMatchNotExecutable as exc:
- msg = ("Executable not found: %s (filter match = %s)"
- % (exc.match.exec_path, exc.match.name))
- _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
-
- except wrapper.NoFilterMatched:
- msg = ("Unauthorized command: %s (no filter matched)"
- % ' '.join(userargs))
- _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
+from oslo_rootwrap.cmd import * # noqa
diff --git a/oslo/rootwrap/daemon.py b/oslo/rootwrap/daemon.py
index fbb4086..42d3caf 100644
--- a/oslo/rootwrap/daemon.py
+++ b/oslo/rootwrap/daemon.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2014 Mirantis Inc.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,139 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-from __future__ import print_function
-
-import functools
-import logging
-from multiprocessing import managers
-import os
-import shutil
-import signal
-import stat
-import subprocess
-import sys
-import tempfile
-import threading
-
-from oslo.rootwrap import jsonrpc
-from oslo.rootwrap import wrapper
-
-LOG = logging.getLogger(__name__)
-
-# Since multiprocessing supports only pickle and xmlrpclib for serialization of
-# RPC requests and responses, we declare another 'jsonrpc' serializer
-
-managers.listener_client['jsonrpc'] = jsonrpc.JsonListener, jsonrpc.JsonClient
-
-
-class RootwrapClass(object):
- def __init__(self, config, filters):
- self.config = config
- self.filters = filters
-
- def run_one_command(self, userargs, env=None, stdin=None):
- if env is None:
- env = {}
-
- obj = wrapper.start_subprocess(
- self.filters, userargs,
- exec_dirs=self.config.exec_dirs,
- log=self.config.use_syslog,
- close_fds=True,
- env=env,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = obj.communicate(stdin)
- return obj.returncode, out, err
-
- def shutdown(self):
- # Suicide to force break of the main thread
- os.kill(os.getpid(), signal.SIGINT)
-
-
-def get_manager_class(config=None, filters=None):
- class RootwrapManager(managers.BaseManager):
- def __init__(self, address=None, authkey=None):
- # Force jsonrpc because neither pickle nor xmlrpclib is secure
- super(RootwrapManager, self).__init__(address, authkey,
- serializer='jsonrpc')
-
- if config is not None:
- partial_class = functools.partial(RootwrapClass, config, filters)
- RootwrapManager.register('rootwrap', partial_class)
- else:
- RootwrapManager.register('rootwrap')
-
- return RootwrapManager
-
-
-def daemon_start(config, filters):
- temp_dir = tempfile.mkdtemp(prefix='rootwrap-')
- LOG.debug("Created temporary directory %s", temp_dir)
- try:
- # allow everybody to find the socket
- rwxr_xr_x = (stat.S_IRWXU |
- stat.S_IRGRP | stat.S_IXGRP |
- stat.S_IROTH | stat.S_IXOTH)
- os.chmod(temp_dir, rwxr_xr_x)
- socket_path = os.path.join(temp_dir, "rootwrap.sock")
- LOG.debug("Will listen on socket %s", socket_path)
- manager_cls = get_manager_class(config, filters)
- manager = manager_cls(address=socket_path)
- server = manager.get_server()
- # allow everybody to connect to the socket
- rw_rw_rw_ = (stat.S_IRUSR | stat.S_IWUSR |
- stat.S_IRGRP | stat.S_IWGRP |
- stat.S_IROTH | stat.S_IWOTH)
- os.chmod(socket_path, rw_rw_rw_)
- try:
- # In Python 3 we have to use buffer to push in bytes directly
- stdout = sys.stdout.buffer
- except AttributeError:
- stdout = sys.stdout
- stdout.write(socket_path.encode('utf-8'))
- stdout.write(b'\n')
- stdout.write(bytes(server.authkey))
- sys.stdin.close()
- sys.stdout.close()
- sys.stderr.close()
- # Gracefully shutdown on INT or TERM signals
- stop = functools.partial(daemon_stop, server)
- signal.signal(signal.SIGTERM, stop)
- signal.signal(signal.SIGINT, stop)
- LOG.info("Starting rootwrap daemon main loop")
- server.serve_forever()
- finally:
- conn = server.listener
- # This will break accept() loop with EOFError if it was not in the main
- # thread (as in Python 3.x)
- conn.close()
- # Closing all currently connected client sockets for reading to break
- # worker threads blocked on recv()
- for cl_conn in conn.get_accepted():
- try:
- cl_conn.half_close()
- except Exception:
- # Most likely the socket have already been closed
- LOG.debug("Failed to close connection")
- LOG.info("Waiting for all client threads to finish.")
- for thread in threading.enumerate():
- if thread.daemon:
- LOG.debug("Joining thread %s", thread)
- thread.join()
- LOG.debug("Removing temporary directory %s", temp_dir)
- shutil.rmtree(temp_dir)
-
-
-def daemon_stop(server, signal, frame):
- LOG.info("Got signal %s. Shutting down server", signal)
- # Signals are caught in the main thread which means this handler will run
- # in the middle of serve_forever() loop. It will catch this exception and
- # properly return. Since all threads created by server_forever are
- # daemonic, we need to join them afterwards. In Python 3 we can just hit
- # stop_event instead.
- try:
- server.stop_event.set()
- except AttributeError:
- raise KeyboardInterrupt
+from oslo_rootwrap.daemon import * # noqa
diff --git a/oslo/rootwrap/filters.py b/oslo/rootwrap/filters.py
index b8747ae..92b502b 100644
--- a/oslo/rootwrap/filters.py
+++ b/oslo/rootwrap/filters.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,338 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import re
-
-
-class CommandFilter(object):
- """Command filter only checking that the 1st argument matches exec_path."""
-
- def __init__(self, exec_path, run_as, *args):
- self.name = ''
- self.exec_path = exec_path
- self.run_as = run_as
- self.args = args
- self.real_exec = None
-
- def get_exec(self, exec_dirs=None):
- """Returns existing executable, or empty string if none found."""
- exec_dirs = exec_dirs or []
- if self.real_exec is not None:
- return self.real_exec
- self.real_exec = ""
- if os.path.isabs(self.exec_path):
- if os.access(self.exec_path, os.X_OK):
- self.real_exec = self.exec_path
- else:
- for binary_path in exec_dirs:
- expanded_path = os.path.join(binary_path, self.exec_path)
- if os.access(expanded_path, os.X_OK):
- self.real_exec = expanded_path
- break
- return self.real_exec
-
- def match(self, userargs):
- """Only check that the first argument (command) matches exec_path."""
- return userargs and os.path.basename(self.exec_path) == userargs[0]
-
- def get_command(self, userargs, exec_dirs=None):
- """Returns command to execute (with sudo -u if run_as != root)."""
- exec_dirs = exec_dirs or []
- to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
- if (self.run_as != 'root'):
- # Used to run commands at lesser privileges
- return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
- return [to_exec] + userargs[1:]
-
- def get_environment(self, userargs, env=None):
- """Returns specific environment to set, None if none."""
- return env
-
-
-class RegExpFilter(CommandFilter):
- """Command filter doing regexp matching for every argument."""
-
- def match(self, userargs):
- # Early skip if command or number of args don't match
- if (not userargs or len(self.args) != len(userargs)):
- # DENY: argument numbers don't match
- return False
- # Compare each arg (anchoring pattern explicitly at end of string)
- for (pattern, arg) in zip(self.args, userargs):
- try:
- if not re.match(pattern + '$', arg):
- # DENY: Some arguments did not match
- return False
- except re.error:
- # DENY: Badly-formed filter
- return False
- # ALLOW: All arguments matched
- return True
-
-
-class PathFilter(CommandFilter):
- """Command filter checking that path arguments are within given dirs
-
- One can specify the following constraints for command arguments:
- 1) pass - pass an argument as is to the resulting command
- 2) some_str - check if an argument is equal to the given string
- 3) abs path - check if a path argument is within the given base dir
-
- A typical rootwrapper filter entry looks like this:
- # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
- chown: PathFilter, /bin/chown, root, nova, /var/lib/images
-
- """
-
- def match(self, userargs):
- if not userargs or len(userargs) < 2:
- return False
-
- arguments = userargs[1:]
-
- equal_args_num = len(self.args) == len(arguments)
- exec_is_valid = super(PathFilter, self).match(userargs)
- args_equal_or_pass = all(
- arg == 'pass' or arg == value
- for arg, value in zip(self.args, arguments)
- if not os.path.isabs(arg) # arguments not specifying abs paths
- )
- paths_are_within_base_dirs = all(
- os.path.commonprefix([arg, os.path.realpath(value)]) == arg
- for arg, value in zip(self.args, arguments)
- if os.path.isabs(arg) # arguments specifying abs paths
- )
-
- return (equal_args_num and
- exec_is_valid and
- args_equal_or_pass and
- paths_are_within_base_dirs)
-
- def get_command(self, userargs, exec_dirs=None):
- exec_dirs = exec_dirs or []
- command, arguments = userargs[0], userargs[1:]
-
- # convert path values to canonical ones; copy other args as is
- args = [os.path.realpath(value) if os.path.isabs(arg) else value
- for arg, value in zip(self.args, arguments)]
-
- return super(PathFilter, self).get_command([command] + args,
- exec_dirs)
-
-
-class KillFilter(CommandFilter):
- """Specific filter for the kill calls.
-
- 1st argument is the user to run /bin/kill under
- 2nd argument is the location of the affected executable
- if the argument is not absolute, it is checked against $PATH
- Subsequent arguments list the accepted signals (if any)
-
- This filter relies on /proc to accurately determine affected
- executable, so it will only work on procfs-capable systems (not OSX).
- """
-
- def __init__(self, *args):
- super(KillFilter, self).__init__("/bin/kill", *args)
-
- def match(self, userargs):
- if not userargs or userargs[0] != "kill":
- return False
- args = list(userargs)
- if len(args) == 3:
- # A specific signal is requested
- signal = args.pop(1)
- if signal not in self.args[1:]:
- # Requested signal not in accepted list
- return False
- else:
- if len(args) != 2:
- # Incorrect number of arguments
- return False
- if len(self.args) > 1:
- # No signal requested, but filter requires specific signal
- return False
- try:
- command = os.readlink("/proc/%d/exe" % int(args[1]))
- except (ValueError, OSError):
- # Incorrect PID
- return False
-
- # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
- # end, because python doesn't stop at '\0' when read the
- # target path.
- command = command.partition('\0')[0]
-
- # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
- # the end if an executable is updated or deleted
- if command.endswith(" (deleted)"):
- command = command[:-len(" (deleted)")]
-
- kill_command = self.args[0]
-
- if os.path.isabs(kill_command):
- return kill_command == command
-
- return (os.path.isabs(command) and
- kill_command == os.path.basename(command) and
- os.path.dirname(command) in os.environ.get('PATH', ''
- ).split(':'))
-
-
-class ReadFileFilter(CommandFilter):
- """Specific filter for the utils.read_file_as_root call."""
-
- def __init__(self, file_path, *args):
- self.file_path = file_path
- super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
-
- def match(self, userargs):
- return (userargs == ['cat', self.file_path])
-
-
-class IpFilter(CommandFilter):
- """Specific filter for the ip utility to that does not match exec."""
-
- def match(self, userargs):
- if userargs[0] == 'ip':
- # Avoid the 'netns exec' command here
- for a, b in zip(userargs[1:], userargs[2:]):
- if a == 'netns':
- return (b != 'exec')
- else:
- return True
-
-
-class EnvFilter(CommandFilter):
- """Specific filter for the env utility.
-
- Behaves like CommandFilter, except that it handles
- leading env A=B.. strings appropriately.
- """
-
- def _extract_env(self, arglist):
- """Extract all leading NAME=VALUE arguments from arglist."""
-
- envs = set()
- for arg in arglist:
- if '=' not in arg:
- break
- envs.add(arg.partition('=')[0])
- return envs
-
- def __init__(self, exec_path, run_as, *args):
- super(EnvFilter, self).__init__(exec_path, run_as, *args)
-
- env_list = self._extract_env(self.args)
- # Set exec_path to X when args are in the form of
- # env A=a B=b C=c X Y Z
- if "env" in exec_path and len(env_list) < len(self.args):
- self.exec_path = self.args[len(env_list)]
-
- def match(self, userargs):
- # ignore leading 'env'
- if userargs[0] == 'env':
- userargs.pop(0)
-
- # require one additional argument after configured ones
- if len(userargs) < len(self.args):
- return False
-
- # extract all env args
- user_envs = self._extract_env(userargs)
- filter_envs = self._extract_env(self.args)
- user_command = userargs[len(user_envs):len(user_envs) + 1]
-
- # match first non-env argument with CommandFilter
- return (super(EnvFilter, self).match(user_command)
- and len(filter_envs) and user_envs == filter_envs)
-
- def exec_args(self, userargs):
- args = userargs[:]
-
- # ignore leading 'env'
- if args[0] == 'env':
- args.pop(0)
-
- # Throw away leading NAME=VALUE arguments
- while args and '=' in args[0]:
- args.pop(0)
-
- return args
-
- def get_command(self, userargs, exec_dirs=[]):
- to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
- return [to_exec] + self.exec_args(userargs)[1:]
-
- def get_environment(self, userargs, env=None):
- if env is None:
- env = os.environ
- env = env.copy()
-
- # ignore leading 'env'
- if userargs[0] == 'env':
- userargs.pop(0)
-
- # Handle leading NAME=VALUE pairs
- for a in userargs:
- env_name, equals, env_value = a.partition('=')
- if not equals:
- break
- if env_name and env_value:
- env[env_name] = env_value
-
- return env
-
-
-class ChainingFilter(CommandFilter):
- def exec_args(self, userargs):
- return []
-
-
-class IpNetnsExecFilter(ChainingFilter):
- """Specific filter for the ip utility to that does match exec."""
-
- def match(self, userargs):
- # Network namespaces currently require root
- # require <ns> argument
- if self.run_as != "root" or len(userargs) < 4:
- return False
-
- return (userargs[:3] == ['ip', 'netns', 'exec'])
-
- def exec_args(self, userargs):
- args = userargs[4:]
- if args:
- args[0] = os.path.basename(args[0])
- return args
-
-
-class ChainingRegExpFilter(ChainingFilter):
- """Command filter doing regexp matching for prefix commands.
-
- Remaining arguments are filtered again. This means that the command
- specified as the arguments must be also allowed to execute directly.
- """
-
- def match(self, userargs):
- # Early skip if number of args is smaller than the filter
- if (not userargs or len(self.args) > len(userargs)):
- return False
- # Compare each arg (anchoring pattern explicitly at end of string)
- for (pattern, arg) in zip(self.args, userargs):
- try:
- if not re.match(pattern + '$', arg):
- # DENY: Some arguments did not match
- return False
- except re.error:
- # DENY: Badly-formed filter
- return False
- # ALLOW: All arguments matched
- return True
-
- def exec_args(self, userargs):
- args = userargs[len(self.args):]
- if args:
- args[0] = os.path.basename(args[0])
- return args
+from oslo_rootwrap.filters import * # noqa
diff --git a/oslo/rootwrap/jsonrpc.py b/oslo/rootwrap/jsonrpc.py
index 9c81a1d..a9b1a9f 100644
--- a/oslo/rootwrap/jsonrpc.py
+++ b/oslo/rootwrap/jsonrpc.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2014 Mirantis Inc.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,196 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import base64
-import errno
-import json
-from multiprocessing import connection
-from multiprocessing import managers
-import socket
-import struct
-import weakref
-
-from oslo.rootwrap import wrapper
-
-
-class RpcJSONEncoder(json.JSONEncoder):
- def default(self, o):
- # We need to pass bytes unchanged as they are expected in arguments for
- # and are result of Popen.communicate()
- if isinstance(o, bytes):
- return {"__bytes__": base64.b64encode(o).decode('ascii')}
- # Handle two exception types relevant to command execution
- if isinstance(o, wrapper.NoFilterMatched):
- return {"__exception__": "NoFilterMatched"}
- elif isinstance(o, wrapper.FilterMatchNotExecutable):
- return {"__exception__": "FilterMatchNotExecutable",
- "match": o.match}
- # Other errors will fail to pass JSON encoding and will be visible on
- # client side
- else:
- return super(RpcJSONEncoder, self).default(o)
-
-
-# Parse whatever RpcJSONEncoder supplied us with
-def rpc_object_hook(obj):
- if "__exception__" in obj:
- type_name = obj.pop("__exception__")
- if type_name not in ("NoFilterMatched", "FilterMatchNotExecutable"):
- return obj
- exc_type = getattr(wrapper, type_name)
- return exc_type(**obj)
- elif "__bytes__" in obj:
- return base64.b64decode(obj["__bytes__"].encode('ascii'))
- else:
- return obj
-
-
-class JsonListener(object):
- def __init__(self, address, backlog=1):
- self.address = address
- self._socket = socket.socket(socket.AF_UNIX)
- try:
- self._socket.setblocking(True)
- self._socket.bind(address)
- self._socket.listen(backlog)
- except socket.error:
- self._socket.close()
- raise
- self.closed = False
- # Python 2.6 doesn't have WeakSet
- self._accepted = weakref.WeakKeyDictionary()
-
- def accept(self):
- while True:
- try:
- s, _ = self._socket.accept()
- except socket.error as e:
- if e.errno in (errno.EINVAL, errno.EBADF):
- raise EOFError
- elif e.errno != errno.EINTR:
- raise
- else:
- break
- s.setblocking(True)
- conn = JsonConnection(s)
- self._accepted[conn] = None
- return conn
-
- def close(self):
- if not self.closed:
- self._socket.shutdown(socket.SHUT_RDWR)
- self._socket.close()
- self.closed = True
-
- def get_accepted(self):
- return list(self._accepted)
-
-if hasattr(managers.Server, 'accepter'):
- # In Python 3 accepter() thread has infinite loop. We break it with
- # EOFError, so we should silence this error here.
- def silent_accepter(self):
- try:
- old_accepter(self)
- except EOFError:
- pass
- old_accepter = managers.Server.accepter
- managers.Server.accepter = silent_accepter
-
-try:
- memoryview
-except NameError:
- has_memoryview = False
-else:
- has_memoryview = True
-
-
-class JsonConnection(object):
- def __init__(self, sock):
- sock.setblocking(True)
- self._socket = sock
-
- def send_bytes(self, s):
- self._socket.sendall(struct.pack('!Q', len(s)))
- self._socket.sendall(s)
-
- def recv_bytes(self, maxsize=None):
- l = struct.unpack('!Q', self.recvall(8))[0]
- if maxsize is not None and l > maxsize:
- raise RuntimeError("Too big message received")
- s = self.recvall(l)
- return s
-
- def send(self, obj):
- s = self.dumps(obj)
- self.send_bytes(s)
-
- def recv(self):
- s = self.recv_bytes()
- return self.loads(s)
-
- def close(self):
- self._socket.close()
-
- def half_close(self):
- self._socket.shutdown(socket.SHUT_RD)
-
- # Unfortunatelly Python 2.6 doesn't support memoryview, so we'll have
- # to do it the slow way.
- def _recvall_slow(self, size):
- remaining = size
- res = []
- while remaining:
- piece = self._socket.recv(remaining)
- if not piece:
- raise EOFError
- res.append(piece)
- remaining -= len(piece)
- return b''.join(res)
-
- # For all later versions we can do it almost like in C
- def _recvall_fast(self, size):
- buf = bytearray(size)
- mem = memoryview(buf)
- got = 0
- while got < size:
- piece_size = self._socket.recv_into(mem[got:])
- if not piece_size:
- raise EOFError
- got += piece_size
- # bytearray is mostly compatible with bytes and we could avoid copying
- # data here, but hmac doesn't like it in Python 3.3 (not in 2.7 or 3.4)
- return bytes(buf)
-
- if has_memoryview:
- recvall = _recvall_fast
- else:
- recvall = _recvall_slow
-
- @staticmethod
- def dumps(obj):
- return json.dumps(obj, cls=RpcJSONEncoder).encode('utf-8')
-
- @staticmethod
- def loads(s):
- res = json.loads(s.decode('utf-8'), object_hook=rpc_object_hook)
- try:
- kind = res[0]
- except (IndexError, TypeError):
- pass
- else:
- # In Python 2 json returns unicode while multiprocessing needs str
- if (kind in ("#TRACEBACK", "#UNSERIALIZABLE") and
- not isinstance(res[1], str)):
- res[1] = res[1].encode('utf-8', 'replace')
- return res
-
-
-class JsonClient(JsonConnection):
- def __init__(self, address, authkey=None):
- sock = socket.socket(socket.AF_UNIX)
- sock.setblocking(True)
- sock.connect(address)
- super(JsonClient, self).__init__(sock)
- if authkey is not None:
- connection.answer_challenge(self, authkey)
- connection.deliver_challenge(self, authkey)
+from oslo_rootwrap.jsonrpc import * # noqa
diff --git a/oslo/rootwrap/wrapper.py b/oslo/rootwrap/wrapper.py
index 553eaa9..16b7938 100644
--- a/oslo/rootwrap/wrapper.py
+++ b/oslo/rootwrap/wrapper.py
@@ -1,6 +1,3 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
@@ -13,195 +10,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-import logging.handlers
-import os
-import pwd
-import signal
-import subprocess
-
-from six import moves
-
-from oslo.rootwrap import filters
-
-
-class NoFilterMatched(Exception):
- """This exception is raised when no filter matched."""
- pass
-
-
-class FilterMatchNotExecutable(Exception):
- """Raised when a filter matched but no executable was found."""
- def __init__(self, match=None, **kwargs):
- self.match = match
-
-
-class RootwrapConfig(object):
-
- def __init__(self, config):
- # filters_path
- self.filters_path = config.get("DEFAULT", "filters_path").split(",")
-
- # exec_dirs
- if config.has_option("DEFAULT", "exec_dirs"):
- self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
- else:
- self.exec_dirs = []
- # Use system PATH if exec_dirs is not specified
- if "PATH" in os.environ:
- self.exec_dirs = os.environ['PATH'].split(':')
-
- # syslog_log_facility
- if config.has_option("DEFAULT", "syslog_log_facility"):
- v = config.get("DEFAULT", "syslog_log_facility")
- facility_names = logging.handlers.SysLogHandler.facility_names
- self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
- v, None)
- if self.syslog_log_facility is None and v in facility_names:
- self.syslog_log_facility = facility_names.get(v)
- if self.syslog_log_facility is None:
- raise ValueError('Unexpected syslog_log_facility: %s' % v)
- else:
- default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
- self.syslog_log_facility = default_facility
-
- # syslog_log_level
- if config.has_option("DEFAULT", "syslog_log_level"):
- v = config.get("DEFAULT", "syslog_log_level")
- self.syslog_log_level = logging.getLevelName(v.upper())
- if (self.syslog_log_level == "Level %s" % v.upper()):
- raise ValueError('Unexpected syslog_log_level: %s' % v)
- else:
- self.syslog_log_level = logging.ERROR
-
- # use_syslog
- if config.has_option("DEFAULT", "use_syslog"):
- self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
- else:
- self.use_syslog = False
-
- # use_syslog_rfc_format
- if config.has_option("DEFAULT", "use_syslog_rfc_format"):
- self.use_syslog_rfc_format = config.getboolean(
- "DEFAULT", "use_syslog_rfc_format")
- else:
- self.use_syslog_rfc_format = False
-
-
-def setup_syslog(execname, facility, level):
- rootwrap_logger = logging.getLogger()
- rootwrap_logger.setLevel(level)
- handler = logging.handlers.SysLogHandler(address='/dev/log',
- facility=facility)
- handler.setFormatter(logging.Formatter(
- os.path.basename(execname) + ': %(message)s'))
- rootwrap_logger.addHandler(handler)
-
-
-def build_filter(class_name, *args):
- """Returns a filter object of class class_name."""
- if not hasattr(filters, class_name):
- logging.warning("Skipping unknown filter class (%s) specified "
- "in filter definitions" % class_name)
- return None
- filterclass = getattr(filters, class_name)
- return filterclass(*args)
-
-
-def load_filters(filters_path):
- """Load filters from a list of directories."""
- filterlist = []
- for filterdir in filters_path:
- if not os.path.isdir(filterdir):
- continue
- for filterfile in filter(lambda f: not f.startswith('.'),
- os.listdir(filterdir)):
- filterconfig = moves.configparser.RawConfigParser()
- filterconfig.read(os.path.join(filterdir, filterfile))
- for (name, value) in filterconfig.items("Filters"):
- filterdefinition = [s.strip() for s in value.split(',')]
- newfilter = build_filter(*filterdefinition)
- if newfilter is None:
- continue
- newfilter.name = name
- filterlist.append(newfilter)
- return filterlist
-
-
-def match_filter(filter_list, userargs, exec_dirs=None):
- """Checks user command and arguments through command filters.
-
- Returns the first matching filter.
-
- Raises NoFilterMatched if no filter matched.
- Raises FilterMatchNotExecutable if no executable was found for the
- best filter match.
- """
- first_not_executable_filter = None
- exec_dirs = exec_dirs or []
-
- for f in filter_list:
- if f.match(userargs):
- if isinstance(f, filters.ChainingFilter):
- # This command calls exec verify that remaining args
- # matches another filter.
- def non_chain_filter(fltr):
- return (fltr.run_as == f.run_as
- and not isinstance(fltr, filters.ChainingFilter))
-
- leaf_filters = [fltr for fltr in filter_list
- if non_chain_filter(fltr)]
- args = f.exec_args(userargs)
- if not args:
- continue
- try:
- match_filter(leaf_filters, args, exec_dirs=exec_dirs)
- except (NoFilterMatched, FilterMatchNotExecutable):
- continue
-
- # Try other filters if executable is absent
- if not f.get_exec(exec_dirs=exec_dirs):
- if not first_not_executable_filter:
- first_not_executable_filter = f
- continue
- # Otherwise return matching filter for execution
- return f
-
- if first_not_executable_filter:
- # A filter matched, but no executable was found for it
- raise FilterMatchNotExecutable(match=first_not_executable_filter)
-
- # No filter matched
- raise NoFilterMatched()
-
-
-def _subprocess_setup():
- # Python installs a SIGPIPE handler by default. This is usually not what
- # non-Python subprocesses expect.
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-
-
-def _getlogin():
- try:
- return os.getlogin()
- except OSError:
- return (os.getenv('USER') or
- os.getenv('USERNAME') or
- os.getenv('LOGNAME'))
-
-
-def start_subprocess(filter_list, userargs, exec_dirs=[], log=False,
- env=None, **kwargs):
- filtermatch = match_filter(filter_list, userargs, exec_dirs)
-
- command = filtermatch.get_command(userargs, exec_dirs)
- if log:
- logging.info("(%s > %s) Executing %s (filter match = %s)" % (
- _getlogin(), pwd.getpwuid(os.getuid())[0],
- command, filtermatch.name))
-
- obj = subprocess.Popen(command,
- preexec_fn=_subprocess_setup,
- env=filtermatch.get_environment(userargs, env=env),
- **kwargs)
- return obj
+from oslo_rootwrap.wrapper import * # noqa
diff --git a/oslo_rootwrap/__init__.py b/oslo_rootwrap/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/oslo_rootwrap/__init__.py
diff --git a/oslo_rootwrap/client.py b/oslo_rootwrap/client.py
new file mode 100644
index 0000000..5163772
--- /dev/null
+++ b/oslo_rootwrap/client.py
@@ -0,0 +1,144 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+from multiprocessing import managers
+from multiprocessing import util as mp_util
+import os
+import subprocess
+import threading
+import weakref
+
+try:
+ import eventlet.patcher
+except ImportError:
+ patched_socket = False
+else:
+ # In tests patching happens later, so we'll rely on environment variable
+ patched_socket = (eventlet.patcher.is_monkey_patched('socket') or
+ os.environ.get('TEST_EVENTLET', False))
+
+from oslo_rootwrap import daemon
+from oslo_rootwrap import jsonrpc
+
+if patched_socket:
+ # We have to use slow version of recvall with eventlet because of a bug in
+ # GreenSocket.recv_into:
+ # https://bitbucket.org/eventlet/eventlet/pull-request/41
+ # This check happens here instead of jsonrpc to avoid importing eventlet
+ # from daemon code that is run with root privileges.
+ jsonrpc.JsonConnection.recvall = jsonrpc.JsonConnection._recvall_slow
+
+try:
+ finalize = weakref.finalize
+except AttributeError:
+ def finalize(obj, func, *args, **kwargs):
+ return mp_util.Finalize(obj, func, args=args, kwargs=kwargs,
+ exitpriority=0)
+
+ClientManager = daemon.get_manager_class()
+LOG = logging.getLogger(__name__)
+
+
+class Client(object):
+ def __init__(self, rootwrap_daemon_cmd):
+ self._start_command = rootwrap_daemon_cmd
+ self._initialized = False
+ self._mutex = threading.Lock()
+ self._manager = None
+ self._proxy = None
+ self._process = None
+ self._finalize = None
+
+ def _initialize(self):
+ if self._process is not None and self._process.poll() is not None:
+ LOG.warning("Leaving behind already spawned process with pid %d, "
+ "root should kill it if it's still there (I can't)",
+ self._process.pid)
+
+ process_obj = subprocess.Popen(self._start_command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ LOG.info("Spawned new rootwrap daemon process with pid=%d",
+ process_obj.pid)
+
+ self._process = process_obj
+ socket_path = process_obj.stdout.readline()[:-1]
+ # For Python 3 we need to convert bytes to str here
+ if not isinstance(socket_path, str):
+ socket_path = socket_path.decode('utf-8')
+ authkey = process_obj.stdout.read(32)
+ if process_obj.poll() is not None:
+ stderr = process_obj.stderr.read()
+ # NOTE(yorik-sar): don't expose stdout here
+ raise Exception("Failed to spawn rootwrap process.\nstderr:\n%s" %
+ (stderr,))
+ self._manager = ClientManager(socket_path, authkey)
+ self._manager.connect()
+ self._proxy = self._manager.rootwrap()
+ self._finalize = finalize(self, self._shutdown, self._process,
+ self._manager)
+ self._initialized = True
+
+ @staticmethod
+ def _shutdown(process, manager, JsonClient=jsonrpc.JsonClient):
+ # Storing JsonClient in arguments because globals are set to None
+ # before executing atexit routines in Python 2.x
+ if process.poll() is None:
+ LOG.info('Stopping rootwrap daemon process with pid=%s',
+ process.pid)
+ try:
+ manager.rootwrap().shutdown()
+ except (EOFError, IOError):
+ pass # assume it is dead already
+ # We might want to wait for process to exit or kill it, but we
+ # can't provide sane timeout on 2.x and we most likely don't have
+ # permissions to do so
+ # Invalidate manager's state so that proxy won't try to do decref
+ manager._state.value = managers.State.SHUTDOWN
+
+ def _ensure_initialized(self):
+ with self._mutex:
+ if not self._initialized:
+ self._initialize()
+
+ def _restart(self, proxy):
+ with self._mutex:
+ assert self._initialized
+ # Verify if someone has already restarted this.
+ if self._proxy is proxy:
+ self._finalize()
+ self._manager = None
+ self._proxy = None
+ self._initialized = False
+ self._initialize()
+ return self._proxy
+
+ def execute(self, cmd, env=None, stdin=None):
+ self._ensure_initialized()
+ proxy = self._proxy
+ retry = False
+ try:
+ res = proxy.run_one_command(cmd, env, stdin)
+ except (EOFError, IOError):
+ retry = True
+ # res can be None if we received final None sent by dying server thread
+ # instead of response to our request. Process is most likely to be dead
+ # at this point.
+ if retry or res is None:
+ proxy = self._restart(proxy)
+ res = proxy.run_one_command(cmd, env, stdin)
+ return res
diff --git a/oslo_rootwrap/cmd.py b/oslo_rootwrap/cmd.py
new file mode 100644
index 0000000..ea5709f
--- /dev/null
+++ b/oslo_rootwrap/cmd.py
@@ -0,0 +1,124 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Root wrapper for OpenStack services
+
+ Filters which commands a service is allowed to run as another user.
+
+ To use this with oslo, you should set the following in
+ oslo.conf:
+ rootwrap_config=/etc/oslo/rootwrap.conf
+
+ You also need to let the oslo user run oslo-rootwrap
+ as root in sudoers:
+ oslo ALL = (root) NOPASSWD: /usr/bin/oslo-rootwrap
+ /etc/oslo/rootwrap.conf *
+
+ Service packaging should deploy .filters files only on nodes where
+ they are needed, to avoid allowing more than is necessary.
+"""
+
+from __future__ import print_function
+
+import logging
+import sys
+
+from six import moves
+
+from oslo_rootwrap import daemon as daemon_mod
+from oslo_rootwrap import wrapper
+
+RC_UNAUTHORIZED = 99
+RC_NOCOMMAND = 98
+RC_BADCONFIG = 97
+RC_NOEXECFOUND = 96
+SIGNAL_BASE = 128
+
+
+def _exit_error(execname, message, errorcode, log=True):
+ print("%s: %s" % (execname, message), file=sys.stderr)
+ if log:
+ logging.error(message)
+ sys.exit(errorcode)
+
+
+def daemon():
+ return main(run_daemon=True)
+
+
+def main(run_daemon=False):
+ # Split arguments, require at least a command
+ execname = sys.argv.pop(0)
+ if run_daemon:
+ if len(sys.argv) != 1:
+ _exit_error(execname, "Extra arguments to daemon", RC_NOCOMMAND,
+ log=False)
+ else:
+ if len(sys.argv) < 2:
+ _exit_error(execname, "No command specified", RC_NOCOMMAND,
+ log=False)
+
+ configfile = sys.argv.pop(0)
+
+ # Load configuration
+ try:
+ rawconfig = moves.configparser.RawConfigParser()
+ rawconfig.read(configfile)
+ config = wrapper.RootwrapConfig(rawconfig)
+ except ValueError as exc:
+ msg = "Incorrect value in %s: %s" % (configfile, exc.message)
+ _exit_error(execname, msg, RC_BADCONFIG, log=False)
+ except moves.configparser.Error:
+ _exit_error(execname, "Incorrect configuration file: %s" % configfile,
+ RC_BADCONFIG, log=False)
+
+ if config.use_syslog:
+ wrapper.setup_syslog(execname,
+ config.syslog_log_facility,
+ config.syslog_log_level)
+
+ filters = wrapper.load_filters(config.filters_path)
+
+ if run_daemon:
+ daemon_mod.daemon_start(config, filters)
+ else:
+ run_one_command(execname, config, filters, sys.argv)
+
+
+def run_one_command(execname, config, filters, userargs):
+ # Execute command if it matches any of the loaded filters
+ try:
+ obj = wrapper.start_subprocess(
+ filters, userargs,
+ exec_dirs=config.exec_dirs,
+ log=config.use_syslog,
+ stdin=sys.stdin,
+ stdout=sys.stdout,
+ stderr=sys.stderr)
+ returncode = obj.wait()
+ # Fix returncode of Popen
+ if returncode < 0:
+ returncode = SIGNAL_BASE - returncode
+ sys.exit(returncode)
+
+ except wrapper.FilterMatchNotExecutable as exc:
+ msg = ("Executable not found: %s (filter match = %s)"
+ % (exc.match.exec_path, exc.match.name))
+ _exit_error(execname, msg, RC_NOEXECFOUND, log=config.use_syslog)
+
+ except wrapper.NoFilterMatched:
+ msg = ("Unauthorized command: %s (no filter matched)"
+ % ' '.join(userargs))
+ _exit_error(execname, msg, RC_UNAUTHORIZED, log=config.use_syslog)
diff --git a/oslo_rootwrap/daemon.py b/oslo_rootwrap/daemon.py
new file mode 100644
index 0000000..7bda2c2
--- /dev/null
+++ b/oslo_rootwrap/daemon.py
@@ -0,0 +1,151 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from __future__ import print_function
+
+import functools
+import logging
+from multiprocessing import managers
+import os
+import shutil
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import threading
+
+from oslo_rootwrap import jsonrpc
+from oslo_rootwrap import wrapper
+
+LOG = logging.getLogger(__name__)
+
+# Since multiprocessing supports only pickle and xmlrpclib for serialization of
+# RPC requests and responses, we declare another 'jsonrpc' serializer
+
+managers.listener_client['jsonrpc'] = jsonrpc.JsonListener, jsonrpc.JsonClient
+
+
+class RootwrapClass(object):
+ def __init__(self, config, filters):
+ self.config = config
+ self.filters = filters
+
+ def run_one_command(self, userargs, env=None, stdin=None):
+ if env is None:
+ env = {}
+
+ obj = wrapper.start_subprocess(
+ self.filters, userargs,
+ exec_dirs=self.config.exec_dirs,
+ log=self.config.use_syslog,
+ close_fds=True,
+ env=env,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = obj.communicate(stdin)
+ return obj.returncode, out, err
+
+ def shutdown(self):
+ # Suicide to force break of the main thread
+ os.kill(os.getpid(), signal.SIGINT)
+
+
+def get_manager_class(config=None, filters=None):
+ class RootwrapManager(managers.BaseManager):
+ def __init__(self, address=None, authkey=None):
+ # Force jsonrpc because neither pickle nor xmlrpclib is secure
+ super(RootwrapManager, self).__init__(address, authkey,
+ serializer='jsonrpc')
+
+ if config is not None:
+ partial_class = functools.partial(RootwrapClass, config, filters)
+ RootwrapManager.register('rootwrap', partial_class)
+ else:
+ RootwrapManager.register('rootwrap')
+
+ return RootwrapManager
+
+
+def daemon_start(config, filters):
+ temp_dir = tempfile.mkdtemp(prefix='rootwrap-')
+ LOG.debug("Created temporary directory %s", temp_dir)
+ try:
+ # allow everybody to find the socket
+ rwxr_xr_x = (stat.S_IRWXU |
+ stat.S_IRGRP | stat.S_IXGRP |
+ stat.S_IROTH | stat.S_IXOTH)
+ os.chmod(temp_dir, rwxr_xr_x)
+ socket_path = os.path.join(temp_dir, "rootwrap.sock")
+ LOG.debug("Will listen on socket %s", socket_path)
+ manager_cls = get_manager_class(config, filters)
+ manager = manager_cls(address=socket_path)
+ server = manager.get_server()
+ # allow everybody to connect to the socket
+ rw_rw_rw_ = (stat.S_IRUSR | stat.S_IWUSR |
+ stat.S_IRGRP | stat.S_IWGRP |
+ stat.S_IROTH | stat.S_IWOTH)
+ os.chmod(socket_path, rw_rw_rw_)
+ try:
+ # In Python 3 we have to use buffer to push in bytes directly
+ stdout = sys.stdout.buffer
+ except AttributeError:
+ stdout = sys.stdout
+ stdout.write(socket_path.encode('utf-8'))
+ stdout.write(b'\n')
+ stdout.write(bytes(server.authkey))
+ sys.stdin.close()
+ sys.stdout.close()
+ sys.stderr.close()
+ # Gracefully shutdown on INT or TERM signals
+ stop = functools.partial(daemon_stop, server)
+ signal.signal(signal.SIGTERM, stop)
+ signal.signal(signal.SIGINT, stop)
+ LOG.info("Starting rootwrap daemon main loop")
+ server.serve_forever()
+ finally:
+ conn = server.listener
+ # This will break accept() loop with EOFError if it was not in the main
+ # thread (as in Python 3.x)
+ conn.close()
+ # Closing all currently connected client sockets for reading to break
+ # worker threads blocked on recv()
+ for cl_conn in conn.get_accepted():
+ try:
+ cl_conn.half_close()
+ except Exception:
+ # Most likely the socket has already been closed
+ LOG.debug("Failed to close connection")
+ LOG.info("Waiting for all client threads to finish.")
+ for thread in threading.enumerate():
+ if thread.daemon:
+ LOG.debug("Joining thread %s", thread)
+ thread.join()
+ LOG.debug("Removing temporary directory %s", temp_dir)
+ shutil.rmtree(temp_dir)
+
+
+def daemon_stop(server, signal, frame):
+ LOG.info("Got signal %s. Shutting down server", signal)
+ # Signals are caught in the main thread which means this handler will run
+ # in the middle of serve_forever() loop. It will catch this exception and
+ # properly return. Since all threads created by server_forever are
+ # daemonic, we need to join them afterwards. In Python 3 we can just hit
+ # stop_event instead.
+ try:
+ server.stop_event.set()
+ except AttributeError:
+ raise KeyboardInterrupt
diff --git a/oslo_rootwrap/filters.py b/oslo_rootwrap/filters.py
new file mode 100644
index 0000000..b8747ae
--- /dev/null
+++ b/oslo_rootwrap/filters.py
@@ -0,0 +1,350 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import re
+
+
+class CommandFilter(object):
+ """Command filter only checking that the 1st argument matches exec_path."""
+
+ def __init__(self, exec_path, run_as, *args):
+ self.name = ''
+ self.exec_path = exec_path
+ self.run_as = run_as
+ self.args = args
+ self.real_exec = None
+
+ def get_exec(self, exec_dirs=None):
+ """Returns existing executable, or empty string if none found."""
+ exec_dirs = exec_dirs or []
+ if self.real_exec is not None:
+ return self.real_exec
+ self.real_exec = ""
+ if os.path.isabs(self.exec_path):
+ if os.access(self.exec_path, os.X_OK):
+ self.real_exec = self.exec_path
+ else:
+ for binary_path in exec_dirs:
+ expanded_path = os.path.join(binary_path, self.exec_path)
+ if os.access(expanded_path, os.X_OK):
+ self.real_exec = expanded_path
+ break
+ return self.real_exec
+
+ def match(self, userargs):
+ """Only check that the first argument (command) matches exec_path."""
+ return userargs and os.path.basename(self.exec_path) == userargs[0]
+
+ def get_command(self, userargs, exec_dirs=None):
+ """Returns command to execute (with sudo -u if run_as != root)."""
+ exec_dirs = exec_dirs or []
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
+ if (self.run_as != 'root'):
+ # Used to run commands at lesser privileges
+ return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
+ return [to_exec] + userargs[1:]
+
+ def get_environment(self, userargs, env=None):
+ """Returns specific environment to set, None if none."""
+ return env
+
+
+class RegExpFilter(CommandFilter):
+ """Command filter doing regexp matching for every argument."""
+
+ def match(self, userargs):
+ # Early skip if command or number of args don't match
+ if (not userargs or len(self.args) != len(userargs)):
+ # DENY: argument numbers don't match
+ return False
+ # Compare each arg (anchoring pattern explicitly at end of string)
+ for (pattern, arg) in zip(self.args, userargs):
+ try:
+ if not re.match(pattern + '$', arg):
+ # DENY: Some arguments did not match
+ return False
+ except re.error:
+ # DENY: Badly-formed filter
+ return False
+ # ALLOW: All arguments matched
+ return True
+
+
+class PathFilter(CommandFilter):
+ """Command filter checking that path arguments are within given dirs
+
+ One can specify the following constraints for command arguments:
+ 1) pass - pass an argument as is to the resulting command
+ 2) some_str - check if an argument is equal to the given string
+ 3) abs path - check if a path argument is within the given base dir
+
+ A typical rootwrapper filter entry looks like this:
+ # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
+ chown: PathFilter, /bin/chown, root, nova, /var/lib/images
+
+ """
+
+ def match(self, userargs):
+ if not userargs or len(userargs) < 2:
+ return False
+
+ arguments = userargs[1:]
+
+ equal_args_num = len(self.args) == len(arguments)
+ exec_is_valid = super(PathFilter, self).match(userargs)
+ args_equal_or_pass = all(
+ arg == 'pass' or arg == value
+ for arg, value in zip(self.args, arguments)
+ if not os.path.isabs(arg) # arguments not specifying abs paths
+ )
+ paths_are_within_base_dirs = all(
+ os.path.commonprefix([arg, os.path.realpath(value)]) == arg
+ for arg, value in zip(self.args, arguments)
+ if os.path.isabs(arg) # arguments specifying abs paths
+ )
+
+ return (equal_args_num and
+ exec_is_valid and
+ args_equal_or_pass and
+ paths_are_within_base_dirs)
+
+ def get_command(self, userargs, exec_dirs=None):
+ exec_dirs = exec_dirs or []
+ command, arguments = userargs[0], userargs[1:]
+
+ # convert path values to canonical ones; copy other args as is
+ args = [os.path.realpath(value) if os.path.isabs(arg) else value
+ for arg, value in zip(self.args, arguments)]
+
+ return super(PathFilter, self).get_command([command] + args,
+ exec_dirs)
+
+
+class KillFilter(CommandFilter):
+ """Specific filter for the kill calls.
+
+ 1st argument is the user to run /bin/kill under
+ 2nd argument is the location of the affected executable
+ if the argument is not absolute, it is checked against $PATH
+ Subsequent arguments list the accepted signals (if any)
+
+ This filter relies on /proc to accurately determine affected
+ executable, so it will only work on procfs-capable systems (not OSX).
+ """
+
+ def __init__(self, *args):
+ super(KillFilter, self).__init__("/bin/kill", *args)
+
+ def match(self, userargs):
+ if not userargs or userargs[0] != "kill":
+ return False
+ args = list(userargs)
+ if len(args) == 3:
+ # A specific signal is requested
+ signal = args.pop(1)
+ if signal not in self.args[1:]:
+ # Requested signal not in accepted list
+ return False
+ else:
+ if len(args) != 2:
+ # Incorrect number of arguments
+ return False
+ if len(self.args) > 1:
+ # No signal requested, but filter requires specific signal
+ return False
+ try:
+ command = os.readlink("/proc/%d/exe" % int(args[1]))
+ except (ValueError, OSError):
+ # Incorrect PID
+ return False
+
+ # NOTE(yufang521247): /proc/PID/exe may have '\0' on the
+ # end, because python doesn't stop at '\0' when reading the
+ # target path.
+ command = command.partition('\0')[0]
+
+ # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
+ # the end if an executable is updated or deleted
+ if command.endswith(" (deleted)"):
+ command = command[:-len(" (deleted)")]
+
+ kill_command = self.args[0]
+
+ if os.path.isabs(kill_command):
+ return kill_command == command
+
+ return (os.path.isabs(command) and
+ kill_command == os.path.basename(command) and
+ os.path.dirname(command) in os.environ.get('PATH', ''
+ ).split(':'))
+
+
+class ReadFileFilter(CommandFilter):
+ """Specific filter for the utils.read_file_as_root call."""
+
+ def __init__(self, file_path, *args):
+ self.file_path = file_path
+ super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)
+
+ def match(self, userargs):
+ return (userargs == ['cat', self.file_path])
+
+
+class IpFilter(CommandFilter):
+ """Specific filter for the ip utility that does not match exec."""
+
+ def match(self, userargs):
+ if userargs[0] == 'ip':
+ # Avoid the 'netns exec' command here
+ for a, b in zip(userargs[1:], userargs[2:]):
+ if a == 'netns':
+ return (b != 'exec')
+ else:
+ return True
+
+
+class EnvFilter(CommandFilter):
+ """Specific filter for the env utility.
+
+ Behaves like CommandFilter, except that it handles
+ leading env A=B.. strings appropriately.
+ """
+
+ def _extract_env(self, arglist):
+ """Extract all leading NAME=VALUE arguments from arglist."""
+
+ envs = set()
+ for arg in arglist:
+ if '=' not in arg:
+ break
+ envs.add(arg.partition('=')[0])
+ return envs
+
+ def __init__(self, exec_path, run_as, *args):
+ super(EnvFilter, self).__init__(exec_path, run_as, *args)
+
+ env_list = self._extract_env(self.args)
+ # Set exec_path to X when args are in the form of
+ # env A=a B=b C=c X Y Z
+ if "env" in exec_path and len(env_list) < len(self.args):
+ self.exec_path = self.args[len(env_list)]
+
+ def match(self, userargs):
+ # ignore leading 'env'
+ if userargs[0] == 'env':
+ userargs.pop(0)
+
+ # require one additional argument after configured ones
+ if len(userargs) < len(self.args):
+ return False
+
+ # extract all env args
+ user_envs = self._extract_env(userargs)
+ filter_envs = self._extract_env(self.args)
+ user_command = userargs[len(user_envs):len(user_envs) + 1]
+
+ # match first non-env argument with CommandFilter
+ return (super(EnvFilter, self).match(user_command)
+ and len(filter_envs) and user_envs == filter_envs)
+
+ def exec_args(self, userargs):
+ args = userargs[:]
+
+ # ignore leading 'env'
+ if args[0] == 'env':
+ args.pop(0)
+
+ # Throw away leading NAME=VALUE arguments
+ while args and '=' in args[0]:
+ args.pop(0)
+
+ return args
+
+ def get_command(self, userargs, exec_dirs=[]):
+ to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
+ return [to_exec] + self.exec_args(userargs)[1:]
+
+ def get_environment(self, userargs, env=None):
+ if env is None:
+ env = os.environ
+ env = env.copy()
+
+ # ignore leading 'env'
+ if userargs[0] == 'env':
+ userargs.pop(0)
+
+ # Handle leading NAME=VALUE pairs
+ for a in userargs:
+ env_name, equals, env_value = a.partition('=')
+ if not equals:
+ break
+ if env_name and env_value:
+ env[env_name] = env_value
+
+ return env
+
+
+class ChainingFilter(CommandFilter):
+ def exec_args(self, userargs):
+ return []
+
+
+class IpNetnsExecFilter(ChainingFilter):
+ """Specific filter for the ip utility that does match exec."""
+
+ def match(self, userargs):
+ # Network namespaces currently require root
+ # require <ns> argument
+ if self.run_as != "root" or len(userargs) < 4:
+ return False
+
+ return (userargs[:3] == ['ip', 'netns', 'exec'])
+
+ def exec_args(self, userargs):
+ args = userargs[4:]
+ if args:
+ args[0] = os.path.basename(args[0])
+ return args
+
+
+class ChainingRegExpFilter(ChainingFilter):
+ """Command filter doing regexp matching for prefix commands.
+
+ Remaining arguments are filtered again. This means that the command
+ specified as the arguments must be also allowed to execute directly.
+ """
+
+ def match(self, userargs):
+ # Early skip if number of args is smaller than the filter
+ if (not userargs or len(self.args) > len(userargs)):
+ return False
+ # Compare each arg (anchoring pattern explicitly at end of string)
+ for (pattern, arg) in zip(self.args, userargs):
+ try:
+ if not re.match(pattern + '$', arg):
+ # DENY: Some arguments did not match
+ return False
+ except re.error:
+ # DENY: Badly-formed filter
+ return False
+ # ALLOW: All arguments matched
+ return True
+
+ def exec_args(self, userargs):
+ args = userargs[len(self.args):]
+ if args:
+ args[0] = os.path.basename(args[0])
+ return args
diff --git a/oslo_rootwrap/jsonrpc.py b/oslo_rootwrap/jsonrpc.py
new file mode 100644
index 0000000..37a642f
--- /dev/null
+++ b/oslo_rootwrap/jsonrpc.py
@@ -0,0 +1,208 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import base64
+import errno
+import json
+from multiprocessing import connection
+from multiprocessing import managers
+import socket
+import struct
+import weakref
+
+from oslo_rootwrap import wrapper
+
+
+class RpcJSONEncoder(json.JSONEncoder):
+ def default(self, o):
+        # We need to pass bytes unchanged as they are expected in arguments
+        # for, and are the result of, Popen.communicate()
+ if isinstance(o, bytes):
+ return {"__bytes__": base64.b64encode(o).decode('ascii')}
+ # Handle two exception types relevant to command execution
+ if isinstance(o, wrapper.NoFilterMatched):
+ return {"__exception__": "NoFilterMatched"}
+ elif isinstance(o, wrapper.FilterMatchNotExecutable):
+ return {"__exception__": "FilterMatchNotExecutable",
+ "match": o.match}
+ # Other errors will fail to pass JSON encoding and will be visible on
+ # client side
+ else:
+ return super(RpcJSONEncoder, self).default(o)
+
+
+# Parse whatever RpcJSONEncoder supplied us with
+def rpc_object_hook(obj):
+ if "__exception__" in obj:
+ type_name = obj.pop("__exception__")
+ if type_name not in ("NoFilterMatched", "FilterMatchNotExecutable"):
+ return obj
+ exc_type = getattr(wrapper, type_name)
+ return exc_type(**obj)
+ elif "__bytes__" in obj:
+ return base64.b64decode(obj["__bytes__"].encode('ascii'))
+ else:
+ return obj
+
+
+class JsonListener(object):
+ def __init__(self, address, backlog=1):
+ self.address = address
+ self._socket = socket.socket(socket.AF_UNIX)
+ try:
+ self._socket.setblocking(True)
+ self._socket.bind(address)
+ self._socket.listen(backlog)
+ except socket.error:
+ self._socket.close()
+ raise
+ self.closed = False
+ # Python 2.6 doesn't have WeakSet
+ self._accepted = weakref.WeakKeyDictionary()
+
+ def accept(self):
+ while True:
+ try:
+ s, _ = self._socket.accept()
+ except socket.error as e:
+ if e.errno in (errno.EINVAL, errno.EBADF):
+ raise EOFError
+ elif e.errno != errno.EINTR:
+ raise
+ else:
+ break
+ s.setblocking(True)
+ conn = JsonConnection(s)
+ self._accepted[conn] = None
+ return conn
+
+ def close(self):
+ if not self.closed:
+ self._socket.shutdown(socket.SHUT_RDWR)
+ self._socket.close()
+ self.closed = True
+
+ def get_accepted(self):
+ return list(self._accepted)
+
+if hasattr(managers.Server, 'accepter'):
+    # In Python 3 the accepter() thread has an infinite loop. We break it with
+ # EOFError, so we should silence this error here.
+ def silent_accepter(self):
+ try:
+ old_accepter(self)
+ except EOFError:
+ pass
+ old_accepter = managers.Server.accepter
+ managers.Server.accepter = silent_accepter
+
+try:
+ memoryview
+except NameError:
+ has_memoryview = False
+else:
+ has_memoryview = True
+
+
+class JsonConnection(object):
+ def __init__(self, sock):
+ sock.setblocking(True)
+ self._socket = sock
+
+ def send_bytes(self, s):
+ self._socket.sendall(struct.pack('!Q', len(s)))
+ self._socket.sendall(s)
+
+ def recv_bytes(self, maxsize=None):
+ l = struct.unpack('!Q', self.recvall(8))[0]
+ if maxsize is not None and l > maxsize:
+ raise RuntimeError("Too big message received")
+ s = self.recvall(l)
+ return s
+
+ def send(self, obj):
+ s = self.dumps(obj)
+ self.send_bytes(s)
+
+ def recv(self):
+ s = self.recv_bytes()
+ return self.loads(s)
+
+ def close(self):
+ self._socket.close()
+
+ def half_close(self):
+ self._socket.shutdown(socket.SHUT_RD)
+
+    # Unfortunately Python 2.6 doesn't support memoryview, so we'll have
+    # to do it the slow way.
+ def _recvall_slow(self, size):
+ remaining = size
+ res = []
+ while remaining:
+ piece = self._socket.recv(remaining)
+ if not piece:
+ raise EOFError
+ res.append(piece)
+ remaining -= len(piece)
+ return b''.join(res)
+
+ # For all later versions we can do it almost like in C
+ def _recvall_fast(self, size):
+ buf = bytearray(size)
+ mem = memoryview(buf)
+ got = 0
+ while got < size:
+ piece_size = self._socket.recv_into(mem[got:])
+ if not piece_size:
+ raise EOFError
+ got += piece_size
+ # bytearray is mostly compatible with bytes and we could avoid copying
+ # data here, but hmac doesn't like it in Python 3.3 (not in 2.7 or 3.4)
+ return bytes(buf)
+
+ if has_memoryview:
+ recvall = _recvall_fast
+ else:
+ recvall = _recvall_slow
+
+ @staticmethod
+ def dumps(obj):
+ return json.dumps(obj, cls=RpcJSONEncoder).encode('utf-8')
+
+ @staticmethod
+ def loads(s):
+ res = json.loads(s.decode('utf-8'), object_hook=rpc_object_hook)
+ try:
+ kind = res[0]
+ except (IndexError, TypeError):
+ pass
+ else:
+ # In Python 2 json returns unicode while multiprocessing needs str
+ if (kind in ("#TRACEBACK", "#UNSERIALIZABLE") and
+ not isinstance(res[1], str)):
+ res[1] = res[1].encode('utf-8', 'replace')
+ return res
+
+
+class JsonClient(JsonConnection):
+ def __init__(self, address, authkey=None):
+ sock = socket.socket(socket.AF_UNIX)
+ sock.setblocking(True)
+ sock.connect(address)
+ super(JsonClient, self).__init__(sock)
+ if authkey is not None:
+ connection.answer_challenge(self, authkey)
+ connection.deliver_challenge(self, authkey)
diff --git a/oslo_rootwrap/tests/__init__.py b/oslo_rootwrap/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/oslo_rootwrap/tests/__init__.py
diff --git a/oslo_rootwrap/tests/run_daemon.py b/oslo_rootwrap/tests/run_daemon.py
new file mode 100644
index 0000000..669b298
--- /dev/null
+++ b/oslo_rootwrap/tests/run_daemon.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import subprocess
+import sys
+import threading
+
+from oslo_rootwrap import cmd
+
+
+def forward_stream(fr, to):
+ while True:
+ line = fr.readline()
+ if not line:
+ break
+ to.write(line)
+
+
+def forwarding_popen(f, old_popen=subprocess.Popen):
+ def popen(*args, **kwargs):
+ p = old_popen(*args, **kwargs)
+ t = threading.Thread(target=forward_stream, args=(p.stderr, f))
+ t.daemon = True
+ t.start()
+ return p
+ return popen
+
+
+class nonclosing(object):
+ def __init__(self, f):
+ self._f = f
+
+ def __getattr__(self, name):
+ return getattr(self._f, name)
+
+ def close(self):
+ pass
+
+log_format = ("%(asctime)s | [%(process)5s]+%(levelname)5s | "
+ "%(message)s")
+if __name__ == '__main__':
+ logging.basicConfig(level=logging.DEBUG, format=log_format)
+ sys.stderr = nonclosing(sys.stderr)
+ cmd.daemon()
diff --git a/oslo_rootwrap/tests/test_functional.py b/oslo_rootwrap/tests/test_functional.py
new file mode 100644
index 0000000..aa7a5a9
--- /dev/null
+++ b/oslo_rootwrap/tests/test_functional.py
@@ -0,0 +1,242 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import contextlib
+import io
+import logging
+import os
+import signal
+import subprocess
+import sys
+import threading
+
+try:
+ import StringIO
+except ImportError:
+ StringIO = io.StringIO
+else:
+ StringIO = StringIO.StringIO
+
+try:
+ import eventlet
+except ImportError:
+ eventlet = None
+
+import fixtures
+import mock
+import testtools
+from testtools import content
+
+from oslo_rootwrap import client
+from oslo_rootwrap import wrapper
+from tests import run_daemon
+
+
+class _FunctionalBase(object):
+ def setUp(self):
+ super(_FunctionalBase, self).setUp()
+ tmpdir = self.useFixture(fixtures.TempDir()).path
+ self.config_file = os.path.join(tmpdir, 'rootwrap.conf')
+ filters_dir = os.path.join(tmpdir, 'filters.d')
+ filters_file = os.path.join(tmpdir, 'filters.d', 'test.filters')
+ os.mkdir(filters_dir)
+ with open(self.config_file, 'w') as f:
+ f.write("""[DEFAULT]
+filters_path=%s
+exec_dirs=/bin""" % (filters_dir,))
+ with open(filters_file, 'w') as f:
+ f.write("""[Filters]
+echo: CommandFilter, /bin/echo, root
+cat: CommandFilter, /bin/cat, root
+sh: CommandFilter, /bin/sh, root
+""")
+
+ def test_run_once(self):
+ code, out, err = self.execute(['echo', 'teststr'])
+ self.assertEqual(0, code)
+ self.assertEqual(b'teststr\n', out)
+ self.assertEqual(b'', err)
+
+ def test_run_with_stdin(self):
+ code, out, err = self.execute(['cat'], stdin=b'teststr')
+ self.assertEqual(0, code)
+ self.assertEqual(b'teststr', out)
+ self.assertEqual(b'', err)
+
+
+class RootwrapTest(_FunctionalBase, testtools.TestCase):
+ def setUp(self):
+ super(RootwrapTest, self).setUp()
+ self.cmd = [
+ sys.executable, '-c',
+ 'from oslo_rootwrap import cmd; cmd.main()',
+ self.config_file]
+
+ def execute(self, cmd, stdin=None):
+ proc = subprocess.Popen(
+ self.cmd + cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ out, err = proc.communicate(stdin)
+ self.addDetail('stdout',
+ content.text_content(out.decode('utf-8', 'replace')))
+ self.addDetail('stderr',
+ content.text_content(err.decode('utf-8', 'replace')))
+ return proc.returncode, out, err
+
+
+class RootwrapDaemonTest(_FunctionalBase, testtools.TestCase):
+ def assert_unpatched(self):
+ # We need to verify that these tests are run without eventlet patching
+ if eventlet and eventlet.patcher.is_monkey_patched('socket'):
+ self.fail("Standard library should not be patched by eventlet"
+ " for this test")
+
+ def setUp(self):
+ self.assert_unpatched()
+
+ super(RootwrapDaemonTest, self).setUp()
+
+ # Collect daemon logs
+ daemon_log = io.BytesIO()
+ p = mock.patch('subprocess.Popen',
+ run_daemon.forwarding_popen(daemon_log))
+ p.start()
+ self.addCleanup(p.stop)
+
+ # Collect client logs
+ client_log = StringIO()
+ handler = logging.StreamHandler(client_log)
+ log_format = run_daemon.log_format.replace('+', ' ')
+ handler.setFormatter(logging.Formatter(log_format))
+ logger = logging.getLogger('oslo_rootwrap')
+ logger.addHandler(handler)
+ logger.setLevel(logging.DEBUG)
+ self.addCleanup(logger.removeHandler, handler)
+
+ # Add all logs as details
+ @self.addCleanup
+ def add_logs():
+ self.addDetail('daemon_log', content.Content(
+ content.UTF8_TEXT,
+ lambda: [daemon_log.getvalue()]))
+ self.addDetail('client_log', content.Content(
+ content.UTF8_TEXT,
+ lambda: [client_log.getvalue().encode('utf-8')]))
+
+ # Create client
+ self.client = client.Client([
+ sys.executable, run_daemon.__file__,
+ self.config_file])
+
+ # _finalize is set during Client.execute()
+ @self.addCleanup
+ def finalize_client():
+ if self.client._initialized:
+ self.client._finalize()
+
+ self.execute = self.client.execute
+
+ def test_error_propagation(self):
+ self.assertRaises(wrapper.NoFilterMatched, self.execute, ['other'])
+
+ def test_daemon_ressurection(self):
+ # Let the client start a daemon
+ self.execute(['cat'])
+ # Make daemon go away
+ os.kill(self.client._process.pid, signal.SIGTERM)
+        # Expect client to successfully restart daemon and run simple request
+ self.test_run_once()
+
+ def test_env_setting(self):
+ code, out, err = self.execute(['sh', '-c', 'echo $SOMEVAR'],
+ env={'SOMEVAR': 'teststr'})
+ self.assertEqual(0, code)
+ self.assertEqual(b'teststr\n', out)
+ self.assertEqual(b'', err)
+
+ def _exec_thread(self, fifo_path):
+ try:
+ # Run a shell script that signals calling process through FIFO and
+ # then hangs around for 1 sec
+ self._thread_res = self.execute([
+ 'sh', '-c', 'echo > "%s"; sleep 1; echo OK' % fifo_path])
+ except Exception as e:
+ self._thread_res = e
+
+ def test_graceful_death(self):
+ # Create a fifo in a temporary dir
+ tmpdir = self.useFixture(fixtures.TempDir()).path
+ fifo_path = os.path.join(tmpdir, 'fifo')
+ os.mkfifo(fifo_path)
+ # Start daemon
+ self.execute(['cat'])
+ # Begin executing shell script
+ t = threading.Thread(target=self._exec_thread, args=(fifo_path,))
+ t.start()
+ # Wait for shell script to actually start
+ with open(fifo_path) as f:
+ f.readline()
+ # Gracefully kill daemon process
+ os.kill(self.client._process.pid, signal.SIGTERM)
+ # Expect daemon to wait for our request to finish
+ t.join()
+ if isinstance(self._thread_res, Exception):
+ raise self._thread_res # Python 3 will even provide nice traceback
+ code, out, err = self._thread_res
+ self.assertEqual(0, code)
+ self.assertEqual(b'OK\n', out)
+ self.assertEqual(b'', err)
+
+ @contextlib.contextmanager
+ def _test_daemon_cleanup(self):
+ # Start a daemon
+ self.execute(['cat'])
+ socket_path = self.client._manager._address
+ # Stop it one way or another
+ yield
+ process = self.client._process
+ stop = threading.Event()
+
+ # Start background thread that would kill process in 1 second if it
+ # doesn't die by then
+ def sleep_kill():
+ stop.wait(1)
+ if not stop.is_set():
+ os.kill(process.pid, signal.SIGKILL)
+ threading.Thread(target=sleep_kill).start()
+ # Wait for process to finish one way or another
+ self.client._process.wait()
+ # Notify background thread that process is dead (no need to kill it)
+ stop.set()
+ # Fail if the process got killed by the background thread
+ self.assertNotEqual(-signal.SIGKILL, process.returncode,
+ "Server haven't stopped in one second")
+ # Verify that socket is deleted
+ self.assertFalse(os.path.exists(socket_path),
+ "Server didn't remove its temporary directory")
+
+ def test_daemon_cleanup_client(self):
+ # Run _test_daemon_cleanup stopping daemon as Client instance would
+ # normally do
+ with self._test_daemon_cleanup():
+ self.client._finalize()
+
+ def test_daemon_cleanup_signal(self):
+ # Run _test_daemon_cleanup stopping daemon with SIGTERM signal
+ with self._test_daemon_cleanup():
+ os.kill(self.client._process.pid, signal.SIGTERM)
diff --git a/oslo_rootwrap/tests/test_functional_eventlet.py b/oslo_rootwrap/tests/test_functional_eventlet.py
new file mode 100644
index 0000000..c94bc69
--- /dev/null
+++ b/oslo_rootwrap/tests/test_functional_eventlet.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2014 Mirantis Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+if os.environ.get('TEST_EVENTLET', False):
+ import eventlet
+ eventlet.monkey_patch()
+
+ from tests import test_functional
+
+ class RootwrapDaemonTest(test_functional.RootwrapDaemonTest):
+ def assert_unpatched(self):
+ # This test case is specifically for eventlet testing
+ pass
+
+ def test_graceful_death(self):
+ # This test fails with eventlet on Python 2.6.6 on CentOS
+ self.skip("Eventlet doesn't like FIFOs")
diff --git a/oslo_rootwrap/tests/test_rootwrap.py b/oslo_rootwrap/tests/test_rootwrap.py
new file mode 100644
index 0000000..2dee792
--- /dev/null
+++ b/oslo_rootwrap/tests/test_rootwrap.py
@@ -0,0 +1,585 @@
+# Copyright 2011 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import logging.handlers
+import os
+import subprocess
+import uuid
+
+import fixtures
+import mock
+from six import moves
+import testtools
+
+from oslo_rootwrap import cmd
+from oslo_rootwrap import filters
+from oslo_rootwrap import wrapper
+
+
+class RootwrapTestCase(testtools.TestCase):
+ if os.path.exists('/sbin/ip'):
+ _ip = '/sbin/ip'
+ else:
+ _ip = '/bin/ip'
+
+ def setUp(self):
+ super(RootwrapTestCase, self).setUp()
+ self.filters = [
+ filters.RegExpFilter("/bin/ls", "root", 'ls', '/[a-z]+'),
+ filters.CommandFilter("/usr/bin/foo_bar_not_exist", "root"),
+ filters.RegExpFilter("/bin/cat", "root", 'cat', '/[a-z]+'),
+ filters.CommandFilter("/nonexistent/cat", "root"),
+ filters.CommandFilter("/bin/cat", "root") # Keep this one last
+ ]
+
+ def test_CommandFilter(self):
+ f = filters.CommandFilter("sleep", 'root', '10')
+ self.assertFalse(f.match(["sleep2"]))
+
+ # verify that any arguments are accepted
+ self.assertTrue(f.match(["sleep"]))
+ self.assertTrue(f.match(["sleep", "anything"]))
+ self.assertTrue(f.match(["sleep", "10"]))
+ f = filters.CommandFilter("sleep", 'root')
+ self.assertTrue(f.match(["sleep", "10"]))
+
+ def test_empty_commandfilter(self):
+ f = filters.CommandFilter("sleep", "root")
+ self.assertFalse(f.match([]))
+ self.assertFalse(f.match(None))
+
+ def test_empty_regexpfilter(self):
+ f = filters.RegExpFilter("sleep", "root", "sleep")
+ self.assertFalse(f.match([]))
+ self.assertFalse(f.match(None))
+
+ def test_empty_invalid_regexpfilter(self):
+ f = filters.RegExpFilter("sleep", "root")
+ self.assertFalse(f.match(["anything"]))
+ self.assertFalse(f.match([]))
+
+ def test_RegExpFilter_match(self):
+ usercmd = ["ls", "/root"]
+ filtermatch = wrapper.match_filter(self.filters, usercmd)
+ self.assertFalse(filtermatch is None)
+ self.assertEqual(filtermatch.get_command(usercmd),
+ ["/bin/ls", "/root"])
+
+ def test_RegExpFilter_reject(self):
+ usercmd = ["ls", "root"]
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, usercmd)
+
+ def test_missing_command(self):
+ valid_but_missing = ["foo_bar_not_exist"]
+ invalid = ["foo_bar_not_exist_and_not_matched"]
+ self.assertRaises(wrapper.FilterMatchNotExecutable,
+ wrapper.match_filter,
+ self.filters, valid_but_missing)
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, self.filters, invalid)
+
+ def _test_EnvFilter_as_DnsMasq(self, config_file_arg):
+ usercmd = ['env', config_file_arg + '=A', 'NETWORK_ID=foobar',
+ 'dnsmasq', 'foo']
+ f = filters.EnvFilter("env", "root", config_file_arg + '=A',
+ 'NETWORK_ID=', "/usr/bin/dnsmasq")
+ self.assertTrue(f.match(usercmd))
+ self.assertEqual(f.get_command(usercmd), ['/usr/bin/dnsmasq', 'foo'])
+ env = f.get_environment(usercmd)
+ self.assertEqual(env.get(config_file_arg), 'A')
+ self.assertEqual(env.get('NETWORK_ID'), 'foobar')
+
+ def test_EnvFilter(self):
+ envset = ['A=/some/thing', 'B=somethingelse']
+ envcmd = ['env'] + envset
+ realcmd = ['sleep', '10']
+ usercmd = envcmd + realcmd
+
+ f = filters.EnvFilter("env", "root", "A=", "B=ignored", "sleep")
+ # accept with leading env
+ self.assertTrue(f.match(envcmd + ["sleep"]))
+ # accept without leading env
+ self.assertTrue(f.match(envset + ["sleep"]))
+
+ # any other command does not match
+ self.assertFalse(f.match(envcmd + ["sleep2"]))
+ self.assertFalse(f.match(envset + ["sleep2"]))
+
+ # accept any trailing arguments
+ self.assertTrue(f.match(usercmd))
+
+ # require given environment variables to match
+ self.assertFalse(f.match([envcmd, 'C=ELSE']))
+ self.assertFalse(f.match(['env', 'C=xx']))
+ self.assertFalse(f.match(['env', 'A=xx']))
+
+ # require env command to be given
+        # (otherwise CommandFilters should match)
+ self.assertFalse(f.match(realcmd))
+ # require command to match
+ self.assertFalse(f.match(envcmd))
+ self.assertFalse(f.match(envcmd[1:]))
+
+ # ensure that the env command is stripped when executing
+ self.assertEqual(f.exec_args(usercmd), realcmd)
+ env = f.get_environment(usercmd)
+ # check that environment variables are set
+ self.assertEqual(env.get('A'), '/some/thing')
+ self.assertEqual(env.get('B'), 'somethingelse')
+ self.assertFalse('sleep' in env.keys())
+
+ def test_EnvFilter_without_leading_env(self):
+ envset = ['A=/some/thing', 'B=somethingelse']
+ envcmd = ['env'] + envset
+ realcmd = ['sleep', '10']
+
+ f = filters.EnvFilter("sleep", "root", "A=", "B=ignored")
+
+ # accept without leading env
+ self.assertTrue(f.match(envset + ["sleep"]))
+
+ self.assertEqual(f.get_command(envcmd + realcmd), realcmd)
+ self.assertEqual(f.get_command(envset + realcmd), realcmd)
+
+ env = f.get_environment(envset + realcmd)
+ # check that environment variables are set
+ self.assertEqual(env.get('A'), '/some/thing')
+ self.assertEqual(env.get('B'), 'somethingelse')
+ self.assertFalse('sleep' in env.keys())
+
+ def test_KillFilter(self):
+ if not os.path.exists("/proc/%d" % os.getpid()):
+ self.skipTest("Test requires /proc filesystem (procfs)")
+ p = subprocess.Popen(["cat"], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ try:
+ f = filters.KillFilter("root", "/bin/cat", "-9", "-HUP")
+ f2 = filters.KillFilter("root", "/usr/bin/cat", "-9", "-HUP")
+ usercmd = ['kill', '-ALRM', p.pid]
+ # Incorrect signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ # Providing matching signal should be allowed
+ usercmd = ['kill', '-9', p.pid]
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+
+ f = filters.KillFilter("root", "/bin/cat")
+ f2 = filters.KillFilter("root", "/usr/bin/cat")
+ usercmd = ['kill', os.getpid()]
+ # Our own PID does not match /bin/sleep, so it should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', 999999]
+ # Nonexistent PID should fail
+ self.assertFalse(f.match(usercmd) or f2.match(usercmd))
+ usercmd = ['kill', p.pid]
+ # Providing no signal should work
+ self.assertTrue(f.match(usercmd) or f2.match(usercmd))
+
+ # verify that relative paths are matched against $PATH
+ f = filters.KillFilter("root", "cat")
+ # Our own PID does not match so it should fail
+ usercmd = ['kill', os.getpid()]
+ self.assertFalse(f.match(usercmd))
+ # Filter should find cat in /bin or /usr/bin
+ usercmd = ['kill', p.pid]
+ self.assertTrue(f.match(usercmd))
+ # Filter shouldn't be able to find binary in $PATH, so fail
+ with fixtures.EnvironmentVariable("PATH", "/foo:/bar"):
+ self.assertFalse(f.match(usercmd))
+ # ensure that unset $PATH is not causing an exception
+ with fixtures.EnvironmentVariable("PATH"):
+ self.assertFalse(f.match(usercmd))
+ finally:
+ # Terminate the "cat" process and wait for it to finish
+ p.terminate()
+ p.wait()
+
+ def test_KillFilter_no_raise(self):
+ """Makes sure ValueError from bug 926412 is gone."""
+ f = filters.KillFilter("root", "")
+ # Providing anything other than kill should be False
+ usercmd = ['notkill', 999999]
+ self.assertFalse(f.match(usercmd))
+ # Providing something that is not a pid should be False
+ usercmd = ['kill', 'notapid']
+ self.assertFalse(f.match(usercmd))
+ # no arguments should also be fine
+ self.assertFalse(f.match([]))
+ self.assertFalse(f.match(None))
+
+ def test_KillFilter_deleted_exe(self):
+ """Makes sure deleted exe's are killed correctly."""
+ f = filters.KillFilter("root", "/bin/commandddddd")
+ usercmd = ['kill', 1234]
+ # Providing no signal should work
+ with mock.patch('os.readlink') as readlink:
+ readlink.return_value = '/bin/commandddddd (deleted)'
+ self.assertTrue(f.match(usercmd))
+
+ def test_KillFilter_upgraded_exe(self):
+ """Makes sure upgraded exe's are killed correctly."""
+ f = filters.KillFilter("root", "/bin/commandddddd")
+ usercmd = ['kill', 1234]
+ with mock.patch('os.readlink') as readlink:
+ readlink.return_value = '/bin/commandddddd\0\05190bfb2 (deleted)'
+ self.assertTrue(f.match(usercmd))
+
+ def test_ReadFileFilter(self):
+ goodfn = '/good/file.name'
+ f = filters.ReadFileFilter(goodfn)
+ usercmd = ['cat', '/bad/file']
+ self.assertFalse(f.match(['cat', '/bad/file']))
+ usercmd = ['cat', goodfn]
+ self.assertEqual(f.get_command(usercmd), ['/bin/cat', goodfn])
+ self.assertTrue(f.match(usercmd))
+
+ def test_IpFilter_non_netns(self):
+ f = filters.IpFilter(self._ip, 'root')
+ self.assertTrue(f.match(['ip', 'link', 'list']))
+ self.assertTrue(f.match(['ip', '-s', 'link', 'list']))
+ self.assertTrue(f.match(['ip', '-s', '-v', 'netns', 'add']))
+ self.assertTrue(f.match(['ip', 'link', 'set', 'interface',
+ 'netns', 'somens']))
+
+ def test_IpFilter_netns(self):
+ f = filters.IpFilter(self._ip, 'root')
+ self.assertFalse(f.match(['ip', 'netns', 'exec', 'foo']))
+ self.assertFalse(f.match(['ip', 'netns', 'exec']))
+ self.assertFalse(f.match(['ip', '-s', 'netns', 'exec']))
+ self.assertFalse(f.match(['ip', '-l', '42', 'netns', 'exec']))
+
+ def _test_IpFilter_netns_helper(self, action):
+ f = filters.IpFilter(self._ip, 'root')
+ self.assertTrue(f.match(['ip', 'link', action]))
+
+ def test_IpFilter_netns_add(self):
+ self._test_IpFilter_netns_helper('add')
+
+ def test_IpFilter_netns_delete(self):
+ self._test_IpFilter_netns_helper('delete')
+
+ def test_IpFilter_netns_list(self):
+ self._test_IpFilter_netns_helper('list')
+
+ def test_IpNetnsExecFilter_match(self):
+ f = filters.IpNetnsExecFilter(self._ip, 'root')
+ self.assertTrue(
+ f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']))
+
+ def test_IpNetnsExecFilter_nomatch(self):
+ f = filters.IpNetnsExecFilter(self._ip, 'root')
+ self.assertFalse(f.match(['ip', 'link', 'list']))
+
+ # verify that at least a NS is given
+ self.assertFalse(f.match(['ip', 'netns', 'exec']))
+
+ def test_IpNetnsExecFilter_nomatch_nonroot(self):
+ f = filters.IpNetnsExecFilter(self._ip, 'user')
+ self.assertFalse(
+ f.match(['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']))
+
+ def test_match_filter_recurses_exec_command_filter_matches(self):
+ filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'),
+ filters.IpFilter(self._ip, 'root')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']
+
+ self.assertIsNotNone(wrapper.match_filter(filter_list, args))
+
+ def test_match_filter_recurses_exec_command_matches_user(self):
+ filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'),
+ filters.IpFilter(self._ip, 'user')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'link', 'list']
+
+ # Currently ip netns exec requires root, so verify that
+ # no non-root filter is matched, as that would escalate privileges
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, filter_list, args)
+
+ def test_match_filter_recurses_exec_command_filter_does_not_match(self):
+ filter_list = [filters.IpNetnsExecFilter(self._ip, 'root'),
+ filters.IpFilter(self._ip, 'root')]
+ args = ['ip', 'netns', 'exec', 'foo', 'ip', 'netns', 'exec', 'bar',
+ 'ip', 'link', 'list']
+
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, filter_list, args)
+
+ def test_ChainingRegExpFilter_match(self):
+ filter_list = [filters.ChainingRegExpFilter('nice', 'root',
+ 'nice', '-?\d+'),
+ filters.CommandFilter('cat', 'root')]
+ args = ['nice', '5', 'cat', '/a']
+ dirs = ['/bin', '/usr/bin']
+
+ self.assertIsNotNone(wrapper.match_filter(filter_list, args, dirs))
+
+ def test_ChainingRegExpFilter_not_match(self):
+ filter_list = [filters.ChainingRegExpFilter('nice', 'root',
+ 'nice', '-?\d+'),
+ filters.CommandFilter('cat', 'root')]
+ args_invalid = (['nice', '5', 'ls', '/a'],
+ ['nice', '--5', 'cat', '/a'],
+ ['nice2', '5', 'cat', '/a'],
+ ['nice', 'cat', '/a'],
+ ['nice', '5'])
+ dirs = ['/bin', '/usr/bin']
+
+ for args in args_invalid:
+ self.assertRaises(wrapper.NoFilterMatched,
+ wrapper.match_filter, filter_list, args, dirs)
+
+ def test_ChainingRegExpFilter_multiple(self):
+ filter_list = [filters.ChainingRegExpFilter('ionice', 'root', 'ionice',
+ '-c[0-3]'),
+ filters.ChainingRegExpFilter('ionice', 'root', 'ionice',
+ '-c[0-3]', '-n[0-7]'),
+ filters.CommandFilter('cat', 'root')]
+ # both filters match to ['ionice', '-c2'], but only the second accepts
+ args = ['ionice', '-c2', '-n7', 'cat', '/a']
+ dirs = ['/bin', '/usr/bin']
+
+ self.assertIsNotNone(wrapper.match_filter(filter_list, args, dirs))
+
+ def test_ReadFileFilter_empty_args(self):
+ goodfn = '/good/file.name'
+ f = filters.ReadFileFilter(goodfn)
+ self.assertFalse(f.match([]))
+ self.assertFalse(f.match(None))
+
+ def test_exec_dirs_search(self):
+ # This test supposes you have /bin/cat or /usr/bin/cat locally
+ f = filters.CommandFilter("cat", "root")
+ usercmd = ['cat', '/f']
+ self.assertTrue(f.match(usercmd))
+ self.assertTrue(f.get_command(usercmd,
+ exec_dirs=['/bin', '/usr/bin'])
+ in (['/bin/cat', '/f'], ['/usr/bin/cat', '/f']))
+
+ def test_skips(self):
+ # Check that all filters are skipped and that the last matches
+ usercmd = ["cat", "/"]
+ filtermatch = wrapper.match_filter(self.filters, usercmd)
+ self.assertTrue(filtermatch is self.filters[-1])
+
+    def test_RootwrapConfig(self):
+        """Exercise RootwrapConfig parsing: defaults and validation errors."""
+        raw = moves.configparser.RawConfigParser()
+
+        # Empty config should raise configparser.Error
+        # (filters_path is the only mandatory option).
+        self.assertRaises(moves.configparser.Error,
+                          wrapper.RootwrapConfig, raw)
+
+        # Check default values
+        raw.set('DEFAULT', 'filters_path', '/a,/b')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertEqual(config.filters_path, ['/a', '/b'])
+        self.assertEqual(config.exec_dirs, os.environ["PATH"].split(':'))
+
+        # With PATH unset, exec_dirs falls back to an empty list.
+        with fixtures.EnvironmentVariable("PATH"):
+            c = wrapper.RootwrapConfig(raw)
+            self.assertEqual(c.exec_dirs, [])
+
+        self.assertFalse(config.use_syslog)
+        self.assertFalse(config.use_syslog_rfc_format)
+        self.assertEqual(config.syslog_log_facility,
+                         logging.handlers.SysLogHandler.LOG_SYSLOG)
+        self.assertEqual(config.syslog_log_level, logging.ERROR)
+
+        # Check general values
+        raw.set('DEFAULT', 'exec_dirs', '/a,/x')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertEqual(config.exec_dirs, ['/a', '/x'])
+
+        # Non-boolean strings for boolean options must raise ValueError.
+        raw.set('DEFAULT', 'use_syslog', 'oui')
+        self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+        raw.set('DEFAULT', 'use_syslog', 'true')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertTrue(config.use_syslog)
+
+        raw.set('DEFAULT', 'use_syslog_rfc_format', 'oui')
+        self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+        raw.set('DEFAULT', 'use_syslog_rfc_format', 'true')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertTrue(config.use_syslog_rfc_format)
+
+        # Facility is accepted both as a name ('local0') and as a
+        # SysLogHandler attribute name ('LOG_AUTH'); anything else raises.
+        raw.set('DEFAULT', 'syslog_log_facility', 'moo')
+        self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+        raw.set('DEFAULT', 'syslog_log_facility', 'local0')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertEqual(config.syslog_log_facility,
+                         logging.handlers.SysLogHandler.LOG_LOCAL0)
+        raw.set('DEFAULT', 'syslog_log_facility', 'LOG_AUTH')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertEqual(config.syslog_log_facility,
+                         logging.handlers.SysLogHandler.LOG_AUTH)
+
+        # Log level must be a name known to the logging module.
+        raw.set('DEFAULT', 'syslog_log_level', 'bar')
+        self.assertRaises(ValueError, wrapper.RootwrapConfig, raw)
+        raw.set('DEFAULT', 'syslog_log_level', 'INFO')
+        config = wrapper.RootwrapConfig(raw)
+        self.assertEqual(config.syslog_log_level, logging.INFO)
+
+    def test_getlogin(self):
+        """When os.getlogin() succeeds its result is returned directly."""
+        with mock.patch('os.getlogin') as os_getlogin:
+            os_getlogin.return_value = 'foo'
+            self.assertEqual(wrapper._getlogin(), 'foo')
+
+    def test_getlogin_bad(self):
+        """On OSError, _getlogin falls through USER/USERNAME/LOGNAME env vars."""
+        with mock.patch('os.getenv') as os_getenv:
+            with mock.patch('os.getlogin') as os_getlogin:
+                # First two env lookups miss; the third ('bar') is returned.
+                os_getenv.side_effect = [None, None, 'bar']
+                os_getlogin.side_effect = OSError(
+                    '[Errno 22] Invalid argument')
+                self.assertEqual(wrapper._getlogin(), 'bar')
+                os_getlogin.assert_called_once_with()
+                self.assertEqual(os_getenv.call_count, 3)
+
+
+class PathFilterTestCase(testtools.TestCase):
+    """PathFilter must only allow paths that resolve inside its base dir.
+
+    The fixtures cover plain files, '..' traversal and symlinks, both
+    resolving inside and outside the filtered directory.
+    """
+
+    def setUp(self):
+        super(PathFilterTestCase, self).setUp()
+
+        tmpdir = fixtures.TempDir('/tmp')
+        self.useFixture(tmpdir)
+
+        # Filter allowing 'chown nova <path-under-tmpdir>' as root.
+        self.f = filters.PathFilter('/bin/chown', 'root', 'nova', tmpdir.path)
+
+        # Random names so symlink fixtures never collide between runs.
+        gen_name = lambda: str(uuid.uuid4())
+
+        self.SIMPLE_FILE_WITHIN_DIR = os.path.join(tmpdir.path, 'some')
+        self.SIMPLE_FILE_OUTSIDE_DIR = os.path.join('/tmp', 'some')
+        # 'a/../some' still resolves inside tmpdir.
+        self.TRAVERSAL_WITHIN_DIR = os.path.join(tmpdir.path, 'a', '..',
+                                                 'some')
+        # '../some' escapes tmpdir.
+        self.TRAVERSAL_OUTSIDE_DIR = os.path.join(tmpdir.path, '..', 'some')
+
+        # Symlink whose traversal target stays inside tmpdir.
+        self.TRAVERSAL_SYMLINK_WITHIN_DIR = os.path.join(tmpdir.path,
+                                                         gen_name())
+        os.symlink(os.path.join(tmpdir.path, 'a', '..', 'a'),
+                   self.TRAVERSAL_SYMLINK_WITHIN_DIR)
+
+        # Symlink whose traversal target escapes to /etc.
+        self.TRAVERSAL_SYMLINK_OUTSIDE_DIR = os.path.join(tmpdir.path,
+                                                          gen_name())
+        os.symlink(os.path.join(tmpdir.path, 'a', '..', '..', '..', 'etc'),
+                   self.TRAVERSAL_SYMLINK_OUTSIDE_DIR)
+
+        # Plain symlink pointing inside tmpdir.
+        self.SYMLINK_WITHIN_DIR = os.path.join(tmpdir.path, gen_name())
+        os.symlink(os.path.join(tmpdir.path, 'a'), self.SYMLINK_WITHIN_DIR)
+
+        # Plain symlink pointing outside tmpdir.
+        self.SYMLINK_OUTSIDE_DIR = os.path.join(tmpdir.path, gen_name())
+        os.symlink(os.path.join('/tmp', 'some_file'), self.SYMLINK_OUTSIDE_DIR)
+
+    def test_empty_args(self):
+        # Empty or missing argument lists never match.
+        self.assertFalse(self.f.match([]))
+        self.assertFalse(self.f.match(None))
+
+    def test_argument_pass_constraint(self):
+        # A 'pass' constraint accepts any value in that position.
+        f = filters.PathFilter('/bin/chown', 'root', 'pass', 'pass')
+
+        args = ['chown', 'something', self.SIMPLE_FILE_OUTSIDE_DIR]
+        self.assertTrue(f.match(args))
+
+    def test_argument_equality_constraint(self):
+        # A literal constraint requires an exact argument match.
+        f = filters.PathFilter('/bin/chown', 'root', 'nova', '/tmp/spam/eggs')
+
+        args = ['chown', 'nova', '/tmp/spam/eggs']
+        self.assertTrue(f.match(args))
+
+        args = ['chown', 'quantum', '/tmp/spam/eggs']
+        self.assertFalse(f.match(args))
+
+    def test_wrong_arguments_number(self):
+        args = ['chown', '-c', 'nova', self.SIMPLE_FILE_WITHIN_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_wrong_exec_command(self):
+        args = ['wrong_exec', self.SIMPLE_FILE_WITHIN_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_match(self):
+        args = ['chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR]
+        self.assertTrue(self.f.match(args))
+
+    def test_match_traversal(self):
+        args = ['chown', 'nova', self.TRAVERSAL_WITHIN_DIR]
+        self.assertTrue(self.f.match(args))
+
+    def test_match_symlink(self):
+        args = ['chown', 'nova', self.SYMLINK_WITHIN_DIR]
+        self.assertTrue(self.f.match(args))
+
+    def test_match_traversal_symlink(self):
+        args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_WITHIN_DIR]
+        self.assertTrue(self.f.match(args))
+
+    def test_reject(self):
+        args = ['chown', 'nova', self.SIMPLE_FILE_OUTSIDE_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_reject_traversal(self):
+        args = ['chown', 'nova', self.TRAVERSAL_OUTSIDE_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_reject_symlink(self):
+        args = ['chown', 'nova', self.SYMLINK_OUTSIDE_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_reject_traversal_symlink(self):
+        args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_OUTSIDE_DIR]
+        self.assertFalse(self.f.match(args))
+
+    def test_get_command(self):
+        args = ['chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR]
+        expected = ['/bin/chown', 'nova', self.SIMPLE_FILE_WITHIN_DIR]
+
+        self.assertEqual(expected, self.f.get_command(args))
+
+    def test_get_command_traversal(self):
+        # get_command must canonicalize traversal paths via realpath.
+        args = ['chown', 'nova', self.TRAVERSAL_WITHIN_DIR]
+        expected = ['/bin/chown', 'nova',
+                    os.path.realpath(self.TRAVERSAL_WITHIN_DIR)]
+
+        self.assertEqual(expected, self.f.get_command(args))
+
+    def test_get_command_symlink(self):
+        args = ['chown', 'nova', self.SYMLINK_WITHIN_DIR]
+        expected = ['/bin/chown', 'nova',
+                    os.path.realpath(self.SYMLINK_WITHIN_DIR)]
+
+        self.assertEqual(expected, self.f.get_command(args))
+
+    def test_get_command_traversal_symlink(self):
+        args = ['chown', 'nova', self.TRAVERSAL_SYMLINK_WITHIN_DIR]
+        expected = ['/bin/chown', 'nova',
+                    os.path.realpath(self.TRAVERSAL_SYMLINK_WITHIN_DIR)]
+
+        self.assertEqual(expected, self.f.get_command(args))
+
+
+class RunOneCommandTestCase(testtools.TestCase):
+    """cmd.run_one_command must translate child exit status for sys.exit."""
+
+    def _test_returncode_helper(self, returncode, expected):
+        # Stub out the real subprocess start and capture sys.exit's argument.
+        with mock.patch.object(wrapper, 'start_subprocess') as mock_start:
+            with mock.patch('sys.exit') as mock_exit:
+                mock_start.return_value.wait.return_value = returncode
+                cmd.run_one_command(None, mock.Mock(), None, None)
+                mock_exit.assert_called_once_with(expected)
+
+    def test_positive_returncode(self):
+        self._test_returncode_helper(1, 1)
+
+    def test_negative_returncode(self):
+        # Negative returncodes (child killed by a signal) are expected to
+        # map to the shell convention 128 + signal number.
+        self._test_returncode_helper(-1, 129)
diff --git a/oslo_rootwrap/wrapper.py b/oslo_rootwrap/wrapper.py
new file mode 100644
index 0000000..6136d8f
--- /dev/null
+++ b/oslo_rootwrap/wrapper.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2011 OpenStack Foundation.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import logging
+import logging.handlers
+import os
+import pwd
+import signal
+import subprocess
+
+from six import moves
+
+from oslo_rootwrap import filters
+
+
+class NoFilterMatched(Exception):
+    """Raised by match_filter() when no configured filter matched."""
+    pass
+
+
+class FilterMatchNotExecutable(Exception):
+    """Raised when a filter matched but no executable was found.
+
+    The matching filter is kept in the ``match`` attribute so callers can
+    report which command was expected.
+    """
+    def __init__(self, match=None, **kwargs):
+        # NOTE(review): extra kwargs are silently discarded and
+        # Exception.__init__ is never called, so str(exc) is empty —
+        # confirm this is intentional.
+        self.match = match
+
+
+class RootwrapConfig(object):
+    """Typed view over a rootwrap configparser object.
+
+    Reads the [DEFAULT] section and exposes: filters_path (list of dirs,
+    mandatory), exec_dirs (list of dirs, defaults to $PATH), syslog
+    facility/level and the two use_syslog booleans. Raises
+    configparser.Error for a missing filters_path and ValueError for
+    invalid option values.
+    """
+
+    def __init__(self, config):
+        # filters_path (mandatory: config.get raises if absent)
+        self.filters_path = config.get("DEFAULT", "filters_path").split(",")
+
+        # exec_dirs
+        if config.has_option("DEFAULT", "exec_dirs"):
+            self.exec_dirs = config.get("DEFAULT", "exec_dirs").split(",")
+        else:
+            self.exec_dirs = []
+            # Use system PATH if exec_dirs is not specified
+            if "PATH" in os.environ:
+                self.exec_dirs = os.environ['PATH'].split(':')
+
+        # syslog_log_facility: accept either a SysLogHandler attribute
+        # name ('LOG_AUTH') or a facility name ('local0').
+        if config.has_option("DEFAULT", "syslog_log_facility"):
+            v = config.get("DEFAULT", "syslog_log_facility")
+            facility_names = logging.handlers.SysLogHandler.facility_names
+            self.syslog_log_facility = getattr(logging.handlers.SysLogHandler,
+                                               v, None)
+            if self.syslog_log_facility is None and v in facility_names:
+                self.syslog_log_facility = facility_names.get(v)
+            if self.syslog_log_facility is None:
+                raise ValueError('Unexpected syslog_log_facility: %s' % v)
+        else:
+            default_facility = logging.handlers.SysLogHandler.LOG_SYSLOG
+            self.syslog_log_facility = default_facility
+
+        # syslog_log_level: getLevelName returns 'Level <v>' for unknown
+        # names, which is how we detect an invalid value.
+        if config.has_option("DEFAULT", "syslog_log_level"):
+            v = config.get("DEFAULT", "syslog_log_level")
+            self.syslog_log_level = logging.getLevelName(v.upper())
+            if (self.syslog_log_level == "Level %s" % v.upper()):
+                raise ValueError('Unexpected syslog_log_level: %s' % v)
+        else:
+            self.syslog_log_level = logging.ERROR
+
+        # use_syslog (getboolean raises ValueError on non-boolean strings)
+        if config.has_option("DEFAULT", "use_syslog"):
+            self.use_syslog = config.getboolean("DEFAULT", "use_syslog")
+        else:
+            self.use_syslog = False
+
+        # use_syslog_rfc_format
+        if config.has_option("DEFAULT", "use_syslog_rfc_format"):
+            self.use_syslog_rfc_format = config.getboolean(
+                "DEFAULT", "use_syslog_rfc_format")
+        else:
+            self.use_syslog_rfc_format = False
+
+
+def setup_syslog(execname, facility, level):
+    """Attach a /dev/log syslog handler to the root logger.
+
+    :param execname: program path; its basename prefixes every message.
+    :param facility: SysLogHandler facility constant.
+    :param level: logging level for the root logger.
+    """
+    rootwrap_logger = logging.getLogger()
+    rootwrap_logger.setLevel(level)
+    handler = logging.handlers.SysLogHandler(address='/dev/log',
+                                             facility=facility)
+    handler.setFormatter(logging.Formatter(
+        os.path.basename(execname) + ': %(message)s'))
+    rootwrap_logger.addHandler(handler)
+
+
+def build_filter(class_name, *args):
+    """Returns a filter object of class class_name.
+
+    Unknown class names are logged and skipped (returns None) rather
+    than aborting, so one bad filter definition cannot break the rest.
+    """
+    if not hasattr(filters, class_name):
+        logging.warning("Skipping unknown filter class (%s) specified "
+                        "in filter definitions" % class_name)
+        return None
+    filterclass = getattr(filters, class_name)
+    return filterclass(*args)
+
+
+def load_filters(filters_path):
+    """Load filters from a list of directories.
+
+    Non-existent directories and hidden files (dot-prefixed) are skipped.
+    Each file is parsed as an ini file; every (name, definition) pair in
+    its [Filters] section becomes one filter object.
+    """
+    filterlist = []
+    for filterdir in filters_path:
+        if not os.path.isdir(filterdir):
+            continue
+        for filterfile in filter(lambda f: not f.startswith('.'),
+                                 os.listdir(filterdir)):
+            filterconfig = moves.configparser.RawConfigParser()
+            filterconfig.read(os.path.join(filterdir, filterfile))
+            for (name, value) in filterconfig.items("Filters"):
+                # Definition format: FilterClass, arg1, arg2, ...
+                filterdefinition = [s.strip() for s in value.split(',')]
+                newfilter = build_filter(*filterdefinition)
+                if newfilter is None:
+                    continue
+                newfilter.name = name
+                filterlist.append(newfilter)
+    return filterlist
+
+
+def match_filter(filter_list, userargs, exec_dirs=None):
+    """Checks user command and arguments through command filters.
+
+    Returns the first matching filter.
+
+    Raises NoFilterMatched if no filter matched.
+    Raises FilterMatchNotExecutable if no executable was found for the
+    best filter match.
+    """
+    first_not_executable_filter = None
+    exec_dirs = exec_dirs or []
+
+    for f in filter_list:
+        if f.match(userargs):
+            if isinstance(f, filters.ChainingFilter):
+                # This command calls exec; verify that the remaining args
+                # match another (non-chaining) filter with the same
+                # run_as user, otherwise keep looking.
+                def non_chain_filter(fltr):
+                    return (fltr.run_as == f.run_as
+                            and not isinstance(fltr, filters.ChainingFilter))
+
+                leaf_filters = [fltr for fltr in filter_list
+                                if non_chain_filter(fltr)]
+                args = f.exec_args(userargs)
+                if not args:
+                    continue
+                try:
+                    # Recurse on the inner command only; failure there
+                    # just disqualifies this chaining filter.
+                    match_filter(leaf_filters, args, exec_dirs=exec_dirs)
+                except (NoFilterMatched, FilterMatchNotExecutable):
+                    continue
+
+            # Try other filters if executable is absent
+            if not f.get_exec(exec_dirs=exec_dirs):
+                if not first_not_executable_filter:
+                    # Remember only the first such match for the error.
+                    first_not_executable_filter = f
+                continue
+            # Otherwise return matching filter for execution
+            return f
+
+    if first_not_executable_filter:
+        # A filter matched, but no executable was found for it
+        raise FilterMatchNotExecutable(match=first_not_executable_filter)
+
+    # No filter matched
+    raise NoFilterMatched()
+
+
+def _subprocess_setup():
+    """preexec_fn for Popen: restore default SIGPIPE handling."""
+    # Python installs a SIGPIPE handler by default. This is usually not what
+    # non-Python subprocesses expect.
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+
+def _getlogin():
+    """Best-effort name of the invoking user, for log messages.
+
+    os.getlogin() raises OSError when there is no controlling terminal
+    (e.g. under a daemon); fall back to the usual environment variables.
+    May return None if none of them is set.
+    """
+    try:
+        return os.getlogin()
+    except OSError:
+        return (os.getenv('USER') or
+                os.getenv('USERNAME') or
+                os.getenv('LOGNAME'))
+
+
+# NOTE(review): exec_dirs=[] is a mutable default argument; it is never
+# mutated here so this is benign, but exec_dirs=None would be safer.
+def start_subprocess(filter_list, userargs, exec_dirs=[], log=False,
+                     env=None, **kwargs):
+    """Match userargs against filter_list and spawn the resulting command.
+
+    :param filter_list: filters as returned by load_filters().
+    :param userargs: command and arguments requested by the user.
+    :param exec_dirs: directories searched for the real executable.
+    :param log: when True, log who runs what via the logging module.
+    :param env: base environment passed to the filter's get_environment().
+    :returns: the subprocess.Popen object for the started command.
+    :raises: NoFilterMatched / FilterMatchNotExecutable from match_filter.
+    """
+    filtermatch = match_filter(filter_list, userargs, exec_dirs)
+
+    command = filtermatch.get_command(userargs, exec_dirs)
+    if log:
+        logging.info("(%s > %s) Executing %s (filter match = %s)" % (
+            _getlogin(), pwd.getpwuid(os.getuid())[0],
+            command, filtermatch.name))
+
+    obj = subprocess.Popen(command,
+                           preexec_fn=_subprocess_setup,
+                           env=filtermatch.get_environment(userargs, env=env),
+                           **kwargs)
+    return obj
diff --git a/setup.cfg b/setup.cfg
index e05bbfd..fd5c69f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -22,6 +22,8 @@ classifier =
[files]
packages =
oslo
+ oslo.rootwrap
+ oslo_rootwrap
namespace_packages =
oslo
diff --git a/test-requirements-py3.txt b/test-requirements-py3.txt
index 238a0f8..1703881 100644
--- a/test-requirements-py3.txt
+++ b/test-requirements-py3.txt
@@ -22,3 +22,6 @@ mock>=1.0
# rootwrap daemon's client should be verified to run in eventlet
# not available for Python 3.x
# eventlet>=0.13.0
+
+oslotest>=1.2.0
+
diff --git a/test-requirements.txt b/test-requirements.txt
index 4db6103..fdcd08e 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,6 +14,8 @@ testtools>=0.9.36,!=1.2.0
# this is required for the docs build jobs
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
oslosphinx>=2.2.0 # Apache-2.0
+oslotest>=1.2.0
+
# mocking framework
mock>=1.0
diff --git a/tests/test_functional.py b/tests/test_functional.py
index e3d17b0..ed24d46 100644
--- a/tests/test_functional.py
+++ b/tests/test_functional.py
@@ -80,7 +80,11 @@ class RootwrapTest(_FunctionalBase, testtools.TestCase):
def setUp(self):
super(RootwrapTest, self).setUp()
self.cmd = [
- sys.executable, '-c',
+ # We need to explicitly ignore the DeprecationWarning
+ # generated by importing oslo.rootwrap instead of
+ # oslo_rootwrap under python 2.6 but it is going to be
+ # ignored by default for versions after 2.7.
+ sys.executable, '-W', 'ignore::DeprecationWarning', '-c',
'from oslo.rootwrap import cmd; cmd.main()',
self.config_file]
diff --git a/tests/test_rootwrap.py b/tests/test_rootwrap.py
index 17dfd2a..a573ba5 100644
--- a/tests/test_rootwrap.py
+++ b/tests/test_rootwrap.py
@@ -434,21 +434,6 @@ class RootwrapTestCase(testtools.TestCase):
config = wrapper.RootwrapConfig(raw)
self.assertEqual(config.syslog_log_level, logging.INFO)
- def test_getlogin(self):
- with mock.patch('os.getlogin') as os_getlogin:
- os_getlogin.return_value = 'foo'
- self.assertEqual(wrapper._getlogin(), 'foo')
-
- def test_getlogin_bad(self):
- with mock.patch('os.getenv') as os_getenv:
- with mock.patch('os.getlogin') as os_getlogin:
- os_getenv.side_effect = [None, None, 'bar']
- os_getlogin.side_effect = OSError(
- '[Errno 22] Invalid argument')
- self.assertEqual(wrapper._getlogin(), 'bar')
- os_getlogin.assert_called_once_with()
- self.assertEqual(os_getenv.call_count, 3)
-
class PathFilterTestCase(testtools.TestCase):
def setUp(self):
@@ -572,7 +557,8 @@ class PathFilterTestCase(testtools.TestCase):
class RunOneCommandTestCase(testtools.TestCase):
def _test_returncode_helper(self, returncode, expected):
- with mock.patch.object(wrapper, 'start_subprocess') as mock_start:
+ start_name = 'oslo_rootwrap.wrapper.start_subprocess'
+ with mock.patch(start_name) as mock_start:
with mock.patch('sys.exit') as mock_exit:
mock_start.return_value.wait.return_value = returncode
cmd.run_one_command(None, mock.Mock(), None, None)
diff --git a/tests/test_warning.py b/tests/test_warning.py
new file mode 100644
index 0000000..23202e0
--- /dev/null
+++ b/tests/test_warning.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import os
+import warnings
+
+import mock
+from oslotest import base as test_base
+import six
+
+
+class DeprecationWarningTest(test_base.BaseTestCase):
+    """Importing the old oslo.rootwrap namespace must warn about oslo_rootwrap."""
+
+    @mock.patch('warnings.warn')
+    def test_warning(self, mock_warn):
+        """Reloading oslo.rootwrap invokes warnings.warn with the right text."""
+        import oslo.rootwrap
+        imp.reload(oslo.rootwrap)
+        self.assertTrue(mock_warn.called)
+        args = mock_warn.call_args
+        # Message must mention the new module name and deprecation,
+        # and the category must be a DeprecationWarning subclass.
+        self.assertIn('oslo_rootwrap', args[0][0])
+        self.assertIn('deprecated', args[0][0])
+        self.assertTrue(issubclass(args[0][1], DeprecationWarning))
+
+    def test_real_warning(self):
+        """An unmocked warning is actually emitted and points at this file."""
+        with warnings.catch_warnings(record=True) as warning_msgs:
+            warnings.resetwarnings()
+            warnings.simplefilter('always', DeprecationWarning)
+            import oslo.rootwrap
+
+            # Use a separate function to get the stack level correct
+            # so we know the message points back to this file. This
+            # corresponds to an import or reload, which isn't working
+            # inside the test under Python 3.3. That may be due to a
+            # difference in the import implementation not triggering
+            # warnings properly when the module is reloaded, or
+            # because the warnings module is mostly implemented in C
+            # and something isn't cleanly resetting the global state
+            # used to track whether a warning needs to be
+            # emitted. Whatever the cause, we definitely see the
+            # warnings.warn() being invoked on a reload (see the test
+            # above) and warnings are reported on the console when we
+            # run the tests. A simpler test script run outside of
+            # testr does correctly report the warnings.
+            def foo():
+                oslo.rootwrap.deprecated()
+
+            foo()
+            self.assertEqual(1, len(warning_msgs))
+            msg = warning_msgs[0]
+            self.assertIn('oslo_rootwrap', six.text_type(msg.message))
+            self.assertEqual('test_warning.py', os.path.basename(msg.filename))