Diffstat (limited to 'openstack')
-rw-r--r--  openstack/__init__.py  22
-rw-r--r--  openstack/common/__init__.py  0
-rw-r--r--  openstack/common/_i18n.py  45
-rw-r--r--  openstack/common/cache/__init__.py  0
-rw-r--r--  openstack/common/cache/_backends/__init__.py  0
-rw-r--r--  openstack/common/cache/_backends/memory.py  166
-rw-r--r--  openstack/common/cache/backends.py  250
-rw-r--r--  openstack/common/cache/cache.py  92
-rw-r--r--  openstack/common/cliutils.py  272
-rw-r--r--  openstack/common/crypto/__init__.py  0
-rw-r--r--  openstack/common/crypto/utils.py  197
-rw-r--r--  openstack/common/imageutils.py  152
-rw-r--r--  openstack/common/memorycache.py  103
-rw-r--r--  openstack/common/scheduler/__init__.py  0
-rw-r--r--  openstack/common/scheduler/base_filter.py  95
-rw-r--r--  openstack/common/scheduler/base_handler.py  46
-rw-r--r--  openstack/common/scheduler/base_weight.py  147
-rw-r--r--  openstack/common/scheduler/filters/__init__.py  38
-rw-r--r--  openstack/common/scheduler/filters/availability_zone_filter.py  32
-rw-r--r--  openstack/common/scheduler/filters/capabilities_filter.py  74
-rw-r--r--  openstack/common/scheduler/filters/extra_specs_ops.py  72
-rw-r--r--  openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py  57
-rw-r--r--  openstack/common/scheduler/filters/json_filter.py  151
-rw-r--r--  openstack/common/scheduler/weights/__init__.py  45
24 files changed, 0 insertions, 2056 deletions
diff --git a/openstack/__init__.py b/openstack/__init__.py
deleted file mode 100644
index 3e10c3f7..00000000
--- a/openstack/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This ensures the openstack namespace is defined
-try:
- import pkg_resources
- pkg_resources.declare_namespace(__name__)
-except ImportError:
- import pkgutil
- __path__ = pkgutil.extend_path(__path__, __name__)
diff --git a/openstack/common/__init__.py b/openstack/common/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/openstack/common/__init__.py
+++ /dev/null
diff --git a/openstack/common/_i18n.py b/openstack/common/_i18n.py
deleted file mode 100644
index fc392f91..00000000
--- a/openstack/common/_i18n.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""oslo.i18n integration module.
-
-See http://docs.openstack.org/developer/oslo.i18n/usage.html
-
-"""
-
-try:
- import oslo_i18n
-
- # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
- # application name when this module is synced into the separate
- # repository. It is OK to have more than one translation function
- # using the same domain, since there will still only be one message
- # catalog.
- _translators = oslo_i18n.TranslatorFactory(domain='oslo')
-
- # The primary translation function using the well-known name "_"
- _ = _translators.primary
-
- # Translators for log levels.
- #
- # The abbreviated names are meant to reflect the usual use of a short
- # name like '_'. The "L" is for "log" and the other letter comes from
- # the level.
- _LI = _translators.log_info
- _LW = _translators.log_warning
- _LE = _translators.log_error
- _LC = _translators.log_critical
-except ImportError:
- # NOTE(dims): Support for cases where a project wants to use
- # code from oslo-incubator, but is not ready to be internationalized
- # (like tempest)
- _ = _LI = _LW = _LE = _LC = lambda x: x
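
For context, a minimal usage sketch of this shim (assuming openstack.common._i18n is still importable; the quota-check function and message text below are purely illustrative):

import logging

from openstack.common._i18n import _, _LW

LOG = logging.getLogger(__name__)


def check_quota(used, limit):
    # _ marks user-facing strings, _LW marks warning-level log messages for
    # the shared message catalog (both become no-ops when oslo_i18n is absent).
    if used > limit:
        LOG.warning(_LW("Quota exceeded: %(used)d of %(limit)d"),
                    {'used': used, 'limit': limit})
        raise ValueError(_("Quota exceeded"))
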
diff --git a/openstack/common/cache/__init__.py b/openstack/common/cache/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/openstack/common/cache/__init__.py
+++ /dev/null
diff --git a/openstack/common/cache/_backends/__init__.py b/openstack/common/cache/_backends/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/openstack/common/cache/_backends/__init__.py
+++ /dev/null
diff --git a/openstack/common/cache/_backends/memory.py b/openstack/common/cache/_backends/memory.py
deleted file mode 100644
index f784479c..00000000
--- a/openstack/common/cache/_backends/memory.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import collections
-
-from oslo_concurrency import lockutils
-from oslo_utils import timeutils
-
-from openstack.common.cache import backends
-
-
-class MemoryBackend(backends.BaseCache):
-
- def __init__(self, parsed_url, options=None):
- super(MemoryBackend, self).__init__(parsed_url, options)
- self._clear()
-
- def _set_unlocked(self, key, value, ttl=0):
- expires_at = 0
- if ttl != 0:
- expires_at = timeutils.utcnow_ts() + ttl
-
- self._cache[key] = (expires_at, value)
-
- if expires_at:
- self._keys_expires[expires_at].add(key)
-
- def _set(self, key, value, ttl=0, not_exists=False):
- with lockutils.lock(key):
-
- # NOTE(flaper87): This is needed just in `set`
- # calls, hence it's not in `_set_unlocked`
- if not_exists and self._exists_unlocked(key):
- return False
-
- self._set_unlocked(key, value, ttl)
- return True
-
- def _get_unlocked(self, key, default=None):
- now = timeutils.utcnow_ts()
-
- try:
- timeout, value = self._cache[key]
- except KeyError:
- return (0, default)
-
- if timeout and now >= timeout:
-
- # NOTE(flaper87): Record expired,
- # remove it from the cache but catch
- # KeyError and ValueError in case
- # _purge_expired removed this key already.
- try:
- del self._cache[key]
- except KeyError:
- pass
-
- try:
- # NOTE(flaper87): Keys with ttl == 0
- # don't exist in the _keys_expires dict
- self._keys_expires[timeout].remove(key)
- except (KeyError, ValueError):
- pass
-
- return (0, default)
-
- return (timeout, value)
-
- def _get(self, key, default=None):
- with lockutils.lock(key):
- return self._get_unlocked(key, default)[1]
-
- def _exists_unlocked(self, key):
- now = timeutils.utcnow_ts()
- try:
- timeout = self._cache[key][0]
- return not timeout or now <= timeout
- except KeyError:
- return False
-
- def __contains__(self, key):
- with lockutils.lock(key):
- return self._exists_unlocked(key)
-
- def _incr_append(self, key, other):
- with lockutils.lock(key):
- timeout, value = self._get_unlocked(key)
-
- if value is None:
- return None
-
- ttl = timeutils.utcnow_ts() - timeout
- new_value = value + other
- self._set_unlocked(key, new_value, ttl)
- return new_value
-
- def _incr(self, key, delta):
- if not isinstance(delta, int):
- raise TypeError('delta must be an int instance')
-
- return self._incr_append(key, delta)
-
- def _append_tail(self, key, tail):
- return self._incr_append(key, tail)
-
- def _purge_expired(self):
- """Removes expired keys from the cache."""
-
- now = timeutils.utcnow_ts()
- for timeout in sorted(self._keys_expires.keys()):
-
- # NOTE(flaper87): If timeout is greater
- # than `now`, stop the iteration, remaining
- # keys have not expired.
- if now < timeout:
- break
-
- # NOTE(flaper87): Unset every key in
- # this set from the cache if its timeout
- # is equal to `timeout`. (The key might
- # have been updated)
- for subkey in self._keys_expires.pop(timeout):
- try:
- if self._cache[subkey][0] == timeout:
- del self._cache[subkey]
- except KeyError:
- continue
-
- def __delitem__(self, key):
- self._purge_expired()
-
- # NOTE(flaper87): Delete the key. Using pop
- # since it could have been deleted already
- value = self._cache.pop(key, None)
-
- if value:
- try:
- # NOTE(flaper87): Keys with ttl == 0
- # don't exist in the _keys_expires dict
- self._keys_expires[value[0]].remove(key)
- except (KeyError, ValueError):
- pass
-
- def _clear(self):
- self._cache = {}
- self._keys_expires = collections.defaultdict(set)
-
- def _get_many(self, keys, default):
- return super(MemoryBackend, self)._get_many(keys, default)
-
- def _set_many(self, data, ttl=0):
- return super(MemoryBackend, self)._set_many(data, ttl)
-
- def _unset_many(self, keys):
- return super(MemoryBackend, self)._unset_many(keys)
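
For context, a self-contained sketch of the expiry-index technique MemoryBackend relies on: entries are stored as (expires_at, value) and a defaultdict(set) maps each expiry timestamp to the keys due at that moment, so purging only visits buckets whose timestamp has passed (plain time.time() stands in for oslo's timeutils here):

import collections
import time

cache = {}
keys_expires = collections.defaultdict(set)


def set_key(key, value, ttl=0):
    # ttl == 0 means "never expires", mirroring _set_unlocked() above.
    expires_at = int(time.time()) + ttl if ttl else 0
    cache[key] = (expires_at, value)
    if expires_at:
        keys_expires[expires_at].add(key)


def purge_expired():
    now = int(time.time())
    for timeout in sorted(keys_expires):
        if now < timeout:
            break  # remaining buckets have not expired yet
        for key in keys_expires.pop(timeout):
            # Only drop the key if it was not re-set with a newer expiry.
            if key in cache and cache[key][0] == timeout:
                del cache[key]
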
diff --git a/openstack/common/cache/backends.py b/openstack/common/cache/backends.py
deleted file mode 100644
index 1bea8912..00000000
--- a/openstack/common/cache/backends.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-import six
-
-
-NOTSET = object()
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseCache(object):
- """Base Cache Abstraction
-
- :params parsed_url: Parsed url object.
- :params options: A dictionary with configuration parameters
- for the cache. For example:
-
- - default_ttl: An integer defining the default ttl for keys.
- """
-
- def __init__(self, parsed_url, options=None):
- self._parsed_url = parsed_url
- self._options = options or {}
- self._default_ttl = int(self._options.get('default_ttl', 0))
-
- @abc.abstractmethod
- def _set(self, key, value, ttl, not_exists=False):
- """Implementations of this class have to override this method."""
-
- def set(self, key, value, ttl, not_exists=False):
- """Sets or updates a cache entry
-
- .. note:: Thread-safety is required and has to be guaranteed by the
- backend implementation.
-
- :params key: Item key as string.
- :type key: `unicode string`
- :params value: Value to assign to the key. This can be anything that
- is handled by the current backend.
- :params ttl: Key's timeout in seconds. 0 means no timeout.
- :type ttl: int
- :params not_exists: If True, the key will be set if it doesn't exist.
- Otherwise, it'll always be set.
- :type not_exists: bool
-
- :returns: True if the operation succeeds, False otherwise.
- """
- if ttl is None:
- ttl = self._default_ttl
-
- return self._set(key, value, ttl, not_exists)
-
- def __setitem__(self, key, value):
- self.set(key, value, self._default_ttl)
-
- def setdefault(self, key, value):
- """Sets the key value to `value` if it doesn't exist
-
- :params key: Item key as string.
- :type key: `unicode string`
- :params value: Value to assign to the key. This can be anything that
- is handled by the current backend.
- """
- try:
- return self[key]
- except KeyError:
- self[key] = value
- return value
-
- @abc.abstractmethod
- def _get(self, key, default):
- """Implementations of this class have to override this method."""
-
- def get(self, key, default=None):
- """Gets one item from the cache
-
- .. note:: Thread-safety is required and it has to be guaranteed
- by the backend implementation.
-
- :params key: Key for the item to retrieve from the cache.
- :params default: The default value to return.
-
- :returns: `key`'s value in the cache if it exists, otherwise
- `default` should be returned.
- """
- return self._get(key, default)
-
- def __getitem__(self, key):
- value = self.get(key, NOTSET)
-
- if value is NOTSET:
- raise KeyError
-
- return value
-
- @abc.abstractmethod
- def __delitem__(self, key):
- """Removes an item from cache.
-
- .. note:: Thread-safety is required and it has to be guaranteed by
- the backend implementation.
-
- :params key: The key to remove.
-
- :returns: The key value if there's one
- """
-
- @abc.abstractmethod
- def _clear(self):
- """Implementations of this class have to override this method."""
-
- def clear(self):
- """Removes all items from the cache.
-
- .. note:: Thread-safety is required and it has to be guaranteed by
- the backend implementation.
- """
- return self._clear()
-
- @abc.abstractmethod
- def _incr(self, key, delta):
- """Implementations of this class have to override this method."""
-
- def incr(self, key, delta=1):
- """Increments the value for a key
-
- :params key: The key for the value to be incremented
- :params delta: Number of units by which to increment the value.
- Pass a negative number to decrement the value.
-
- :returns: The new value
- """
- return self._incr(key, delta)
-
- @abc.abstractmethod
- def _append_tail(self, key, tail):
- """Implementations of this class have to override this method."""
-
- def append_tail(self, key, tail):
- """Appends `tail` to `key`'s value.
-
- :params key: The key of the value to which `tail` should be appended.
- :params tail: The list of values to append to the original.
-
- :returns: The new value
- """
-
- if not hasattr(tail, "__iter__"):
- raise TypeError('Tail must be an iterable')
-
- if not isinstance(tail, list):
- # NOTE(flaper87): Make sure we pass a list
- # down to the implementation. Not all drivers
- # have support for generators, sets or other
- # iterables.
- tail = list(tail)
-
- return self._append_tail(key, tail)
-
- def append(self, key, value):
- """Appends `value` to `key`'s value.
-
- :params key: The key of the value to which `tail` should be appended.
- :params value: The value to append to the original.
-
- :returns: The new value
- """
- return self.append_tail(key, [value])
-
- @abc.abstractmethod
- def __contains__(self, key):
- """Verifies that a key exists.
-
- :params key: The key to verify.
-
- :returns: True if the key exists, otherwise False.
- """
-
- @abc.abstractmethod
- def _get_many(self, keys, default):
- """Implementations of this class have to override this method."""
- return ((k, self.get(k, default=default)) for k in keys)
-
- def get_many(self, keys, default=NOTSET):
- """Gets the values of several keys from the cache
-
- :params keys: List of keys to retrieve.
- :params default: The default value to return for each key that is not
- in the cache.
-
- :returns: A generator of (key, value)
- """
- return self._get_many(keys, default)
-
- @abc.abstractmethod
- def _set_many(self, data, ttl):
- """Implementations of this class have to override this method."""
-
- for key, value in data.items():
- self.set(key, value, ttl=ttl)
-
- def set_many(self, data, ttl=None):
- """Puts several items into the cache at once
-
- Depending on the backend, this operation may or may not be efficient.
- The default implementation calls set for each (key, value) pair
- passed, other backends support set_many operations as part of their
- protocols.
-
- :params data: A dictionary like {key: val} to store in the cache.
- :params ttl: Key's timeout in seconds.
- """
-
- if ttl is None:
- ttl = self._default_ttl
-
- self._set_many(data, ttl)
-
- def update(self, **kwargs):
- """Sets several (key, value) pairs.
-
- Refer to the `set_many` docstring.
- """
- self.set_many(kwargs, ttl=self._default_ttl)
-
- @abc.abstractmethod
- def _unset_many(self, keys):
- """Implementations of this class have to override this method."""
- for key in keys:
- del self[key]
-
- def unset_many(self, keys):
- """Removes several keys from the cache at once
-
- :params keys: List of keys to unset.
- """
- self._unset_many(keys)
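
For context, a sketch of what a concrete backend must supply: every @abc.abstractmethod above needs an override before the class can be instantiated, which is why MemoryBackend carries the trivial _get_many/_set_many/_unset_many overrides. This assumes openstack.common.cache is importable; DictBackend is a made-up name and TTL handling is deliberately omitted:

from six.moves.urllib import parse

from openstack.common.cache import backends


class DictBackend(backends.BaseCache):
    """Illustrative dict-backed cache; ignores TTLs entirely."""

    def __init__(self, parsed_url, options=None):
        super(DictBackend, self).__init__(parsed_url, options)
        self._data = {}

    def _set(self, key, value, ttl, not_exists=False):
        if not_exists and key in self._data:
            return False
        self._data[key] = value
        return True

    def _get(self, key, default=None):
        return self._data.get(key, default)

    def __delitem__(self, key):
        self._data.pop(key, None)

    def _clear(self):
        self._data = {}

    def _incr(self, key, delta):
        self._data[key] = self._data.get(key, 0) + delta
        return self._data[key]

    def _append_tail(self, key, tail):
        self._data[key] = self._data.get(key, []) + tail
        return self._data[key]

    def __contains__(self, key):
        return key in self._data

    # The remaining abstract methods already have workable default bodies,
    # so these overrides simply defer to them.
    def _get_many(self, keys, default):
        return super(DictBackend, self)._get_many(keys, default)

    def _set_many(self, data, ttl):
        return super(DictBackend, self)._set_many(data, ttl)

    def _unset_many(self, keys):
        return super(DictBackend, self)._unset_many(keys)


backend = DictBackend(parse.urlparse('dict://'))
backend['a'] = 1
print(backend.setdefault('a', 99))  # -> 1, the existing value wins
print(backend.incr('a', delta=2))   # -> 3
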
diff --git a/openstack/common/cache/cache.py b/openstack/common/cache/cache.py
deleted file mode 100644
index 2b463bac..00000000
--- a/openstack/common/cache/cache.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Cache library.
-
-Supported configuration options:
-
-`default_backend`: Name of the cache backend to use.
-`key_namespace`: Namespace under which keys will be created.
-"""
-
-########################################################################
-#
-# THIS MODULE IS DEPRECATED
-#
-# Please refer to
-# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
-# the discussion leading to this deprecation.
-#
-# We recommend helping with the new oslo.cache library being created
-# as a wrapper for dogpile.
-#
-########################################################################
-
-
-from six.moves.urllib import parse
-from stevedore import driver
-
-
-def _get_oslo_configs():
- """Returns the oslo config options to register."""
- # NOTE(flaper87): Oslo config should be
- # optional. Instead of doing try / except
- # at the top of this file, lets import cfg
- # here and assume that the caller of this
- # function already took care of this dependency.
- from oslo_config import cfg
-
- return [
- cfg.StrOpt('cache_url', default='memory://',
- help='URL to connect to the cache back end.')
- ]
-
-
-def register_oslo_configs(conf):
- """Registers the cache configuration options.
-
- :params conf: Config object.
- :type conf: `cfg.ConfigOptions`
- """
- conf.register_opts(_get_oslo_configs())
-
-
-def get_cache(url='memory://'):
- """Loads the cache backend
-
- This function loads the cache backend
- specified by the given URL.
-
- :param url: Cache URL identifying the backend to load, e.g. 'memory://'
- """
-
- parsed = parse.urlparse(url)
- backend = parsed.scheme
-
- query = parsed.query
- # NOTE(flaper87): We need the following hack
- # for python versions < 2.7.5. Previous versions
- # of python parsed query params just for 'known'
- # schemes. This was changed in this patch:
- # http://hg.python.org/cpython/rev/79e6ff3d9afd
- if not query and '?' in parsed.path:
- query = parsed.path.split('?', 1)[-1]
- parameters = parse.parse_qsl(query)
- kwargs = {'options': dict(parameters)}
-
- mgr = driver.DriverManager('openstack.common.cache.backends', backend,
- invoke_on_load=True,
- invoke_args=[parsed],
- invoke_kwds=kwargs)
- return mgr.driver
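
For context, a usage sketch of the loader (assuming this package and its 'openstack.common.cache.backends' entry points are still installed; the key and value are illustrative):

from openstack.common.cache import cache

backend = cache.get_cache('memory://?default_ttl=60')
backend['token'] = 'abc123'      # stored with the 60-second default ttl
print(backend.get('token'))      # -> 'abc123'
print('token' in backend)        # -> True
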
diff --git a/openstack/common/cliutils.py b/openstack/common/cliutils.py
deleted file mode 100644
index bd6a70f9..00000000
--- a/openstack/common/cliutils.py
+++ /dev/null
@@ -1,272 +0,0 @@
-# Copyright 2012 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# W0603: Using the global statement
-# W0621: Redefining name %s from outer scope
-# pylint: disable=W0603,W0621
-
-from __future__ import print_function
-
-import getpass
-import inspect
-import os
-import sys
-import textwrap
-
-from oslo_utils import encodeutils
-from oslo_utils import strutils
-import prettytable
-import six
-from six import moves
-
-from openstack.common._i18n import _
-
-
-class MissingArgs(Exception):
- """Supplied arguments are not sufficient for calling a function."""
- def __init__(self, missing):
- self.missing = missing
- msg = _("Missing arguments: %s") % ", ".join(missing)
- super(MissingArgs, self).__init__(msg)
-
-
-def validate_args(fn, *args, **kwargs):
- """Check that the supplied args are sufficient for calling a function.
-
- >>> validate_args(lambda a: None)
- Traceback (most recent call last):
- ...
- MissingArgs: Missing arguments: a
- >>> validate_args(lambda a, b, c, d: None, 0, c=1)
- Traceback (most recent call last):
- ...
- MissingArgs: Missing arguments: b, d
-
- :param fn: the function to check
- :param args: the positional arguments supplied
- :param kwargs: the keyword arguments supplied
- """
- argspec = inspect.getargspec(fn)
-
- num_defaults = len(argspec.defaults or [])
- required_args = argspec.args[:len(argspec.args) - num_defaults]
-
- def isbound(method):
- return getattr(method, '__self__', None) is not None
-
- if isbound(fn):
- required_args.pop(0)
-
- missing = [arg for arg in required_args if arg not in kwargs]
- missing = missing[len(args):]
- if missing:
- raise MissingArgs(missing)
-
-
-def arg(*args, **kwargs):
- """Decorator for CLI args.
-
- Example:
-
- >>> @arg("name", help="Name of the new entity")
- ... def entity_create(args):
- ... pass
- """
- def _decorator(func):
- add_arg(func, *args, **kwargs)
- return func
- return _decorator
-
-
-def env(*args, **kwargs):
- """Returns the first environment variable set.
-
- If all are empty, defaults to '' or keyword arg `default`.
- """
- for arg in args:
- value = os.environ.get(arg)
- if value:
- return value
- return kwargs.get('default', '')
-
-
-def add_arg(func, *args, **kwargs):
- """Bind CLI arguments to a shell.py `do_foo` function."""
-
- if not hasattr(func, 'arguments'):
- func.arguments = []
-
- # NOTE(sirp): avoid dups that can occur when the module is shared across
- # tests.
- if (args, kwargs) not in func.arguments:
- # Because of the semantics of decorator composition if we just append
- # to the options list positional options will appear to be backwards.
- func.arguments.insert(0, (args, kwargs))
-
-
-def unauthenticated(func):
- """Adds 'unauthenticated' attribute to decorated function.
-
- Usage:
-
- >>> @unauthenticated
- ... def mymethod(f):
- ... pass
- """
- func.unauthenticated = True
- return func
-
-
-def isunauthenticated(func):
- """Checks if the function does not require authentication.
-
- Mark such functions with the `@unauthenticated` decorator.
-
- :returns: bool
- """
- return getattr(func, 'unauthenticated', False)
-
-
-def print_list(objs, fields, formatters=None, sortby_index=0,
- mixed_case_fields=None, field_labels=None):
- """Print a list of objects as a table, one row per object.
-
- :param objs: iterable of :class:`Resource`
- :param fields: attributes that correspond to columns, in order
- :param formatters: `dict` of callables for field formatting
- :param sortby_index: index of the field for sorting table rows
- :param mixed_case_fields: fields corresponding to object attributes that
- have mixed case names (e.g., 'serverId')
- :param field_labels: Labels to use in the heading of the table, default to
- fields.
- """
- formatters = formatters or {}
- mixed_case_fields = mixed_case_fields or []
- field_labels = field_labels or fields
- if len(field_labels) != len(fields):
- raise ValueError(_("Field labels list %(labels)s has different number "
- "of elements than fields list %(fields)s"),
- {'labels': field_labels, 'fields': fields})
-
- if sortby_index is None:
- kwargs = {}
- else:
- kwargs = {'sortby': field_labels[sortby_index]}
- pt = prettytable.PrettyTable(field_labels)
- pt.align = 'l'
-
- for o in objs:
- row = []
- for field in fields:
- if field in formatters:
- row.append(formatters[field](o))
- else:
- if field in mixed_case_fields:
- field_name = field.replace(' ', '_')
- else:
- field_name = field.lower().replace(' ', '_')
- data = getattr(o, field_name, '')
- row.append(data)
- pt.add_row(row)
-
- if six.PY3:
- print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
- else:
- print(encodeutils.safe_encode(pt.get_string(**kwargs)))
-
-
-def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
- """Print a `dict` as a table of two columns.
-
- :param dct: `dict` to print
- :param dict_property: name of the first column
- :param wrap: wrapping for the second column
- :param dict_value: header label for the value (second) column
- """
- pt = prettytable.PrettyTable([dict_property, dict_value])
- pt.align = 'l'
- for k, v in sorted(dct.items()):
- # convert dict to str to check length
- if isinstance(v, dict):
- v = six.text_type(v)
- if wrap > 0:
- v = textwrap.fill(six.text_type(v), wrap)
- # if value has a newline, add in multiple rows
- # e.g. fault with stacktrace
- if v and isinstance(v, six.string_types) and r'\n' in v:
- lines = v.strip().split(r'\n')
- col1 = k
- for line in lines:
- pt.add_row([col1, line])
- col1 = ''
- else:
- pt.add_row([k, v])
-
- if six.PY3:
- print(encodeutils.safe_encode(pt.get_string()).decode())
- else:
- print(encodeutils.safe_encode(pt.get_string()))
-
-
-def get_password(max_password_prompts=3):
- """Read password from TTY."""
- verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
- pw = None
- if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
- # Check for Ctrl-D
- try:
- for __ in moves.range(max_password_prompts):
- pw1 = getpass.getpass("OS Password: ")
- if verify:
- pw2 = getpass.getpass("Please verify: ")
- else:
- pw2 = pw1
- if pw1 == pw2 and pw1:
- pw = pw1
- break
- except EOFError:
- pass
- return pw
-
-
-def service_type(stype):
- """Adds 'service_type' attribute to decorated function.
-
- Usage:
-
- .. code-block:: python
-
- @service_type('volume')
- def mymethod(f):
- ...
- """
- def inner(f):
- f.service_type = stype
- return f
- return inner
-
-
-def get_service_type(f):
- """Retrieves service type from function."""
- return getattr(f, 'service_type', None)
-
-
-def pretty_choice_list(l):
- return ', '.join("'%s'" % i for i in l)
-
-
-def exit(msg=''):
- if msg:
- print (msg, file=sys.stderr)
- sys.exit(1)
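
For context, a small usage sketch of the helpers above (assuming openstack.common and prettytable are installed; Server, do_list and the sample data are made up for illustration):

import collections

from openstack.common import cliutils

Server = collections.namedtuple('Server', ['name', 'status'])


@cliutils.arg('--status', default=None, help='Filter by status')
def do_list(args):
    servers = [Server('web-1', 'ACTIVE'), Server('db-1', 'SHUTOFF')]
    cliutils.print_list(servers, ['Name', 'Status'])


# The decorator only records the argparse tuples; a shell would feed
# do_list.arguments into parser.add_argument() before dispatching.
do_list(None)
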
diff --git a/openstack/common/crypto/__init__.py b/openstack/common/crypto/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/openstack/common/crypto/__init__.py
+++ /dev/null
diff --git a/openstack/common/crypto/utils.py b/openstack/common/crypto/utils.py
deleted file mode 100644
index e3b7a87e..00000000
--- a/openstack/common/crypto/utils.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-########################################################################
-#
-# THIS MODULE IS DEPRECATED
-#
-# Please refer to
-# https://etherpad.openstack.org/p/kilo-oslo-library-proposals for
-# the discussion leading to this deprecation.
-#
-# We recommend checking out Barbican or the cryptography.py project
-# (https://pypi.python.org/pypi/cryptography) instead of this module.
-#
-########################################################################
-
-import base64
-
-from Crypto.Hash import HMAC
-from Crypto import Random
-from oslo_utils import importutils
-import six
-
-from openstack.common._i18n import _
-
-bchr = six.int2byte
-
-
-class CryptoutilsException(Exception):
- """Generic Exception for Crypto utilities."""
-
- message = _("An unknown error occurred in crypto utils.")
-
-
-class CipherBlockLengthTooBig(CryptoutilsException):
- """The block size is too big."""
-
- def __init__(self, requested, permitted):
- msg = _("Block size of %(given)d is too big, max = %(maximum)d")
- message = msg % {'given': requested, 'maximum': permitted}
- super(CryptoutilsException, self).__init__(message)
-
-
-class HKDFOutputLengthTooLong(CryptoutilsException):
- """The amount of Key Material asked is too much."""
-
- def __init__(self, requested, permitted):
- msg = _("Length of %(given)d is too long, max = %(maximum)d")
- message = msg % {'given': requested, 'maximum': permitted}
- super(CryptoutilsException, self).__init__(message)
-
-
-class HKDF(object):
- """An HMAC-based Key Derivation Function implementation (RFC5869)
-
- This class creates an object that allows one to use HKDF to derive keys.
- """
-
- def __init__(self, hashtype='SHA256'):
- self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
- self.max_okm_length = 255 * self.hashfn.digest_size
-
- def extract(self, ikm, salt=None):
- """An extract function that can be used to derive a robust key given
- weak Input Key Material (IKM) which could be a password.
- Returns a pseudorandom key (of HashLen octets)
-
- :param ikm: input keying material (e.g. a password)
- :param salt: optional salt value (a non-secret random value)
- """
- if salt is None:
- salt = b'\x00' * self.hashfn.digest_size
-
- return HMAC.new(salt, ikm, self.hashfn).digest()
-
- def expand(self, prk, info, length):
- """An expand function that will return arbitrary length output that can
- be used as keys.
- Returns a buffer usable as key material.
-
- :param prk: a pseudorandom key of at least HashLen octets
- :param info: optional string (can be a zero-length string)
- :param length: length of output keying material (<= 255 * HashLen)
- """
- if length > self.max_okm_length:
- raise HKDFOutputLengthTooLong(length, self.max_okm_length)
-
- N = (length + self.hashfn.digest_size - 1) // self.hashfn.digest_size
-
- okm = b""
- tmp = b""
- for block in range(1, N + 1):
- tmp = HMAC.new(prk, tmp + info + bchr(block), self.hashfn).digest()
- okm += tmp
-
- return okm[:length]
-
-
-MAX_CB_SIZE = 256
-
-
-class SymmetricCrypto(object):
- """Symmetric Key Crypto object.
-
- This class creates a Symmetric Key Crypto object that can be used
- to encrypt, decrypt, or sign arbitrary data.
-
- :param enctype: Encryption Cipher name (default: AES)
- :param hashtype: Hash/HMAC type name (default: SHA256)
- """
-
- def __init__(self, enctype='AES', hashtype='SHA256'):
- self.cipher = importutils.import_module('Crypto.Cipher.' + enctype)
- self.hashfn = importutils.import_module('Crypto.Hash.' + hashtype)
-
- def new_key(self, size):
- return Random.new().read(size)
-
- def encrypt(self, key, msg, b64encode=True):
- """Encrypts the provided msg and returns the ciphertext, optionally
- base64 encoded.
-
- Uses AES-128-CBC with a Random IV by default.
-
- The plaintext is padded to reach blocksize length.
- The last byte of the block is the length of the padding.
- The length of the padding does not include the length byte itself.
-
- :param key: The Encryption key.
- :param msg: the plain text.
-
- :returns enc: a block of encrypted data.
- """
- iv = Random.new().read(self.cipher.block_size)
- cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
-
- # CBC mode requires a fixed block size. Append padding and length of
- # padding.
- if self.cipher.block_size > MAX_CB_SIZE:
- raise CipherBlockLengthTooBig(self.cipher.block_size, MAX_CB_SIZE)
- r = len(msg) % self.cipher.block_size
- padlen = self.cipher.block_size - r - 1
- msg += b'\x00' * padlen
- msg += bchr(padlen)
-
- enc = iv + cipher.encrypt(msg)
- if b64encode:
- enc = base64.b64encode(enc)
- return enc
-
- def decrypt(self, key, msg, b64decode=True):
- """Decrypts the provided ciphertext, optionally base64 encoded, and
- returns the plaintext message, after padding is removed.
-
- Uses AES-128-CBC with an IV by default.
-
- :param key: The Encryption key.
- :param msg: the ciphertext; the first block is the IV
-
- :returns plain: the plaintext message.
- """
- if b64decode:
- msg = base64.b64decode(msg)
- iv = msg[:self.cipher.block_size]
- cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)
-
- padded = cipher.decrypt(msg[self.cipher.block_size:])
- l = ord(padded[-1:]) + 1
- plain = padded[:-l]
- return plain
-
- def sign(self, key, msg, b64encode=True):
- """Signs a message string and returns a base64 encoded signature.
-
- Uses HMAC-SHA-256 by default.
-
- :param key: The Signing key.
- :param msg: the message to sign.
-
- :returns out: a base64 encoded signature.
- """
- h = HMAC.new(key, msg, self.hashfn)
- out = h.digest()
- if b64encode:
- out = base64.b64encode(out)
- return out
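
For context, a usage sketch of the deprecated helpers above (assuming PyCrypto, which provides Crypto.Hash and Crypto.Cipher, is installed; the key sizes and messages are illustrative):

from openstack.common.crypto import utils as cryptoutils

# Derive a 16-byte (AES-128) key from weak input material via HKDF.
hkdf = cryptoutils.HKDF(hashtype='SHA256')
prk = hkdf.extract(b'weak-password', salt=b'random-salt')
key = hkdf.expand(prk, b'encryption', length=16)

crypto = cryptoutils.SymmetricCrypto(enctype='AES', hashtype='SHA256')
ciphertext = crypto.encrypt(key, b'secret message')
assert crypto.decrypt(key, ciphertext) == b'secret message'
signature = crypto.sign(key, b'secret message')  # base64-encoded HMAC-SHA256
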
diff --git a/openstack/common/imageutils.py b/openstack/common/imageutils.py
deleted file mode 100644
index 7d78d113..00000000
--- a/openstack/common/imageutils.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-# Copyright (c) 2010 Citrix Systems, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Helper methods to deal with images.
-"""
-
-import re
-
-from oslo_utils import strutils
-
-from openstack.common._i18n import _
-
-
-class QemuImgInfo(object):
- BACKING_FILE_RE = re.compile((r"^(.*?)\s*\(actual\s+path\s*:"
- r"\s+(.*?)\)\s*$"), re.I)
- TOP_LEVEL_RE = re.compile(r"^([\w\d\s\_\-]+):(.*)$")
- SIZE_RE = re.compile(r"(\d*\.?\d+)(\w+)?(\s*\(\s*(\d+)\s+bytes\s*\))?",
- re.I)
-
- def __init__(self, cmd_output=None):
- details = self._parse(cmd_output or '')
- self.image = details.get('image')
- self.backing_file = details.get('backing_file')
- self.file_format = details.get('file_format')
- self.virtual_size = details.get('virtual_size')
- self.cluster_size = details.get('cluster_size')
- self.disk_size = details.get('disk_size')
- self.snapshots = details.get('snapshot_list', [])
- self.encrypted = details.get('encrypted')
-
- def __str__(self):
- lines = [
- 'image: %s' % self.image,
- 'file_format: %s' % self.file_format,
- 'virtual_size: %s' % self.virtual_size,
- 'disk_size: %s' % self.disk_size,
- 'cluster_size: %s' % self.cluster_size,
- 'backing_file: %s' % self.backing_file,
- ]
- if self.snapshots:
- lines.append("snapshots: %s" % self.snapshots)
- if self.encrypted:
- lines.append("encrypted: %s" % self.encrypted)
- return "\n".join(lines)
-
- def _canonicalize(self, field):
- # Standardize on underscores/lc/no dash and no spaces
- # since qemu seems to have mixed outputs here... and
- # this format allows for better integration with python
- # - i.e. for usage in kwargs and such...
- field = field.lower().strip()
- for c in (" ", "-"):
- field = field.replace(c, '_')
- return field
-
- def _extract_bytes(self, details):
- # Replace it with the byte amount
- real_size = self.SIZE_RE.search(details)
- if not real_size:
- raise ValueError(_('Invalid input value "%s".') % details)
- magnitude = real_size.group(1)
- unit_of_measure = real_size.group(2)
- bytes_info = real_size.group(3)
- if bytes_info:
- return int(real_size.group(4))
- elif not unit_of_measure:
- return int(magnitude)
- return strutils.string_to_bytes('%s%sB' % (magnitude, unit_of_measure),
- return_int=True)
-
- def _extract_details(self, root_cmd, root_details, lines_after):
- real_details = root_details
- if root_cmd == 'backing_file':
- # Replace it with the real backing file
- backing_match = self.BACKING_FILE_RE.match(root_details)
- if backing_match:
- real_details = backing_match.group(2).strip()
- elif root_cmd in ['virtual_size', 'cluster_size', 'disk_size']:
- # Replace it with the byte amount (if we can convert it)
- if root_details == 'None':
- real_details = 0
- else:
- real_details = self._extract_bytes(root_details)
- elif root_cmd == 'file_format':
- real_details = real_details.strip().lower()
- elif root_cmd == 'snapshot_list':
- # Next line should be a header, starting with 'ID'
- if not lines_after or not lines_after.pop(0).startswith("ID"):
- msg = _("Snapshot list encountered but no header found!")
- raise ValueError(msg)
- real_details = []
- # This is the sprintf pattern we will try to match
- # "%-10s%-20s%7s%20s%15s"
- # ID TAG VM SIZE DATE VM CLOCK (current header)
- while lines_after:
- line = lines_after[0]
- line_pieces = line.split()
- if len(line_pieces) != 6:
- break
- # Check against this pattern in the final position
- # "%02d:%02d:%02d.%03d"
- date_pieces = line_pieces[5].split(":")
- if len(date_pieces) != 3:
- break
- lines_after.pop(0)
- real_details.append({
- 'id': line_pieces[0],
- 'tag': line_pieces[1],
- 'vm_size': line_pieces[2],
- 'date': line_pieces[3],
- 'vm_clock': line_pieces[4] + " " + line_pieces[5],
- })
- return real_details
-
- def _parse(self, cmd_output):
- # Analysis done of qemu-img.c to figure out what is going on here
- # Find all points start with some chars and then a ':' then a newline
- # and then handle the results of those 'top level' items in a separate
- # function.
- #
- # TODO(harlowja): newer versions might have a json output format
- # we should switch to that whenever possible.
- # see: http://bit.ly/XLJXDX
- contents = {}
- lines = [x for x in cmd_output.splitlines() if x.strip()]
- while lines:
- line = lines.pop(0)
- top_level = self.TOP_LEVEL_RE.match(line)
- if top_level:
- root = self._canonicalize(top_level.group(1))
- if not root:
- continue
- root_details = top_level.group(2).strip()
- details = self._extract_details(root, root_details, lines)
- contents[root] = details
- return contents
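
For context, a usage sketch feeding typical `qemu-img info` text output into the parser above (assuming the module is importable; the sample output is illustrative):

from openstack.common import imageutils

output = """image: disk.qcow2
file format: qcow2
virtual size: 10G (10737418240 bytes)
disk size: 1.3G
cluster_size: 65536
backing file: base.img (actual path: /images/base.img)
"""

info = imageutils.QemuImgInfo(output)
print(info.file_format)    # -> 'qcow2'
print(info.virtual_size)   # -> 10737418240, taken from the bytes group
print(info.backing_file)   # -> '/images/base.img'
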
diff --git a/openstack/common/memorycache.py b/openstack/common/memorycache.py
deleted file mode 100644
index c6e10134..00000000
--- a/openstack/common/memorycache.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Super simple fake memcache client."""
-
-import copy
-
-from debtcollector import removals
-from oslo_config import cfg
-from oslo_utils import timeutils
-
-memcache_opts = [
- cfg.ListOpt('memcached_servers',
- help='Memcached servers or None for in process cache.'),
-]
-
-CONF = cfg.CONF
-CONF.register_opts(memcache_opts)
-
-
-# Indicate that this module is deprecated for removal and oslo.cache should
-# be used instead.
-removals.removed_module(__name__, 'oslo.cache')
-
-
-def list_opts():
- """Entry point for oslo-config-generator."""
- return [(None, copy.deepcopy(memcache_opts))]
-
-
-def get_client(memcached_servers=None):
- client_cls = Client
-
- if not memcached_servers:
- memcached_servers = CONF.memcached_servers
- if memcached_servers:
- import memcache
- client_cls = memcache.Client
-
- return client_cls(memcached_servers, debug=0)
-
-
-class Client(object):
- """Replicates a tiny subset of memcached client interface."""
-
- def __init__(self, *args, **kwargs):
- """Ignores the passed in args."""
- self.cache = {}
-
- def get(self, key):
- """Retrieves the value for a key or None.
-
- This expunges expired keys during each get.
- """
-
- now = timeutils.utcnow_ts()
- for k in list(self.cache):
- (timeout, _value) = self.cache[k]
- if timeout and now >= timeout:
- del self.cache[k]
-
- return self.cache.get(key, (0, None))[1]
-
- def set(self, key, value, time=0, min_compress_len=0):
- """Sets the value for a key."""
- timeout = 0
- if time != 0:
- timeout = timeutils.utcnow_ts() + time
- self.cache[key] = (timeout, value)
- return True
-
- def add(self, key, value, time=0, min_compress_len=0):
- """Sets the value for a key if it doesn't exist."""
- if self.get(key) is not None:
- return False
- return self.set(key, value, time, min_compress_len)
-
- def incr(self, key, delta=1):
- """Increments the value for a key."""
- value = self.get(key)
- if value is None:
- return None
- new_value = int(value) + delta
- self.cache[key] = (self.cache[key][0], str(new_value))
- return new_value
-
- def delete(self, key, time=0):
- """Deletes the value associated with a key."""
- if key in self.cache:
- del self.cache[key]
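
For context, a usage sketch of the fake client above (no memcached needed; as in the code, incr only works on values convertible to int):

from openstack.common import memorycache

client = memorycache.get_client()        # no servers -> in-process Client
client.set('counter', '1', time=30)      # expires 30 seconds from now
client.add('counter', '5')               # ignored: the key already exists
print(client.incr('counter', delta=2))   # -> 3
client.delete('counter')
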
diff --git a/openstack/common/scheduler/__init__.py b/openstack/common/scheduler/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/openstack/common/scheduler/__init__.py
+++ /dev/null
diff --git a/openstack/common/scheduler/base_filter.py b/openstack/common/scheduler/base_filter.py
deleted file mode 100644
index 193da286..00000000
--- a/openstack/common/scheduler/base_filter.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2011-2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Filter support
-"""
-import logging
-
-from openstack.common._i18n import _LI
-from openstack.common.scheduler import base_handler
-
-LOG = logging.getLogger(__name__)
-
-
-class BaseFilter(object):
- """Base class for all filter classes."""
- def _filter_one(self, obj, filter_properties):
- """Return True if it passes the filter, False otherwise.
- Override this in a subclass.
- """
- return True
-
- def filter_all(self, filter_obj_list, filter_properties):
- """Yield objects that pass the filter.
-
- Can be overridden in a subclass, if you need to base filtering
- decisions on all objects. Otherwise, one can just override
- _filter_one() to filter a single object.
- """
- for obj in filter_obj_list:
- if self._filter_one(obj, filter_properties):
- yield obj
-
- # Set to true in a subclass if a filter only needs to be run once
- # for each request rather than for each instance
- run_filter_once_per_request = False
-
- def run_filter_for_index(self, index):
- """Return True if the filter needs to be run for the "index-th"
- instance in a request. Only need to override this if a filter
- needs anything other than "first only" or "all" behaviour.
- """
- return not (self.run_filter_once_per_request and index > 0)
-
-
-class BaseFilterHandler(base_handler.BaseHandler):
- """Base class to handle loading filter classes.
-
- This class should be subclassed where one needs to use filters.
- """
-
- def get_filtered_objects(self, filter_classes, objs,
- filter_properties, index=0):
- """Get the objects that remain after filtering
-
- :param filter_classes: filters that will be used to filter the
- objects
- :param objs: objects that will be filtered
- :param filter_properties: client filter properties
- :param index: This value needs to be increased in the caller
- function of get_filtered_objects when handling
- each resource.
- """
- list_objs = list(objs)
- LOG.debug("Starting with %d host(s)", len(list_objs))
- for filter_cls in filter_classes:
- cls_name = filter_cls.__name__
- filter_class = filter_cls()
-
- if filter_class.run_filter_for_index(index):
- objs = filter_class.filter_all(list_objs, filter_properties)
- if objs is None:
- LOG.debug("Filter %(cls_name)s says to stop filtering",
- {'cls_name': cls_name})
- return
- list_objs = list(objs)
- msg = (_LI("Filter %(cls_name)s returned %(obj_len)d host(s)")
- % {'cls_name': cls_name, 'obj_len': len(list_objs)})
- if not list_objs:
- LOG.info(msg)
- break
- LOG.debug(msg)
- return list_objs
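
For context, a minimal sketch of a concrete filter built on the classes above (assuming the package is importable; EnabledFilter and the host dicts are made up): only _filter_one() needs an override, and filter_all() then yields whatever passes.

from openstack.common.scheduler import base_filter


class EnabledFilter(base_filter.BaseFilter):
    """Keep only objects whose 'enabled' flag is True."""

    def _filter_one(self, obj, filter_properties):
        return obj.get('enabled', False)


hosts = [{'name': 'node1', 'enabled': True},
         {'name': 'node2', 'enabled': False}]
print(list(EnabledFilter().filter_all(hosts, {})))
# -> [{'name': 'node1', 'enabled': True}]
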
diff --git a/openstack/common/scheduler/base_handler.py b/openstack/common/scheduler/base_handler.py
deleted file mode 100644
index 44c8eca5..00000000
--- a/openstack/common/scheduler/base_handler.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2011-2013 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-A common base for handling extension classes.
-
-Used by BaseFilterHandler and BaseWeightHandler
-"""
-
-import inspect
-
-from stevedore import extension
-
-
-class BaseHandler(object):
- """Base class to handle loading filter and weight classes."""
- def __init__(self, modifier_class_type, modifier_namespace):
- self.namespace = modifier_namespace
- self.modifier_class_type = modifier_class_type
- self.extension_manager = extension.ExtensionManager(modifier_namespace)
-
- def _is_correct_class(self, cls):
- """Return whether an object is a class of the correct type and
- is not prefixed with an underscore.
- """
- return (inspect.isclass(cls) and
- not cls.__name__.startswith('_') and
- issubclass(cls, self.modifier_class_type))
-
- def get_all_classes(self):
- # We use a set, as some classes may have an entrypoint of their own,
- # and also be returned by a function such as 'all_filters' for example
- return [ext.plugin for ext in self.extension_manager if
- self._is_correct_class(ext.plugin)]
diff --git a/openstack/common/scheduler/base_weight.py b/openstack/common/scheduler/base_weight.py
deleted file mode 100644
index d4f6a319..00000000
--- a/openstack/common/scheduler/base_weight.py
+++ /dev/null
@@ -1,147 +0,0 @@
-# Copyright (c) 2011-2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Pluggable Weighing support
-"""
-
-import abc
-
-import six
-
-from openstack.common.scheduler import base_handler
-
-
-def normalize(weight_list, minval=None, maxval=None):
- """Normalize the values in a list between 0 and 1.0.
-
- The normalization is made regarding the lower and upper values present in
- weight_list. If the minval and/or maxval parameters are set, these values
- will be used instead of the minimum and maximum from the list.
-
- If all the values are equal, they are normalized to 0.
- """
-
- if not weight_list:
- return ()
-
- if maxval is None:
- maxval = max(weight_list)
-
- if minval is None:
- minval = min(weight_list)
-
- maxval = float(maxval)
- minval = float(minval)
-
- if minval == maxval:
- return [0] * len(weight_list)
-
- range_ = maxval - minval
- return ((i - minval) / range_ for i in weight_list)
-
-
-class WeighedObject(object):
- """Object with weight information."""
- def __init__(self, obj, weight):
- self.obj = obj
- self.weight = weight
-
- def __repr__(self):
- return "<WeighedObject '%s': %s>" % (self.obj, self.weight)
-
-
-@six.add_metaclass(abc.ABCMeta)
-class BaseWeigher(object):
- """Base class for pluggable weighers.
-
- The attributes maxval and minval can be specified to set up the maximum
- and minimum values for the weighed objects. These values will then be
- taken into account in the normalization step, instead of taking the values
- from the calculated weights.
- """
-
- minval = None
- maxval = None
-
- def weight_multiplier(self):
- """How weighted this weigher should be.
-
- Override this method in a subclass, so that the returned value is
- read from a configuration option to permit operators to specify a
- multiplier for the weigher.
- """
- return 1.0
-
- @abc.abstractmethod
- def _weigh_object(self, obj, weight_properties):
- """Override in a subclass to specify a weight for a specific
- object.
- """
-
- def weigh_objects(self, weighed_obj_list, weight_properties):
- """Weigh multiple objects.
-
- Override in a subclass if you need access to all objects in order
- to calculate weights. Do not modify the weight of an object here,
- just return a list of weights.
- """
- # Calculate the weights
- weights = []
- for obj in weighed_obj_list:
- weight = self._weigh_object(obj.obj, weight_properties)
-
- # Record the min and max values if they are None. If they are
- # anything but None we assume that the weigher has set them.
- if self.minval is None:
- self.minval = weight
- if self.maxval is None:
- self.maxval = weight
-
- if weight < self.minval:
- self.minval = weight
- elif weight > self.maxval:
- self.maxval = weight
-
- weights.append(weight)
-
- return weights
-
-
-class BaseWeightHandler(base_handler.BaseHandler):
- object_class = WeighedObject
-
- def get_weighed_objects(self, weigher_classes, obj_list,
- weighing_properties):
- """Return a sorted (descending), normalized list of WeighedObjects."""
-
- if not obj_list:
- return []
-
- weighed_objs = [self.object_class(obj, 0.0) for obj in obj_list]
- for weigher_cls in weigher_classes:
- weigher = weigher_cls()
- weights = weigher.weigh_objects(weighed_objs, weighing_properties)
-
- # Normalize the weights
- weights = normalize(weights,
- minval=weigher.minval,
- maxval=weigher.maxval)
-
- for i, weight in enumerate(weights):
- obj = weighed_objs[i]
- obj.weight += weigher.weight_multiplier() * weight
-
- return sorted(weighed_objs, key=lambda x: x.weight, reverse=True)
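
For context, a sketch of the weighing pipeline (assuming the package is importable; FreeRamWeigher, the 'example.weighers' namespace and the host dicts are made up):

from openstack.common.scheduler import base_weight


class FreeRamWeigher(base_weight.BaseWeigher):

    def _weigh_object(self, obj, weight_properties):
        return obj['free_ram_mb']


hosts = [{'name': 'node1', 'free_ram_mb': 512},
         {'name': 'node2', 'free_ram_mb': 2048}]

# The namespace only matters for entry-point discovery; here the weigher
# classes are passed in explicitly instead.
handler = base_weight.BaseWeightHandler(base_weight.BaseWeigher,
                                        'example.weighers')
weighed = handler.get_weighed_objects([FreeRamWeigher], hosts, {})
print([(w.obj['name'], w.weight) for w in weighed])
# -> [('node2', 1.0), ('node1', 0.0)] after normalization
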
diff --git a/openstack/common/scheduler/filters/__init__.py b/openstack/common/scheduler/filters/__init__.py
deleted file mode 100644
index 113e2c43..00000000
--- a/openstack/common/scheduler/filters/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Scheduler host filters
-"""
-
-from openstack.common.scheduler import base_filter
-
-
-class BaseHostFilter(base_filter.BaseFilter):
- """Base class for host filters."""
- def _filter_one(self, obj, filter_properties):
- """Return True if the object passes the filter, otherwise False."""
- return self.host_passes(obj, filter_properties)
-
- def host_passes(self, host_state, filter_properties):
- """Return True if the HostState passes the filter, otherwise False.
- Override this in a subclass.
- """
- raise NotImplementedError()
-
-
-class HostFilterHandler(base_filter.BaseFilterHandler):
- def __init__(self, namespace):
- super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
diff --git a/openstack/common/scheduler/filters/availability_zone_filter.py b/openstack/common/scheduler/filters/availability_zone_filter.py
deleted file mode 100644
index 63b9051a..00000000
--- a/openstack/common/scheduler/filters/availability_zone_filter.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# Copyright (c) 2011-2012 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from openstack.common.scheduler import filters
-
-
-class AvailabilityZoneFilter(filters.BaseHostFilter):
- """Filters Hosts by availability zone."""
-
- # Availability zones do not change within a request
- run_filter_once_per_request = True
-
- def host_passes(self, host_state, filter_properties):
- spec = filter_properties.get('request_spec', {})
- props = spec.get('resource_properties', {})
- availability_zone = props.get('availability_zone')
-
- if availability_zone:
- return availability_zone == host_state.service['availability_zone']
- return True
diff --git a/openstack/common/scheduler/filters/capabilities_filter.py b/openstack/common/scheduler/filters/capabilities_filter.py
deleted file mode 100644
index 455335b5..00000000
--- a/openstack/common/scheduler/filters/capabilities_filter.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-import six
-
-from openstack.common.scheduler import filters
-from openstack.common.scheduler.filters import extra_specs_ops
-
-LOG = logging.getLogger(__name__)
-
-
-class CapabilitiesFilter(filters.BaseHostFilter):
- """HostFilter to work with resource (instance & volume) type records."""
-
- def _satisfies_extra_specs(self, capabilities, resource_type):
- """Check that the capabilities provided by the services satisfy
- the extra specs associated with the resource type.
- """
- extra_specs = resource_type.get('extra_specs', [])
- if not extra_specs:
- return True
-
- for key, req in six.iteritems(extra_specs):
- # Either not scope format, or in capabilities scope
- scope = key.split(':')
- if len(scope) > 1 and scope[0] != "capabilities":
- continue
- elif scope[0] == "capabilities":
- del scope[0]
-
- cap = capabilities
- for index in range(len(scope)):
- try:
- cap = cap.get(scope[index])
- except AttributeError:
- cap = None
- if cap is None:
- LOG.debug("Host doesn't provide capability '%(cap)s' "
- "listed in the extra specs",
- {'cap': scope[index]})
- return False
- if not extra_specs_ops.match(cap, req):
- LOG.debug("extra_spec requirement '%(req)s' "
- "does not match '%(cap)s'",
- {'req': req, 'cap': cap})
- return False
- return True
-
-    def host_passes(self, host_state, filter_properties):
-        """Return True if host_state satisfies resource_type's extra specs."""
-        # Note(zhiteng) Currently only Cinder and Nova are using
-        # this filter, so the resource type is either instance or
-        # volume.
-        resource_type = filter_properties.get('resource_type')
-        if not self._satisfies_extra_specs(host_state.capabilities,
-                                           resource_type):
-            LOG.debug("%(host_state)s fails resource_type extra_specs "
-                      "requirements", {'host_state': host_state})
-            return False
-        return True
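The key-scoping rules in _satisfies_extra_specs are easy to misread, so here is a self-contained sketch of just the scope walk (the capability values and spec keys below are illustrative, not taken from this module):

# Hedged sketch of the extra-spec key scoping used above: keys prefixed with
# "capabilities:" drill into the (possibly nested) capabilities dict, unscoped
# keys are looked up directly, and keys scoped to anything else are skipped.

def lookup_capability(capabilities, key):
    scope = key.split(':')
    if len(scope) > 1 and scope[0] != 'capabilities':
        return 'skipped'            # foreign scope: the filter ignores this key
    if scope[0] == 'capabilities':
        del scope[0]
    cap = capabilities
    for part in scope:
        try:
            cap = cap.get(part)
        except AttributeError:
            cap = None
        if cap is None:
            return None             # listed but not provided: the host fails
    return cap                      # this value is then checked with extra_specs_ops.match()


caps = {'location': {'region': 'east'}, 'total_capacity_gb': 500}
print(lookup_capability(caps, 'capabilities:location:region'))  # 'east'
print(lookup_capability(caps, 'total_capacity_gb'))             # 500
print(lookup_capability(caps, 'hw:cpu_policy'))                 # 'skipped'
print(lookup_capability(caps, 'capabilities:missing'))          # None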
diff --git a/openstack/common/scheduler/filters/extra_specs_ops.py b/openstack/common/scheduler/filters/extra_specs_ops.py
deleted file mode 100644
index 735e3037..00000000
--- a/openstack/common/scheduler/filters/extra_specs_ops.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import operator
-
-from oslo_utils import strutils
-
-# 1. The following operations are supported:
-# =, s==, s!=, s>=, s>, s<=, s<, <in>, <is>, <or>, ==, !=, >=, <=
-# 2. Note that <or> is handled in a different way below.
-# 3. If the first word in the extra_specs is not one of the operators,
-# it is ignored.
-_op_methods = {'=': lambda x, y: float(x) >= float(y),
-               '<in>': lambda x, y: y in x,
-               '<is>': lambda x, y: (strutils.bool_from_string(x) is
-                                     strutils.bool_from_string(y)),
-               '==': lambda x, y: float(x) == float(y),
-               '!=': lambda x, y: float(x) != float(y),
-               '>=': lambda x, y: float(x) >= float(y),
-               '<=': lambda x, y: float(x) <= float(y),
-               's==': operator.eq,
-               's!=': operator.ne,
-               's<': operator.lt,
-               's<=': operator.le,
-               's>': operator.gt,
-               's>=': operator.ge}
-
-
-def match(value, req):
-    words = req.split()
-
-    op = method = None
-    if words:
-        op = words.pop(0)
-        method = _op_methods.get(op)
-
-    if op != '<or>' and not method:
-        return value == req
-
-    if value is None:
-        return False
-
-    if op == '<or>':  # Ex: <or> v1 <or> v2 <or> v3
-        while True:
-            if words.pop(0) == value:
-                return True
-            if not words:
-                break
-            op = words.pop(0)  # remove a keyword <or>
-            if not words:
-                break
-        return False
-
-    try:
-        if words and method(value, words[0]):
-            return True
-    except ValueError:
-        pass
-
-    return False
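A few illustrative calls against the operator table above. This is a hedged usage sketch: it assumes a copy of this (now removed) module is still importable under its pre-removal path, for example from a project-local sync.

# Hedged usage sketch of match(value, req); the import assumes a local sync of
# the removed module is still on sys.path.
from openstack.common.scheduler.filters import extra_specs_ops as ops

print(ops.match('123', '= 120'))                       # True: '=' is a numeric >= check
print(ops.match('QEMU', 's== QEMU'))                   # True: string equality
print(ops.match('True', '<is> true'))                  # True: boolean comparison
print(ops.match('value2', '<or> value1 <or> value2'))  # True: matches one alternative
print(ops.match('plain', 'plain'))                     # True: no operator -> literal equality
print(ops.match(None, 's== x'))                        # False: a missing value never matches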
diff --git a/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py b/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py
deleted file mode 100644
index a09a7e19..00000000
--- a/openstack/common/scheduler/filters/ignore_attempted_hosts_filter.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from openstack.common.scheduler import filters
-
-LOG = logging.getLogger(__name__)
-
-
-class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
- """Filter out previously attempted hosts
-
- A host passes this filter if it has not already been attempted for
- scheduling. The scheduler needs to add previously attempted hosts
- to the 'retry' key of filter_properties in order for this to work
- correctly. For example::
-
- {
- 'retry': {
- 'hosts': ['host1', 'host2'],
- 'num_attempts': 3,
- }
- }
- """
-
- def host_passes(self, host_state, filter_properties):
- """Skip nodes that have already been attempted."""
- attempted = filter_properties.get('retry')
- if not attempted:
- # Re-scheduling is disabled
- LOG.debug("Re-scheduling is disabled.")
- return True
-
- hosts = attempted.get('hosts', [])
- host = host_state.host
-
- passes = host not in hosts
- pass_msg = "passes" if passes else "fails"
-
- LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
- "%(hosts)s" % {'host': host,
- 'pass_msg': pass_msg,
- 'hosts': hosts})
- return passes
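A minimal sketch of the pass/fail decision above, assuming a hypothetical FakeHostState that exposes only the 'host' attribute the filter reads:

# Hedged sketch: FakeHostState is a hypothetical stand-in for the scheduler's
# HostState, modelling only the attribute IgnoreAttemptedHostsFilter uses.

class FakeHostState(object):
    def __init__(self, host):
        self.host = host


def not_yet_attempted(host_state, filter_properties):
    # Same decision as host_passes(): without retry info everything passes;
    # otherwise only hosts absent from the attempted list pass.
    attempted = filter_properties.get('retry')
    if not attempted:
        return True
    return host_state.host not in attempted.get('hosts', [])


props = {'retry': {'hosts': ['host1', 'host2'], 'num_attempts': 3}}
print(not_yet_attempted(FakeHostState('host1'), props))  # False: already tried
print(not_yet_attempted(FakeHostState('host3'), props))  # True: not tried yet
print(not_yet_attempted(FakeHostState('host1'), {}))     # True: re-scheduling disabled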
diff --git a/openstack/common/scheduler/filters/json_filter.py b/openstack/common/scheduler/filters/json_filter.py
deleted file mode 100644
index 176edb69..00000000
--- a/openstack/common/scheduler/filters/json_filter.py
+++ /dev/null
@@ -1,151 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import operator
-
-from oslo_serialization import jsonutils
-import six
-
-from openstack.common.scheduler import filters
-
-
-class JsonFilter(filters.BaseHostFilter):
- """Host Filter to allow simple JSON-based grammar for
- selecting hosts.
- """
- def _op_compare(self, args, op):
- """Returns True if the specified operator can successfully
- compare the first item in the args with all the rest. Will
- return False if only one item is in the list.
- """
- if len(args) < 2:
- return False
- if op is operator.contains:
- bad = args[0] not in args[1:]
- else:
- bad = [arg for arg in args[1:]
- if not op(args[0], arg)]
- return not bool(bad)
-
- def _equals(self, args):
- """First term is == all the other terms."""
- return self._op_compare(args, operator.eq)
-
- def _less_than(self, args):
- """First term is < all the other terms."""
- return self._op_compare(args, operator.lt)
-
- def _greater_than(self, args):
- """First term is > all the other terms."""
- return self._op_compare(args, operator.gt)
-
- def _in(self, args):
- """First term is in set of remaining terms."""
- return self._op_compare(args, operator.contains)
-
- def _less_than_equal(self, args):
- """First term is <= all the other terms."""
- return self._op_compare(args, operator.le)
-
- def _greater_than_equal(self, args):
- """First term is >= all the other terms."""
- return self._op_compare(args, operator.ge)
-
- def _not(self, args):
- """Flip each of the arguments."""
- return [not arg for arg in args]
-
- def _or(self, args):
- """True if any arg is True."""
- return any(args)
-
- def _and(self, args):
- """True if all args are True."""
- return all(args)
-
- commands = {
- '=': _equals,
- '<': _less_than,
- '>': _greater_than,
- 'in': _in,
- '<=': _less_than_equal,
- '>=': _greater_than_equal,
- 'not': _not,
- 'or': _or,
- 'and': _and,
- }
-
- def _parse_string(self, string, host_state):
- """Strings prefixed with $ are capability lookups in the
- form '$variable' where 'variable' is an attribute in the
- HostState class. If $variable is a dictionary, you may
- use: $variable.dictkey
- """
- if not string:
- return None
- if not string.startswith("$"):
- return string
-
- path = string[1:].split(".")
- obj = getattr(host_state, path[0], None)
- if obj is None:
- return None
- for item in path[1:]:
- obj = obj.get(item)
- if obj is None:
- return None
- return obj
-
- def _process_filter(self, query, host_state):
- """Recursively parse the query structure."""
- if not query:
- return True
- cmd = query[0]
- method = self.commands[cmd]
- cooked_args = []
- for arg in query[1:]:
- if isinstance(arg, list):
- arg = self._process_filter(arg, host_state)
- elif isinstance(arg, six.string_types):
- arg = self._parse_string(arg, host_state)
- if arg is not None:
- cooked_args.append(arg)
- result = method(self, cooked_args)
- return result
-
-    def host_passes(self, host_state, filter_properties):
-        """Return True if the host satisfies the requirements
-        specified in the query.
-        """
-        # TODO(zhiteng) Add description for filter_properties structure
-        # and scheduler_hints.
-        try:
-            query = filter_properties['scheduler_hints']['query']
-        except KeyError:
-            query = None
-        if not query:
-            return True
-
-        # NOTE(comstud): Not checking capabilities or service for
-        # enabled/disabled so that a provided json filter can decide
-
-        result = self._process_filter(jsonutils.loads(query), host_state)
-        if isinstance(result, list):
-            # If any succeeded, include the host
-            result = any(result)
-        if result:
-            # The query is satisfied, so the host passes.
-            return True
-        return False
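For reference, a hedged sketch of the query shape this filter consumes: a JSON-encoded, Lisp-style list stored under filter_properties['scheduler_hints']['query'], where '$'-prefixed strings resolve to HostState attributes. The attribute names below ('$free_ram_mb', '$host') are illustrative, not guaranteed by this module.

# Hedged sketch of the query grammar only; building the hint is plain JSON.
import json

query = json.dumps(
    ['and',
        ['>=', '$free_ram_mb', 1024],        # host must report >= 1024 MB free
        ['not', ['in', '$host', 'host1']]])  # and must not be 'host1'

filter_properties = {'scheduler_hints': {'query': query}}
print(filter_properties['scheduler_hints']['query'])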
diff --git a/openstack/common/scheduler/weights/__init__.py b/openstack/common/scheduler/weights/__init__.py
deleted file mode 100644
index e2eb04a5..00000000
--- a/openstack/common/scheduler/weights/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2011 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Scheduler host weights
-"""
-
-
-from openstack.common.scheduler import base_weight
-
-
-class WeighedHost(base_weight.WeighedObject):
-    def to_dict(self):
-        return {
-            'weight': self.weight,
-            'host': self.obj.host,
-        }
-
-    def __repr__(self):
-        return ("WeighedHost [host: %s, weight: %s]" %
-                (self.obj.host, self.weight))
-
-
-class BaseHostWeigher(base_weight.BaseWeigher):
-    """Base class for host weights."""
-    pass
-
-
-class HostWeightHandler(base_weight.BaseWeightHandler):
-    object_class = WeighedHost
-
-    def __init__(self, namespace):
-        super(HostWeightHandler, self).__init__(BaseHostWeigher, namespace)
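Finally, a hedged usage sketch of WeighedHost. It assumes a local sync of this (now removed) package is still importable, and that base_weight.WeighedObject.__init__ takes (obj, weight) as in the oslo-incubator base_weight module this file imports; FakeHost is a hypothetical stand-in exposing only the 'host' attribute used by to_dict() and __repr__().

# Hedged sketch under the assumptions stated above.
from openstack.common.scheduler import weights


class FakeHost(object):
    def __init__(self, host):
        self.host = host


weighed = weights.WeighedHost(FakeHost('node-1'), 1.5)
print(weighed.to_dict())  # {'weight': 1.5, 'host': 'node-1'}
print(weighed)            # WeighedHost [host: node-1, weight: 1.5]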