-rw-r--r--  .zuul.yaml                                                             |   3
-rw-r--r--  ceilometer/cache_utils.py                                              |  53
-rw-r--r--  ceilometer/compute/pollsters/net.py                                    |   2
-rw-r--r--  ceilometer/declarative.py                                              |   4
-rw-r--r--  ceilometer/polling/dynamic_pollster.py                                 | 463
-rw-r--r--  ceilometer/polling/manager.py                                          |  86
-rw-r--r--  ceilometer/publisher/gnocchi.py                                        |  38
-rw-r--r--  ceilometer/publisher/utils.py                                          |   2
-rw-r--r--  ceilometer/sample.py                                                   |   5
-rw-r--r--  ceilometer/tests/unit/compute/pollsters/test_net.py                    |  21
-rw-r--r--  ceilometer/tests/unit/polling/test_dynamic_pollster.py                 | 835
-rw-r--r--  ceilometer/tests/unit/polling/test_manager.py                          |  81
-rw-r--r--  ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py   |   6
-rw-r--r--  ceilometer/tests/unit/publisher/test_gnocchi.py                        |  69
-rw-r--r--  doc/source/admin/telemetry-dynamic-pollster.rst                        | 282
-rw-r--r--  releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml     |   6
-rw-r--r--  releasenotes/source/index.rst                                          |   1
-rw-r--r--  releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po           |  23
-rw-r--r--  releasenotes/source/zed.rst                                            |   6
-rw-r--r--  requirements.txt                                                       |   1
-rw-r--r--  setup.cfg                                                              |   1
21 files changed, 1767 insertions(+), 221 deletions(-)
diff --git a/.zuul.yaml b/.zuul.yaml
index 0bab90b7..9ed570be 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -29,7 +29,7 @@
queue: telemetry
templates:
- openstack-cover-jobs
- - openstack-python3-zed-jobs
+ - openstack-python3-jobs
- publish-openstack-docs-pti
- periodic-stable-jobs
- release-notes-jobs-python3
@@ -45,7 +45,6 @@
- telemetry-dsvm-integration-ipv6-only:
irrelevant-files: *ceilometer-irrelevant-files
gate:
- queue: telemetry
jobs:
- grenade-ceilometer
- telemetry-dsvm-integration-centos-9s:
diff --git a/ceilometer/cache_utils.py b/ceilometer/cache_utils.py
new file mode 100644
index 00000000..55a9e263
--- /dev/null
+++ b/ceilometer/cache_utils.py
@@ -0,0 +1,53 @@
+#
+# Copyright 2022 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Simple wrapper for oslo_cache."""
+
+
+from oslo_cache import core as cache
+
+
+class CacheClient(object):
+ def __init__(self, region):
+ self.region = region
+
+ def get(self, key):
+ value = self.region.get(key)
+ if value == cache.NO_VALUE:
+ return None
+ return value
+
+ def set(self, key, value):
+ return self.region.set(key, value)
+
+ def delete(self, key):
+ return self.region.delete(key)
+
+
+def get_client(conf, expiration_time=0):
+ cache.configure(conf)
+ if conf.cache.enabled:
+ return CacheClient(_get_default_cache_region(
+ conf,
+ expiration_time=expiration_time
+ ))
+
+
+def _get_default_cache_region(conf, expiration_time):
+ region = cache.create_region()
+ if expiration_time != 0:
+ conf.cache.expiration_time = expiration_time
+ cache.configure_cache_region(conf, region)
+ return region
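The helper above only hands back a CacheClient when [cache]/enabled is set; get_client() calls oslo_cache.core.configure(conf) itself, so callers just need option values in the configuration. A minimal caller-side sketch, assuming a ConfigOpts object named conf, a project_id variable, and a hypothetical lookup_name() resolver (none of which are part of this change):

    # Sketch only: how a polling component might use the new helper.
    from ceilometer import cache_utils

    cache_client = cache_utils.get_client(conf, expiration_time=3600)
    if cache_client:                         # None when [cache]/enabled is False
        name = cache_client.get(project_id)
        if name is None:                     # oslo_cache NO_VALUE is mapped to None
            name = lookup_name(project_id)   # hypothetical Keystone lookup
            cache_client.set(project_id, name)
    else:
        name = lookup_name(project_id)       # no cache backend configured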
diff --git a/ceilometer/compute/pollsters/net.py b/ceilometer/compute/pollsters/net.py
index a452e802..752be34f 100644
--- a/ceilometer/compute/pollsters/net.py
+++ b/ceilometer/compute/pollsters/net.py
@@ -119,7 +119,7 @@ class IncomingBytesDeltaPollster(NetworkPollster):
class OutgoingBytesDeltaPollster(NetworkPollster):
- sample_name = 'network.outgoing.packets.delta'
+ sample_name = 'network.outgoing.bytes.delta'
sample_type = sample.TYPE_DELTA
sample_unit = 'B'
sample_stats_key = 'tx_bytes_delta'
diff --git a/ceilometer/declarative.py b/ceilometer/declarative.py
index ed59f9a9..d30fff52 100644
--- a/ceilometer/declarative.py
+++ b/ceilometer/declarative.py
@@ -49,6 +49,10 @@ class DynamicPollsterDefinitionException(DynamicPollsterException):
pass
+class InvalidResponseTypeException(DynamicPollsterException):
+ pass
+
+
class NonOpenStackApisDynamicPollsterException\
(DynamicPollsterDefinitionException):
pass
diff --git a/ceilometer/polling/dynamic_pollster.py b/ceilometer/polling/dynamic_pollster.py
index bb45b85f..07edc5ee 100644
--- a/ceilometer/polling/dynamic_pollster.py
+++ b/ceilometer/polling/dynamic_pollster.py
@@ -18,8 +18,11 @@
similar to the idea used for handling notifications.
"""
import copy
+import json
import re
+import subprocess
import time
+import xmltodict
from oslo_log import log
@@ -46,6 +49,89 @@ def validate_sample_type(sample_type):
% (sample_type, ceilometer_sample.TYPES))
+class XMLResponseHandler(object):
+ """This response handler converts an XML in string format to a dict"""
+
+ @staticmethod
+ def handle(response):
+ return xmltodict.parse(response)
+
+
+class JsonResponseHandler(object):
+ """This response handler converts a JSON in string format to a dict"""
+
+ @staticmethod
+ def handle(response):
+ return json.loads(response)
+
+
+class PlainTextResponseHandler(object):
+ """Response handler converts string to a list of dict [{'out'=<string>}]"""
+
+ @staticmethod
+ def handle(response):
+ return [{'out': str(response)}]
+
+
+VALID_HANDLERS = {
+ 'json': JsonResponseHandler,
+ 'xml': XMLResponseHandler,
+ 'text': PlainTextResponseHandler
+}
+
+
+def validate_response_handler(val):
+ if not isinstance(val, list):
+ raise declarative.DynamicPollsterDefinitionException(
+ "Invalid response_handlers configuration. It must be a list. "
+ "Provided value type: %s" % type(val).__name__)
+
+ for value in val:
+ if value not in VALID_HANDLERS:
+ raise declarative.DynamicPollsterDefinitionException(
+ "Invalid response_handler value [%s]. Accepted values "
+ "are [%s]" % (value, ', '.join(list(VALID_HANDLERS))))
+
+
+def validate_extra_metadata_skip_samples(val):
+ if not isinstance(val, list) or next(
+ filter(lambda v: not isinstance(v, dict), val), None):
+ raise declarative.DynamicPollsterDefinitionException(
+ "Invalid extra_metadata_fields_skip configuration."
+ " It must be a list of maps. Provided value: %s,"
+ " value type: %s." % (val, type(val).__name__))
+
+
+class ResponseHandlerChain(object):
+ """Tries to convert a string to a dict using the response handlers"""
+
+ def __init__(self, response_handlers, **meta):
+ if not isinstance(response_handlers, list):
+ response_handlers = list(response_handlers)
+
+ self.response_handlers = response_handlers
+ self.meta = meta
+
+ def handle(self, response):
+ failed_handlers = []
+ for handler in self.response_handlers:
+ try:
+ return handler.handle(response)
+ except Exception as e:
+ handler_name = handler.__name__
+ failed_handlers.append(handler_name)
+ LOG.debug(
+ "Error handling response [%s] with handler [%s]: %s. "
+ "We will try the next one, if multiple handlers were "
+ "configured.",
+ response, handler_name, e)
+
+ handlers_str = ', '.join(failed_handlers)
+ raise declarative.InvalidResponseTypeException(
+ "No remaining handlers to handle the response [%s], "
+ "used handlers [%s]. [%s]." % (response, handlers_str, self.meta))
+
+
class PollsterDefinitionBuilder(object):
def __init__(self, definitions):
@@ -128,9 +214,12 @@ class PollsterSampleExtractor(object):
self.generate_new_metadata_fields(
metadata=metadata, pollster_definitions=pollster_definitions)
+ pollster_sample['metadata'] = metadata
extra_metadata = self.definitions.retrieve_extra_metadata(
- kwargs['manager'], pollster_sample)
+ kwargs['manager'], pollster_sample, kwargs['conf'])
+ LOG.debug("Extra metadata [%s] collected for sample [%s].",
+ extra_metadata, pollster_sample)
for key in extra_metadata.keys():
if key in metadata.keys():
LOG.warning("The extra metadata key [%s] already exist in "
@@ -440,7 +529,12 @@ class PollsterDefinitions(object):
PollsterDefinition(name='timeout', default=30),
PollsterDefinition(name='extra_metadata_fields_cache_seconds',
default=3600),
- PollsterDefinition(name='extra_metadata_fields')
+ PollsterDefinition(name='extra_metadata_fields'),
+ PollsterDefinition(name='extra_metadata_fields_skip', default=[{}],
+ validator=validate_extra_metadata_skip_samples),
+ PollsterDefinition(name='response_handlers', default=['json'],
+ validator=validate_response_handler),
+ PollsterDefinition(name='base_metadata', default={})
]
extra_definitions = []
@@ -494,114 +588,116 @@ class PollsterDefinitions(object):
"Required fields %s not specified."
% missing, self.configurations)
- def retrieve_extra_metadata(self, manager, request_sample):
+ def should_skip_extra_metadata(self, skip, sample):
+ match_msg = "Sample [%s] %smatches with configured" \
+ " extra_metadata_fields_skip [%s]."
+ if skip == sample:
+ LOG.debug(match_msg, sample, "", skip)
+ return True
+ if not isinstance(skip, dict) or not isinstance(sample, dict):
+ LOG.debug(match_msg, sample, "not ", skip)
+ return False
+
+ for key in skip:
+ if key not in sample:
+ LOG.debug(match_msg, sample, "not ", skip)
+ return False
+ if not self.should_skip_extra_metadata(skip[key], sample[key]):
+ LOG.debug(match_msg, sample, "not ", skip)
+ return False
+
+ LOG.debug(match_msg, sample, "", skip)
+ return True
+
+ def skip_sample(self, request_sample, skips):
+ for skip in skips:
+ if not skip:
+ continue
+ if self.should_skip_extra_metadata(skip, request_sample):
+ LOG.debug("Skipping extra_metadata_field gathering for "
+ "sample [%s] as defined in the "
+ "extra_metadata_fields_skip [%s]", request_sample,
+ skip)
+ return True
+ return False
+
+ def retrieve_extra_metadata(self, manager, request_sample, pollster_conf):
extra_metadata_fields = self.configurations['extra_metadata_fields']
if extra_metadata_fields:
- if isinstance(self, NonOpenStackApisPollsterDefinition):
- raise declarative.NonOpenStackApisDynamicPollsterException(
- "Not supported the use of extra metadata gathering for "
- "non-openstack pollsters [%s] (yet)."
- % self.configurations['name'])
+ extra_metadata_samples = {}
+ extra_metadata_by_name = {}
+ if not isinstance(extra_metadata_fields, (list, tuple)):
+ extra_metadata_fields = [extra_metadata_fields]
+ for ext_metadata in extra_metadata_fields:
+ ext_metadata.setdefault(
+ 'extra_metadata_fields_skip',
+ self.configurations['extra_metadata_fields_skip'])
+ ext_metadata.setdefault(
+ 'sample_type', self.configurations['sample_type'])
+ ext_metadata.setdefault('unit', self.configurations['unit'])
+ ext_metadata.setdefault(
+ 'value_attribute', ext_metadata.get(
+ 'value', self.configurations['value_attribute']))
+ ext_metadata['base_metadata'] = {
+ 'extra_metadata_captured': extra_metadata_samples,
+ 'extra_metadata_by_name': extra_metadata_by_name,
+ 'sample': request_sample
+ }
+ parent_cache_ttl = self.configurations[
+ 'extra_metadata_fields_cache_seconds']
+ cache_ttl = ext_metadata.get(
+ 'extra_metadata_fields_cache_seconds', parent_cache_ttl
+ )
+ response_cache = self.response_cache
+ extra_metadata_pollster = DynamicPollster(
+ ext_metadata, conf=pollster_conf, cache_ttl=cache_ttl,
+ extra_metadata_responses_cache=response_cache,
+ )
+
+ skips = ext_metadata['extra_metadata_fields_skip']
+ if self.skip_sample(request_sample, skips):
+ continue
- return self._retrieve_extra_metadata(
- extra_metadata_fields, manager, request_sample)
+ resources = [None]
+ if ext_metadata.get('endpoint_type'):
+ resources = manager.discover([
+ extra_metadata_pollster.default_discovery], {})
+ samples = extra_metadata_pollster.get_samples(
+ manager, None, resources)
+ for sample in samples:
+ self.fill_extra_metadata_samples(
+ extra_metadata_by_name,
+ extra_metadata_samples,
+ sample)
+ return extra_metadata_samples
LOG.debug("No extra metadata to be captured for pollsters [%s] and "
"request sample [%s].", self.definitions, request_sample)
return {}
- def _retrieve_extra_metadata(
- self, extra_metadata_fields, manager, request_sample):
- LOG.debug("Processing extra metadata fields [%s] for "
- "sample [%s].", extra_metadata_fields,
- request_sample)
-
- extra_metadata_captured = {}
- for extra_metadata in extra_metadata_fields:
- extra_metadata_name = extra_metadata['name']
-
- if extra_metadata_name in extra_metadata_captured.keys():
- LOG.warning("Duplicated extra metadata name [%s]. Therefore, "
- "we do not process this iteration [%s].",
- extra_metadata_name, extra_metadata)
+ def fill_extra_metadata_samples(self, extra_metadata_by_name,
+ extra_metadata_samples, sample):
+ extra_metadata_samples[sample.name] = sample.volume
+ LOG.debug("Merging the sample metadata [%s] of the "
+ "extra_metadata_field [%s], with the "
+ "extra_metadata_samples [%s].",
+ sample.resource_metadata,
+ sample.name,
+ extra_metadata_samples)
+ for key, value in sample.resource_metadata.items():
+ if value is None and key in extra_metadata_samples:
+ LOG.debug("Metadata [%s] for extra_metadata_field [%s] "
+ "is None, skipping metadata override by None "
+ "value", key, sample.name)
continue
+ extra_metadata_samples[key] = value
+ extra_metadata_by_name[sample.name] = {
+ 'value': sample.volume,
+ 'metadata': sample.resource_metadata
+ }
- LOG.debug("Processing extra metadata [%s] for sample [%s].",
- extra_metadata_name, request_sample)
-
- endpoint_type = 'endpoint:' + extra_metadata['endpoint_type']
- if not endpoint_type.endswith(
- PollsterDefinitions.EXTERNAL_ENDPOINT_TYPE):
- response = self.execute_openstack_extra_metadata_gathering(
- endpoint_type, extra_metadata, manager, request_sample,
- extra_metadata_captured)
- else:
- raise declarative.NonOpenStackApisDynamicPollsterException(
- "Not supported the use of extra metadata gathering for "
- "non-openstack endpoints [%s] (yet)." % extra_metadata)
-
- extra_metadata_extractor_kwargs = {
- 'value_attribute': extra_metadata['value'],
- 'sample': request_sample}
-
- extra_metadata_value = \
- self.sample_extractor.retrieve_attribute_nested_value(
- response, **extra_metadata_extractor_kwargs)
-
- LOG.debug("Generated extra metadata [%s] with value [%s].",
- extra_metadata_name, extra_metadata_value)
- extra_metadata_captured[extra_metadata_name] = extra_metadata_value
-
- return extra_metadata_captured
-
- def execute_openstack_extra_metadata_gathering(self, endpoint_type,
- extra_metadata, manager,
- request_sample,
- extra_metadata_captured):
- url_for_endpoint_type = manager.discover(
- [endpoint_type], self.response_cache)
-
- LOG.debug("URL [%s] found for endpoint type [%s].",
- url_for_endpoint_type, endpoint_type)
-
- if url_for_endpoint_type:
- url_for_endpoint_type = url_for_endpoint_type[0]
-
- self.sample_gatherer.generate_url_path(
- extra_metadata, request_sample, extra_metadata_captured)
-
- cached_response, max_ttl_for_cache = self.response_cache.get(
- extra_metadata['url_path'], (None, None))
-
- extra_metadata_fields_cache_seconds = extra_metadata.get(
- 'extra_metadata_fields_cache_seconds',
- self.configurations['extra_metadata_fields_cache_seconds'])
-
- current_time = time.time()
- if cached_response and max_ttl_for_cache >= current_time:
- LOG.debug("Returning response [%s] for request [%s] as the TTL "
- "[max=%s, current_time=%s] has not expired yet.",
- cached_response, extra_metadata['url_path'],
- max_ttl_for_cache, current_time)
- return cached_response
-
- if cached_response:
- LOG.debug("Cleaning cached response [%s] for request [%s] "
- "as the TTL [max=%s, current_time=%s] has expired.",
- cached_response, extra_metadata['url_path'],
- max_ttl_for_cache, current_time)
-
- response = self.sample_gatherer.execute_request_for_definitions(
- extra_metadata, **{'manager': manager,
- 'keystone_client': manager._keystone,
- 'resource': url_for_endpoint_type,
- 'execute_id_overrides': False})
-
- max_ttl_for_cache = time.time() + extra_metadata_fields_cache_seconds
-
- cache_tuple = (response, max_ttl_for_cache)
- self.response_cache[extra_metadata['url_path']] = cache_tuple
- return response
+ LOG.debug("extra_metadata_samples after merging: [%s].",
+ extra_metadata_samples)
class MultiMetricPollsterDefinitions(PollsterDefinitions):
@@ -655,6 +751,47 @@ class PollsterSampleGatherer(object):
def __init__(self, definitions):
self.definitions = definitions
+ self.response_handler_chain = ResponseHandlerChain(
+ map(VALID_HANDLERS.get,
+ self.definitions.configurations['response_handlers']),
+ url_path=definitions.configurations['url_path']
+ )
+
+ def get_cache_key(self, definitions, **kwargs):
+ return self.get_request_linked_samples_url(kwargs, definitions)
+
+ def get_cached_response(self, definitions, **kwargs):
+ if self.definitions.cache_ttl == 0:
+ return
+ cache_key = self.get_cache_key(definitions, **kwargs)
+ response_cache = self.definitions.response_cache
+ cached_response, max_ttl_for_cache = response_cache.get(
+ cache_key, (None, None))
+
+ current_time = time.time()
+ if cached_response and max_ttl_for_cache >= current_time:
+ LOG.debug("Returning response [%s] for request [%s] as the TTL "
+ "[max=%s, current_time=%s] has not expired yet.",
+ cached_response, definitions,
+ max_ttl_for_cache, current_time)
+ return cached_response
+
+ if cached_response and max_ttl_for_cache < current_time:
+ LOG.debug("Cleaning cached response [%s] for request [%s] "
+ "as the TTL [max=%s, current_time=%s] has expired.",
+ cached_response, definitions,
+ max_ttl_for_cache, current_time)
+ response_cache.pop(cache_key, None)
+
+ def store_cached_response(self, definitions, resp, **kwargs):
+ if self.definitions.cache_ttl == 0:
+ return
+ cache_key = self.get_cache_key(definitions, **kwargs)
+ extra_metadata_fields_cache_seconds = self.definitions.cache_ttl
+ max_ttl_for_cache = time.time() + extra_metadata_fields_cache_seconds
+
+ cache_tuple = (resp, max_ttl_for_cache)
+ self.definitions.response_cache[cache_key] = cache_tuple
@property
def default_discovery(self):
@@ -665,20 +802,24 @@ class PollsterSampleGatherer(object):
self.definitions.configurations, **kwargs)
def execute_request_for_definitions(self, definitions, **kwargs):
- resp, url = self._internal_execute_request_get_samples(
- definitions=definitions, **kwargs)
+ if response_dict := self.get_cached_response(definitions, **kwargs):
+ url = 'cached'
+ else:
+ resp, url = self._internal_execute_request_get_samples(
+ definitions=definitions, **kwargs)
+ response_dict = self.response_handler_chain.handle(resp.text)
+ self.store_cached_response(definitions, response_dict, **kwargs)
- response_json = resp.json()
- entry_size = len(response_json)
- LOG.debug("Entries [%s] in the JSON for request [%s] "
+ entry_size = len(response_dict)
+ LOG.debug("Entries [%s] in the DICT for request [%s] "
"for dynamic pollster [%s].",
- response_json, url, definitions['name'])
+ response_dict, url, definitions['name'])
if entry_size > 0:
samples = self.retrieve_entries_from_response(
- response_json, definitions)
+ response_dict, definitions)
url_to_next_sample = self.get_url_to_next_sample(
- response_json, definitions)
+ response_dict, definitions)
self.prepare_samples(definitions, samples, **kwargs)
@@ -707,21 +848,6 @@ class PollsterSampleGatherer(object):
self.generate_new_attributes_in_sample(
request_sample, resource_id_attribute, 'id')
- def generate_url_path(self, extra_metadata, sample,
- extra_metadata_captured):
- if not extra_metadata.get('url_path_original'):
- extra_metadata[
- 'url_path_original'] = extra_metadata['url_path']
-
- extra_metadata['url_path'] = eval(
- extra_metadata['url_path_original'])
-
- LOG.debug("URL [%s] generated for pattern [%s] for sample [%s] and "
- "extra metadata captured [%s].",
- extra_metadata['url_path'],
- extra_metadata['url_path_original'], sample,
- extra_metadata_captured)
-
def generate_new_attributes_in_sample(
self, sample, attribute_key, new_attribute_key):
@@ -798,6 +924,15 @@ class PollsterSampleGatherer(object):
def get_request_url(self, kwargs, url_path):
endpoint = kwargs['resource']
+ params = copy.deepcopy(
+ self.definitions.configurations.get(
+ 'base_metadata', {}))
+ try:
+ url_path = eval(url_path, params)
+ except Exception:
+ LOG.debug("Cannot eval path [%s] with params [%s],"
+ " using [%s] instead.",
+ url_path, params, url_path)
return urlparse.urljoin(endpoint, url_path)
def retrieve_entries_from_response(self, response_json, definitions):
@@ -836,6 +971,57 @@ class NonOpenStackApisPollsterDefinition(PollsterDefinitions):
return configurations.get('module')
+class HostCommandPollsterDefinition(PollsterDefinitions):
+
+ extra_definitions = [
+ PollsterDefinition(name='endpoint_type', required=False),
+ PollsterDefinition(name='url_path', required=False),
+ PollsterDefinition(name='host_command', required=True)]
+
+ def __init__(self, configurations):
+ super(HostCommandPollsterDefinition, self).__init__(
+ configurations)
+ self.sample_gatherer = HostCommandSamplesGatherer(self)
+
+ @staticmethod
+ def is_field_applicable_to_definition(configurations):
+ return configurations.get('host_command')
+
+
+class HostCommandSamplesGatherer(PollsterSampleGatherer):
+
+ class Response(object):
+ def __init__(self, text):
+ self.text = text
+
+ def get_cache_key(self, definitions, **kwargs):
+ return self.get_command(definitions)
+
+ def _internal_execute_request_get_samples(self, definitions, **kwargs):
+ command = self.get_command(definitions, **kwargs)
+ LOG.debug('Running Host command: [%s]', command)
+ result = subprocess.getoutput(command)
+ LOG.debug('Host command [%s] result: [%s]', command, result)
+ return self.Response(result), command
+
+ def get_command(self, definitions, next_sample_url=None, **kwargs):
+ command = next_sample_url or definitions['host_command']
+ params = copy.deepcopy(
+ self.definitions.configurations.get(
+ 'base_metadata', {}))
+ try:
+ command = eval(command, params)
+ except Exception:
+ LOG.debug("Cannot eval command [%s] with params [%s],"
+ " using [%s] instead.",
+ command, params, command)
+ return command
+
+ @property
+ def default_discovery(self):
+ return 'local_node'
+
+
class NonOpenStackApisSamplesGatherer(PollsterSampleGatherer):
@property
@@ -850,6 +1036,9 @@ class NonOpenStackApisSamplesGatherer(PollsterSampleGatherer):
if override_credentials:
credentials = override_credentials
+ if not isinstance(credentials, str):
+ credentials = self.normalize_credentials_to_string(credentials)
+
url = self.get_request_linked_samples_url(kwargs, definitions)
authenticator_module_name = definitions['module']
@@ -878,6 +1067,17 @@ class NonOpenStackApisSamplesGatherer(PollsterSampleGatherer):
return resp, url
+ @staticmethod
+ def normalize_credentials_to_string(credentials):
+ if isinstance(credentials, bytes):
+ credentials = credentials.decode('utf-8')
+ else:
+ credentials = str(credentials)
+ LOG.debug("Credentials [%s] were not defined as a string. "
+ "Therefore, we converted it to a string like object.",
+ credentials)
+ return credentials
+
def create_request_arguments(self, definitions):
request_arguments = super(
NonOpenStackApisSamplesGatherer, self).create_request_arguments(
@@ -913,8 +1113,10 @@ class DynamicPollster(plugin_base.PollsterBase):
# Mandatory name field
name = ""
- def __init__(self, pollster_definitions={}, conf=None,
- supported_definitions=[NonOpenStackApisPollsterDefinition,
+ def __init__(self, pollster_definitions={}, conf=None, cache_ttl=0,
+ extra_metadata_responses_cache=None,
+ supported_definitions=[HostCommandPollsterDefinition,
+ NonOpenStackApisPollsterDefinition,
MultiMetricPollsterDefinitions,
SingleMetricPollsterDefinitions]):
super(DynamicPollster, self).__init__(conf)
@@ -924,6 +1126,10 @@ class DynamicPollster(plugin_base.PollsterBase):
self.definitions = PollsterDefinitionBuilder(
self.supported_definitions).build_definitions(pollster_definitions)
+ self.definitions.cache_ttl = cache_ttl
+ self.definitions.response_cache = extra_metadata_responses_cache
+ if extra_metadata_responses_cache is None:
+ self.definitions.response_cache = {}
self.pollster_definitions = self.definitions.configurations
if 'metadata_fields' in self.pollster_definitions:
LOG.debug("Metadata fields configured to [%s].",
@@ -957,9 +1163,12 @@ class DynamicPollster(plugin_base.PollsterBase):
for r in resources:
LOG.debug("Executing get sample for resource [%s].", r)
samples = self.load_samples(r, manager)
+ if not isinstance(samples, (list, tuple)):
+ samples = [samples]
for pollster_sample in samples:
- kwargs = {'manager': manager, 'resource': r}
- sample = self.extract_sample(pollster_sample, **kwargs)
+ sample = self.extract_sample(
+ pollster_sample, manager=manager,
+ resource=r, conf=self.conf)
if isinstance(sample, SkippedSample):
continue
yield from sample
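Putting the new definition options together: the unit tests further down build pollsters from plain dicts, and the same shape applies to a YAML definition file. A hedged sketch using the new fields (the metric name, endpoint and filter values are illustrative; only the option names come from this change):

    # Illustrative only: combines the new response_handlers,
    # extra_metadata_fields and extra_metadata_fields_skip options.
    definition = {
        'name': 'dynamic.identity.projects',
        'sample_type': 'gauge',
        'unit': 'project',
        'value_attribute': 'project_id',
        'endpoint_type': 'identity',
        'url_path': 'v3/projects',
        # Try XML first, then fall back to JSON (the default is ['json']).
        'response_handlers': ['xml', 'json'],
        # Gather one extra metadata value per sample via a second request.
        'extra_metadata_fields': [{
            'name': 'project_name',
            'endpoint_type': 'identity',
            'url_path': "'/v3/projects/' + str(sample['project_id'])",
            'value': 'name',
        }],
        # Skip the extra request for samples whose extracted value is 0.
        'extra_metadata_fields_skip': [{'value': 0}],
    }

    from ceilometer.polling import dynamic_pollster
    pollster = dynamic_pollster.DynamicPollster(definition)

A host-command pollster (the new HostCommandPollsterDefinition) takes the same shape but replaces endpoint_type/url_path with a host_command entry, whose output is parsed by the configured response handlers.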
diff --git a/ceilometer/polling/manager.py b/ceilometer/polling/manager.py
index 6b9289d1..3545801f 100644
--- a/ceilometer/polling/manager.py
+++ b/ceilometer/polling/manager.py
@@ -35,6 +35,7 @@ from tooz import coordination
from urllib import parse as urlparse
from ceilometer import agent
+from ceilometer import cache_utils
from ceilometer import declarative
from ceilometer import keystone_client
from ceilometer import messaging
@@ -45,6 +46,8 @@ from ceilometer import utils
LOG = log.getLogger(__name__)
+CACHE_DURATION = 3600
+
POLLING_OPTS = [
cfg.StrOpt('cfg_file',
default="polling.yaml",
@@ -64,7 +67,18 @@ POLLING_OPTS = [
cfg.MultiStrOpt('pollsters_definitions_dirs',
default=["/etc/ceilometer/pollsters.d"],
help="List of directories with YAML files used "
- "to created pollsters.")
+ "to created pollsters."),
+ cfg.BoolOpt('tenant_name_discovery',
+ default=False,
+ help="Identify project and user names from polled samples"
+ "By default, collecting these values is disabled due"
+ "to the fact that it could overwhelm keystone service"
+ "with lots of continuous requests depending upon the"
+ "number of projects, users and samples polled from"
+ "the environment. While using this feature, it is"
+ "recommended that ceilometer be configured with a"
+ "caching backend to reduce the number of calls"
+ "made to keystone"),
]
@@ -138,11 +152,39 @@ class PollingTask(object):
self._telemetry_secret = self.manager.conf.publisher.telemetry_secret
+ self.ks_client = self.manager.keystone
+
+ self.cache_client = cache_utils.get_client(
+ self.manager.conf,
+ expiration_time=CACHE_DURATION
+ )
+
def add(self, pollster, source):
self.pollster_matches[source.name].add(pollster)
key = Resources.key(source.name, pollster)
self.resources[key].setup(source)
+ def resolve_uuid_from_cache(self, attr, uuid):
+ if self.cache_client:
+ name = self.cache_client.get(uuid)
+ if name:
+ return name
+ name = self.resolve_uuid_from_keystone(attr, uuid)
+ self.cache_client.set(uuid, name)
+ return name
+
+ # Retrieve project and user names from Keystone only
+ # if ceilometer doesn't have a caching backend
+ return self.resolve_uuid_from_keystone(attr, uuid)
+
+ def resolve_uuid_from_keystone(self, attr, uuid):
+ try:
+ return getattr(self.ks_client, attr).get(uuid).name
+ except AttributeError as e:
+ LOG.warning("Found '%s' while resolving uuid %s to name", e, uuid)
+ except ka_exceptions.NotFound as e:
+ LOG.warning(e.message)
+
def poll_and_notify(self):
"""Polling sample and notify."""
cache = {}
@@ -194,6 +236,25 @@ class PollingTask(object):
for sample in samples:
# Note(yuywz): Unify the timestamp of polled samples
sample.set_timestamp(polling_timestamp)
+
+ if self.manager.conf.tenant_name_discovery:
+
+ # Try to resolve project UUIDs from cache first,
+ # and then keystone
+ if sample.project_id:
+ sample.project_name = \
+ self.resolve_uuid_from_cache(
+ "projects", sample.project_id
+ )
+
+ # Try to resolve user UUIDs from cache first,
+ # and then keystone
+ if sample.user_id:
+ sample.user_name = \
+ self.resolve_uuid_from_cache(
+ "users", sample.user_id
+ )
+
sample_dict = (
publisher_utils.meter_message_from_counter(
sample, self._telemetry_secret
@@ -253,7 +314,8 @@ class AgentManager(cotyledon.Service):
for namespace in namespaces)
# Create dynamic pollsters
- extensions_dynamic_pollsters = self.create_dynamic_pollsters()
+ extensions_dynamic_pollsters = self.create_dynamic_pollsters(
+ namespaces)
self.extensions = list(itertools.chain(*list(extensions))) + list(
itertools.chain(*list(extensions_fb))) + list(
@@ -291,15 +353,18 @@ class AgentManager(cotyledon.Service):
self._keystone = None
self._keystone_last_exception = None
- def create_dynamic_pollsters(self):
+ def create_dynamic_pollsters(self, namespaces):
"""Creates dynamic pollsters
This method Creates dynamic pollsters based on configurations placed on
'pollsters_definitions_dirs'
+ :param namespaces: The namespaces we are running on to validate if
+ the pollster should be instantiated or not.
:return: a list with the dynamic pollsters defined by the operator.
"""
+ namespaces_set = set(namespaces)
pollsters_definitions_dirs = self.conf.pollsters_definitions_dirs
if not pollsters_definitions_dirs:
LOG.info("Variable 'pollsters_definitions_dirs' not defined.")
@@ -333,6 +398,21 @@ class AgentManager(cotyledon.Service):
for pollster_cfg in pollsters_cfg:
pollster_name = pollster_cfg['name']
+ pollster_namespaces = pollster_cfg.get(
+ 'namespaces', ['central'])
+ if isinstance(pollster_namespaces, list):
+ pollster_namespaces = set(pollster_namespaces)
+ else:
+ pollster_namespaces = {pollster_namespaces}
+
+ if not bool(namespaces_set & pollster_namespaces):
+ LOG.info("The pollster [%s] is not configured to run in "
+ "these namespaces %s, the configured namespaces "
+ "for this pollster are %s. Therefore, we are "
+ "skipping it.", pollster_name, namespaces_set,
+ pollster_namespaces)
+ continue
+
if pollster_name not in pollsters_definitions:
LOG.info("Loading dynamic pollster [%s] from file [%s].",
pollster_name, pollsters_definitions_file)
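With tenant_name_discovery enabled, every distinct project_id and user_id seen while polling triggers a Keystone name lookup unless the cache answers first; CACHE_DURATION above bounds how long resolved names are reused. A hedged ceilometer.conf sketch, assuming a local memcached instance (the backend and server values are assumptions, not defaults from this change):

    # ceilometer.conf sketch - illustrative values only.
    # tenant_name_discovery belongs in the same section as the other
    # polling options (cfg_file, pollsters_definitions_dirs).
    tenant_name_discovery = True

    [cache]
    enabled = True
    backend = dogpile.cache.memcached
    memcache_servers = 127.0.0.1:11211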
diff --git a/ceilometer/publisher/gnocchi.py b/ceilometer/publisher/gnocchi.py
index 530013f9..19f186c7 100644
--- a/ceilometer/publisher/gnocchi.py
+++ b/ceilometer/publisher/gnocchi.py
@@ -24,6 +24,7 @@ import uuid
from gnocchiclient import exceptions as gnocchi_exc
from keystoneauth1 import exceptions as ka_exceptions
+import oslo_cache
from oslo_log import log
from oslo_utils import timeutils
from stevedore import extension
@@ -214,7 +215,6 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
self.cache = None
try:
- import oslo_cache
oslo_cache.configure(conf)
# NOTE(cdent): The default cache backend is a real but
# noop backend. We don't want to use that here because
@@ -225,8 +225,6 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
self.cache = oslo_cache.configure_cache_region(
conf, cache_region)
self.cache.key_mangler = cache_key_mangler
- except ImportError:
- pass
except oslo_cache.exception.ConfigurationError as exc:
LOG.warning('unable to configure oslo_cache: %s', exc)
@@ -327,7 +325,18 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
# NOTE(sileht): skip sample generated by gnocchi itself
data = [s for s in data if not self._is_gnocchi_activity(s)]
- data.sort(key=operator.attrgetter('resource_id'))
+
+ def value_to_sort(object_to_sort):
+ value = object_to_sort.resource_id
+ if not value:
+ LOG.debug("Resource ID was not defined for sample data [%s]. "
+ "Therefore, we will use an empty string as the "
+ "resource ID.", object_to_sort)
+ value = ''
+
+ return value
+
+ data.sort(key=value_to_sort)
resource_grouped_samples = itertools.groupby(
data, key=operator.attrgetter('resource_id'))
@@ -371,9 +380,13 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
try:
self.batch_measures(measures, gnocchi_data)
except gnocchi_exc.ClientException as e:
- LOG.error(str(e))
+ LOG.error("Gnocchi client exception while pushing measures [%s] "
+ "for gnocchi data [%s]: [%s].", measures, gnocchi_data,
+ str(e))
except Exception as e:
- LOG.error(str(e), exc_info=True)
+ LOG.error("Unexpected exception while pushing measures [%s] for "
+ "gnocchi data [%s]: [%s].", measures, gnocchi_data,
+ str(e), exc_info=True)
for info in gnocchi_data.values():
resource = info["resource"]
@@ -385,9 +398,15 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
self._if_not_cached(resource_type, resource['id'],
resource_extra)
except gnocchi_exc.ClientException as e:
- LOG.error(str(e))
+ LOG.error("Gnocchi client exception updating resource type "
+ "[%s] with ID [%s] for resource data [%s]: [%s].",
+ resource_type, resource.get('id'), resource_extra,
+ str(e))
except Exception as e:
- LOG.error(str(e), exc_info=True)
+ LOG.error("Unexpected exception updating resource type [%s] "
+ "with ID [%s] for resource data [%s]: [%s].",
+ resource_type, resource.get('id'), resource_extra,
+ str(e), exc_info=True)
@staticmethod
def _extract_resources_from_error(e, resource_infos):
@@ -402,6 +421,9 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
# NOTE(sileht): We don't care about error here, we want
# resources metadata always been updated
try:
+ LOG.debug("Executing batch resource metrics measures for resource "
+ "[%s] and measures [%s].", resource_infos, measures)
+
self._gnocchi.metric.batch_resources_metrics_measures(
measures, create_metrics=True)
except gnocchi_exc.BadRequest as e:
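The custom sort key above replaces operator.attrgetter('resource_id') because Python 3 refuses to order None against str, so a single sample without a resource_id would make the sort raise before any measures were pushed. A small sketch of the failure mode and the fallback (the IDs are made up):

    # Why the None-safe key matters under Python 3.
    ids = ['volume-b', None, 'volume-a']

    # sorted(ids)                       # TypeError: '<' not supported between
    #                                   # instances of 'NoneType' and 'str'
    print(sorted(ids, key=lambda i: i or ''))
    # -> [None, 'volume-a', 'volume-b']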
diff --git a/ceilometer/publisher/utils.py b/ceilometer/publisher/utils.py
index 75df2b70..0d1e7be0 100644
--- a/ceilometer/publisher/utils.py
+++ b/ceilometer/publisher/utils.py
@@ -126,7 +126,9 @@ def meter_message_from_counter(sample, secret):
'counter_unit': sample.unit,
'counter_volume': sample.volume,
'user_id': sample.user_id,
+ 'user_name': sample.user_name,
'project_id': sample.project_id,
+ 'project_name': sample.project_name,
'resource_id': sample.resource_id,
'timestamp': sample.timestamp,
'resource_metadata': sample.resource_metadata,
diff --git a/ceilometer/sample.py b/ceilometer/sample.py
index c86caa35..536b561d 100644
--- a/ceilometer/sample.py
+++ b/ceilometer/sample.py
@@ -94,13 +94,16 @@ class Sample(object):
def __init__(self, name, type, unit, volume, user_id, project_id,
resource_id, timestamp=None, resource_metadata=None,
- source=None, id=None, monotonic_time=None):
+ source=None, id=None, monotonic_time=None,
+ user_name=None, project_name=None):
self.name = name
self.type = type
self.unit = unit
self.volume = volume
self.user_id = user_id
+ self.user_name = user_name
self.project_id = project_id
+ self.project_name = project_name
self.resource_id = resource_id
self.timestamp = timestamp
self.resource_metadata = resource_metadata or {}
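The two new keyword arguments flow straight through to the signed meter message shown in the publisher/utils.py hunk above. A minimal sketch with made-up IDs, names, and a throwaway telemetry secret:

    # Sketch only: user_name/project_name end up in the meter message.
    from ceilometer import sample as ceilometer_sample
    from ceilometer.publisher import utils as publisher_utils

    s = ceilometer_sample.Sample(
        name='image.size', type=ceilometer_sample.TYPE_GAUGE, unit='B',
        volume=1024, user_id='fake-user-id', project_id='fake-project-id',
        resource_id='fake-resource-id',
        user_name='demo-user', project_name='demo-project')

    msg = publisher_utils.meter_message_from_counter(s, 'not-so-secret')
    assert msg['user_name'] == 'demo-user'
    assert msg['project_name'] == 'demo-project'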
diff --git a/ceilometer/tests/unit/compute/pollsters/test_net.py b/ceilometer/tests/unit/compute/pollsters/test_net.py
index 190c93df..7db171de 100644
--- a/ceilometer/tests/unit/compute/pollsters/test_net.py
+++ b/ceilometer/tests/unit/compute/pollsters/test_net.py
@@ -120,12 +120,13 @@ class TestNetPollster(base.TestPollsterBase):
self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES)
- def _check_get_samples(self, factory, expected, kind='cumulative'):
+ def _check_get_samples(self, factory, expected, expected_name,
+ kind='cumulative'):
mgr = manager.AgentManager(0, self.CONF)
pollster = factory(self.CONF)
samples = list(pollster.get_samples(mgr, {}, [self.instance]))
self.assertEqual(3, len(samples)) # one for each nic
- self.assertEqual(set([samples[0].name]),
+ self.assertEqual(set([expected_name]),
set([s.name for s in samples]))
def _verify_vnic_metering(ip, expected_volume, expected_rid):
@@ -149,6 +150,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 9,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes',
)
def test_outgoing_bytes(self):
@@ -160,6 +162,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 11,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes',
)
def test_incoming_bytes_delta(self):
@@ -171,6 +174,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 46,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes.delta',
'delta',
)
@@ -183,6 +187,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 47,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes.delta',
'delta',
)
@@ -195,6 +200,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 10,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets',
)
def test_outgoing_packets(self):
@@ -206,6 +212,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 12,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets',
)
def test_incoming_drops(self):
@@ -217,6 +224,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 28,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets.drop',
)
def test_outgoing_drops(self):
@@ -228,6 +236,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 30,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets.drop',
)
def test_incoming_errors(self):
@@ -239,6 +248,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 29,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets.error',
)
def test_outgoing_errors(self):
@@ -250,6 +260,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 31,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets.error',
)
def test_metadata(self):
@@ -309,12 +320,12 @@ class TestNetRatesPollster(base.TestPollsterBase):
]
self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics)
- def _check_get_samples(self, factory, expected):
+ def _check_get_samples(self, factory, expected, expected_name):
mgr = manager.AgentManager(0, self.CONF)
pollster = factory(self.CONF)
samples = list(pollster.get_samples(mgr, {}, [self.instance]))
self.assertEqual(3, len(samples)) # one for each nic
- self.assertEqual(set([samples[0].name]),
+ self.assertEqual(set([expected_name]),
set([s.name for s in samples]))
def _verify_vnic_metering(ip, expected_volume, expected_rid):
@@ -338,6 +349,7 @@ class TestNetRatesPollster(base.TestPollsterBase):
('192.168.0.4', 5,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes.rate',
)
def test_outgoing_bytes_rate(self):
@@ -349,4 +361,5 @@ class TestNetRatesPollster(base.TestPollsterBase):
('192.168.0.4', 6,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes.rate',
)
diff --git a/ceilometer/tests/unit/polling/test_dynamic_pollster.py b/ceilometer/tests/unit/polling/test_dynamic_pollster.py
index e596f7b5..f85af729 100644
--- a/ceilometer/tests/unit/polling/test_dynamic_pollster.py
+++ b/ceilometer/tests/unit/polling/test_dynamic_pollster.py
@@ -14,13 +14,14 @@
"""Tests for OpenStack dynamic pollster
"""
import copy
+import json
import logging
from unittest import mock
import requests
from urllib import parse as urlparse
-from ceilometer.declarative import DynamicPollsterDefinitionException
+from ceilometer import declarative
from ceilometer.polling import dynamic_pollster
from ceilometer import sample
from oslotest import base
@@ -107,6 +108,11 @@ class TestDynamicPollster(base.BaseTestCase):
class FakeResponse(object):
status_code = None
json_object = None
+ _text = None
+
+ @property
+ def text(self):
+ return self._text or json.dumps(self.json_object)
def json(self):
return self.json_object
@@ -242,9 +248,10 @@ class TestDynamicPollster(base.BaseTestCase):
pollster_definition = copy.deepcopy(
self.pollster_definition_only_required_fields)
pollster_definition.pop(key)
- exception = self.assertRaises(DynamicPollsterDefinitionException,
- dynamic_pollster.DynamicPollster,
- pollster_definition)
+ exception = self.assertRaises(
+ declarative.DynamicPollsterDefinitionException,
+ dynamic_pollster.DynamicPollster,
+ pollster_definition)
self.assertEqual("Required fields ['%s'] not specified."
% key, exception.brief_message)
@@ -252,7 +259,7 @@ class TestDynamicPollster(base.BaseTestCase):
self.pollster_definition_only_required_fields[
'sample_type'] = "invalid_sample_type"
exception = self.assertRaises(
- DynamicPollsterDefinitionException,
+ declarative.DynamicPollsterDefinitionException,
dynamic_pollster.DynamicPollster,
self.pollster_definition_only_required_fields)
self.assertEqual("Invalid sample type [invalid_sample_type]. "
@@ -314,6 +321,763 @@ class TestDynamicPollster(base.BaseTestCase):
self.assertEqual(3, len(samples))
@mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_json_response_handler(
+ self, client_mock):
+ pollster = dynamic_pollster.DynamicPollster(
+ self.pollster_definition_only_required_fields)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '{"test": [1,2,3]}'
+
+ client_mock.session.get.return_value = return_value
+
+ samples = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples(
+ keystone_client=client_mock,
+ resource="https://endpoint.server.name/")
+
+ self.assertEqual(3, len(samples))
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_xml_response_handler(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = ['xml']
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '<test>123</test>'
+ client_mock.session.get.return_value = return_value
+
+ samples = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples(
+ keystone_client=client_mock,
+ resource="https://endpoint.server.name/")
+
+ self.assertEqual(3, len(samples))
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_xml_json_response_handler(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = ['xml', 'json']
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '<test>123</test>'
+ client_mock.session.get.return_value = return_value
+
+ samples = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples(
+ keystone_client=client_mock,
+ resource="https://endpoint.server.name/")
+
+ self.assertEqual(3, len(samples))
+
+ return_value._text = '{"test": [1,2,3,4]}'
+
+ samples = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples(
+ keystone_client=client_mock,
+ resource="https://endpoint.server.name/")
+
+ self.assertEqual(4, len(samples))
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_extra_metadata_fields_cache_disabled(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ extra_metadata_fields = {
+ 'extra_metadata_fields_cache_seconds': 0,
+ 'name': "project_name",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + str(sample['project_id'])",
+ 'value': "name",
+ }
+ definitions['value_attribute'] = 'project_id'
+ definitions['extra_metadata_fields'] = extra_metadata_fields
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '''
+ {"projects": [
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"},
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"},
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"}]
+ }
+ '''
+
+ return_value9999 = self.FakeResponse()
+ return_value9999.status_code = requests.codes.ok
+ return_value9999._text = '''
+ {"project":
+ {"project_id": 9999, "name": "project1"}
+ }
+ '''
+
+ return_value8888 = self.FakeResponse()
+ return_value8888.status_code = requests.codes.ok
+ return_value8888._text = '''
+ {"project":
+ {"project_id": 8888, "name": "project2"}
+ }
+ '''
+
+ return_value7777 = self.FakeResponse()
+ return_value7777.status_code = requests.codes.ok
+ return_value7777._text = '''
+ {"project":
+ {"project_id": 7777, "name": "project3"}
+ }
+ '''
+
+ def get(url, *args, **kwargs):
+ if '9999' in url:
+ return return_value9999
+ if '8888' in url:
+ return return_value8888
+ if '7777' in url:
+ return return_value7777
+ return return_value
+
+ client_mock.session.get.side_effect = get
+ manager = mock.Mock
+ manager._keystone = client_mock
+
+ def discover(*args, **kwargs):
+ return ["https://endpoint.server.name/"]
+
+ manager.discover = discover
+ samples = pollster.get_samples(
+ manager=manager, cache=None,
+ resources=["https://endpoint.server.name/"])
+
+ samples = list(samples)
+
+ n_calls = client_mock.session.get.call_count
+ self.assertEqual(9, len(samples))
+ self.assertEqual(10, n_calls)
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_extra_metadata_fields_cache_enabled(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ extra_metadata_fields = {
+ 'extra_metadata_fields_cache_seconds': 3600,
+ 'name': "project_name",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + str(sample['project_id'])",
+ 'value': "name",
+ }
+ definitions['value_attribute'] = 'project_id'
+ definitions['extra_metadata_fields'] = extra_metadata_fields
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '''
+ {"projects": [
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"},
+ {"project_id": 9999, "name": "project4"},
+ {"project_id": 8888, "name": "project5"},
+ {"project_id": 7777, "name": "project6"},
+ {"project_id": 9999, "name": "project7"},
+ {"project_id": 8888, "name": "project8"},
+ {"project_id": 7777, "name": "project9"}]
+ }
+ '''
+
+ return_value9999 = self.FakeResponse()
+ return_value9999.status_code = requests.codes.ok
+ return_value9999._text = '''
+ {"project":
+ {"project_id": 9999, "name": "project1"}
+ }
+ '''
+
+ return_value8888 = self.FakeResponse()
+ return_value8888.status_code = requests.codes.ok
+ return_value8888._text = '''
+ {"project":
+ {"project_id": 8888, "name": "project2"}
+ }
+ '''
+
+ return_value7777 = self.FakeResponse()
+ return_value7777.status_code = requests.codes.ok
+ return_value7777._text = '''
+ {"project":
+ {"project_id": 7777, "name": "project3"}
+ }
+ '''
+
+ def get(url, *args, **kwargs):
+ if '9999' in url:
+ return return_value9999
+ if '8888' in url:
+ return return_value8888
+ if '7777' in url:
+ return return_value7777
+ return return_value
+
+ client_mock.session.get.side_effect = get
+ manager = mock.Mock
+ manager._keystone = client_mock
+
+ def discover(*args, **kwargs):
+ return ["https://endpoint.server.name/"]
+
+ manager.discover = discover
+ samples = pollster.get_samples(
+ manager=manager, cache=None,
+ resources=["https://endpoint.server.name/"])
+
+ samples = list(samples)
+
+ n_calls = client_mock.session.get.call_count
+ self.assertEqual(9, len(samples))
+ self.assertEqual(4, n_calls)
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_extra_metadata_fields(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ extra_metadata_fields = [{
+ 'name': "project_name",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + str(sample['project_id'])",
+ 'value': "name",
+ 'metadata_fields': ['meta']
+ }, {
+ 'name': "project_alias",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + "
+ "str(extra_metadata_captured['project_name'])",
+ 'value': "name",
+ 'metadata_fields': ['meta']
+ }, {
+ 'name': "project_meta",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + "
+ "str(extra_metadata_by_name['project_name']"
+ "['metadata']['meta'])",
+ 'value': "project_id",
+ 'metadata_fields': ['meta']
+ }]
+ definitions['value_attribute'] = 'project_id'
+ definitions['extra_metadata_fields'] = extra_metadata_fields
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '''
+ {"projects": [
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"}]
+ }
+ '''
+
+ return_value9999 = self.FakeResponse()
+ return_value9999.status_code = requests.codes.ok
+ return_value9999._text = '''
+ {"project":
+ {"project_id": 9999, "name": "project1",
+ "meta": "m1"}
+ }
+ '''
+
+ return_value8888 = self.FakeResponse()
+ return_value8888.status_code = requests.codes.ok
+ return_value8888._text = '''
+ {"project":
+ {"project_id": 8888, "name": "project2",
+ "meta": "m2"}
+ }
+ '''
+
+ return_value7777 = self.FakeResponse()
+ return_value7777.status_code = requests.codes.ok
+ return_value7777._text = '''
+ {"project":
+ {"project_id": 7777, "name": "project3",
+ "meta": "m3"}
+ }
+ '''
+
+ return_valueP1 = self.FakeResponse()
+ return_valueP1.status_code = requests.codes.ok
+ return_valueP1._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p1",
+ "meta": null}
+ }
+ '''
+
+ return_valueP2 = self.FakeResponse()
+ return_valueP2.status_code = requests.codes.ok
+ return_valueP2._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p2",
+ "meta": null}
+ }
+ '''
+
+ return_valueP3 = self.FakeResponse()
+ return_valueP3.status_code = requests.codes.ok
+ return_valueP3._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p3",
+ "meta": null}
+ }
+ '''
+
+ return_valueM1 = self.FakeResponse()
+ return_valueM1.status_code = requests.codes.ok
+ return_valueM1._text = '''
+ {"project":
+ {"project_id": "META1", "name": "p3",
+ "meta": null}
+ }
+ '''
+
+ return_valueM2 = self.FakeResponse()
+ return_valueM2.status_code = requests.codes.ok
+ return_valueM2._text = '''
+ {"project":
+ {"project_id": "META2", "name": "p3",
+ "meta": null}
+ }
+ '''
+
+ return_valueM3 = self.FakeResponse()
+ return_valueM3.status_code = requests.codes.ok
+ return_valueM3._text = '''
+ {"project":
+ {"project_id": "META3", "name": "p3",
+ "meta": null}
+ }
+ '''
+
+ def get(url, *args, **kwargs):
+ if '9999' in url:
+ return return_value9999
+ if '8888' in url:
+ return return_value8888
+ if '7777' in url:
+ return return_value7777
+ if 'project1' in url:
+ return return_valueP1
+ if 'project2' in url:
+ return return_valueP2
+ if 'project3' in url:
+ return return_valueP3
+ if 'm1' in url:
+ return return_valueM1
+ if 'm2' in url:
+ return return_valueM2
+ if 'm3' in url:
+ return return_valueM3
+
+ return return_value
+
+ client_mock.session.get = get
+ manager = mock.Mock
+ manager._keystone = client_mock
+
+ def discover(*args, **kwargs):
+ return ["https://endpoint.server.name/"]
+
+ manager.discover = discover
+ samples = pollster.get_samples(
+ manager=manager, cache=None,
+ resources=["https://endpoint.server.name/"])
+
+ samples = list(samples)
+ self.assertEqual(3, len(samples))
+
+ self.assertEqual(samples[0].volume, 9999)
+ self.assertEqual(samples[1].volume, 8888)
+ self.assertEqual(samples[2].volume, 7777)
+
+ self.assertEqual(samples[0].resource_metadata,
+ {'project_name': 'project1',
+ 'project_alias': 'p1',
+ 'meta': 'm1',
+ 'project_meta': 'META1'})
+ self.assertEqual(samples[1].resource_metadata,
+ {'project_name': 'project2',
+ 'project_alias': 'p2',
+ 'meta': 'm2',
+ 'project_meta': 'META2'})
+ self.assertEqual(samples[2].resource_metadata,
+ {'project_name': 'project3',
+ 'project_alias': 'p3',
+ 'meta': 'm3',
+ 'project_meta': 'META3'})
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_extra_metadata_fields_skip(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ extra_metadata_fields = [{
+ 'name': "project_name",
+ 'endpoint_type': "identity",
+ 'url_path': "'/v3/projects/' + str(sample['project_id'])",
+ 'value': "name",
+ }, {
+ 'name': "project_alias",
+ 'endpoint_type': "identity",
+ 'extra_metadata_fields_skip': [{
+ 'value': 7777
+ }],
+ 'url_path': "'/v3/projects/' + "
+ "str(sample['p_name'])",
+ 'value': "name",
+ }]
+ definitions['value_attribute'] = 'project_id'
+ definitions['metadata_fields'] = ['to_skip', 'p_name']
+ definitions['extra_metadata_fields'] = extra_metadata_fields
+ definitions['extra_metadata_fields_skip'] = [{
+ 'metadata': {
+ 'to_skip': 'skip1'
+ }
+ }, {
+ 'value': 8888
+ }]
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '''
+ {"projects": [
+ {"project_id": 9999, "p_name": "project1",
+ "to_skip": "skip1"},
+ {"project_id": 8888, "p_name": "project2",
+ "to_skip": "skip2"},
+ {"project_id": 7777, "p_name": "project3",
+ "to_skip": "skip3"},
+ {"project_id": 6666, "p_name": "project4",
+ "to_skip": "skip4"}]
+ }
+ '''
+
+ return_value9999 = self.FakeResponse()
+ return_value9999.status_code = requests.codes.ok
+ return_value9999._text = '''
+ {"project":
+ {"project_id": 9999, "name": "project1"}
+ }
+ '''
+
+ return_value8888 = self.FakeResponse()
+ return_value8888.status_code = requests.codes.ok
+ return_value8888._text = '''
+ {"project":
+ {"project_id": 8888, "name": "project2"}
+ }
+ '''
+
+ return_value7777 = self.FakeResponse()
+ return_value7777.status_code = requests.codes.ok
+ return_value7777._text = '''
+ {"project":
+ {"project_id": 7777, "name": "project3"}
+ }
+ '''
+
+ return_value6666 = self.FakeResponse()
+ return_value6666.status_code = requests.codes.ok
+ return_value6666._text = '''
+ {"project":
+ {"project_id": 6666, "name": "project4"}
+ }
+ '''
+
+ return_valueP1 = self.FakeResponse()
+ return_valueP1.status_code = requests.codes.ok
+ return_valueP1._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p1"}
+ }
+ '''
+
+ return_valueP2 = self.FakeResponse()
+ return_valueP2.status_code = requests.codes.ok
+ return_valueP2._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p2"}
+ }
+ '''
+
+ return_valueP3 = self.FakeResponse()
+ return_valueP3.status_code = requests.codes.ok
+ return_valueP3._text = '''
+ {"project":
+ {"project_id": 7777, "name": "p3"}
+ }
+ '''
+
+ return_valueP4 = self.FakeResponse()
+ return_valueP4.status_code = requests.codes.ok
+ return_valueP4._text = '''
+ {"project":
+ {"project_id": 6666, "name": "p4"}
+ }
+ '''
+
+ def get(url, *args, **kwargs):
+ if '9999' in url:
+ return return_value9999
+ if '8888' in url:
+ return return_value8888
+ if '7777' in url:
+ return return_value7777
+ if '6666' in url:
+ return return_value6666
+ if 'project1' in url:
+ return return_valueP1
+ if 'project2' in url:
+ return return_valueP2
+ if 'project3' in url:
+ return return_valueP3
+ if 'project4' in url:
+ return return_valueP4
+
+ return return_value
+
+ client_mock.session.get = get
+ manager = mock.Mock
+ manager._keystone = client_mock
+
+ def discover(*args, **kwargs):
+ return ["https://endpoint.server.name/"]
+
+ manager.discover = discover
+ samples = pollster.get_samples(
+ manager=manager, cache=None,
+ resources=["https://endpoint.server.name/"])
+
+ samples = list(samples)
+ self.assertEqual(4, len(samples))
+
+ self.assertEqual(samples[0].volume, 9999)
+ self.assertEqual(samples[1].volume, 8888)
+ self.assertEqual(samples[2].volume, 7777)
+
+ self.assertEqual(samples[0].resource_metadata,
+ {'p_name': 'project1', 'project_alias': 'p1',
+ 'to_skip': 'skip1'})
+ self.assertEqual(samples[1].resource_metadata,
+ {'p_name': 'project2', 'project_alias': 'p2',
+ 'to_skip': 'skip2'})
+ self.assertEqual(samples[2].resource_metadata,
+ {'p_name': 'project3', 'project_name': 'project3',
+ 'to_skip': 'skip3'})
+ self.assertEqual(samples[3].resource_metadata,
+ {'p_name': 'project4',
+ 'project_alias': 'p4',
+ 'project_name': 'project4',
+ 'to_skip': 'skip4'})
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_extra_metadata_fields_different_requests(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+
+ command = ''' \'\'\'echo '{"project":
+ {"project_id": \'\'\'+ str(sample['project_id'])
+ +\'\'\' , "name": "project1"}}' \'\'\' '''.replace('\n', '')
+
+ command2 = ''' \'\'\'echo '{"project":
+ {"project_id": \'\'\'+ str(sample['project_id'])
+ +\'\'\' , "name": "project2"}}' \'\'\' '''.replace('\n', '')
+
+ extra_metadata_fields_embedded = {
+ 'name': "project_name2",
+ 'host_command': command2,
+ 'value': "name",
+ }
+
+ extra_metadata_fields = {
+ 'name': "project_id2",
+ 'host_command': command,
+ 'value': "project_id",
+ 'extra_metadata_fields': extra_metadata_fields_embedded
+ }
+
+ definitions['value_attribute'] = 'project_id'
+ definitions['extra_metadata_fields'] = extra_metadata_fields
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = '''
+ {"projects": [
+ {"project_id": 9999, "name": "project1"},
+ {"project_id": 8888, "name": "project2"},
+ {"project_id": 7777, "name": "project3"}]
+ }
+ '''
+
+ def get(url, *args, **kwargs):
+ return return_value
+
+ client_mock.session.get = get
+ manager = mock.Mock
+ manager._keystone = client_mock
+
+ def discover(*args, **kwargs):
+ return ["https://endpoint.server.name/"]
+
+ manager.discover = discover
+ samples = pollster.get_samples(
+ manager=manager, cache=None,
+ resources=["https://endpoint.server.name/"])
+
+ samples = list(samples)
+ self.assertEqual(3, len(samples))
+
+ self.assertEqual(samples[0].volume, 9999)
+ self.assertEqual(samples[1].volume, 8888)
+ self.assertEqual(samples[2].volume, 7777)
+
+ self.assertEqual(samples[0].resource_metadata,
+ {'project_id2': 9999,
+ 'project_name2': 'project2'})
+ self.assertEqual(samples[1].resource_metadata,
+ {'project_id2': 8888,
+ 'project_name2': 'project2'})
+ self.assertEqual(samples[2].resource_metadata,
+ {'project_id2': 7777,
+ 'project_name2': 'project2'})
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
+ def test_execute_request_xml_json_response_handler_invalid_response(
+ self, client_mock):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = ['xml', 'json']
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ return_value = self.FakeResponse()
+ return_value.status_code = requests.codes.ok
+ return_value._text = 'Invalid response'
+ client_mock.session.get.return_value = return_value
+
+ with self.assertLogs('ceilometer.polling.dynamic_pollster',
+ level='DEBUG') as logs:
+ gatherer = pollster.definitions.sample_gatherer
+ exception = self.assertRaises(
+ declarative.InvalidResponseTypeException,
+ gatherer.execute_request_get_samples,
+ keystone_client=client_mock,
+ resource="https://endpoint.server.name/")
+
+ xml_handling_error = logs.output[3]
+ json_handling_error = logs.output[4]
+
+ self.assertIn(
+ 'DEBUG:ceilometer.polling.dynamic_pollster:'
+ 'Error handling response [Invalid response] '
+ 'with handler [XMLResponseHandler]',
+ xml_handling_error)
+
+ self.assertIn(
+ 'DEBUG:ceilometer.polling.dynamic_pollster:'
+ 'Error handling response [Invalid response] '
+ 'with handler [JsonResponseHandler]',
+ json_handling_error)
+
+ self.assertEqual(
+ "InvalidResponseTypeException None: "
+ "No remaining handlers to handle the response "
+ "[Invalid response], used handlers "
+ "[XMLResponseHandler, JsonResponseHandler]. "
+ "[{'url_path': 'v1/test/endpoint/fake'}].",
+ str(exception))
+
+ def test_configure_response_handler_definition_invalid_value(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = ['jason']
+
+ exception = self.assertRaises(
+ declarative.DynamicPollsterDefinitionException,
+ dynamic_pollster.DynamicPollster,
+ pollster_definitions=definitions)
+ self.assertEqual("DynamicPollsterDefinitionException None: "
+ "Invalid response_handler value [jason]. "
+ "Accepted values are [json, xml, text]",
+ str(exception))
+
+ def test_configure_extra_metadata_field_skip_invalid_value(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['extra_metadata_fields_skip'] = 'teste'
+
+ exception = self.assertRaises(
+ declarative.DynamicPollsterDefinitionException,
+ dynamic_pollster.DynamicPollster,
+ pollster_definitions=definitions)
+ self.assertEqual("DynamicPollsterDefinitionException None: "
+ "Invalid extra_metadata_fields_skip configuration."
+ " It must be a list of maps. Provided value: teste,"
+ " value type: str.",
+ str(exception))
+
+ def test_configure_extra_metadata_field_skip_invalid_sub_value(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['extra_metadata_fields_skip'] = [{'test': '1'},
+ {'test': '2'},
+ 'teste']
+
+ exception = self.assertRaises(
+ declarative.DynamicPollsterDefinitionException,
+ dynamic_pollster.DynamicPollster,
+ pollster_definitions=definitions)
+ self.assertEqual("DynamicPollsterDefinitionException None: "
+ "Invalid extra_metadata_fields_skip configuration."
+ " It must be a list of maps. Provided value: "
+ "[{'test': '1'}, {'test': '2'}, 'teste'], "
+ "value type: list.",
+ str(exception))
+
+ def test_configure_response_handler_definition_invalid_type(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = 'json'
+
+ exception = self.assertRaises(
+ declarative.DynamicPollsterDefinitionException,
+ dynamic_pollster.DynamicPollster,
+ pollster_definitions=definitions)
+ self.assertEqual("DynamicPollsterDefinitionException None: "
+ "Invalid response_handlers configuration. "
+ "It must be a list. Provided value type: str",
+ str(exception))
+
+ @mock.patch('keystoneclient.v2_0.client.Client')
def test_execute_request_get_samples_exception_on_request(
self, client_mock):
pollster = dynamic_pollster.DynamicPollster(
@@ -331,6 +1095,57 @@ class TestDynamicPollster(base.BaseTestCase):
resource="https://endpoint.server.name/")
self.assertEqual("Mock HTTP error.", str(exception))
+ def test_execute_host_command_paged_responses(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['host_command'] = '''
+ echo '{"server": [{"status": "ACTIVE"}], "next": ""}'
+ '''
+ str_json = "'{\\\"server\\\": [{\\\"status\\\": \\\"INACTIVE\\\"}]}'"
+ definitions['next_sample_url_attribute'] = \
+ "next|\"echo \"+value+\"" + str_json + '"'
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+ samples = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples()
+ resp_json = [{'status': 'ACTIVE'}, {'status': 'INACTIVE'}]
+ self.assertEqual(resp_json, samples)
+
+ def test_execute_host_command_response_handler(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['response_handlers'] = ['xml', 'json']
+ definitions['host_command'] = 'echo "<a><y>xml\n</y><s>xml</s></a>"'
+ entry = 'a'
+ definitions['response_entries_key'] = entry
+ definitions.pop('url_path')
+ definitions.pop('endpoint_type')
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ samples_xml = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples()
+
+ definitions['host_command'] = 'echo \'{"a": {"y":"json",' \
+ '\n"s":"json"}}\''
+ samples_json = pollster.definitions.sample_gatherer. \
+ execute_request_get_samples()
+
+ resp_xml = {'a': {'y': 'xml', 's': 'xml'}}
+ resp_json = {'a': {'y': 'json', 's': 'json'}}
+ self.assertEqual(resp_xml[entry], samples_xml)
+ self.assertEqual(resp_json[entry], samples_json)
+
+ def test_execute_host_command_invalid_command(self):
+ definitions = copy.deepcopy(
+ self.pollster_definition_only_required_fields)
+ definitions['host_command'] = 'invalid-command'
+ definitions.pop('url_path')
+ definitions.pop('endpoint_type')
+ pollster = dynamic_pollster.DynamicPollster(definitions)
+
+ self.assertRaises(
+ declarative.InvalidResponseTypeException,
+ pollster.definitions.sample_gatherer.execute_request_get_samples)
+
def test_generate_new_metadata_fields_no_metadata_mapping(self):
metadata = {'name': 'someName',
'value': 1}
@@ -728,6 +1543,10 @@ class TestDynamicPollster(base.BaseTestCase):
def internal_execute_request_get_samples_mock(self, **kwargs):
class Response:
+ @property
+ def text(self):
+ return json.dumps([sample])
+
def json(self):
return [sample]
return Response(), "url"
@@ -953,7 +1772,7 @@ class TestDynamicPollster(base.BaseTestCase):
sample = pollster.definitions.sample_extractor.generate_sample(
pollster_sample, pollster.definitions.configurations,
- manager=mock.Mock())
+ manager=mock.Mock(), conf={})
self.assertEqual(1, sample.volume)
self.assertEqual(2, len(sample.resource_metadata))
@@ -975,7 +1794,7 @@ class TestDynamicPollster(base.BaseTestCase):
sample = pollster.definitions.sample_extractor.generate_sample(
pollster_sample, pollster.definitions.configurations,
- manager=mock.Mock())
+ manager=mock.Mock(), conf={})
self.assertEqual(1, sample.volume)
self.assertEqual(3, len(sample.resource_metadata))
@@ -998,7 +1817,7 @@ class TestDynamicPollster(base.BaseTestCase):
sample = pollster.definitions.sample_extractor.generate_sample(
pollster_sample, pollster.definitions.configurations,
- manager=mock.Mock())
+ manager=mock.Mock(), conf={})
self.assertEqual(1, sample.volume)
self.assertEqual(3, len(sample.resource_metadata))
diff --git a/ceilometer/tests/unit/polling/test_manager.py b/ceilometer/tests/unit/polling/test_manager.py
index e805b9be..8cab92dc 100644
--- a/ceilometer/tests/unit/polling/test_manager.py
+++ b/ceilometer/tests/unit/polling/test_manager.py
@@ -378,6 +378,17 @@ class TestPollingAgent(BaseAgent):
super(TestPollingAgent, self).setUp()
self.mgr = self.create_manager()
self.mgr.extensions = self.create_extension_list()
+ ks_client = mock.Mock(auth_token='fake_token')
+ ks_client.projects.get.return_value = mock.Mock(
+ name='admin', id='4465ecd1438b4d23a866cf8447387a7b'
+ )
+ ks_client.users.get.return_value = mock.Mock(
+ name='admin', id='c0c935468e654d5a8baae1a08adf4dfb'
+ )
+ self.useFixture(fixtures.MockPatch(
+ 'ceilometer.keystone_client.get_client',
+ return_value=ks_client))
+ self.ks_client = ks_client
self.setup_polling()
@mock.patch('ceilometer.polling.manager.PollingManager')
@@ -422,6 +433,76 @@ class TestPollingAgent(BaseAgent):
self.assertIn(60, polling_tasks.keys())
self.assertNotIn(10, polling_tasks.keys())
+ @mock.patch('glob.glob')
+ @mock.patch('ceilometer.declarative.load_definitions')
+ def test_setup_polling_dynamic_pollster_namespace(self, load_mock,
+ glob_mock):
+ glob_mock.return_value = ['test.yml']
+ load_mock.return_value = [{
+ 'name': "test.dynamic.pollster",
+ 'namespaces': "dynamic",
+ 'sample_type': 'gauge',
+ 'unit': 'test',
+ 'endpoint_type': 'test',
+ 'url_path': 'test',
+ 'value_attribute': 'test'
+ }, {
+ 'name': "test.compute.central.pollster",
+ 'sample_type': 'gauge',
+ 'namespaces': ["compute", "central"],
+ 'unit': 'test',
+ 'endpoint_type': 'test',
+ 'url_path': 'test',
+ 'value_attribute': 'test'
+ }, {
+ 'name': "test.compute.pollster",
+ 'namespaces': ["compute"],
+ 'sample_type': 'gauge',
+ 'unit': 'test',
+ 'endpoint_type': 'test',
+ 'url_path': 'test',
+ 'value_attribute': 'test'
+ }, {
+ 'name': "test.central.pollster",
+ 'sample_type': 'gauge',
+ 'unit': 'test',
+ 'endpoint_type': 'test',
+ 'url_path': 'test',
+ 'value_attribute': 'test'
+ }]
+ mgr = manager.AgentManager(0, self.CONF, namespaces=['dynamic'])
+ self.assertEqual(len(mgr.extensions), 1)
+ self.assertEqual(
+ mgr.extensions[0].definitions.configurations['name'],
+ 'test.dynamic.pollster')
+
+ mgr = manager.AgentManager(0, self.CONF)
+ self.assertEqual(
+ mgr.extensions[-3].definitions.configurations['name'],
+ 'test.compute.central.pollster')
+ self.assertEqual(
+ mgr.extensions[-2].definitions.configurations['name'],
+ 'test.compute.pollster')
+ self.assertEqual(
+ mgr.extensions[-1].definitions.configurations['name'],
+ 'test.central.pollster')
+
+ mgr = manager.AgentManager(0, self.CONF, namespaces=['compute'])
+ self.assertEqual(
+ mgr.extensions[-2].definitions.configurations['name'],
+ 'test.compute.central.pollster')
+ self.assertEqual(
+ mgr.extensions[-1].definitions.configurations['name'],
+ 'test.compute.pollster')
+
+ mgr = manager.AgentManager(0, self.CONF, ['central'])
+ self.assertEqual(
+ mgr.extensions[-2].definitions.configurations['name'],
+ 'test.compute.central.pollster')
+ self.assertEqual(
+ mgr.extensions[-1].definitions.configurations['name'],
+ 'test.central.pollster')
+
def test_setup_polling_task_same_interval(self):
self.polling_cfg['sources'].append({
'name': 'test_polling_1',
diff --git a/ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py b/ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py
index d8f32ff3..63c633bd 100644
--- a/ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py
+++ b/ceilometer/tests/unit/polling/test_non_openstack_dynamic_pollster.py
@@ -15,6 +15,7 @@
"""
import copy
+import json
import sys
from unittest import mock
@@ -312,6 +313,11 @@ class TestNonOpenStackApisDynamicPollster(base.BaseTestCase):
def internal_execute_request_get_samples_mock(
self, definitions, **kwargs):
class Response:
+
+ @property
+ def text(self):
+ return json.dumps([sample])
+
def json(self):
return [sample]
return Response(), "url"
diff --git a/ceilometer/tests/unit/publisher/test_gnocchi.py b/ceilometer/tests/unit/publisher/test_gnocchi.py
index e5d307c4..741594d5 100644
--- a/ceilometer/tests/unit/publisher/test_gnocchi.py
+++ b/ceilometer/tests/unit/publisher/test_gnocchi.py
@@ -375,18 +375,30 @@ class PublisherTest(base.BaseTestCase):
@mock.patch('ceilometer.publisher.gnocchi.GnocchiPublisher'
'.batch_measures')
def test_unhandled_meter_with_no_resource_id(self, fake_batch):
- samples = [sample.Sample(
- name='unknown.meter',
- unit='GB',
- type=sample.TYPE_GAUGE,
- volume=2,
- user_id='test_user',
- project_id='test_project',
- source='openstack',
- timestamp='2014-05-08 20:23:48.028195',
- resource_id=None,
- resource_metadata={}
- )]
+ samples = [
+ sample.Sample(
+ name='unknown.meter',
+ unit='GB',
+ type=sample.TYPE_GAUGE,
+ volume=2,
+ user_id='test_user',
+ project_id='test_project',
+ source='openstack',
+ timestamp='2014-05-08 20:23:48.028195',
+ resource_id=None,
+ resource_metadata={}),
+ sample.Sample(
+ name='unknown.meter',
+ unit='GB',
+ type=sample.TYPE_GAUGE,
+ volume=2,
+ user_id='test_user',
+ project_id='test_project',
+ source='openstack',
+ timestamp='2014-05-08 20:23:48.028195',
+ resource_id="Some-other-resource-id",
+ resource_metadata={})
+ ]
url = netutils.urlsplit("gnocchi://")
d = gnocchi.GnocchiPublisher(self.conf.conf, url)
d._already_checked_archive_policies = True
@@ -692,6 +704,8 @@ class PublisherWorkflowTest(base.BaseTestCase,
@mock.patch('ceilometer.publisher.gnocchi.LOG')
@mock.patch('gnocchiclient.v1.client.Client')
def test_workflow(self, fakeclient_cls, logger):
+ url = netutils.urlsplit("gnocchi://")
+ publisher = gnocchi.GnocchiPublisher(self.conf.conf, url)
fakeclient = fakeclient_cls.return_value
@@ -722,12 +736,27 @@ class PublisherWorkflowTest(base.BaseTestCase,
{resource_id: {metric_name: self.metric_attributes}},
create_metrics=True)
]
+
+ resource_definition = publisher.metric_map.get(self.sample.name)
+
+ expected_measures_in_log = {resource_id: {self.sample.name: {
+ 'measures': [{'timestamp': self.sample.timestamp,
+ 'value': self.sample.volume}],
+ 'archive_policy_name': resource_definition.metrics[
+ metric_name]["archive_policy_name"],
+ 'unit': self.sample.unit}}}
+
+ resource_type = resource_definition.cfg['resource_type']
+
expected_debug = [
mock.call('filtered project found: %s',
'a2d42c23-d518-46b6-96ab-3fba2e146859'),
mock.call('Processing sample [%s] for resource ID [%s].',
self.sample, resource_id),
- ]
+ mock.call('Executing batch resource metrics measures for resource '
+ '[%s] and measures [%s].',
+ mock.ANY,
+ expected_measures_in_log)]
measures_posted = False
batch_side_effect = []
@@ -792,8 +821,6 @@ class PublisherWorkflowTest(base.BaseTestCase,
batch = fakeclient.metric.batch_resources_metrics_measures
batch.side_effect = batch_side_effect
- url = netutils.urlsplit("gnocchi://")
- publisher = gnocchi.GnocchiPublisher(self.conf.conf, url)
publisher.publish_samples([self.sample])
# Check that the last log message is the expected one
@@ -801,7 +828,17 @@ class PublisherWorkflowTest(base.BaseTestCase,
or self.create_resource_fail
or self.retry_post_measures_fail
or (self.update_resource_fail and self.patchable_attributes)):
- logger.error.assert_called_with('boom!', exc_info=True)
+
+ if self.update_resource_fail and self.patchable_attributes:
+ logger.error.assert_called_with(
+ 'Unexpected exception updating resource type [%s] with '
+ 'ID [%s] for resource data [%s]: [%s].', resource_type,
+ resource_id, mock.ANY, 'boom!', exc_info=True)
+ else:
+ logger.error.assert_called_with(
+ 'Unexpected exception while pushing measures [%s] for '
+ 'gnocchi data [%s]: [%s].', expected_measures_in_log,
+ mock.ANY, 'boom!', exc_info=True)
else:
self.assertEqual(0, logger.error.call_count)
self.assertEqual(expected_calls, fakeclient.mock_calls)
diff --git a/doc/source/admin/telemetry-dynamic-pollster.rst b/doc/source/admin/telemetry-dynamic-pollster.rst
index d861c07f..b622ae64 100644
--- a/doc/source/admin/telemetry-dynamic-pollster.rst
+++ b/doc/source/admin/telemetry-dynamic-pollster.rst
@@ -45,7 +45,7 @@ attributes to define a dynamic pollster:
the unit or some other meaningful String value;
* ``value_attribute``: mandatory attribute; defines the attribute in the
- JSON response from the URL of the component being polled. We also accept
+ response from the URL of the component being polled. We also accept
nested values dictionaries. To use a nested value one can simply use
``attribute1.attribute2.<asMuchAsNeeded>.lastattribute``. It is also
possible to reference the sample itself using ``"." (dot)``; the self
@@ -198,6 +198,11 @@ attributes to define a dynamic pollster:
executed serially, one after the other. Therefore, if the request hangs,
all pollsters (including the non-dynamic ones) will stop executing.
+* ``namespaces``: optional parameter. Defines the namespaces (running
+ ceilometer instances) where the pollster will be instantiated. This
+ parameter accepts a single string value or a list of strings. The
+ default value is `central`.
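+
+  For example, a minimal sketch of a pollster restricted to the compute and
+  central agents (the remaining mandatory attributes are elided):
+
+  .. code-block:: yaml
+
+   ---
+
+   - name: "dynamic.namespaced.pollster"
+     sample_type: "gauge"
+     [...]
+     namespaces:
+     - "compute"
+     - "central"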
+
The complete YAML configuration to gather data from Magnum (that has been used
as an example) is the following:
@@ -281,6 +286,117 @@ desires):
name: "display_name"
default_value: 0
+* ``response_handlers``: optional parameter. Defines the handlers used to
+  process the response. For now, the supported values are:
+
+  ``json``: This handler interprets the response as `JSON` and converts
+  it to a `dictionary` that can be manipulated using the operations
+  options when mapping the attributes:
+
+ .. code-block:: yaml
+
+ ---
+
+ - name: "dynamic.json.response"
+ sample_type: "gauge"
+ [...]
+ response_handlers:
+ - json
+
+ Response to handle:
+
+ .. code-block:: json
+
+ {
+ "test": {
+ "list": [1, 2, 3]
+ }
+ }
+
+ Response handled:
+
+ .. code-block:: python
+
+ {
+ 'test': {
+ 'list': [1, 2, 3]
+ }
+ }
+
+
+  ``xml``: This handler interprets the response as `XML` and converts
+  it to a `dictionary` that can be manipulated using the operations
+  options when mapping the attributes:
+
+ .. code-block:: yaml
+
+ ---
+
+ - name: "dynamic.json.response"
+ sample_type: "gauge"
+ [...]
+ response_handlers:
+ - xml
+
+ Response to handle:
+
+ .. code-block:: xml
+
+ <test>
+ <list>1</list>
+ <list>2</list>
+ <list>3</list>
+ </test>
+
+ Response handled:
+
+ .. code-block:: python
+
+ {
+ 'test': {
+ 'list': [1, 2, 3]
+ }
+ }
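+
+  For reference, a similar conversion can be reproduced with the
+  ``xmltodict`` library, which this change adds to ``requirements.txt``.
+  This is only an illustrative sketch (note that ``xmltodict`` returns the
+  element values as strings); the actual handler may post-process the
+  result differently:
+
+  .. code-block:: python
+
+   import xmltodict
+
+   xml_response = "<test><list>1</list><list>2</list><list>3</list></test>"
+
+   # Repeated tags are collapsed into a list under a single key.
+   as_dict = xmltodict.parse(xml_response)
+   # Roughly: {'test': {'list': ['1', '2', '3']}}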
+
+  ``text``: This handler interprets the response as plain text and
+  converts it to a `dictionary` that can be manipulated using the
+  operations options when mapping the attributes:
+
+ .. code-block:: yaml
+
+ ---
+
+ - name: "dynamic.json.response"
+ sample_type: "gauge"
+ [...]
+ response_handlers:
+ - text
+
+ Response to handle:
+
+ .. code-block:: text
+
+ Plain text response
+
+ Response handled:
+
+ .. code-block:: python
+
+ {
+ 'out': "Plain text response"
+ }
+
+  They can be used together or individually. If not defined, the
+  default value is `json`. If two or more response handlers are set,
+  the first configured handler is used to try to handle the response;
+  if it fails, a `DEBUG` log message is emitted and the next handler
+  is tried, and so on. If no configured handler is able to handle the
+  response, an empty dict is returned and a `WARNING` log is emitted
+  to warn operators that the response could not be handled by any
+  configured handler.
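+
+  As a minimal sketch, a pollster that polls an endpoint which may reply
+  either in XML or in JSON could be configured as follows (the remaining
+  mandatory attributes are elided); the XML handler is tried first, and
+  the JSON handler is used as a fallback:
+
+  .. code-block:: yaml
+
+   ---
+
+   - name: "dynamic.multi.format.response"
+     sample_type: "gauge"
+     [...]
+     response_handlers:
+     - xml
+     - json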
+
The dynamic pollsters system configuration (for non-OpenStack APIs)
-------------------------------------------------------------------
@@ -355,6 +471,62 @@ ones), we can use the `successful_ops`.
resource_id_attribute: "user"
response_entries_key: "summary"
+The dynamic pollsters system configuration (for local host commands)
+--------------------------------------------------------------------
+
+The dynamic pollster system can also be used for local host commands;
+these commands must be installed in the system that is running the
+Ceilometer compute agent.
+To configure local host commands, one can use all but two attributes of
+the dynamic pollster system. The attributes that are not supported are
+``endpoint_type`` and ``url_path``. The dynamic pollster system for
+local host commands is activated automatically when one uses the
+``host_command`` configuration.
+
+The extra parameter (in addition to the original ones) that is available
+when using the local host commands dynamic pollster subsystem is the
+following:
+
+* ``host_command``: required parameter. It is the host command that will
+ be executed in the same host the Ceilometer dynamic pollster agent is
+ running. The output of the command will be processed by the pollster and
+ stored in the configured backend.
+
+The following example shows how to use a local host command:
+
+.. code-block:: yaml
+
+ ---
+
+ - name: "dynamic.host.command"
+ sample_type: "gauge"
+ unit: "request"
+ value_attribute: "value"
+ response_entries_key: "test"
+ host_command: "echo '<test><user_id>id1_u</user_id><project_id>id1_p</project_id><id>id1</id><meta>meta-data-to-store</meta><value>1</value></test>'"
+ metadata_fields:
+ - "meta"
+ response_handlers:
+ - xml
+
+To execute multi-page host commands, the `next_sample_url_attribute`
+must generate the next sample command, as in the following example:
+
+.. code-block:: yaml
+
+ ---
+
+ - name: "dynamic.s3.objects.size"
+ sample_type: "gauge"
+ unit: "request"
+ value_attribute: "Size"
+ project_id_attribute: "Owner.ID"
+ user_id_attribute: "Owner.ID"
+ resource_id_attribute: "Key"
+ response_entries_key: "Contents"
+ host_command: "aws s3api list-objects"
+ next_sample_url_attribute: NextToken | 'aws s3api list-objects --starting-token "' + value + '"'
+
Operations on extracted attributes
----------------------------------
@@ -760,12 +932,10 @@ we only have the `tenant_id`, which must be used as the `project_id`. However,
for billing and later invoicing one might need/want the project name, domain
id, and other metadata that are available in Keystone (and maybe some others
that are scattered over other components). To achieve that, one can use the
-OpenStack metadata enrichment option. This feature is only available
-to *OpenStack pollsters*, and can only gather extra metadata from OpenStack
-APIs. As follows we present an example that shows a dynamic pollster
-configuration to gather virtual machine (VM) status, and to enrich the data
-pushed to the storage backend (e.g. Gnocchi) with project name, domain ID,
-and domain name.
+OpenStack metadata enrichment option. The following example shows a dynamic
+pollster configuration that gathers virtual machine (VM) status and enriches
+the data pushed to the storage backend (e.g. Gnocchi) with project name,
+domain ID, and domain name.
.. code-block:: yaml
@@ -813,6 +983,13 @@ and domain name.
"locked": "dynamic_locked"
"tags | ','.join(value)": "dynamic_tags"
extra_metadata_fields_cache_seconds: 3600
+ extra_metadata_fields_skip:
+ - value: '1'
+ metadata:
+ dynamic_flavor_vcpus: 4
+ - value: '1'
+ metadata:
+ dynamic_flavor_vcpus: 2
extra_metadata_fields:
- name: "project_name"
endpoint_type: "identity"
@@ -821,26 +998,59 @@ and domain name.
"Openstack-API-Version": "identity latest"
value: "name"
extra_metadata_fields_cache_seconds: 1800 # overriding the default cache policy
+ metadata_fields:
+ - id
- name: "domain_id"
endpoint_type: "identity"
url_path: "'/v3/projects/' + str(sample['project_id'])"
headers:
"Openstack-API-Version": "identity latest"
value: "domain_id"
+ metadata_fields:
+ - id
- name: "domain_name"
endpoint_type: "identity"
url_path: "'/v3/domains/' + str(extra_metadata_captured['domain_id'])"
headers:
"Openstack-API-Version": "identity latest"
value: "name"
+ metadata_fields:
+ - id
+ - name: "operating-system"
+ host_command: "'get-vm --vm-name ' + str(extra_metadata_by_name['project_name']['metadata']['id'])"
+ value: "os"
+
The above example can be used to gather and persist in the backend the
status of VMs. It will persist `1` in the backend as a measure for every
collecting period if the VM's status is `ACTIVE`, and `0` otherwise. This is
quite useful to create hashmap rating rules for running VMs in CloudKitty.
Then, to enrich the resource in the storage backend, we are adding extra
-metadata that are collected in Keystone via the `extra_metadata_fields`
-options.
+metadata that are collected in Keystone and in the local host via the
+`extra_metadata_fields` options. If you have multiple `extra_metadata_fields`
+defining the same `metadata_field`, the last non-`None` metadata value will
+be used.
+
+To operate on values in the `extra_metadata_fields`, you can access three
+local variables:
+
+* ``sample``: it is a dictionary which holds the current data of the root
+ sample. The root sample is the final sample that will be persisted in the
+ configured storage backend.
+
+* ``extra_metadata_captured``: it is a dictionary which holds the current
+  data of all `extra_metadata_fields` processed before this one.
+  If you have multiple `extra_metadata_fields` defining the same
+  `metadata_field`, the last non-`None` metadata value will be used.
+
+* ``extra_metadata_by_name``: it is a dictionary which holds the data of
+ all `extra_metadata_fields` processed before this one. No data is
+  overwritten in this variable. To access a specific `extra_metadata_field`
+ using this variable, you can do
+ `extra_metadata_by_name['<extra_metadata_field_name>']['value']` to get
+ its value, or
+ `extra_metadata_by_name['<extra_metadata_field_name>']['metadata']['<metadata>']`
+ to get its metadata.
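+
+A purely illustrative sketch of how these variables can be referenced from
+`url_path` and `host_command` expressions (the endpoints, command, and
+attribute names below are hypothetical):
+
+.. code-block:: yaml
+
+   ---
+
+   - name: "dynamic.enriched.pollster"
+     sample_type: "gauge"
+     [...]
+     extra_metadata_fields:
+       - name: "flavor_name"
+         url_path: "'/v2/flavors/' + str(sample['flavor_id'])"
+         value: "name"
+       - name: "flavor_label"
+         host_command: "'describe-flavor ' + str(extra_metadata_captured['flavor_name'])"
+         value: "label"
+       - name: "flavor_notes"
+         url_path: "'/v2/notes/' + str(extra_metadata_by_name['flavor_name']['value'])"
+         value: "notes"
+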
The metadata enrichment feature has the following options:
@@ -853,41 +1063,19 @@ The metadata enrichment feature has the following options:
value can be increased of decreased.
* ``extra_metadata_fields``: optional parameter. This option is a list of
- objects, where each one of its elements is an extra metadata definition.
- Each one of the extra metadata definition can have the options defined in
- the dynamic pollsters such as to handle paged responses, operations on the
- extracted values, headers and so on. The basic options that must be
- defined for an extra metadata definitions are the following:
-
- * ``name``: This option is mandatory. The name of the extra metadata.
- This is the name that is going to be used by the metadata. If there is
- already any other metadata gathered via `metadata_fields` option or
- transformed via `metadata_mapping` configuration, this metadata is
- going to be discarded.
-
- * ``endpoint_type``: The endpoint type that we want to execute the
- call against. This option is mandatory. It works similarly to the
- `endpoint_type` option in the dynamic pollster definition.
-
- * ``url_path``: This option is mandatory. It works similarly to the
- `url_path` option in the dynamic pollster definition. However, this
- `one enables operators to execute/evaluate expressions in runtime, which
- `allows one to retrieve the information from previously gathered
- metadata via ``extra_metadata_captured` dictionary, or via the
- `sample` itself.
-
- * ``value``: This configuration is mandatory. It works similarly to the
- `value_attribute` option in the dynamic pollster definition. It is
- the value we want to extract from the response, and assign in the
- metadata being generated.
-
- * ``headers``: This option is optional. It works similarly to the
- `headers` option in the dynamic pollster definition.
-
- * ``next_sample_url_attribute``: This option is optional. It works
- similarly to the `next_sample_url_attribute` option in the dynamic
- pollster definition.
-
- * ``response_entries_key``: This option is optional. It works
- similarly to the `response_entries_key` option in the dynamic
- pollster definition.
+  objects or a single one, where each of its elements is a dynamic
+  pollster configuration set. Each extra metadata definition can have
+  the same options defined in the dynamic pollsters, including the
+  `extra_metadata_fields` option itself, so this option is a multi-level
+  option. When defined, the collected data will be merged into the final
+  sample resource metadata. If some required dynamic pollster
+  configuration is not set in the `extra_metadata_fields`, the parent
+  pollster configuration is used, except for the `name`.
+
+* ``extra_metadata_fields_skip``: optional parameter. This option is a list
+  of objects or a single one, where each of its elements is a set of
+  key/value pairs. When defined, if any of these sets of key/value pairs is
+  a subset of the collected sample, the `extra_metadata_fields` gathering
+  for that sample is skipped.
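+
+  As a minimal sketch building on the VM status example above, the
+  configuration below skips the extra metadata requests for samples whose
+  measured value is `0` (i.e. VMs that are not `ACTIVE`):
+
+  .. code-block:: yaml
+
+   extra_metadata_fields_skip:
+     - value: '0'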
+
+
diff --git a/releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml b/releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml
new file mode 100644
index 00000000..43b6042b
--- /dev/null
+++ b/releasenotes/notes/add-tenant-name-discovery-668260bb4b2b0e8c.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Identify user and project names with the help of their UUIDs
+ in the polled samples. If they are identified, set "project_name"
+ and "user_name" fields in the sample to the corresponding values.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 789ff7f4..f8ddb251 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ zed
yoga
xena
wallaby
diff --git a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
index e34703b9..950ff5b2 100644
--- a/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
+++ b/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
@@ -8,11 +8,11 @@ msgid ""
msgstr ""
"Project-Id-Version: Ceilometer Release Notes\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2022-08-30 11:49+0000\n"
+"POT-Creation-Date: 2022-10-26 16:45+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"PO-Revision-Date: 2022-09-05 10:12+0000\n"
+"PO-Revision-Date: 2022-11-04 10:31+0000\n"
"Last-Translator: Andi Chandler <andi@gowling.com>\n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
@@ -70,8 +70,11 @@ msgstr "17.0.0"
msgid "18.0.0"
msgstr "18.0.0"
-msgid "18.0.0-21"
-msgstr "18.0.0-21"
+msgid "19.0.0"
+msgstr "19.0.0"
+
+msgid "19.0.0-13"
+msgstr "19.0.0-13"
msgid "5.0.1"
msgstr "5.0.1"
@@ -556,6 +559,15 @@ msgid "Gnocchi dispatcher now uses client rather than direct http requests"
msgstr "Gnocchi dispatcher now uses client rather than direct HTTP requests"
msgid ""
+"Identify user and projects names with the help of their UUIDs in the polled "
+"samples. If they are identified, set \"project_name\" and \"user_name\" "
+"fields in the sample to the corresponding values."
+msgstr ""
+"Identify user and projects names with the help of their UUIDs in the polled "
+"samples. If they are identified, set \"project_name\" and \"user_name\" "
+"fields in the sample to the corresponding values."
+
+msgid ""
"If workload partitioning of the notification agent is enabled, the "
"notification agent should not run alongside pre-Queens agents. Doing so may "
"result in missed samples when leveraging transformations. To upgrade without "
@@ -1361,6 +1373,9 @@ msgstr "Xena Series Release Notes"
msgid "Yoga Series Release Notes"
msgstr "Yoga Series Release Notes"
+msgid "Zed Series Release Notes"
+msgstr "Zed Series Release Notes"
+
msgid ""
"[`bug 1254800 <https://bugs.launchpad.net/ceilometer/+bug/1254800>`_] Add "
"better support to catch race conditions when creating event_types"
diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst
new file mode 100644
index 00000000..9608c05e
--- /dev/null
+++ b/releasenotes/source/zed.rst
@@ -0,0 +1,6 @@
+========================
+Zed Series Release Notes
+========================
+
+.. release-notes::
+ :branch: stable/zed
diff --git a/requirements.txt b/requirements.txt
index 9ae1bdf4..2a58eda8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,6 +6,7 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
+xmltodict>=0.13.0 # MIT License
cachetools>=2.1.0 # MIT License
cotyledon>=1.3.0 #Apache-2.0
futurist>=1.8.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 1caccacb..7d829aca 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -45,6 +45,7 @@ ceilometer.sample.endpoint =
ceilometer.discover.compute =
local_instances = ceilometer.compute.discovery:InstanceDiscovery
+ local_node = ceilometer.polling.discovery.localnode:LocalNodeDiscovery
ceilometer.discover.central =
barbican = ceilometer.polling.discovery.non_openstack_credentials_discovery:NonOpenStackCredentialsDiscovery