summaryrefslogtreecommitdiff
path: root/ceilometer
diff options
context:
space:
mode:
Diffstat (limited to 'ceilometer')
-rw-r--r--ceilometer/cache_utils.py55
-rw-r--r--ceilometer/compute/pollsters/net.py2
-rw-r--r--ceilometer/polling/discovery/non_openstack_credentials_discovery.py2
-rw-r--r--ceilometer/polling/manager.py15
-rw-r--r--ceilometer/publisher/gnocchi.py74
-rw-r--r--ceilometer/tests/unit/compute/pollsters/test_net.py21
-rw-r--r--ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py10
-rw-r--r--ceilometer/tests/unit/publisher/test_gnocchi.py11
-rw-r--r--ceilometer/tests/unit/test_cache_utils.py65
9 files changed, 184 insertions, 71 deletions
diff --git a/ceilometer/cache_utils.py b/ceilometer/cache_utils.py
index 55a9e263..31c1c0e9 100644
--- a/ceilometer/cache_utils.py
+++ b/ceilometer/cache_utils.py
@@ -14,9 +14,22 @@
# under the License.
"""Simple wrapper for oslo_cache."""
-
+import uuid
from oslo_cache import core as cache
+from oslo_cache import exception
+from oslo_log import log
+from oslo_utils.secretutils import md5
+
+# Default cache expiration period
+CACHE_DURATION = 86400
+
+NAME_ENCODED = __name__.encode('utf-8')
+CACHE_NAMESPACE = uuid.UUID(
+ bytes=md5(NAME_ENCODED, usedforsecurity=False).digest()
+)
+
+LOG = log.getLogger(__name__)
class CacheClient(object):
@@ -36,18 +49,30 @@ class CacheClient(object):
return self.region.delete(key)
-def get_client(conf, expiration_time=0):
+def get_client(conf):
cache.configure(conf)
- if conf.cache.enabled:
- return CacheClient(_get_default_cache_region(
- conf,
- expiration_time=expiration_time
- ))
-
-
-def _get_default_cache_region(conf, expiration_time):
- region = cache.create_region()
- if expiration_time != 0:
- conf.cache.expiration_time = expiration_time
- cache.configure_cache_region(conf, region)
- return region
+ if 'cache' in conf.keys() and conf.cache.enabled:
+ region = get_cache_region(conf)
+ if region:
+ return CacheClient(region)
+
+
+def get_cache_region(conf):
+ # Set expiration time to default CACHE_DURATION if missing in conf
+ if not conf.cache.expiration_time:
+ conf.cache.expiration_time = CACHE_DURATION
+
+ try:
+ region = cache.create_region()
+ cache.configure_cache_region(conf, region)
+ cache.key_mangler = cache_key_mangler
+ return region
+ except exception.ConfigurationError as e:
+ LOG.error("failed to configure oslo_cache. %s", str(e))
+ LOG.warning("using keystone to identify names from polled samples")
+
+
+def cache_key_mangler(key):
+ """Construct an opaque cache key."""
+
+ return uuid.uuid5(CACHE_NAMESPACE, key).hex
diff --git a/ceilometer/compute/pollsters/net.py b/ceilometer/compute/pollsters/net.py
index a452e802..752be34f 100644
--- a/ceilometer/compute/pollsters/net.py
+++ b/ceilometer/compute/pollsters/net.py
@@ -119,7 +119,7 @@ class IncomingBytesDeltaPollster(NetworkPollster):
class OutgoingBytesDeltaPollster(NetworkPollster):
- sample_name = 'network.outgoing.packets.delta'
+ sample_name = 'network.outgoing.bytes.delta'
sample_type = sample.TYPE_DELTA
sample_unit = 'B'
sample_stats_key = 'tx_bytes_delta'
diff --git a/ceilometer/polling/discovery/non_openstack_credentials_discovery.py b/ceilometer/polling/discovery/non_openstack_credentials_discovery.py
index 61459452..0b3ccec8 100644
--- a/ceilometer/polling/discovery/non_openstack_credentials_discovery.py
+++ b/ceilometer/polling/discovery/non_openstack_credentials_discovery.py
@@ -38,7 +38,7 @@ class NonOpenStackCredentialsDiscovery(EndpointDiscovery):
if not param:
return [barbican_secret]
barbican_endpoints = super(NonOpenStackCredentialsDiscovery,
- self).discover("key-manager")
+ self).discover(manager, "key-manager")
if not barbican_endpoints:
LOG.warning("No Barbican endpoints found to execute the"
" credentials discovery process to [%s].",
diff --git a/ceilometer/polling/manager.py b/ceilometer/polling/manager.py
index 3545801f..5835fe25 100644
--- a/ceilometer/polling/manager.py
+++ b/ceilometer/polling/manager.py
@@ -46,8 +46,6 @@ from ceilometer import utils
LOG = log.getLogger(__name__)
-CACHE_DURATION = 3600
-
POLLING_OPTS = [
cfg.StrOpt('cfg_file',
default="polling.yaml",
@@ -154,10 +152,7 @@ class PollingTask(object):
self.ks_client = self.manager.keystone
- self.cache_client = cache_utils.get_client(
- self.manager.conf,
- expiration_time=CACHE_DURATION
- )
+ self.cache_client = cache_utils.get_client(self.manager.conf)
def add(self, pollster, source):
self.pollster_matches[source.name].add(pollster)
@@ -169,9 +164,11 @@ class PollingTask(object):
name = self.cache_client.get(uuid)
if name:
return name
- name = self.resolve_uuid_from_keystone(attr, uuid)
- self.cache_client.set(uuid, name)
- return name
+ # empty cache_client means either caching is not enabled or
+ # there was an error configuring cache
+ name = self.resolve_uuid_from_keystone(attr, uuid)
+ self.cache_client.set(uuid, name)
+ return name
# Retrieve project and user names from Keystone only
# if ceilometer doesn't have a caching backend
diff --git a/ceilometer/publisher/gnocchi.py b/ceilometer/publisher/gnocchi.py
index 22cebdab..45f32766 100644
--- a/ceilometer/publisher/gnocchi.py
+++ b/ceilometer/publisher/gnocchi.py
@@ -14,39 +14,29 @@
# under the License.
from collections import defaultdict
import fnmatch
-import hashlib
import itertools
import json
import operator
import pkg_resources
import threading
-import uuid
from gnocchiclient import exceptions as gnocchi_exc
from keystoneauth1 import exceptions as ka_exceptions
-import oslo_cache
from oslo_log import log
from oslo_utils import timeutils
from stevedore import extension
from urllib import parse as urlparse
+from ceilometer import cache_utils
from ceilometer import declarative
from ceilometer import gnocchi_client
from ceilometer.i18n import _
from ceilometer import keystone_client
from ceilometer import publisher
-NAME_ENCODED = __name__.encode('utf-8')
-CACHE_NAMESPACE = uuid.UUID(bytes=hashlib.md5(NAME_ENCODED).digest())
LOG = log.getLogger(__name__)
-def cache_key_mangler(key):
- """Construct an opaque cache key."""
-
- return uuid.uuid5(CACHE_NAMESPACE, key).hex
-
-
EVENT_CREATE, EVENT_UPDATE, EVENT_DELETE = ("create", "update", "delete")
@@ -213,20 +203,11 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
timeout = options.get('timeout', [6.05])[-1]
self._ks_client = keystone_client.get_client(conf)
- self.cache = None
- try:
- oslo_cache.configure(conf)
- # NOTE(cdent): The default cache backend is a real but
- # noop backend. We don't want to use that here because
- # we want to avoid the cache pathways entirely if the
- # cache has not been configured explicitly.
- if conf.cache.enabled:
- cache_region = oslo_cache.create_region()
- self.cache = oslo_cache.configure_cache_region(
- conf, cache_region)
- self.cache.key_mangler = cache_key_mangler
- except oslo_cache.exception.ConfigurationError as exc:
- LOG.warning('unable to configure oslo_cache: %s', exc)
+ # NOTE(cdent): The default cache backend is a real but
+ # noop backend. We don't want to use that here because
+ # we want to avoid the cache pathways entirely if the
+ # cache has not been configured explicitly.
+ self.cache = cache_utils.get_client(conf)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
@@ -280,22 +261,31 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
return self._gnocchi_project_id
with self._gnocchi_project_id_lock:
if self._gnocchi_project_id is None:
+                if not self.filter_project:
+                    LOG.debug(
+                        "Multiple executions were locked on "
+                        "`self._gnocchi_project_id_lock`. This execution "
+                        "should not call `_internal_gnocchi_project_discovery` "
+                        "as `self.filter_project` is None.")
+                    return None
+ return None
try:
project = self._ks_client.projects.find(
name=self.filter_project,
domain=self.filter_domain)
except ka_exceptions.NotFound:
- LOG.warning('filtered project not found in keystone,'
- ' ignoring the filter_project '
- 'option')
+ LOG.warning('Filtered project [%s] not found in keystone, '
+ 'ignoring the filter_project option' %
+ self.filter_project)
+
self.filter_project = None
return None
except Exception:
- LOG.exception('fail to retrieve filtered project ')
+ LOG.exception('Failed to retrieve filtered project [%s].'
+ % self.filter_project)
raise
self._gnocchi_project_id = project.id
- LOG.debug("filtered project found: %s",
- self._gnocchi_project_id)
+ LOG.debug("Filtered project [%s] found with ID [%s].",
+ self.filter_project, self._gnocchi_project_id)
return self._gnocchi_project_id
def _is_swift_account_sample(self, sample):
@@ -320,11 +310,29 @@ class GnocchiPublisher(publisher.ConfigPublisherBase):
if operation:
return rd, operation
+    def filter_gnocchi_activity_openstack(self, samples):
+        """Skip samples generated by Gnocchi itself
+
+        This method will filter out the samples that are generated by
+        Gnocchi itself.
+        """
+ filtered_samples = []
+ for sample in samples:
+ if not self._is_gnocchi_activity(sample):
+ filtered_samples.append(sample)
+ LOG.debug("Sample [%s] is not a Gnocchi activity; therefore, "
+ "we do not filter it out and push it to Gnocchi.",
+ sample)
+ else:
+ LOG.debug("Sample [%s] is a Gnocchi activity; therefore, "
+ "we filter it out and do not push it to Gnocchi.",
+ sample)
+ return filtered_samples
+
def publish_samples(self, data):
self.ensures_archives_policies()
- # NOTE(sileht): skip sample generated by gnocchi itself
- data = [s for s in data if not self._is_gnocchi_activity(s)]
+ data = self.filter_gnocchi_activity_openstack(data)
def value_to_sort(object_to_sort):
value = object_to_sort.resource_id
diff --git a/ceilometer/tests/unit/compute/pollsters/test_net.py b/ceilometer/tests/unit/compute/pollsters/test_net.py
index 190c93df..7db171de 100644
--- a/ceilometer/tests/unit/compute/pollsters/test_net.py
+++ b/ceilometer/tests/unit/compute/pollsters/test_net.py
@@ -120,12 +120,13 @@ class TestNetPollster(base.TestPollsterBase):
self.faux_instance = FauxInstance(**self.INSTANCE_PROPERTIES)
- def _check_get_samples(self, factory, expected, kind='cumulative'):
+ def _check_get_samples(self, factory, expected, expected_name,
+ kind='cumulative'):
mgr = manager.AgentManager(0, self.CONF)
pollster = factory(self.CONF)
samples = list(pollster.get_samples(mgr, {}, [self.instance]))
self.assertEqual(3, len(samples)) # one for each nic
- self.assertEqual(set([samples[0].name]),
+ self.assertEqual(set([expected_name]),
set([s.name for s in samples]))
def _verify_vnic_metering(ip, expected_volume, expected_rid):
@@ -149,6 +150,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 9,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes',
)
def test_outgoing_bytes(self):
@@ -160,6 +162,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 11,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes',
)
def test_incoming_bytes_delta(self):
@@ -171,6 +174,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 46,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes.delta',
'delta',
)
@@ -183,6 +187,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 47,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes.delta',
'delta',
)
@@ -195,6 +200,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 10,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets',
)
def test_outgoing_packets(self):
@@ -206,6 +212,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 12,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets',
)
def test_incoming_drops(self):
@@ -217,6 +224,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 28,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets.drop',
)
def test_outgoing_drops(self):
@@ -228,6 +236,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 30,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets.drop',
)
def test_incoming_errors(self):
@@ -239,6 +248,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 29,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.packets.error',
)
def test_outgoing_errors(self):
@@ -250,6 +260,7 @@ class TestNetPollster(base.TestPollsterBase):
('192.168.0.4', 31,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.packets.error',
)
def test_metadata(self):
@@ -309,12 +320,12 @@ class TestNetRatesPollster(base.TestPollsterBase):
]
self.inspector.inspect_vnic_rates = mock.Mock(return_value=vnics)
- def _check_get_samples(self, factory, expected):
+ def _check_get_samples(self, factory, expected, expected_name):
mgr = manager.AgentManager(0, self.CONF)
pollster = factory(self.CONF)
samples = list(pollster.get_samples(mgr, {}, [self.instance]))
self.assertEqual(3, len(samples)) # one for each nic
- self.assertEqual(set([samples[0].name]),
+ self.assertEqual(set([expected_name]),
set([s.name for s in samples]))
def _verify_vnic_metering(ip, expected_volume, expected_rid):
@@ -338,6 +349,7 @@ class TestNetRatesPollster(base.TestPollsterBase):
('192.168.0.4', 5,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.incoming.bytes.rate',
)
def test_outgoing_bytes_rate(self):
@@ -349,4 +361,5 @@ class TestNetRatesPollster(base.TestPollsterBase):
('192.168.0.4', 6,
"%s-%s" % (instance_name_id, self.vnic2.name)),
],
+ 'network.outgoing.bytes.rate',
)
diff --git a/ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py b/ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py
index c1fffd87..4e257415 100644
--- a/ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py
+++ b/ceilometer/tests/unit/polling/test_non_openstack_credentials_discovery.py
@@ -95,8 +95,8 @@ class TestNonOpenStackCredentialsDiscovery(base.BaseTestCase):
@mock.patch('keystoneclient.v2_0.client.Client')
def test_discover_response_ok(self, client_mock):
- def discover_mock(self, manager, param=None):
- return ["barbican_url"]
+ discover_mock = mock.MagicMock()
+ discover_mock.return_value = ["barbican_url"]
original_discover_method = EndpointDiscovery.discover
EndpointDiscovery.discover = discover_mock
@@ -108,9 +108,11 @@ class TestNonOpenStackCredentialsDiscovery(base.BaseTestCase):
client_mock.session.get.return_value = return_value
- response = self.discovery.discover(
- manager=self.FakeManager(client_mock), param="param")
+ fake_manager = self.FakeManager(client_mock)
+ response = self.discovery.discover(manager=fake_manager, param="param")
self.assertEqual(["content"], response)
+ discover_mock.assert_has_calls([
+ mock.call(fake_manager, "key-manager")])
EndpointDiscovery.discover = original_discover_method
diff --git a/ceilometer/tests/unit/publisher/test_gnocchi.py b/ceilometer/tests/unit/publisher/test_gnocchi.py
index b9ecdc1f..e8264f85 100644
--- a/ceilometer/tests/unit/publisher/test_gnocchi.py
+++ b/ceilometer/tests/unit/publisher/test_gnocchi.py
@@ -339,9 +339,9 @@ class PublisherTest(base.BaseTestCase):
def test_activity_gnocchi_project_not_found(self, logger):
self.ks_client.projects.find.side_effect = ka_exceptions.NotFound
self._do_test_activity_filter(2)
- logger.warning.assert_called_with('filtered project not found in '
- 'keystone, ignoring the '
- 'filter_project option')
+ logger.warning.assert_called_with(
+ 'Filtered project [service] not found in keystone, ignoring the '
+ 'filter_project option')
def test_activity_filter_match_swift_event(self):
self.samples[0].name = 'storage.objects.outgoing.bytes'
@@ -749,8 +749,11 @@ class PublisherWorkflowTest(base.BaseTestCase,
resource_type = resource_definition.cfg['resource_type']
expected_debug = [
- mock.call('filtered project found: %s',
+ mock.call('Filtered project [%s] found with ID [%s].', 'service',
'a2d42c23-d518-46b6-96ab-3fba2e146859'),
+ mock.call('Sample [%s] is not a Gnocchi activity; therefore, we '
+ 'do not filter it out and push it to Gnocchi.',
+ self.sample),
mock.call('Processing sample [%s] for resource ID [%s].',
self.sample, resource_id),
mock.call('Executing batch resource metrics measures for resource '
diff --git a/ceilometer/tests/unit/test_cache_utils.py b/ceilometer/tests/unit/test_cache_utils.py
new file mode 100644
index 00000000..245eaa34
--- /dev/null
+++ b/ceilometer/tests/unit/test_cache_utils.py
@@ -0,0 +1,65 @@
+#
+# Copyright 2022 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from ceilometer import cache_utils
+from ceilometer import service as ceilometer_service
+from oslo_cache import core as cache
+from oslo_config import fixture as config_fixture
+from oslotest import base
+
+
+class CacheConfFixture(config_fixture.Config):
+ def setUp(self):
+ super(CacheConfFixture, self).setUp()
+ self.conf = ceilometer_service.\
+ prepare_service(argv=[], config_files=[])
+ cache.configure(self.conf)
+ self.config(enabled=True, group='cache')
+
+
+class TestOsloCache(base.BaseTestCase):
+ def setUp(self):
+ super(TestOsloCache, self).setUp()
+
+ conf = ceilometer_service.prepare_service(argv=[], config_files=[])
+
+ dict_conf_fixture = CacheConfFixture(conf)
+ self.useFixture(dict_conf_fixture)
+ dict_conf_fixture.config(expiration_time=600,
+ backend='oslo_cache.dict',
+ group='cache')
+ self.dict_conf = dict_conf_fixture.conf
+
+ # enable_retry_client is only supported by
+ # 'dogpile.cache.pymemcache' backend which makes this
+ # incorrect config
+ faulty_conf_fixture = CacheConfFixture(conf)
+ self.useFixture(faulty_conf_fixture)
+ faulty_conf_fixture.config(expiration_time=600,
+ backend='dogpile.cache.memcached',
+ group='cache',
+ enable_retry_client='true')
+ self.faulty_cache_conf = faulty_conf_fixture.conf
+
+ self.no_cache_conf = ceilometer_service.\
+ prepare_service(argv=[], config_files=[])
+
+ def test_get_cache_region(self):
+ self.assertIsNotNone(cache_utils.get_cache_region(self.dict_conf))
+
+ def test_get_client(self):
+ self.assertIsNotNone(cache_utils.get_client(self.dict_conf))
+ self.assertIsNone(cache_utils.get_client(self.no_cache_conf))
+ self.assertIsNone(cache_utils.get_client(self.faulty_cache_conf))