-rw-r--r--  ceilometer/api/controllers/v2.py                    | 11
-rw-r--r--  ceilometer/compute/virt/hyperv/inspector.py         |  8
-rw-r--r--  ceilometer/compute/virt/hyperv/utilsv2.py           | 37
-rw-r--r--  ceilometer/openstack/common/middleware/audit.py     |  2
-rw-r--r--  ceilometer/openstack/common/middleware/notifier.py  |  2
-rw-r--r--  ceilometer/openstack/common/rpc/impl_kombu.py       |  5
-rw-r--r--  ceilometer/openstack/common/rpc/impl_qpid.py        | 18
-rw-r--r--  requirements.txt                                    |  4
-rw-r--r--  setup.cfg                                           |  2
-rw-r--r--  test-requirements.txt                               |  4
-rw-r--r--  tests/api/v1/test_app.py                            |  8
-rw-r--r--  tests/api/v2/test_acl_scenarios.py                  |  6
-rw-r--r--  tests/api/v2/test_alarm_scenarios.py                | 65
-rw-r--r--  tests/api/v2/test_app.py                            |  8
-rw-r--r--  tests/compute/virt/hyperv/test_inspector.py         | 18
-rw-r--r--  tests/compute/virt/hyperv/test_utilsv2.py           | 33
17 files changed, 177 insertions(+), 58 deletions(-)
diff --git a/ceilometer/api/controllers/v2.py b/ceilometer/api/controllers/v2.py
index a0abc98c..da712866 100644
--- a/ceilometer/api/controllers/v2.py
+++ b/ceilometer/api/controllers/v2.py
@@ -1082,7 +1082,7 @@ class AlarmThresholdRule(_Base):
#note(sileht): wsme mandatory doesn't work as expected
#workaround for https://bugs.launchpad.net/wsme/+bug/1227004
for field in ['meter_name', 'threshold']:
- if not getattr(threshold_rule, field):
+ if getattr(threshold_rule, field) in (wsme.Unset, None):
error = _("threshold_rule/%s is mandatory") % field
pecan.response.translatable_error = error
raise wsme.exc.ClientSideError(unicode(error))
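The old check relied on Python truthiness, so a perfectly valid threshold of 0.0 was
reported as missing. A minimal standalone sketch of the difference (the Rule class and
UNSET sentinel below are stand-ins for the real wsme types, not ceilometer code):

    class Rule(object):
        def __init__(self, meter_name=None, threshold=None):
            self.meter_name = meter_name
            self.threshold = threshold

    UNSET = object()  # stand-in for wsme.Unset

    rule = Rule(meter_name='instance', threshold=0.0)

    # old check: 0.0 is falsy, so the mandatory-field error fires
    assert not getattr(rule, 'threshold')

    # new check: only Unset/None count as "not provided"
    assert getattr(rule, 'threshold') not in (UNSET, None)

The new test_post_alarm_threshold_zero and test_put_alarm_threshold_zero cases further
down exercise exactly this path.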
@@ -1425,6 +1425,15 @@ class AlarmController(rest.RestController):
#https://bugs.launchpad.net/wsme/+bug/1220678
Alarm.validate(data)
+ # ideally we would check for any cycle in the dependency graph, but
+ # for efficiency we only check that an alarm cannot depend on itself
+ if data.type == 'combination':
+ if self._id in data.combination_rule.alarm_ids:
+ error = _('Cannot specify alarm %s itself in '
+ 'combination rule') % self._id
+ pecan.response.translatable_error = error
+ raise wsme.exc.ClientSideError(unicode(error))
+
old_alarm = Alarm.from_db_model(alarm_in).as_dict(storage.models.Alarm)
updated_alarm = data.as_dict(storage.models.Alarm)
try:
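The added comment notes that a full dependency-cycle check is skipped for efficiency and
only direct self-reference is rejected. For context, a rough sketch of what the fuller
check alluded to could look like; get_child_alarm_ids is a hypothetical lookup and not
part of the ceilometer API:

    def has_cycle(alarm_id, get_child_alarm_ids, path=()):
        # walk combination-rule dependencies and flag any alarm that is
        # reachable from itself
        if alarm_id in path:
            return True
        return any(has_cycle(child, get_child_alarm_ids, path + (alarm_id,))
                   for child in get_child_alarm_ids(alarm_id))

    deps = {'a': ['b'], 'b': ['c'], 'c': ['a']}
    assert has_cycle('a', lambda alarm_id: deps.get(alarm_id, []))

The patch itself only implements the cheap self-reference test shown above.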
diff --git a/ceilometer/compute/virt/hyperv/inspector.py b/ceilometer/compute/virt/hyperv/inspector.py
index 6a29e01c..b3273049 100644
--- a/ceilometer/compute/virt/hyperv/inspector.py
+++ b/ceilometer/compute/virt/hyperv/inspector.py
@@ -62,9 +62,9 @@ class HyperVInspector(virt_inspector.Inspector):
parameters=None)
stats = virt_inspector.InterfaceStats(
- rx_bytes=vnic_metrics['rx_bytes'],
+ rx_bytes=vnic_metrics['rx_mb'] * 1024 * 1024,
rx_packets=0,
- tx_bytes=vnic_metrics['tx_bytes'],
+ tx_bytes=vnic_metrics['tx_mb'] * 1024 * 1024,
tx_packets=0)
yield (interface, stats)
@@ -79,9 +79,9 @@ class HyperVInspector(virt_inspector.Inspector):
stats = virt_inspector.DiskStats(
read_requests=0,
# Return bytes
- read_bytes=disk_metrics['read_mb'] * 1024,
+ read_bytes=disk_metrics['read_mb'] * 1024 * 1024,
write_requests=0,
- write_bytes=disk_metrics['write_mb'] * 1024,
+ write_bytes=disk_metrics['write_mb'] * 1024 * 1024,
errors=0)
yield (disk, stats)
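Hyper-V reports both vNIC and disk counters in megabytes, while the inspector interface
expects bytes; the old code either passed the raw value through (vNICs) or multiplied by
1024 only once (disks). The conversion itself is trivial; mb_to_bytes below is a
hypothetical helper used purely for illustration, the patch simply inlines the
multiplication:

    def mb_to_bytes(mb):
        return mb * 1024 * 1024

    assert mb_to_bytes(1) == 1048576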
diff --git a/ceilometer/compute/virt/hyperv/utilsv2.py b/ceilometer/compute/virt/hyperv/utilsv2.py
index 187fd84e..980cec4f 100644
--- a/ceilometer/compute/virt/hyperv/utilsv2.py
+++ b/ceilometer/compute/virt/hyperv/utilsv2.py
@@ -48,14 +48,16 @@ class UtilsV2(object):
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
+ _PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_AGGREG_METRIC = 'Msvm_AggregationMetricDefinition'
_METRICS_ME = 'Msvm_MetricForME'
+ _BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
- _NET_IN_METRIC_NAME = 'Aggregated Filtered Incoming Network Traffic'
- _NET_OUT_METRIC_NAME = 'Aggregated Filtered Outgoing Network Traffic'
+ _NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
+ _NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
@@ -104,12 +106,16 @@ class UtilsV2(object):
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
- metric_values = self._get_metric_values(
- port, [metric_def_in, metric_def_out])
+
+ metric_value_instances = self._get_metric_value_instances(
+ port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
+ self._BASE_METRICS_VALUE)
+ metric_values = self._sum_metric_values_by_defs(
+ metric_value_instances, [metric_def_in, metric_def_out])
yield {
- 'rx_bytes': metric_values[0],
- 'tx_bytes': metric_values[1],
+ 'rx_mb': metric_values[0],
+ 'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
@@ -142,10 +148,7 @@ class UtilsV2(object):
tot_metric_val += long(metric.MetricValue)
return tot_metric_val
- def _get_metric_values(self, element, metric_defs):
- element_metrics = element.associators(
- wmi_association_class=self._METRICS_ME)
-
+ def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
@@ -156,6 +159,20 @@ class UtilsV2(object):
metric_values.append(0)
return metric_values
+ def _get_metric_value_instances(self, elements, result_class):
+ instances = []
+ for el in elements:
+ associators = el.associators(wmi_result_class=result_class)
+ if associators:
+ instances.append(associators[0])
+
+ return instances
+
+ def _get_metric_values(self, element, metric_defs):
+ element_metrics = element.associators(
+ wmi_association_class=self._METRICS_ME)
+ return self._sum_metric_values_by_defs(element_metrics, metric_defs)
+
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
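The refactoring splits vNIC metric collection into two steps: first gather the base
metric value instances associated with each port's ACL setting data, then aggregate them
per metric definition. A standalone sketch of the aggregation shape, using plain
stand-in objects instead of WMI results (the attribute names and the matching on a
definition id are assumptions about the elided helper body):

    class FakeMetricValue(object):
        def __init__(self, def_id, value):
            self.MetricDefinitionId = def_id
            self.MetricValue = value

    def sum_metric_values_by_defs(element_metrics, metric_def_ids):
        # one total per requested definition, 0 when the definition is
        # missing (e.g. disk metrics on pre-2012-R2 hosts)
        totals = []
        for def_id in metric_def_ids:
            if def_id:
                totals.append(sum(int(m.MetricValue)
                                  for m in element_metrics
                                  if m.MetricDefinitionId == def_id))
            else:
                totals.append(0)
        return totals

    values = [FakeMetricValue('net_in', 10), FakeMetricValue('net_out', 4),
              FakeMetricValue('net_in', 5)]
    assert sum_metric_values_by_defs(values, ['net_in', 'net_out']) == [15, 4]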
diff --git a/ceilometer/openstack/common/middleware/audit.py b/ceilometer/openstack/common/middleware/audit.py
index 1bda8d11..bb69e313 100644
--- a/ceilometer/openstack/common/middleware/audit.py
+++ b/ceilometer/openstack/common/middleware/audit.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright (c) 2013 OpenStack LLC.
+# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
diff --git a/ceilometer/openstack/common/middleware/notifier.py b/ceilometer/openstack/common/middleware/notifier.py
index ab744ff0..8006fe74 100644
--- a/ceilometer/openstack/common/middleware/notifier.py
+++ b/ceilometer/openstack/common/middleware/notifier.py
@@ -66,7 +66,7 @@ class RequestNotifier(base.Middleware):
"""
return dict((k, v) for k, v in environ.iteritems()
- if k.isupper())
+ if k.isupper() and k != 'HTTP_X_AUTH_TOKEN')
@log_and_ignore_error
def process_request(self, request):
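The environ filter now strips the keystone token as well, so credentials never appear in
emitted notifications. A quick standalone illustration of the filtering behaviour
(sample environ keys only):

    environ = {'REQUEST_METHOD': 'GET',
               'HTTP_X_AUTH_TOKEN': 'secret-token',
               'wsgi.input': object()}

    # keep upper-case CGI/WSGI keys, but never the auth token
    payload = dict((k, v) for k, v in environ.items()
                   if k.isupper() and k != 'HTTP_X_AUTH_TOKEN')
    assert payload == {'REQUEST_METHOD': 'GET'}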
diff --git a/ceilometer/openstack/common/rpc/impl_kombu.py b/ceilometer/openstack/common/rpc/impl_kombu.py
index cf55a9f0..36355322 100644
--- a/ceilometer/openstack/common/rpc/impl_kombu.py
+++ b/ceilometer/openstack/common/rpc/impl_kombu.py
@@ -459,6 +459,9 @@ class Connection(object):
self.params_list = params_list
+ brokers_count = len(self.params_list)
+ self.next_broker_indices = itertools.cycle(range(brokers_count))
+
self.memory_transport = self.conf.fake_rabbit
self.connection = None
@@ -529,7 +532,7 @@ class Connection(object):
attempt = 0
while True:
- params = self.params_list[attempt % len(self.params_list)]
+ params = self.params_list[next(self.next_broker_indices)]
attempt += 1
try:
self._connect(params)
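Replacing the local attempt counter with itertools.cycle means the broker rotation
survives across calls to reconnect(): each reconnection resumes with the next broker in
the list instead of always starting again from the first one. A small illustration
(broker URLs are made up):

    import itertools

    params_list = ['amqp://host1', 'amqp://host2', 'amqp://host3']
    next_broker_indices = itertools.cycle(range(len(params_list)))

    # the iterator keeps its position between calls, unlike a counter that
    # is re-initialised to 0 every time reconnect() is entered
    picked = [params_list[next(next_broker_indices)] for _ in range(4)]
    assert picked == ['amqp://host1', 'amqp://host2',
                      'amqp://host3', 'amqp://host1']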
diff --git a/ceilometer/openstack/common/rpc/impl_qpid.py b/ceilometer/openstack/common/rpc/impl_qpid.py
index 6a6d1b41..ac9933c3 100644
--- a/ceilometer/openstack/common/rpc/impl_qpid.py
+++ b/ceilometer/openstack/common/rpc/impl_qpid.py
@@ -369,7 +369,7 @@ class DirectPublisher(Publisher):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
- node_name = msg_id
+ node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
@@ -468,6 +468,10 @@ class Connection(object):
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
+
+ brokers_count = len(self.brokers)
+ self.next_broker_indices = itertools.cycle(range(brokers_count))
+
self.connection_create(self.brokers[0])
self.reconnect()
@@ -495,29 +499,27 @@ class Connection(object):
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
- attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
- except qpid_exceptions.ConnectionError:
+ except qpid_exceptions.MessagingError:
pass
- broker = self.brokers[attempt % len(self.brokers)]
- attempt += 1
+ broker = self.brokers[next(self.next_broker_indices)]
try:
self.connection_create(broker)
self.connection.open()
- except qpid_exceptions.ConnectionError as e:
+ except qpid_exceptions.MessagingError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
- delay = min(2 * delay, 60)
+ delay = min(delay + 1, 5)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
@@ -539,7 +541,7 @@ class Connection(object):
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
- qpid_exceptions.ConnectionError) as e:
+ qpid_exceptions.MessagingError) as e:
if error_callback:
error_callback(e)
self.reconnect()
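Besides adopting the same itertools.cycle broker rotation as impl_kombu and catching the
broader MessagingError, the qpid reconnect loop also swaps exponential backoff (doubling
up to 60 seconds) for a linear ramp capped at 5 seconds. A quick comparison of the two
delay progressions:

    def delays(step, first=1, attempts=6):
        out, delay = [], first
        for _ in range(attempts):
            out.append(delay)
            delay = step(delay)
        return out

    assert delays(lambda d: min(2 * d, 60)) == [1, 2, 4, 8, 16, 32]  # old
    assert delays(lambda d: min(d + 1, 5)) == [1, 2, 3, 4, 5, 5]     # new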
diff --git a/requirements.txt b/requirements.txt
index b6709002..c03110a4 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,8 +4,8 @@ kombu>=2.4.8
iso8601>=0.1.8
argparse
SQLAlchemy>=0.7.8,<=0.7.99
-sqlalchemy-migrate>=0.7.2
-alembic>=0.4.1
+sqlalchemy-migrate>=0.7.2,!=0.9.2
+alembic>=0.4.1,<0.6.4
netaddr
pymongo>=2.4
eventlet>=0.13.0
diff --git a/setup.cfg b/setup.cfg
index 14bd8b7e..71776ff9 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = ceilometer
-version = 2013.2.3
+version = 2013.2.4
summary = OpenStack Metering
description-file =
README.rst
diff --git a/test-requirements.txt b/test-requirements.txt
index 2fc83439..2380f226 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -10,9 +10,9 @@ mox>=0.5.3
fixtures>=0.3.14
Babel>=1.3
http://tarballs.openstack.org/nova/nova-stable-havana.tar.gz#egg=nova
-swift
+http://tarballs.openstack.org/swift/swift-master.tar.gz#egg=swift
# Docs Requirements
-sphinx>=1.1.2,<1.2
+sphinx>=1.1.2,<1.1.999
sphinxcontrib-pecanwsme>=0.5
docutils==0.9.1
oslo.sphinx
diff --git a/tests/api/v1/test_app.py b/tests/api/v1/test_app.py
index 89368bc8..1a257c5a 100644
--- a/tests/api/v1/test_app.py
+++ b/tests/api/v1/test_app.py
@@ -34,21 +34,21 @@ class TestApp(base.TestCase):
cfg.CONF.reset()
def test_keystone_middleware_conf(self):
- cfg.CONF.set_override("auth_protocol", "foottp",
+ cfg.CONF.set_override("auth_protocol", "file",
group=acl.OPT_GROUP_NAME)
cfg.CONF.set_override("auth_version", "v2.0", group=acl.OPT_GROUP_NAME)
cfg.CONF.set_override("auth_uri", None,
group=acl.OPT_GROUP_NAME)
api_app = app.make_app(cfg.CONF, attach_storage=False)
- self.assertTrue(api_app.wsgi_app.auth_uri.startswith('foottp'))
+ self.assertTrue(api_app.wsgi_app.auth_uri.startswith('file'))
def test_keystone_middleware_parse_conffile(self):
tmpfile = self.temp_config_file_path()
with open(tmpfile, "w") as f:
- f.write("[%s]\nauth_protocol = barttp" % acl.OPT_GROUP_NAME)
+ f.write("[%s]\nauth_protocol = file" % acl.OPT_GROUP_NAME)
f.write("\nauth_version = v2.0")
service.prepare_service(['ceilometer-api',
'--config-file=%s' % tmpfile])
api_app = app.make_app(cfg.CONF, attach_storage=False)
- self.assertTrue(api_app.wsgi_app.auth_uri.startswith('barttp'))
+ self.assertTrue(api_app.wsgi_app.auth_uri.startswith('file'))
os.unlink(tmpfile)
diff --git a/tests/api/v2/test_acl_scenarios.py b/tests/api/v2/test_acl_scenarios.py
index 4a5a661f..708a9d23 100644
--- a/tests/api/v2/test_acl_scenarios.py
+++ b/tests/api/v2/test_acl_scenarios.py
@@ -43,7 +43,8 @@ class FakeMemcache(object):
if key == "tokens/%s" % VALID_TOKEN:
dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
return json.dumps(({'access': {
- 'token': {'id': VALID_TOKEN},
+ 'token': {'id': VALID_TOKEN,
+ 'expires': timeutils.isotime(dt)},
'user': {
'id': 'user_id1',
'name': 'user_name1',
@@ -56,7 +57,8 @@ class FakeMemcache(object):
if key == "tokens/%s" % VALID_TOKEN2:
dt = timeutils.utcnow() + datetime.timedelta(minutes=5)
return json.dumps(({'access': {
- 'token': {'id': VALID_TOKEN2},
+ 'token': {'id': VALID_TOKEN2,
+ 'expires': timeutils.isotime(dt)},
'user': {
'id': 'user_id2',
'name': 'user-good',
diff --git a/tests/api/v2/test_alarm_scenarios.py b/tests/api/v2/test_alarm_scenarios.py
index 77b2cd86..0cd6390a 100644
--- a/tests/api/v2/test_alarm_scenarios.py
+++ b/tests/api/v2/test_alarm_scenarios.py
@@ -677,6 +677,23 @@ class TestAlarms(FunctionalTest,
else:
self.fail("Alarm not found")
+ def test_post_alarm_threshold_zero(self):
+ """Alarm should allow threshold value to be zero."""
+ json_body = {
+ 'type': 'threshold',
+ 'name': 'zero-test',
+ 'threshold_rule': {
+ 'threshold': 0.0,
+ 'meter_name': 'instance',
+ }
+ }
+
+ self.post_json('/alarms', params=json_body, status=201,
+ headers=self.auth_headers)
+ alarms = list(self.conn.get_alarms(name='zero-test'))
+ self.assertEqual(1, len(alarms))
+ self.assertEqual(0.0, alarms[0].rule['threshold'])
+
def test_post_alarm_combination(self):
json = {
'enabled': False,
@@ -965,6 +982,54 @@ class TestAlarms(FunctionalTest,
'value': self.auth_headers['X-Project-Id']})
self.assertEqual(resp.status_code, 200)
+ def test_put_alarm_threshold_zero(self):
+ """Alarm should allow threshold value to be zero."""
+ json_body = {
+ 'type': 'threshold',
+ 'name': 'name1',
+ 'threshold_rule': {
+ 'threshold': 0.0,
+ 'meter_name': 'instance',
+ }
+ }
+
+ alarms = self.get_json('/alarms',
+ q=[{'field': 'name',
+ 'value': 'name1',
+ }])
+ self.assertEqual(1, len(alarms))
+ alarm_id = alarms[0]['alarm_id']
+
+ self.put_json('/alarms/%s' % alarm_id, params=json_body,
+ status=200, headers=self.auth_headers)
+ alarms = list(self.conn.get_alarms(name='name1'))
+ self.assertEqual(1, len(alarms))
+ self.assertEqual(0.0, alarms[0].rule['threshold'])
+
+ def test_put_alarm_combination_cannot_specify_itself(self):
+ json = {
+ 'name': 'name4',
+ 'type': 'combination',
+ 'combination_rule': {
+ 'alarm_ids': ['d'],
+ }
+ }
+
+ data = self.get_json('/alarms',
+ q=[{'field': 'name',
+ 'value': 'name4',
+ }])
+ self.assertEqual(1, len(data))
+ alarm_id = data[0]['alarm_id']
+
+ resp = self.put_json('/alarms/%s' % alarm_id,
+ expect_errors=True, status=400,
+ params=json,
+ headers=self.auth_headers)
+
+ msg = 'Cannot specify alarm %s itself in combination rule' % alarm_id
+ self.assertEqual(msg, resp.json['error_message']['faultstring'])
+
def test_delete_alarm(self):
data = self.get_json('/alarms')
self.assertEqual(4, len(data))
diff --git a/tests/api/v2/test_app.py b/tests/api/v2/test_app.py
index e2b4a78a..a7c930c2 100644
--- a/tests/api/v2/test_app.py
+++ b/tests/api/v2/test_app.py
@@ -41,7 +41,7 @@ class TestApp(base.TestCase):
cfg.CONF.reset()
def test_keystone_middleware_conf(self):
- cfg.CONF.set_override("auth_protocol", "foottp",
+ cfg.CONF.set_override("auth_protocol", "file",
group=acl.OPT_GROUP_NAME)
cfg.CONF.set_override("auth_version", "v2.0", group=acl.OPT_GROUP_NAME)
cfg.CONF.set_override("pipeline_cfg_file",
@@ -50,7 +50,7 @@ class TestApp(base.TestCase):
cfg.CONF.set_override("auth_uri", None, group=acl.OPT_GROUP_NAME)
api_app = app.setup_app()
- self.assertTrue(api_app.auth_uri.startswith('foottp'))
+ self.assertTrue(api_app.auth_uri.startswith('file'))
def test_keystone_middleware_parse_conffile(self):
tmpfile = self.temp_config_file_path()
@@ -59,13 +59,13 @@ class TestApp(base.TestCase):
f.write("pipeline_cfg_file = %s\n" %
self.path_get("etc/ceilometer/pipeline.yaml"))
f.write("[%s]\n" % acl.OPT_GROUP_NAME)
- f.write("auth_protocol = barttp\n")
+ f.write("auth_protocol = file\n")
f.write("auth_version = v2.0\n")
service.prepare_service(['ceilometer-api',
'--config-file=%s' % tmpfile])
cfg.CONF.set_override('connection', "log://", group="database")
api_app = app.setup_app()
- self.assertTrue(api_app.auth_uri.startswith('barttp'))
+ self.assertTrue(api_app.auth_uri.startswith('file'))
os.unlink(tmpfile)
diff --git a/tests/compute/virt/hyperv/test_inspector.py b/tests/compute/virt/hyperv/test_inspector.py
index 05477f40..8f7914e7 100644
--- a/tests/compute/virt/hyperv/test_inspector.py
+++ b/tests/compute/virt/hyperv/test_inspector.py
@@ -71,14 +71,14 @@ class TestHyperVInspection(test_base.TestCase):
def test_inspect_vnics(self):
fake_instance_name = 'fake_instance_name'
- fake_rx_bytes = 1000
- fake_tx_bytes = 2000
+ fake_rx_mb = 1000
+ fake_tx_mb = 2000
fake_element_name = 'fake_element_name'
fake_address = 'fake_address'
self._inspector._utils.get_vnic_metrics.return_value = [{
- 'rx_bytes': fake_rx_bytes,
- 'tx_bytes': fake_tx_bytes,
+ 'rx_mb': fake_rx_mb,
+ 'tx_mb': fake_tx_mb,
'element_name': fake_element_name,
'address': fake_address}]
@@ -93,8 +93,8 @@ class TestHyperVInspection(test_base.TestCase):
self.assertEqual(fake_element_name, inspected_vnic.name)
self.assertEqual(fake_address, inspected_vnic.mac)
- self.assertEqual(fake_rx_bytes, inspected_stats.rx_bytes)
- self.assertEqual(fake_tx_bytes, inspected_stats.tx_bytes)
+ self.assertEqual(fake_rx_mb * 1024 * 1024, inspected_stats.rx_bytes)
+ self.assertEqual(fake_tx_mb * 1024 * 1024, inspected_stats.tx_bytes)
def test_inspect_disks(self):
fake_instance_name = 'fake_instance_name'
@@ -122,5 +122,7 @@ class TestHyperVInspection(test_base.TestCase):
self.assertEqual(fake_device, inspected_disk.device)
- self.assertEqual(fake_read_mb * 1024, inspected_stats.read_bytes)
- self.assertEqual(fake_write_mb * 1024, inspected_stats.write_bytes)
+ self.assertEqual(fake_read_mb * 1024 * 1024,
+ inspected_stats.read_bytes)
+ self.assertEqual(fake_write_mb * 1024 * 1024,
+ inspected_stats.write_bytes)
diff --git a/tests/compute/virt/hyperv/test_utilsv2.py b/tests/compute/virt/hyperv/test_utilsv2.py
index d9f7d1f4..804112c4 100644
--- a/tests/compute/virt/hyperv/test_utilsv2.py
+++ b/tests/compute/virt/hyperv/test_utilsv2.py
@@ -28,6 +28,8 @@ from ceilometer.tests import base as test_base
class TestUtilsV2(test_base.TestCase):
+ _FAKE_RETURN_CLASS = 'fake_return_class'
+
def setUp(self):
self._utils = utilsv2.UtilsV2()
self._utils._conn = mock.MagicMock()
@@ -85,13 +87,17 @@ class TestUtilsV2(test_base.TestCase):
self.assertEqual(fake_cpu_count, cpu_metrics[1])
self.assertEqual(fake_uptime, cpu_metrics[2])
- def test_get_vnic_metrics(self):
+ @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2'
+ '._sum_metric_values_by_defs')
+ @mock.patch('ceilometer.compute.virt.hyperv.utilsv2.UtilsV2'
+ '._get_metric_value_instances')
+ def test_get_vnic_metrics(self, mock_get_instances, mock_get_by_defs):
fake_vm_element_name = "fake_vm_element_name"
fake_vnic_element_name = "fake_vnic_name"
fake_vnic_address = "fake_vnic_address"
fake_vnic_path = "fake_vnic_path"
- fake_rx_bytes = 1000
- fake_tx_bytes = 2000
+ fake_rx_mb = 1000
+ fake_tx_mb = 2000
self._utils._lookup_vm = mock.MagicMock()
self._utils._get_vm_resources = mock.MagicMock()
@@ -108,15 +114,13 @@ class TestUtilsV2(test_base.TestCase):
self._utils._get_metric_def = mock.MagicMock()
- self._utils._get_metric_values = mock.MagicMock()
- self._utils._get_metric_values.return_value = [fake_rx_bytes,
- fake_tx_bytes]
+ mock_get_by_defs.return_value = [fake_rx_mb, fake_tx_mb]
vnic_metrics = list(self._utils.get_vnic_metrics(fake_vm_element_name))
self.assertEqual(1, len(vnic_metrics))
- self.assertEqual(fake_rx_bytes, vnic_metrics[0]['rx_bytes'])
- self.assertEqual(fake_tx_bytes, vnic_metrics[0]['tx_bytes'])
+ self.assertEqual(fake_rx_mb, vnic_metrics[0]['rx_mb'])
+ self.assertEqual(fake_tx_mb, vnic_metrics[0]['tx_mb'])
self.assertEqual(fake_vnic_element_name,
vnic_metrics[0]['element_name'])
self.assertEqual(fake_vnic_address, vnic_metrics[0]['address'])
@@ -150,6 +154,19 @@ class TestUtilsV2(test_base.TestCase):
self.assertEqual(fake_instance_id, disk_metrics[0]['instance_id'])
self.assertEqual(fake_host_resource, disk_metrics[0]['host_resource'])
+ def test_get_metric_value_instances(self):
+ mock_el1 = mock.MagicMock()
+ mock_associator = mock.MagicMock()
+ mock_el1.associators.return_value = [mock_associator]
+
+ mock_el2 = mock.MagicMock()
+ mock_el2.associators.return_value = []
+
+ returned = self._utils._get_metric_value_instances(
+ [mock_el1, mock_el2], self._FAKE_RETURN_CLASS)
+
+ self.assertEqual([mock_associator], returned)
+
def test_lookup_vm(self):
fake_vm_element_name = "fake_vm_element_name"
fake_vm = "fake_vm"
diff --git a/tox.ini b/tox.ini
index 5de7b888..db457cc1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,17 +8,19 @@ deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
install_command = pip install -U {opts} {packages}
usedevelop = True
+# Note the hash seed is set to 0 until ceilometer can be tested with a
+# random hash seed successfully.
setenv = VIRTUAL_ENV={envdir}
LANG=en_US.UTF-8
LANGUAGE=en_US:en
LC_ALL=C
EVENTLET_NO_GREENDNS=yes
+ PYTHONHASHSEED=0
commands =
bash -x {toxinidir}/run-tests.sh {posargs}
downloadcache = {toxworkdir}/_download
[testenv:cover]
-setenv = VIRTUAL_ENV={envdir}
commands = bash -x {toxinidir}/run-tests.sh --coverage
[testenv:pep8]