-rw-r--r--  .zuul.yaml | 67
-rw-r--r--  bindep.txt | 5
-rw-r--r--  ceilometer/cmd/storage.py | 113
-rw-r--r--  ceilometer/dispatcher/__init__.py | 92
-rw-r--r--  ceilometer/dispatcher/database.py | 71
-rw-r--r--  ceilometer/event/models.py | 38
-rw-r--r--  ceilometer/opts.py | 4
-rw-r--r--  ceilometer/publisher/direct.py | 98
-rw-r--r--  ceilometer/service.py | 2
-rw-r--r--  ceilometer/storage/__init__.py | 147
-rw-r--r--  ceilometer/storage/base.py | 253
-rw-r--r--  ceilometer/storage/hbase/__init__.py | 0
-rw-r--r--  ceilometer/storage/hbase/base.py | 91
-rw-r--r--  ceilometer/storage/hbase/inmemory.py | 281
-rw-r--r--  ceilometer/storage/hbase/migration.py | 74
-rw-r--r--  ceilometer/storage/hbase/utils.py | 448
-rw-r--r--  ceilometer/storage/impl_hbase.py | 440
-rw-r--r--  ceilometer/storage/impl_log.py | 130
-rw-r--r--  ceilometer/storage/impl_mongodb.py | 710
-rw-r--r--  ceilometer/storage/impl_sqlalchemy.py | 838
-rw-r--r--  ceilometer/storage/models.py | 148
-rw-r--r--  ceilometer/storage/mongo/__init__.py | 0
-rw-r--r--  ceilometer/storage/mongo/utils.py | 590
-rw-r--r--  ceilometer/storage/pymongo_base.py | 175
-rw-r--r--  ceilometer/storage/sqlalchemy/__init__.py | 0
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/README | 4
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/__init__.py | 0
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/manage.py | 5
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg | 25
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py | 95
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py | 23
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py | 29
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py | 23
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py | 24
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py | 25
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py | 46
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py | 60
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py | 24
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py | 23
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py | 37
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py | 58
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py | 23
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py | 44
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py | 63
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py | 60
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py | 54
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py | 26
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py | 26
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py | 68
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py | 77
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql | 29
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py | 26
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py | 86
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql | 34
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py | 56
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py | 58
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py | 24
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py | 42
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py | 138
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py | 24
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py | 110
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py | 87
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py | 23
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py | 21
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py | 33
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py | 84
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py | 68
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py | 44
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py | 131
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py | 56
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py | 24
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py | 54
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py | 21
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py | 19
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py | 37
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py | 21
-rw-r--r--  ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py | 0
-rw-r--r--  ceilometer/storage/sqlalchemy/migration.py | 29
-rw-r--r--  ceilometer/storage/sqlalchemy/models.py | 250
-rw-r--r--  ceilometer/storage/sqlalchemy/utils.py | 131
-rw-r--r--  ceilometer/tests/db.py | 214
-rw-r--r--  ceilometer/tests/functional/__init__.py | 0
-rwxr-xr-x  ceilometer/tests/functional/hooks/post_test_hook.sh | 51
-rw-r--r--  ceilometer/tests/functional/publisher/__init__.py | 0
-rw-r--r--  ceilometer/tests/functional/publisher/test_direct.py | 80
-rw-r--r--  ceilometer/tests/functional/storage/__init__.py | 0
-rw-r--r--  ceilometer/tests/functional/storage/test_impl_hbase.py | 94
-rw-r--r--  ceilometer/tests/functional/storage/test_impl_log.py | 29
-rw-r--r--  ceilometer/tests/functional/storage/test_impl_mongodb.py | 117
-rw-r--r--  ceilometer/tests/functional/storage/test_impl_sqlalchemy.py | 154
-rw-r--r--  ceilometer/tests/functional/storage/test_pymongo_base.py | 145
-rw-r--r--  ceilometer/tests/functional/storage/test_storage_scenarios.py | 2805
-rw-r--r--  ceilometer/tests/mocks.py | 81
-rw-r--r--  ceilometer/tests/unit/dispatcher/__init__.py | 0
-rw-r--r--  ceilometer/tests/unit/dispatcher/test_db.py | 85
-rw-r--r--  ceilometer/tests/unit/dispatcher/test_dispatcher.py | 42
-rw-r--r--  ceilometer/tests/unit/event/test_endpoint.py | 1
-rw-r--r--  ceilometer/tests/unit/storage/__init__.py | 0
-rw-r--r--  ceilometer/tests/unit/storage/sqlalchemy/__init__.py | 0
-rw-r--r--  ceilometer/tests/unit/storage/sqlalchemy/test_models.py | 96
-rw-r--r--  ceilometer/tests/unit/storage/test_base.py | 54
-rw-r--r--  ceilometer/tests/unit/storage/test_get_connection.py | 82
-rw-r--r--  ceilometer/tests/unit/test_bin.py | 49
-rw-r--r--  ceilometer/tests/unit/test_notification.py | 1
-rw-r--r--  devstack/plugin.sh | 48
-rwxr-xr-x  devstack/upgrade/upgrade.sh | 13
-rw-r--r--  doc/source/admin/telemetry-data-pipelines.rst | 67
-rw-r--r--  doc/source/contributor/install/custom.rst | 1
-rw-r--r--  doc/source/contributor/install/manual.rst | 8
-rw-r--r--  doc/source/contributor/new_resource_types.rst | 2
-rw-r--r--  doc/source/contributor/testing.rst | 15
-rw-r--r--  doc/source/install/install-base-config-common.inc | 2
-rw-r--r--  etc/ceilometer/ceilometer-config-generator.conf | 1
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-functional-mongodb/post.yaml | 80
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-functional-mongodb/run.yaml | 55
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-functional-mysql/post.yaml | 80
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-functional-mysql/run.yaml | 55
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-mongodb/post.yaml | 67
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-mongodb/run.yaml | 86
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-mysql/post.yaml | 67
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-mysql/run.yaml | 86
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-postgresql/post.yaml | 67
-rw-r--r--  playbooks/legacy/ceilometer-tox-py27-postgresql/run.yaml | 86
-rw-r--r--  requirements.txt | 3
-rwxr-xr-x  run-tests.sh | 22
-rw-r--r--  setup.cfg | 27
-rw-r--r--  test-requirements.txt | 2
-rwxr-xr-x  tools/make_test_data.py | 229
-rwxr-xr-x  tools/make_test_data.sh | 77
-rwxr-xr-x  tools/migrate_data_to_gnocchi.py | 193
-rw-r--r--  tox.ini | 9
131 files changed, 47 insertions, 13098 deletions
diff --git a/.zuul.yaml b/.zuul.yaml
index 1c1521b1..798f8ff5 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,51 +1,4 @@
- job:
- name: ceilometer-dsvm-functional-mongodb
- parent: legacy-dsvm-base
- run: playbooks/legacy/ceilometer-dsvm-functional-mongodb/run
- post-run: playbooks/legacy/ceilometer-dsvm-functional-mongodb/post
- timeout: 7800
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/ceilometer
-
-- job:
- name: ceilometer-dsvm-functional-mysql
- parent: legacy-dsvm-base
- run: playbooks/legacy/ceilometer-dsvm-functional-mysql/run
- post-run: playbooks/legacy/ceilometer-dsvm-functional-mysql/post
- timeout: 7800
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/ceilometer
-
-- job:
- name: ceilometer-tox-py27-mongodb
- parent: legacy-base
- run: playbooks/legacy/ceilometer-tox-py27-mongodb/run
- post-run: playbooks/legacy/ceilometer-tox-py27-mongodb/post
- timeout: 2400
- required-projects:
- - openstack/requirements
-
-- job:
- name: ceilometer-tox-py27-mysql
- parent: legacy-base
- run: playbooks/legacy/ceilometer-tox-py27-mysql/run
- post-run: playbooks/legacy/ceilometer-tox-py27-mysql/post
- timeout: 2400
- required-projects:
- - openstack/requirements
-
-- job:
- name: ceilometer-tox-py27-postgresql
- parent: legacy-base
- run: playbooks/legacy/ceilometer-tox-py27-postgresql/run
- post-run: playbooks/legacy/ceilometer-tox-py27-postgresql/post
- timeout: 2400
- required-projects:
- - openstack/requirements
-
-- job:
name: grenade-dsvm-ceilometer
parent: legacy-dsvm-base
run: playbooks/legacy/grenade-dsvm-ceilometer/run
@@ -76,16 +29,6 @@
name: openstack/ceilometer
check:
jobs:
- - ceilometer-dsvm-functional-mongodb:
- branches: ^stable/newton$
- - ceilometer-dsvm-functional-mysql:
- branches: ^stable/newton$
- - ceilometer-tox-py27-mongodb:
- branches: ^(?!stable/newton)
- - ceilometer-tox-py27-mysql:
- branches: ^(?!stable/newton)
- - ceilometer-tox-py27-postgresql:
- branches: ^(?!stable/newton)
- grenade-dsvm-ceilometer:
branches: ^(?!stable/newton).*$
irrelevant-files:
@@ -94,16 +37,6 @@
- telemetry-dsvm-integration-ceilometer
gate:
jobs:
- - ceilometer-dsvm-functional-mongodb:
- branches: ^stable/newton$
- - ceilometer-dsvm-functional-mysql:
- branches: ^stable/newton$
- - ceilometer-tox-py27-mongodb:
- branches: ^(?!stable/newton)
- - ceilometer-tox-py27-mysql:
- branches: ^(?!stable/newton)
- - ceilometer-tox-py27-postgresql:
- branches: ^(?!stable/newton)
- grenade-dsvm-ceilometer:
branches: ^(?!stable/newton).*$
irrelevant-files:
diff --git a/bindep.txt b/bindep.txt
index a570cc63..9dcccdb9 100644
--- a/bindep.txt
+++ b/bindep.txt
@@ -1,11 +1,6 @@
-libpq-dev [platform:dpkg]
libxml2-dev [platform:dpkg test]
libxslt-devel [platform:rpm test]
libxslt1-dev [platform:dpkg test]
-postgresql [platform:dpkg]
-mysql-client [platform:dpkg]
-mysql-server [platform:dpkg]
build-essential [platform:dpkg]
libffi-dev [platform:dpkg]
-mongodb [platform:dpkg]
gettext [platform:dpkg]
diff --git a/ceilometer/cmd/storage.py b/ceilometer/cmd/storage.py
index 22d87547..70703d39 100644
--- a/ceilometer/cmd/storage.py
+++ b/ceilometer/cmd/storage.py
@@ -16,13 +16,9 @@
from oslo_config import cfg
from oslo_log import log
-from six import moves
-import six.moves.urllib.parse as urlparse
-import sqlalchemy as sa
import tenacity
from ceilometer import service
-from ceilometer import storage
LOG = log.getLogger(__name__)
@@ -30,9 +26,6 @@ LOG = log.getLogger(__name__)
def upgrade():
conf = cfg.ConfigOpts()
conf.register_cli_opts([
- cfg.BoolOpt('skip-metering-database',
- help='Skip metering database upgrade.',
- default=False),
cfg.BoolOpt('skip-gnocchi-resource-types',
help='Skip gnocchi resource-types upgrade.',
default=False),
@@ -43,19 +36,6 @@ def upgrade():
])
service.prepare_service(conf=conf)
- if conf.skip_metering_database:
- LOG.info("Skipping metering database upgrade")
- else:
-
- url = (getattr(conf.database, 'metering_connection') or
- conf.database.connection)
- if url:
- LOG.debug("Upgrading metering database")
- storage.get_connection(conf, url).upgrade()
- else:
- LOG.info("Skipping metering database upgrade, "
- "legacy database backend not configured.")
-
if conf.skip_gnocchi_resource_types:
LOG.info("Skipping Gnocchi resource types upgrade")
else:
@@ -75,96 +55,3 @@ def upgrade():
exceptions.SSLError,
))
)(gnocchi_client.upgrade_resource_types, conf)
-
-
-def expirer():
- conf = service.prepare_service()
-
- if conf.database.metering_time_to_live > 0:
- LOG.debug("Clearing expired metering data")
- storage_conn = storage.get_connection_from_config(conf)
- storage_conn.clear_expired_metering_data(
- conf.database.metering_time_to_live)
- else:
- LOG.info("Nothing to clean, database metering time to live "
- "is disabled")
-
-
-def db_clean_legacy():
- conf = cfg.ConfigOpts()
- conf.register_cli_opts([
- cfg.strOpt('confirm-drop-table',
- short='n',
- help='confirm to drop the legacy tables')])
- if not conf.confirm_drop_table:
- confirm = moves.input("Do you really want to drop the legacy "
- "alarm and event tables? This will destroy "
- "data definitively if it exist. Please type "
- "'YES' to confirm: ")
- if confirm != 'YES':
- print("DB legacy cleanup aborted!")
- return
-
- service.prepare_service(conf=conf)
-
- url = (getattr(conf.database, "metering_connection") or
- conf.database.connection)
- parsed = urlparse.urlparse(url)
-
- if parsed.password:
- masked_netloc = '****'.join(parsed.netloc.rsplit(parsed.password))
- masked_url = parsed._replace(netloc=masked_netloc)
- masked_url = urlparse.urlunparse(masked_url)
- else:
- masked_url = url
- LOG.info('Starting to drop event, alarm and alarm history tables in '
- 'backend: %s', masked_url)
-
- connection_scheme = parsed.scheme
- conn = storage.get_connection_from_config(conf)
- if connection_scheme in ('mysql', 'mysql+pymysql', 'postgresql',
- 'sqlite'):
- engine = conn._engine_facade.get_engine()
- meta = sa.MetaData(bind=engine)
- for table_name in ('alarm', 'alarm_history',
- 'trait_text', 'trait_int',
- 'trait_float', 'trait_datetime',
- 'event', 'event_type'):
- if engine.has_table(table_name):
- table = sa.Table(table_name, meta, autoload=True)
- table.drop()
- LOG.info("Legacy %s table of SQL backend has been "
- "dropped.", table_name)
- else:
- LOG.info('%s table does not exist.', table_name)
-
- elif connection_scheme == 'hbase':
- with conn.conn_pool.connection() as h_conn:
- tables = h_conn.tables()
- table_name_mapping = {'alarm': 'alarm',
- 'alarm_h': 'alarm history',
- 'event': 'event'}
- for table_name in ('alarm', 'alarm_h', 'event'):
- try:
- if table_name in tables:
- h_conn.disable_table(table_name)
- h_conn.delete_table(table_name)
- LOG.info("Legacy %s table of Hbase backend "
- "has been dropped.",
- table_name_mapping[table_name])
- else:
- LOG.info('%s table does not exist.',
- table_name_mapping[table_name])
- except Exception as e:
- LOG.error('Error occurred while dropping alarm '
- 'tables of Hbase, %s', e)
-
- elif connection_scheme == 'mongodb':
- for table_name in ('alarm', 'alarm_history', 'event'):
- if table_name in conn.db.conn.collection_names():
- conn.db.conn.drop_collection(table_name)
- LOG.info("Legacy %s table of Mongodb backend has been "
- "dropped.", table_name)
- else:
- LOG.info('%s table does not exist.', table_name)
- LOG.info('Legacy alarm and event tables cleanup done.')
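Note on the surviving hunk above: with the metering-database branch removed, upgrade() now only creates the Gnocchi resource types, wrapping that call in a tenacity retry loop. A minimal sketch of the same retry pattern follows; the callable, the retry count, and the conf dict are illustrative stand-ins, not the real gnocchi_client.upgrade_resource_types or configuration.

    import tenacity

    def upgrade_resource_types(conf):
        # Placeholder for the real Gnocchi resource-type upgrade call.
        print("upgrading resource types with", conf)

    retries = 3  # illustrative; the real value comes from configuration
    tenacity.Retrying(
        # Stop after the configured number of attempts, or retry forever
        # when the count is negative; re-raise the last error when exhausted.
        stop=(tenacity.stop_after_attempt(retries) if retries >= 0
              else tenacity.stop_never),
        wait=tenacity.wait_fixed(10),
        reraise=True,
    )(upgrade_resource_types, {"gnocchi": "conf stand-in"})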
diff --git a/ceilometer/dispatcher/__init__.py b/ceilometer/dispatcher/__init__.py
deleted file mode 100644
index bc86c73e..00000000
--- a/ceilometer/dispatcher/__init__.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# Copyright 2013 IBM
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import abc
-
-from oslo_config import cfg
-from oslo_log import log
-import six
-from stevedore import named
-
-
-LOG = log.getLogger(__name__)
-
-OPTS = [
- cfg.MultiStrOpt('meter_dispatchers',
- deprecated_name='dispatcher',
- default=[],
- deprecated_for_removal=True,
- deprecated_reason='This option only be used in collector '
- 'service, the collector service has '
- 'been deprecated and will be removed '
- 'in the future, this should also be '
- 'deprecated for removal with collector '
- 'service.',
- help='Dispatchers to process metering data.'),
- cfg.MultiStrOpt('event_dispatchers',
- default=[],
- deprecated_name='dispatcher',
- deprecated_for_removal=True,
- deprecated_reason='This option only be used in collector '
- 'service, the collector service has '
- 'been deprecated and will be removed '
- 'in the future, this should also be '
- 'deprecated for removal with collector '
- 'service.',
- help='Dispatchers to process event data.'),
-]
-
-
-def _load_dispatcher_manager(conf, dispatcher_type):
- namespace = 'ceilometer.dispatcher.%s' % dispatcher_type
- conf_name = '%s_dispatchers' % dispatcher_type
-
- LOG.debug('loading dispatchers from %s', namespace)
- # set propagate_map_exceptions to True to enable stevedore
- # to propagate exceptions.
- dispatcher_manager = named.NamedExtensionManager(
- namespace=namespace,
- names=getattr(conf, conf_name),
- invoke_on_load=True,
- invoke_args=[conf],
- propagate_map_exceptions=True)
- if not list(dispatcher_manager):
- LOG.warning('Failed to load any dispatchers for %s',
- namespace)
- return dispatcher_manager
-
-
-def load_dispatcher_manager(conf):
- return (_load_dispatcher_manager(conf, 'meter'),
- _load_dispatcher_manager(conf, 'event'))
-
-
-class Base(object):
- def __init__(self, conf):
- self.conf = conf
-
-
-@six.add_metaclass(abc.ABCMeta)
-class MeterDispatcherBase(Base):
- @abc.abstractmethod
- def record_metering_data(self, data):
- """Recording metering data interface."""
-
-
-@six.add_metaclass(abc.ABCMeta)
-class EventDispatcherBase(Base):
- @abc.abstractmethod
- def record_events(self, events):
- """Record events."""
diff --git a/ceilometer/dispatcher/database.py b/ceilometer/dispatcher/database.py
deleted file mode 100644
index c7f63977..00000000
--- a/ceilometer/dispatcher/database.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-# Copyright 2013 IBM Corp
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log
-from oslo_utils import timeutils
-
-from ceilometer import dispatcher
-from ceilometer import storage
-
-LOG = log.getLogger(__name__)
-
-
-class MeterDatabaseDispatcher(dispatcher.MeterDispatcherBase):
- """Dispatcher class for recording metering data into database.
-
- The dispatcher class which records each meter into a database configured
- in ceilometer configuration file.
-
- To enable this dispatcher, the following section needs to be present in
- ceilometer.conf file
-
- [DEFAULT]
- meter_dispatchers = database
- """
-
- @property
- def conn(self):
- if not hasattr(self, "_conn"):
- self._conn = storage.get_connection_from_config(
- self.conf)
- return self._conn
-
- def record_metering_data(self, data):
- # We may have receive only one counter on the wire
- if not data:
- return
- if not isinstance(data, list):
- data = [data]
-
- for meter in data:
- LOG.debug(
- 'metering data %(counter_name)s '
- 'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s',
- {'counter_name': meter['counter_name'],
- 'resource_id': meter['resource_id'],
- 'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
- 'counter_volume': meter['counter_volume']})
- # Convert the timestamp to a datetime instance.
- # Storage engines are responsible for converting
- # that value to something they can store.
- if meter.get('timestamp'):
- ts = timeutils.parse_isotime(meter['timestamp'])
- meter['timestamp'] = timeutils.normalize_time(ts)
- try:
- self.conn.record_metering_data_batch(data)
- except Exception as err:
- LOG.error('Failed to record %(len)s: %(err)s.',
- {'len': len(data), 'err': err})
- raise
diff --git a/ceilometer/event/models.py b/ceilometer/event/models.py
index 7b4b3b9b..2232cf2d 100644
--- a/ceilometer/event/models.py
+++ b/ceilometer/event/models.py
@@ -15,15 +15,39 @@
from oslo_utils import timeutils
import six
-from ceilometer.storage import base
-
def serialize_dt(value):
"""Serializes parameter if it is datetime."""
return value.isoformat() if hasattr(value, 'isoformat') else value
-class Event(base.Model):
+class Model(object):
+ """Base class for storage API models."""
+
+ def __init__(self, **kwds):
+ self.fields = list(kwds)
+ for k, v in six.iteritems(kwds):
+ setattr(self, k, v)
+
+ def as_dict(self):
+ d = {}
+ for f in self.fields:
+ v = getattr(self, f)
+ if isinstance(v, Model):
+ v = v.as_dict()
+ elif isinstance(v, list) and v and isinstance(v[0], Model):
+ v = [sub.as_dict() for sub in v]
+ d[f] = v
+ return d
+
+ def __eq__(self, other):
+ return self.as_dict() == other.as_dict()
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+
+class Event(Model):
"""A raw event from the source system. Events have Traits.
Metrics will be derived from one or more Events.
@@ -45,8 +69,8 @@ class Event(base.Model):
:param traits: list of Traits on this Event.
:param raw: Unindexed raw notification details.
"""
- base.Model.__init__(self, message_id=message_id, event_type=event_type,
- generated=generated, traits=traits, raw=raw)
+ Model.__init__(self, message_id=message_id, event_type=event_type,
+ generated=generated, traits=traits, raw=raw)
def append_trait(self, trait_model):
self.traits.append(trait_model)
@@ -67,7 +91,7 @@ class Event(base.Model):
'raw': self.raw}
-class Trait(base.Model):
+class Trait(Model):
"""A Trait is a key/value pair of data on an Event.
The value is variant record of basic data types (int, date, float, etc).
@@ -90,7 +114,7 @@ class Trait(base.Model):
def __init__(self, name, dtype, value):
if not dtype:
dtype = Trait.NONE_TYPE
- base.Model.__init__(self, name=name, dtype=dtype, value=value)
+ Model.__init__(self, name=name, dtype=dtype, value=value)
def __repr__(self):
return "<Trait: %s %d %s>" % (self.name, self.dtype, self.value)
diff --git a/ceilometer/opts.py b/ceilometer/opts.py
index 34fe4d14..de14ada9 100644
--- a/ceilometer/opts.py
+++ b/ceilometer/opts.py
@@ -23,7 +23,6 @@ import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.utils
import ceilometer.compute.virt.vmware.inspector
import ceilometer.compute.virt.xenapi.inspector
-import ceilometer.dispatcher
import ceilometer.event.converter
import ceilometer.hardware.discovery
import ceilometer.hardware.pollsters.generic
@@ -42,7 +41,6 @@ import ceilometer.pipeline
import ceilometer.publisher.messaging
import ceilometer.publisher.utils
import ceilometer.sample
-import ceilometer.storage
import ceilometer.utils
import ceilometer.volume.discovery
@@ -75,7 +73,6 @@ def list_opts():
itertools.chain(ceilometer.agent.manager.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.utils.OPTS,
- ceilometer.dispatcher.OPTS,
ceilometer.objectstore.swift.OPTS,
ceilometer.pipeline.OPTS,
ceilometer.sample.OPTS,
@@ -96,7 +93,6 @@ def list_opts():
help='Number of seconds between checks to see if group '
'membership has changed'),
]),
- ('database', ceilometer.storage.OPTS),
('dispatcher_gnocchi', (
cfg.StrOpt(
'filter_project',
diff --git a/ceilometer/publisher/direct.py b/ceilometer/publisher/direct.py
deleted file mode 100644
index 657dac95..00000000
--- a/ceilometer/publisher/direct.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Copyright 2015 Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_log import log
-import six.moves.urllib.parse as urlparse
-from stevedore import driver
-import stevedore.exception
-
-from ceilometer import publisher
-from ceilometer.publisher import utils
-
-LOG = log.getLogger(__name__)
-
-
-class DirectPublisher(publisher.ConfigPublisherBase):
- """A publisher that allows saving directly from the pipeline.
-
- Samples are saved to a configured dispatcher. This is useful
- where it is desirable to limit the number of external services that
- are required.
-
- By default, the database dispatcher is used to select another one we
- can use direct://?dispatcher=name_of_dispatcher, ...
- """
- def __init__(self, conf, parsed_url):
- super(DirectPublisher, self).__init__(conf, parsed_url)
- default_dispatcher = parsed_url.scheme
- if default_dispatcher == 'direct':
- LOG.warning('Direct publisher is deprecated for removal. Use '
- 'an explicit publisher instead, e.g. '
- '"database", "file", ...')
- default_dispatcher = 'database'
- options = urlparse.parse_qs(parsed_url.query)
- self.dispatcher_name = options.get('dispatcher',
- [default_dispatcher])[-1]
- self._sample_dispatcher = None
- self._event_dispatcher = None
-
- try:
- self.sample_driver = driver.DriverManager(
- 'ceilometer.dispatcher.meter', self.dispatcher_name).driver
- except stevedore.exception.NoMatches:
- self.sample_driver = None
-
- try:
- self.event_driver = driver.DriverManager(
- 'ceilometer.dispatcher.event', self.dispatcher_name).driver
- except stevedore.exception.NoMatches:
- self.event_driver = None
-
- def get_sample_dispatcher(self):
- if not self._sample_dispatcher:
- self._sample_dispatcher = self.sample_driver(self.conf)
- return self._sample_dispatcher
-
- def get_event_dispatcher(self):
- if not self._event_dispatcher:
- if self.event_driver != self.sample_driver:
- self._event_dispatcher = self.event_driver(self.conf)
- else:
- self._event_dispatcher = self.get_sample_dispatcher()
- return self._event_dispatcher
-
- def publish_samples(self, samples):
- if not self.sample_driver:
- LOG.error("Can't publish samples to a non-existing dispatcher "
- "'%s'", self.dispatcher_name)
- return
-
- if not isinstance(samples, list):
- samples = [samples]
- # not published externally; skip signing
- self.get_sample_dispatcher().record_metering_data([
- utils.meter_message_from_counter(sample, secret=None)
- for sample in samples])
-
- def publish_events(self, events):
- if not self.event_driver:
- LOG.error("Can't publish events to a non-existing dispatcher "
- "'%s'", self.dispatcher_name)
- return
-
- if not isinstance(events, list):
- events = [events]
- # not published externally; skip signing
- self.get_event_dispatcher().record_events([
- utils.message_from_event(event, secret=None) for event in events])
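The deleted DirectPublisher above resolved its dispatcher from the publisher URL (direct://?dispatcher=name_of_dispatcher), falling back to the database dispatcher for a bare direct:// scheme. A small sketch of just that URL-to-name resolution; the helper name below is ours, not anything from the tree.

    import six.moves.urllib.parse as urlparse

    def dispatcher_name_from_url(url):
        # A bare direct:// scheme fell back to the database dispatcher,
        # mirroring the deprecation warning in the removed class.
        parsed = urlparse.urlparse(url)
        default = 'database' if parsed.scheme == 'direct' else parsed.scheme
        options = urlparse.parse_qs(parsed.query)
        # The last occurrence of ?dispatcher= wins, as in the removed code.
        return options.get('dispatcher', [default])[-1]

    assert dispatcher_name_from_url('direct://?dispatcher=file') == 'file'
    assert dispatcher_name_from_url('direct://') == 'database'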
diff --git a/ceilometer/service.py b/ceilometer/service.py
index dca96d4e..dea79c61 100644
--- a/ceilometer/service.py
+++ b/ceilometer/service.py
@@ -15,7 +15,6 @@
import sys
from oslo_config import cfg
-from oslo_db import options as db_options
import oslo_i18n
from oslo_log import log
from oslo_reports import guru_meditation_report as gmr
@@ -45,7 +44,6 @@ def prepare_service(argv=None, config_files=None, conf=None):
['futurist=INFO', 'neutronclient=INFO',
'keystoneclient=INFO'])
log.set_defaults(default_log_levels=log_levels)
- db_options.set_defaults(conf)
conf(argv[1:], project='ceilometer', validate_default_values=True,
version=version.version_info.version_string(),
diff --git a/ceilometer/storage/__init__.py b/ceilometer/storage/__init__.py
deleted file mode 100644
index ee52ec53..00000000
--- a/ceilometer/storage/__init__.py
+++ /dev/null
@@ -1,147 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Storage backend management
-"""
-
-from oslo_config import cfg
-from oslo_log import log
-import six.moves.urllib.parse as urlparse
-from stevedore import driver
-import tenacity
-
-from ceilometer import utils
-
-
-LOG = log.getLogger(__name__)
-
-
-OPTS = [
- cfg.IntOpt('metering_time_to_live',
- default=-1,
- help="Number of seconds that samples are kept "
- "in the database for (<= 0 means forever).",
- deprecated_opts=[cfg.DeprecatedOpt('time_to_live',
- 'database')]),
- cfg.StrOpt('metering_connection',
- secret=True,
- help='The connection string used to connect to the metering '
- 'database. (if unset, connection is used)'),
- cfg.BoolOpt('sql_expire_samples_only',
- default=False,
- help="Indicates if expirer expires only samples. If set true,"
- " expired samples will be deleted, but residual"
- " resource and meter definition data will remain."),
-]
-
-
-class StorageUnknownWriteError(Exception):
- """Error raised when an unknown error occurs while recording."""
-
-
-class StorageBadVersion(Exception):
- """Error raised when the storage backend version is not good enough."""
-
-
-class StorageBadAggregate(Exception):
- """Error raised when an aggregate is unacceptable to storage backend."""
- code = 400
-
-
-def get_connection_from_config(conf):
- retries = conf.database.max_retries
-
- @tenacity.retry(
- wait=tenacity.wait_fixed(conf.database.retry_interval),
- stop=(tenacity.stop_after_attempt(retries) if retries >= 0
- else tenacity.stop_never),
- reraise=True)
- def _inner():
- url = (getattr(conf.database, 'metering_connection') or
- conf.database.connection)
- return get_connection(conf, url)
-
- return _inner()
-
-
-def get_connection(conf, url):
- """Return an open connection to the database."""
- connection_scheme = urlparse.urlparse(url).scheme
- # SqlAlchemy connections specify may specify a 'dialect' or
- # 'dialect+driver'. Handle the case where driver is specified.
- engine_name = connection_scheme.split('+')[0]
- namespace = 'ceilometer.metering.storage'
- # NOTE: translation not applied bug #1446983
- LOG.debug('looking for %(name)r driver in %(namespace)r',
- {'name': engine_name, 'namespace': namespace})
- mgr = driver.DriverManager(namespace, engine_name)
- return mgr.driver(conf, url)
-
-
-class SampleFilter(object):
- """Holds the properties for building a query from a meter/sample filter.
-
- :param user: The sample owner.
- :param project: The sample project.
- :param start_timestamp: Earliest time point in the request.
- :param start_timestamp_op: Earliest timestamp operation in the request.
- :param end_timestamp: Latest time point in the request.
- :param end_timestamp_op: Latest timestamp operation in the request.
- :param resource: Optional filter for resource id.
- :param meter: Optional filter for meter type using the meter name.
- :param source: Optional source filter.
- :param message_id: Optional sample_id filter.
- :param metaquery: Optional filter on the metadata
- """
- def __init__(self, user=None, project=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- resource=None, meter=None,
- source=None, message_id=None,
- metaquery=None):
- self.user = user
- self.project = project
- self.start_timestamp = utils.sanitize_timestamp(start_timestamp)
- self.start_timestamp_op = start_timestamp_op
- self.end_timestamp = utils.sanitize_timestamp(end_timestamp)
- self.end_timestamp_op = end_timestamp_op
- self.resource = resource
- self.meter = meter
- self.source = source
- self.metaquery = metaquery or {}
- self.message_id = message_id
-
- def __repr__(self):
- return ("<SampleFilter(user: %s,"
- " project: %s,"
- " start_timestamp: %s,"
- " start_timestamp_op: %s,"
- " end_timestamp: %s,"
- " end_timestamp_op: %s,"
- " resource: %s,"
- " meter: %s,"
- " source: %s,"
- " metaquery: %s,"
- " message_id: %s)>" %
- (self.user,
- self.project,
- self.start_timestamp,
- self.start_timestamp_op,
- self.end_timestamp,
- self.end_timestamp_op,
- self.resource,
- self.meter,
- self.source,
- self.metaquery,
- self.message_id))
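The removed get_connection() above dispatched to a storage driver through stevedore: the URL scheme, minus any '+driver' suffix (e.g. mysql+pymysql), named an entry point in the ceilometer.metering.storage namespace. A hedged sketch of that lookup; the helper name and example URL are ours, and the entry points themselves are deleted by this very commit.

    import six.moves.urllib.parse as urlparse
    from stevedore import driver

    def load_storage_driver(conf, url):
        # 'mysql+pymysql://...' and similar dialect+driver URLs map to the
        # plain dialect name registered as the stevedore entry point.
        engine_name = urlparse.urlparse(url).scheme.split('+')[0]
        mgr = driver.DriverManager('ceilometer.metering.storage', engine_name)
        return mgr.driver(conf, url)

    # Illustrative call only; it would raise NoMatches once the drivers are gone.
    # conn = load_storage_driver(conf, 'mysql+pymysql://user:pw@host/ceilometer')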
diff --git a/ceilometer/storage/base.py b/ceilometer/storage/base.py
deleted file mode 100644
index 09bc526e..00000000
--- a/ceilometer/storage/base.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Base classes for storage engines
-"""
-
-import datetime
-import inspect
-import math
-
-from oslo_utils import timeutils
-import six
-from six import moves
-
-import ceilometer
-
-
-def iter_period(start, end, period):
- """Split a time from start to end in periods of a number of seconds.
-
- This function yields the (start, end) time for each period composing the
- time passed as argument.
-
- :param start: When the period set start.
- :param end: When the period end starts.
- :param period: The duration of the period.
- """
- period_start = start
- increment = datetime.timedelta(seconds=period)
- for i in moves.xrange(int(math.ceil(
- timeutils.delta_seconds(start, end)
- / float(period)))):
- next_start = period_start + increment
- yield (period_start, next_start)
- period_start = next_start
-
-
-def _handle_sort_key(model_name, sort_key=None):
- """Generate sort keys according to the passed in sort key from user.
-
- :param model_name: Database model name be query.(meter, etc.)
- :param sort_key: sort key passed from user.
- return: sort keys list
- """
- sort_keys_extra = {'meter': ['user_id', 'project_id'],
- 'resource': ['user_id', 'project_id', 'timestamp'],
- }
-
- sort_keys = sort_keys_extra[model_name]
- if not sort_key:
- return sort_keys
- # NOTE(Fengqian): We need to put the sort key from user
- # in the first place of sort keys list.
- try:
- sort_keys.remove(sort_key)
- except ValueError:
- pass
- finally:
- sort_keys.insert(0, sort_key)
- return sort_keys
-
-
-class Model(object):
- """Base class for storage API models."""
-
- def __init__(self, **kwds):
- self.fields = list(kwds)
- for k, v in six.iteritems(kwds):
- setattr(self, k, v)
-
- def as_dict(self):
- d = {}
- for f in self.fields:
- v = getattr(self, f)
- if isinstance(v, Model):
- v = v.as_dict()
- elif isinstance(v, list) and v and isinstance(v[0], Model):
- v = [sub.as_dict() for sub in v]
- d[f] = v
- return d
-
- def __eq__(self, other):
- return self.as_dict() == other.as_dict()
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- @classmethod
- def get_field_names(cls):
- fields = inspect.getargspec(cls.__init__)[0]
- return set(fields) - set(["self"])
-
-
-class Connection(object):
- """Base class for storage system connections."""
-
- # A dictionary representing the capabilities of this driver.
- CAPABILITIES = {
- 'meters': {'query': {'simple': False,
- 'metadata': False}},
- 'resources': {'query': {'simple': False,
- 'metadata': False}},
- 'samples': {'query': {'simple': False,
- 'metadata': False,
- 'complex': False}},
- 'statistics': {'groupby': False,
- 'query': {'simple': False,
- 'metadata': False},
- 'aggregation': {'standard': False,
- 'selectable': {
- 'max': False,
- 'min': False,
- 'sum': False,
- 'avg': False,
- 'count': False,
- 'stddev': False,
- 'cardinality': False}}
- },
- }
-
- STORAGE_CAPABILITIES = {
- 'storage': {'production_ready': False},
- }
-
- def __init__(self, conf, url):
- self.conf = conf
-
- @staticmethod
- def upgrade():
- """Migrate the database to `version` or the most recent version."""
-
- def record_metering_data_batch(self, samples):
- """Record the metering data in batch"""
- for s in samples:
- self.record_metering_data(s)
-
- @staticmethod
- def record_metering_data(data):
- """Write the data to the backend storage system.
-
- :param data: a dictionary such as returned by
- ceilometer.publisher.utils.meter_message_from_counter
-
- All timestamps must be naive utc datetime object.
- """
- raise ceilometer.NotImplementedError(
- 'Recording metering data is not implemented')
-
- @staticmethod
- def clear_expired_metering_data(ttl):
- """Clear expired data from the backend storage system.
-
- Clearing occurs according to the time-to-live.
-
- :param ttl: Number of seconds to keep records for.
- """
- raise ceilometer.NotImplementedError(
- 'Clearing samples not implemented')
-
- @staticmethod
- def get_resources(user=None, project=None, source=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- metaquery=None, resource=None, limit=None):
- """Return an iterable of models.Resource instances.
-
- Iterable items containing resource information.
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param source: Optional source filter.
- :param start_timestamp: Optional modified timestamp start range.
- :param start_timestamp_op: Optional timestamp start range operation.
- :param end_timestamp: Optional modified timestamp end range.
- :param end_timestamp_op: Optional timestamp end range operation.
- :param metaquery: Optional dict with metadata to match on.
- :param resource: Optional resource filter.
- :param limit: Maximum number of results to return.
- """
- raise ceilometer.NotImplementedError('Resources not implemented')
-
- @staticmethod
- def get_meters(user=None, project=None, resource=None, source=None,
- metaquery=None, limit=None, unique=False):
- """Return an iterable of model.Meter instances.
-
- Iterable items containing meter information.
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param resource: Optional resource filter.
- :param source: Optional source filter.
- :param metaquery: Optional dict with metadata to match on.
- :param limit: Maximum number of results to return.
- :param unique: If set to true, return only unique meter information.
- """
- raise ceilometer.NotImplementedError('Meters not implemented')
-
- @staticmethod
- def get_samples(sample_filter, limit=None):
- """Return an iterable of model.Sample instances.
-
- :param sample_filter: Filter.
- :param limit: Maximum number of results to return.
- """
- raise ceilometer.NotImplementedError('Samples not implemented')
-
- @staticmethod
- def get_meter_statistics(sample_filter, period=None, groupby=None,
- aggregate=None):
- """Return an iterable of model.Statistics instances.
-
- The filter must have a meter value set.
- """
- raise ceilometer.NotImplementedError('Statistics not implemented')
-
- @staticmethod
- def clear():
- """Clear database."""
-
- @staticmethod
- def query_samples(filter_expr=None, orderby=None, limit=None):
- """Return an iterable of model.Sample objects.
-
- :param filter_expr: Filter expression for query.
- :param orderby: List of field name and direction pairs for order by.
- :param limit: Maximum number of results to return.
- """
-
- raise ceilometer.NotImplementedError('Complex query for samples '
- 'is not implemented.')
-
- @classmethod
- def get_capabilities(cls):
- """Return an dictionary with the capabilities of each driver."""
- return cls.CAPABILITIES
-
- @classmethod
- def get_storage_capabilities(cls):
- """Return a dictionary representing the performance capabilities.
-
- This is needed to evaluate the performance of each driver.
- """
- return cls.STORAGE_CAPABILITIES
diff --git a/ceilometer/storage/hbase/__init__.py b/ceilometer/storage/hbase/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/storage/hbase/__init__.py
+++ /dev/null
diff --git a/ceilometer/storage/hbase/base.py b/ceilometer/storage/hbase/base.py
deleted file mode 100644
index e815975b..00000000
--- a/ceilometer/storage/hbase/base.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-import happybase
-from oslo_log import log
-from oslo_utils import netutils
-from six.moves.urllib import parse as urlparse
-
-from ceilometer.storage.hbase import inmemory as hbase_inmemory
-
-LOG = log.getLogger(__name__)
-
-
-class Connection(object):
- """Base connection class for HBase."""
-
- _memory_instance = None
-
- def __init__(self, conf, url):
- super(Connection, self).__init__(conf, url)
- """Hbase Connection Initialization."""
- opts = self._parse_connection_url(url)
-
- if opts['host'] == '__test__':
- url = os.environ.get('CEILOMETER_TEST_HBASE_URL')
- if url:
- # Reparse URL, but from the env variable now
- opts = self._parse_connection_url(url)
- self.conn_pool = self._get_connection_pool(opts)
- else:
- # This is a in-memory usage for unit tests
- if Connection._memory_instance is None:
- LOG.debug('Creating a new in-memory HBase '
- 'Connection object')
- Connection._memory_instance = (hbase_inmemory.
- MConnectionPool())
- self.conn_pool = Connection._memory_instance
- else:
- self.conn_pool = self._get_connection_pool(opts)
-
- @staticmethod
- def _get_connection_pool(conf):
- """Return a connection pool to the database.
-
- .. note::
-
- The tests use a subclass to override this and return an
- in-memory connection pool.
- """
- LOG.debug('connecting to HBase on %(host)s:%(port)s',
- {'host': conf['host'], 'port': conf['port']})
- return happybase.ConnectionPool(
- size=100, host=conf['host'], port=conf['port'],
- table_prefix=conf['table_prefix'],
- table_prefix_separator=conf['table_prefix_separator'])
-
- @staticmethod
- def _parse_connection_url(url):
- """Parse connection parameters from a database url.
-
- .. note::
-
- HBase Thrift does not support authentication and there is no
- database name, so we are not looking for these in the url.
- """
- opts = {}
- result = netutils.urlsplit(url)
- opts['table_prefix'] = urlparse.parse_qs(
- result.query).get('table_prefix', [None])[0]
- opts['table_prefix_separator'] = urlparse.parse_qs(
- result.query).get('table_prefix_separator', ['_'])[0]
- opts['dbtype'] = result.scheme
- if ':' in result.netloc:
- opts['host'], port = result.netloc.split(':')
- else:
- opts['host'] = result.netloc
- port = 9090
- opts['port'] = port and int(port) or 9090
- return opts
diff --git a/ceilometer/storage/hbase/inmemory.py b/ceilometer/storage/hbase/inmemory.py
deleted file mode 100644
index 50941126..00000000
--- a/ceilometer/storage/hbase/inmemory.py
+++ /dev/null
@@ -1,281 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""This is a very crude version of "in-memory HBase", which implements just
- enough functionality of HappyBase API to support testing of our driver.
-"""
-
-import copy
-import re
-
-from oslo_log import log
-import six
-
-import ceilometer
-
-LOG = log.getLogger(__name__)
-
-
-class MTable(object):
- """HappyBase.Table mock."""
- def __init__(self, name, families):
- self.name = name
- self.families = families
- self._rows_with_ts = {}
-
- def row(self, key, columns=None):
- if key not in self._rows_with_ts:
- return {}
- res = copy.copy(sorted(six.iteritems(
- self._rows_with_ts.get(key)))[-1][1])
- if columns:
- keys = res.keys()
- for key in keys:
- if key not in columns:
- res.pop(key)
- return res
-
- def rows(self, keys):
- return ((k, self.row(k)) for k in keys)
-
- def put(self, key, data, ts=None):
- # Note: Now we use 'timestamped' but only for one Resource table.
- # That's why we may put ts='0' in case when ts is None. If it is
- # needed to use 2 types of put in one table ts=0 cannot be used.
- if ts is None:
- ts = "0"
- if key not in self._rows_with_ts:
- self._rows_with_ts[key] = {ts: data}
- else:
- if ts in self._rows_with_ts[key]:
- self._rows_with_ts[key][ts].update(data)
- else:
- self._rows_with_ts[key].update({ts: data})
-
- def delete(self, key):
- del self._rows_with_ts[key]
-
- def _get_latest_dict(self, row):
- # The idea here is to return latest versions of columns.
- # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}.
- # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})]
- # sorted by ts, i.e. in this list ts_2 is the most latest.
- # To get result as HBase provides we should iterate in reverse order
- # and get from "latest" data only key-values that are not in newer data
- data = {}
- for i in sorted(six.iteritems(self._rows_with_ts[row])):
- data.update(i[1])
- return data
-
- def scan(self, filter=None, columns=None, row_start=None, row_stop=None,
- limit=None):
- columns = columns or []
- sorted_keys = sorted(self._rows_with_ts)
- # copy data between row_start and row_stop into a dict
- rows = {}
- for row in sorted_keys:
- if row_start and row < row_start:
- continue
- if row_stop and row > row_stop:
- break
- rows[row] = self._get_latest_dict(row)
-
- if columns:
- ret = {}
- for row, data in six.iteritems(rows):
- for key in data:
- if key in columns:
- ret[row] = data
- rows = ret
- if filter:
- # TODO(jdanjou): we should really parse this properly,
- # but at the moment we are only going to support AND here
- filters = filter.split('AND')
- for f in filters:
- # Extract filter name and its arguments
- g = re.search("(.*)\((.*),?\)", f)
- fname = g.group(1).strip()
- fargs = [s.strip().replace('\'', '')
- for s in g.group(2).split(',')]
- m = getattr(self, fname)
- if callable(m):
- # overwrite rows for filtering to take effect
- # in case of multiple filters
- rows = m(fargs, rows)
- else:
- raise ceilometer.NotImplementedError(
- "%s filter is not implemented, "
- "you may want to add it!")
- for k in sorted(rows)[:limit]:
- yield k, rows[k]
-
- @staticmethod
- def SingleColumnValueFilter(args, rows):
- """This is filter for testing "in-memory HBase".
-
- This method is called from scan() when 'SingleColumnValueFilter'
- is found in the 'filter' argument.
- """
- op = args[2]
- column = "%s:%s" % (args[0], args[1])
- value = args[3]
- if value.startswith('binary:'):
- value = value[7:]
- r = {}
- for row in rows:
- data = rows[row]
- if op == '=':
- if column in data and data[column] == value:
- r[row] = data
- elif op == '<':
- if column in data and data[column] < value:
- r[row] = data
- elif op == '<=':
- if column in data and data[column] <= value:
- r[row] = data
- elif op == '>':
- if column in data and data[column] > value:
- r[row] = data
- elif op == '>=':
- if column in data and data[column] >= value:
- r[row] = data
- elif op == '!=':
- if column in data and data[column] != value:
- r[row] = data
- return r
-
- @staticmethod
- def ColumnPrefixFilter(args, rows):
- """This is filter for testing "in-memory HBase".
-
- This method is called from scan() when 'ColumnPrefixFilter' is found
- in the 'filter' argument.
-
- :param args: a list of filter arguments, contain prefix of column
- :param rows: a dict of row prefixes for filtering
- """
- value = args[0]
- column = 'f:' + value
- r = {}
- for row, data in rows.items():
- column_dict = {}
- for key in data:
- if key.startswith(column):
- column_dict[key] = data[key]
- r[row] = column_dict
- return r
-
- @staticmethod
- def RowFilter(args, rows):
- """This is filter for testing "in-memory HBase".
-
- This method is called from scan() when 'RowFilter' is found in the
- 'filter' argument.
-
- :param args: a list of filter arguments, it contains operator and
- sought string
- :param rows: a dict of rows which are filtered
- """
- op = args[0]
- value = args[1]
- if value.startswith('regexstring:'):
- value = value[len('regexstring:'):]
- r = {}
- for row, data in rows.items():
- try:
- g = re.search(value, row).group()
- if op == '=':
- if g == row:
- r[row] = data
- else:
- raise ceilometer.NotImplementedError(
- "In-memory "
- "RowFilter doesn't support "
- "the %s operation yet" % op)
- except AttributeError:
- pass
- return r
-
- @staticmethod
- def QualifierFilter(args, rows):
- """This is filter for testing "in-memory HBase".
-
- This method is called from scan() when 'QualifierFilter' is found in
- the 'filter' argument
- """
- op = args[0]
- value = args[1]
- is_regex = False
- if value.startswith('binaryprefix:'):
- value = value[len('binaryprefix:'):]
- if value.startswith('regexstring:'):
- value = value[len('regexstring:'):]
- is_regex = True
- column = 'f:' + value
- r = {}
- for row in rows:
- data = rows[row]
- r_data = {}
- for key in data:
- if ((op == '=' and key.startswith(column)) or
- (op == '>=' and key >= column) or
- (op == '<=' and key <= column) or
- (op == '>' and key > column) or
- (op == '<' and key < column) or
- (is_regex and re.search(value, key))):
- r_data[key] = data[key]
- else:
- raise ceilometer.NotImplementedError(
- "In-memory QualifierFilter "
- "doesn't support the %s "
- "operation yet" % op)
- if r_data:
- r[row] = r_data
- return r
-
-
-class MConnectionPool(object):
- def __init__(self):
- self.conn = MConnection()
-
- def connection(self):
- return self.conn
-
-
-class MConnection(object):
- """HappyBase.Connection mock."""
- def __init__(self):
- self.tables = {}
-
- def __enter__(self, *args, **kwargs):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- pass
-
- @staticmethod
- def open():
- LOG.debug("Opening in-memory HBase connection")
-
- def create_table(self, n, families=None):
- families = families or {}
- if n in self.tables:
- return self.tables[n]
- t = MTable(n, families)
- self.tables[n] = t
- return t
-
- def delete_table(self, name, use_prefix=True):
- del self.tables[name]
-
- def table(self, name):
- return self.create_table(name)
diff --git a/ceilometer/storage/hbase/migration.py b/ceilometer/storage/hbase/migration.py
deleted file mode 100644
index 86a5253f..00000000
--- a/ceilometer/storage/hbase/migration.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""HBase storage backend migrations
-"""
-
-from ceilometer.storage.hbase import utils as hbase_utils
-
-
-def migrate_resource_table(conn, table):
- """Migrate table 'resource' in HBase.
-
- Change qualifiers format from "%s+%s+%s!%s!%s" %
- (rts, source, counter_name, counter_type,counter_unit)
- in columns with meters f:m_*
- to new separator format "%s:%s:%s:%s:%s" %
- (rts, source, counter_name, counter_type,counter_unit)
- """
- resource_table = conn.table(table)
- resource_filter = ("QualifierFilter(=, "
- "'regexstring:m_\\d{19}\\+"
- "[\\w-\\._]*\\+[\\w-\\._!]')")
- gen = resource_table.scan(filter=resource_filter)
- for row, data in gen:
- columns = []
- updated_columns = dict()
- column_prefix = "f:"
- for column, value in data.items():
- if column.startswith('f:m_'):
- columns.append(column)
- parts = column[2:].split("+", 2)
- parts.extend(parts.pop(2).split("!"))
- column = hbase_utils.prepare_key(*parts)
- updated_columns[column_prefix + column] = value
- resource_table.put(row, updated_columns)
- resource_table.delete(row, columns)
-
-
-def migrate_meter_table(conn, table):
- """Migrate table 'meter' in HBase.
-
- Change row format from "%s_%d_%s" % (counter_name, rts, message_signature)
- to new separator format "%s:%s:%s" % (counter_name, rts, message_signature)
- """
- meter_table = conn.table(table)
- meter_filter = ("RowFilter(=, "
- "'regexstring:[\\w\\._-]*_\\d{19}_\\w*')")
- gen = meter_table.scan(filter=meter_filter)
- for row, data in gen:
- parts = row.rsplit('_', 2)
- new_row = hbase_utils.prepare_key(*parts)
- meter_table.put(new_row, data)
- meter_table.delete(row)
-
-
-TABLE_MIGRATION_FUNCS = {'resource': migrate_resource_table,
- 'meter': migrate_meter_table}
-
-
-def migrate_tables(conn, tables):
- if type(tables) is not list:
- tables = [tables]
- for table in tables:
- if table in TABLE_MIGRATION_FUNCS:
- TABLE_MIGRATION_FUNCS.get(table)(conn, table)
diff --git a/ceilometer/storage/hbase/utils.py b/ceilometer/storage/hbase/utils.py
deleted file mode 100644
index d6f9718b..00000000
--- a/ceilometer/storage/hbase/utils.py
+++ /dev/null
@@ -1,448 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Various HBase helpers"""
-import copy
-import datetime
-import json
-
-import bson.json_util
-from happybase.hbase import ttypes
-from oslo_log import log
-import six
-
-from ceilometer.i18n import _
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='}
-# We need this additional dictionary because we have reverted timestamp in
-# row-keys for stored metrics
-OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<',
- 'ge': '<='}
-
-
-def _QualifierFilter(op, qualifier):
- return "QualifierFilter (%s, 'binaryprefix:m_%s')" % (op, qualifier)
-
-
-def timestamp(dt, reverse=True):
- """Timestamp is count of milliseconds since start of epoch.
-
- If reverse=True then timestamp will be reversed. Such a technique is used
- in HBase rowkey design when period queries are required. Because of the
- fact that rows are sorted lexicographically it's possible to vary whether
- the 'oldest' entries will be on top of the table or it should be the newest
- ones (reversed timestamp case).
-
- :param dt: datetime which is translated to timestamp
- :param reverse: a boolean parameter for reverse or straight count of
- timestamp in milliseconds
- :return: count or reversed count of milliseconds since start of epoch
- """
- epoch = datetime.datetime(1970, 1, 1)
- td = dt - epoch
- ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000
- return 0x7fffffffffffffff - ts if reverse else ts
-
-
-def make_timestamp_query(func, start=None, start_op=None, end=None,
- end_op=None, bounds_only=False, **kwargs):
- """Return a filter start and stop row for filtering and a query.
-
- Query is based on the fact that CF-name is 'rts'.
- :param start: Optional start timestamp
- :param start_op: Optional start timestamp operator, like gt, ge
- :param end: Optional end timestamp
- :param end_op: Optional end timestamp operator, like lt, le
- :param bounds_only: if True than query will not be returned
- :param func: a function that provide a format of row
- :param kwargs: kwargs for :param func
- """
- # We don't need to dump here because get_start_end_rts returns strings
- rts_start, rts_end = get_start_end_rts(start, end)
- start_row, end_row = func(rts_start, rts_end, **kwargs)
-
- if bounds_only:
- return start_row, end_row
-
- q = []
- start_op = start_op or 'ge'
- end_op = end_op or 'lt'
- if rts_start:
- q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
- (OP_SIGN_REV[start_op], rts_start))
- if rts_end:
- q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" %
- (OP_SIGN_REV[end_op], rts_end))
-
- res_q = None
- if len(q):
- res_q = " AND ".join(q)
-
- return start_row, end_row, res_q
-
-
-def get_start_end_rts(start, end):
-
- rts_start = str(timestamp(start)) if start else ""
- rts_end = str(timestamp(end)) if end else ""
- return rts_start, rts_end
-
-
-def make_query(metaquery=None, **kwargs):
- """Return a filter query string based on the selected parameters.
-
- :param metaquery: optional metaquery dict
- :param kwargs: key-value pairs to filter on. Key should be a real
- column name in db
- """
- q = []
- res_q = None
-
- # Note: we use the extended constructor of SingleColumnValueFilter here.
- # It explicitly specifies that an entry should not be returned if the
- # column is not found in the row.
- for key, value in sorted(kwargs.items()):
- if value is not None:
- if key == 'source':
- q.append("SingleColumnValueFilter "
- "('f', 's_%s', =, 'binary:%s', true, true)" %
- (value, dump('1')))
- else:
- q.append("SingleColumnValueFilter "
- "('f', '%s', =, 'binary:%s', true, true)" %
- (quote(key), dump(value)))
- res_q = None
- if len(q):
- res_q = " AND ".join(q)
-
- if metaquery:
- meta_q = []
- for k, v in metaquery.items():
- meta_q.append(
- "SingleColumnValueFilter ('f', '%s', =, 'binary:%s', "
- "true, true)"
- % ('r_' + k, dump(v)))
- meta_q = " AND ".join(meta_q)
- # join query and metaquery
- if res_q is not None:
- res_q += " AND " + meta_q
- else:
- res_q = meta_q # metaquery only
-
- return res_q
-
-
-def get_meter_columns(metaquery=None, need_timestamp=False, **kwargs):
- """Return a list of required columns in meter table to be scanned.
-
- A scan takes a 'columns' argument that determines which columns we are
- interested in. But if we want to use 'filter' and 'columns' together, we
- have to include the columns we are filtering by in the columns list.
-
- For example: if we scan with the filter
- "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')"
- and columns ['f:rts'], the output will always be empty,
- because only 'rts' is returned and the filter is applied to that data,
- so 's_test-1' can never be found.
- To make this request correct it should be fixed as follows:
- filter = "SingleColumnValueFilter ('f', 's_test-1', =, 'binary:\"1\"')",
- columns = ['f:rts', 'f:s_test-1']
-
- :param metaquery: optional metaquery dict
- :param need_timestamp: flag, which defines the need for timestamp columns
- :param kwargs: key-value pairs to filter on. Key should be a real
- column name in db
- """
- columns = ['f:message', 'f:recorded_at']
- columns.extend("f:%s" % k for k, v in kwargs.items()
- if v is not None)
- if metaquery:
- columns.extend("f:r_%s" % k for k, v in metaquery.items()
- if v is not None)
- source = kwargs.get('source')
- if source:
- columns.append("f:s_%s" % source)
- if need_timestamp:
- columns.extend(['f:rts', 'f:timestamp'])
- return columns
-
-
-def make_sample_query_from_filter(sample_filter, require_meter=True):
- """Return a query dictionary based on the settings in the filter.
-
- :param sample_filter: SampleFilter instance
- :param require_meter: If true and the filter does not have a meter,
- raise an error.
- """
-
- meter = sample_filter.meter
- if not meter and require_meter:
- raise RuntimeError('Missing required meter specifier')
- start_row, end_row, ts_query = make_timestamp_query(
- make_general_rowkey_scan,
- start=sample_filter.start_timestamp,
- start_op=sample_filter.start_timestamp_op,
- end=sample_filter.end_timestamp,
- end_op=sample_filter.end_timestamp_op,
- some_id=meter)
- kwargs = dict(user_id=sample_filter.user,
- project_id=sample_filter.project,
- counter_name=meter,
- resource_id=sample_filter.resource,
- source=sample_filter.source,
- message_id=sample_filter.message_id)
-
- q = make_query(metaquery=sample_filter.metaquery, **kwargs)
-
- if q:
- res_q = q + " AND " + ts_query if ts_query else q
- else:
- res_q = ts_query if ts_query else None
-
- need_timestamp = (sample_filter.start_timestamp or
- sample_filter.end_timestamp) is not None
- columns = get_meter_columns(metaquery=sample_filter.metaquery,
- need_timestamp=need_timestamp, **kwargs)
- return res_q, start_row, end_row, columns
-
-
-def make_meter_query_for_resource(start_timestamp, start_timestamp_op,
- end_timestamp, end_timestamp_op, source,
- query=None):
- """This method is used when Resource table should be filtered by meters.
-
- In this method we are looking into all qualifiers with m_ prefix.
- :param start_timestamp: meter's timestamp start range.
- :param start_timestamp_op: meter's start time operator, like ge, gt.
- :param end_timestamp: meter's timestamp end range.
- :param end_timestamp_op: meter's end time operator, like lt, le.
- :param source: source filter.
- :param query: a query string to concatenate with.
- """
- start_rts, end_rts = get_start_end_rts(start_timestamp, end_timestamp)
- mq = []
- start_op = start_timestamp_op or 'ge'
- end_op = end_timestamp_op or 'lt'
-
- if start_rts:
- filter_value = (start_rts + ':' + quote(source) if source
- else start_rts)
- mq.append(_QualifierFilter(OP_SIGN_REV[start_op], filter_value))
-
- if end_rts:
- filter_value = (end_rts + ':' + quote(source) if source
- else end_rts)
- mq.append(_QualifierFilter(OP_SIGN_REV[end_op], filter_value))
-
- if mq:
- meter_q = " AND ".join(mq)
- # If there is filtering on a time range we need to require that the
- # qualifiers start with m_. Otherwise, with e.g.
- # QualifierFilter (>=, 'binaryprefix:m_9222030811134775808'),
- # the qualifier 's_test' satisfies the filter and will be returned.
- meter_q = _QualifierFilter("=", '') + " AND " + meter_q
- query = meter_q if not query else query + " AND " + meter_q
- return query
-
-
-def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None):
- """If it's filter on some_id without start and end.
-
- start_row = some_id while end_row = some_id + MAX_BYTE.
- """
- if some_id is None:
- return None, None
- if not rts_start:
- # NOTE(idegtiarov): Here we cannot use a char above chr(122) because
- # chr(123) and above get URL-quoted, turning the character into a
- # sequence starting with '%' (chr(37)), which sorts lexicographically
- # before the digit characters
- rts_start = chr(122)
- end_row = prepare_key(some_id, rts_start)
- start_row = prepare_key(some_id, rts_end)
-
- return start_row, end_row
-
-
-def prepare_key(*args):
- """Prepares names for rows and columns with correct separator.
-
- :param args: strings or numbers the key is constructed from
- :return: key with quoted args separated by the ":" character
- """
- key_quote = []
- for key in args:
- if isinstance(key, six.integer_types):
- key = str(key)
- key_quote.append(quote(key))
- return ":".join(key_quote)
-
-
-def timestamp_from_record_tuple(record):
- """Extract timestamp from HBase tuple record."""
- return record[0]['timestamp']
-
-
-def resource_id_from_record_tuple(record):
- """Extract resource_id from HBase tuple record."""
- return record[0]['resource_id']
-
-
-def deserialize_entry(entry, get_raw_meta=True):
- """Return a list of flatten_result, sources, meters and metadata.
-
- Flatten_result contains a dict of simple structures such as 'resource_id':1
- sources/meters are the lists of sources and meters correspondingly.
- metadata is metadata dict. This dict may be returned as flattened if
- get_raw_meta is False.
-
- :param entry: entry from HBase, without row name and timestamp
- :param get_raw_meta: If true then raw metadata will be returned,
- if False metadata will be constructed from
- 'f:r_metadata.' fields
- """
- flatten_result = {}
- sources = []
- meters = []
- metadata_flattened = {}
- for k, v in entry.items():
- if k.startswith('f:s_'):
- sources.append(decode_unicode(k[4:]))
- elif k.startswith('f:r_metadata.'):
- qualifier = decode_unicode(k[len('f:r_metadata.'):])
- metadata_flattened[qualifier] = load(v)
- elif k.startswith("f:m_"):
- meter = ([unquote(i) for i in k[4:].split(':')], load(v))
- meters.append(meter)
- else:
- if ':' in k[2:]:
- key = tuple([unquote(i) for i in k[2:].split(':')])
- else:
- key = unquote(k[2:])
- flatten_result[key] = load(v)
- if get_raw_meta:
- metadata = flatten_result.get('resource_metadata', {})
- else:
- metadata = metadata_flattened
-
- return flatten_result, meters, metadata
-
-
-def serialize_entry(data=None, **kwargs):
- """Return a dict that is ready to be stored to HBase
-
- :param data: dict to be serialized
- :param kwargs: additional args
- """
- data = data or {}
- entry_dict = copy.copy(data)
- entry_dict.update(**kwargs)
-
- result = {}
- for k, v in entry_dict.items():
- if k == 'source':
- # user, project and resource tables may contain several sources.
- # Besides, resource table may contain several meters.
- # To make insertion safe we need to store every meter and source in
- # its own cell. For this purpose the s_ and m_ prefixes are
- # introduced.
- qualifier = encode_unicode('f:s_%s' % v)
- result[qualifier] = dump('1')
- elif k == 'meter':
- for meter, ts in v.items():
- qualifier = encode_unicode('f:m_%s' % meter)
- result[qualifier] = dump(ts)
- elif k == 'resource_metadata':
- # keep raw metadata as well as the flattened form to stay
- # compatible with API v2. It is flattened in a different way at the
- # API level, but we also need the flattened form for quick filtering.
- flattened_meta = dump_metadata(v)
- for key, m in flattened_meta.items():
- metadata_qualifier = encode_unicode('f:r_metadata.' + key)
- result[metadata_qualifier] = dump(m)
- result['f:resource_metadata'] = dump(v)
- else:
- result['f:' + quote(k, ':')] = dump(v)
- return result
-
-
-def dump_metadata(meta):
- resource_metadata = {}
- for key, v in utils.dict_to_keyval(meta):
- resource_metadata[key] = v
- return resource_metadata
-
-
-def dump(data):
- return json.dumps(data, default=bson.json_util.default)
-
-
-def load(data):
- return json.loads(data, object_hook=object_hook)
-
-
-def encode_unicode(data):
- return data.encode('utf-8') if isinstance(data, six.text_type) else data
-
-
-def decode_unicode(data):
- return data.decode('utf-8') if isinstance(data, six.string_types) else data
-
-
-# We don't want to have tzinfo in the decoded json. This object_hook
-# overrides json_util.object_hook for $date
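-# (e.g., assuming a bson version that accepts integer $date values,
-# load('{"ts": {"$date": 1420070400000}}') gives
-# {'ts': datetime.datetime(2015, 1, 1, 0, 0)} -- naive, without tzinfo)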
-def object_hook(dct):
- if "$date" in dct:
- dt = bson.json_util.object_hook(dct)
- return dt.replace(tzinfo=None)
- return bson.json_util.object_hook(dct)
-
-
-def create_tables(conn, tables, column_families):
- for table in tables:
- try:
- conn.create_table(table, column_families)
- except ttypes.AlreadyExists:
- if conn.table_prefix:
- table = ("%(table_prefix)s"
- "%(separator)s"
- "%(table_name)s" %
- dict(table_prefix=conn.table_prefix,
- separator=conn.table_prefix_separator,
- table_name=table))
-
- LOG.warning(_("Cannot create table %(table_name)s, "
- "it already exists. Ignoring error")
- % {'table_name': table})
-
-
-def quote(s, *args):
- """Return quoted string even if it is unicode one.
-
- :param s: string that should be quoted
- :param args: any symbol we want to stay unquoted
- """
- s_en = s.encode('utf8')
- return six.moves.urllib.parse.quote(s_en, *args)
-
-
-def unquote(s):
- """Return unquoted and decoded string.
-
- :param s: string that should be unquoted
- """
- s_de = six.moves.urllib.parse.unquote(s)
- return s_de.decode('utf8')
diff --git a/ceilometer/storage/impl_hbase.py b/ceilometer/storage/impl_hbase.py
deleted file mode 100644
index aafaff6c..00000000
--- a/ceilometer/storage/impl_hbase.py
+++ /dev/null
@@ -1,440 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import operator
-import time
-
-from oslo_log import log
-from oslo_utils import timeutils
-
-import ceilometer
-from ceilometer.storage import base
-from ceilometer.storage.hbase import base as hbase_base
-from ceilometer.storage.hbase import migration as hbase_migration
-from ceilometer.storage.hbase import utils as hbase_utils
-from ceilometer.storage import models
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-AVAILABLE_CAPABILITIES = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True}},
- 'statistics': {'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True}},
-}
-
-
-AVAILABLE_STORAGE_CAPABILITIES = {
- 'storage': {'production_ready': True},
-}
-
-
-class Connection(hbase_base.Connection, base.Connection):
- """Put the metering data into a HBase database
-
- Collections:
-
- - meter (actually describes samples):
-
- - row-key: consists of reversed timestamp, meter and a message uuid
- for purposes of uniqueness
- - Column Families:
-
- f: contains the following qualifiers:
-
- - counter_name: <name of counter>
- - counter_type: <type of counter>
- - counter_unit: <unit of counter>
- - counter_volume: <volume of counter>
- - message: <raw incoming data>
- - message_id: <id of message>
- - message_signature: <signature of message>
- - resource_metadata: raw metadata for corresponding resource
- of the meter
- - project_id: <id of project>
- - resource_id: <id of resource>
- - user_id: <id of user>
- - recorded_at: <datetime when sample has been recorded (utc.now)>
- - flattened metadata with prefix r_metadata. e.g.::
-
- f:r_metadata.display_name or f:r_metadata.tag
-
- - rts: <reversed timestamp of entry>
- - timestamp: <meter's timestamp (came from message)>
- - source for meter with prefix 's'
-
- - resource:
-
- - row_key: uuid of resource
- - Column Families:
-
- f: contains the following qualifiers:
-
- - resource_metadata: raw metadata for corresponding resource
- - project_id: <id of project>
- - resource_id: <id of resource>
- - user_id: <id of user>
- - flattened metadata with prefix r_metadata. e.g.::
-
- f:r_metadata.display_name or f:r_metadata.tag
-
- - sources for all corresponding meters with prefix 's'
- - all meters with prefix 'm' for this resource in format:
-
- .. code-block:: python
-
- "%s:%s:%s:%s:%s" % (rts, source, counter_name, counter_type,
- counter_unit)
- """
-
- CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
- AVAILABLE_CAPABILITIES)
- STORAGE_CAPABILITIES = utils.update_nested(
- base.Connection.STORAGE_CAPABILITIES,
- AVAILABLE_STORAGE_CAPABILITIES,
- )
- _memory_instance = None
-
- RESOURCE_TABLE = "resource"
- METER_TABLE = "meter"
-
- def upgrade(self):
- tables = [self.RESOURCE_TABLE, self.METER_TABLE]
- column_families = {'f': dict(max_versions=1)}
- with self.conn_pool.connection() as conn:
- hbase_utils.create_tables(conn, tables, column_families)
- hbase_migration.migrate_tables(conn, tables)
-
- def clear(self):
- LOG.debug('Dropping HBase schema...')
- with self.conn_pool.connection() as conn:
- for table in [self.RESOURCE_TABLE,
- self.METER_TABLE]:
- try:
- conn.disable_table(table)
- except Exception:
- LOG.debug('Cannot disable table but ignoring error')
- try:
- conn.delete_table(table)
- except Exception:
- LOG.debug('Cannot delete table but ignoring error')
-
- def record_metering_data(self, data):
- """Write the data to the backend storage system.
-
- :param data: a dictionary such as returned by
- ceilometer.publisher.utils.meter_message_from_counter
- """
-
- # We must not record this field.
- data.pop("monotonic_time", None)
-
- with self.conn_pool.connection() as conn:
- resource_table = conn.table(self.RESOURCE_TABLE)
- meter_table = conn.table(self.METER_TABLE)
-
- resource_metadata = data.get('resource_metadata', {})
- # Determine the name of new meter
- rts = hbase_utils.timestamp(data['timestamp'])
- new_meter = hbase_utils.prepare_key(
- rts, data['source'], data['counter_name'],
- data['counter_type'], data['counter_unit'])
-
- # TODO(nprivalova): try not to store resource_id
- resource = hbase_utils.serialize_entry(**{
- 'source': data['source'],
- 'meter': {new_meter: data['timestamp']},
- 'resource_metadata': resource_metadata,
- 'resource_id': data['resource_id'],
- 'project_id': data['project_id'], 'user_id': data['user_id']})
- # Here we put the entry into HBase with our own timestamp. This is
- # needed when samples arrive out of order: by using the sample's own
- # timestamp, the newest data automatically ends up 'on top', which
- # keeps the metadata up to date (metadata from the newest sample is
- # considered the current one).
- ts = int(time.mktime(data['timestamp'].timetuple()) * 1000)
- resource_table.put(hbase_utils.encode_unicode(data['resource_id']),
- resource, ts)
-
- # Rowkey consists of reversed timestamp, meter and a
- # message uuid for purposes of uniqueness
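- # (for example, roughly 'cpu:9223372036853775807:<message_id>',
- # with each part URL-quoted by prepare_key)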
- row = hbase_utils.prepare_key(data['counter_name'], rts,
- data['message_id'])
- record = hbase_utils.serialize_entry(
- data, **{'source': data['source'], 'rts': rts,
- 'message': data, 'recorded_at': timeutils.utcnow()})
- meter_table.put(row, record)
-
- def get_resources(self, user=None, project=None, source=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- metaquery=None, resource=None, limit=None):
- """Return an iterable of models.Resource instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param source: Optional source filter.
- :param start_timestamp: Optional modified timestamp start range.
- :param start_timestamp_op: Optional start time operator, like ge, gt.
- :param end_timestamp: Optional modified timestamp end range.
- :param end_timestamp_op: Optional end time operator, like lt, le.
- :param metaquery: Optional dict with metadata to match on.
- :param resource: Optional resource filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return
- q = hbase_utils.make_query(metaquery=metaquery, user_id=user,
- project_id=project,
- resource_id=resource, source=source)
- q = hbase_utils.make_meter_query_for_resource(start_timestamp,
- start_timestamp_op,
- end_timestamp,
- end_timestamp_op,
- source, q)
- with self.conn_pool.connection() as conn:
- resource_table = conn.table(self.RESOURCE_TABLE)
- LOG.debug("Query Resource table: %s", q)
- for resource_id, data in resource_table.scan(filter=q,
- limit=limit):
- f_res, meters, md = hbase_utils.deserialize_entry(
- data)
- resource_id = hbase_utils.encode_unicode(resource_id)
- # Unfortunately happybase does not preserve the ordering of
- # results from HBase, so we have to find the min and max
- # manually
- first_ts = min(meters, key=operator.itemgetter(1))[1]
- last_ts = max(meters, key=operator.itemgetter(1))[1]
- source = meters[0][0][1]
- # If we use QualifierFilter then HBase returns only the
- # qualifiers matched by the filter, not the whole entry.
- # That's why we need to ask for additional qualifiers manually.
- if 'project_id' not in f_res and 'user_id' not in f_res:
- row = resource_table.row(
- resource_id, columns=['f:project_id', 'f:user_id',
- 'f:resource_metadata'])
- f_res, _m, md = hbase_utils.deserialize_entry(row)
- yield models.Resource(
- resource_id=resource_id,
- first_sample_timestamp=first_ts,
- last_sample_timestamp=last_ts,
- project_id=f_res['project_id'],
- source=source,
- user_id=f_res['user_id'],
- metadata=md)
-
- def get_meters(self, user=None, project=None, resource=None, source=None,
- metaquery=None, limit=None, unique=False):
- """Return an iterable of models.Meter instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param resource: Optional resource filter.
- :param source: Optional source filter.
- :param metaquery: Optional dict with metadata to match on.
- :param limit: Maximum number of results to return.
- :param unique: If set to true, return only unique meter information.
- """
- if limit == 0:
- return
-
- metaquery = metaquery or {}
-
- with self.conn_pool.connection() as conn:
- resource_table = conn.table(self.RESOURCE_TABLE)
- q = hbase_utils.make_query(metaquery=metaquery, user_id=user,
- project_id=project,
- resource_id=resource,
- source=source)
- LOG.debug("Query Resource table: %s", q)
-
- gen = resource_table.scan(filter=q)
- # We need a result set to be sure the user doesn't receive
- # duplicate meters. Please see bug
- # https://bugs.launchpad.net/ceilometer/+bug/1301371
- result = set()
- for ignored, data in gen:
- flatten_result, meters, md = hbase_utils.deserialize_entry(
- data)
- for m in meters:
- if limit and len(result) >= limit:
- return
- _m_rts, m_source, name, m_type, unit = m[0]
- if unique:
- meter_dict = {'name': name,
- 'type': m_type,
- 'unit': unit,
- 'resource_id': None,
- 'project_id': None,
- 'user_id': None,
- 'source': None}
- else:
- meter_dict = {'name': name,
- 'type': m_type,
- 'unit': unit,
- 'resource_id':
- flatten_result['resource_id'],
- 'project_id':
- flatten_result['project_id'],
- 'user_id':
- flatten_result['user_id']}
-
- frozen_meter = frozenset(meter_dict.items())
- if frozen_meter in result:
- continue
- result.add(frozen_meter)
- if not unique:
- meter_dict.update({'source': m_source
- if m_source else None})
-
- yield models.Meter(**meter_dict)
-
- def get_samples(self, sample_filter, limit=None):
- """Return an iterable of models.Sample instances.
-
- :param sample_filter: Filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return
- with self.conn_pool.connection() as conn:
- meter_table = conn.table(self.METER_TABLE)
- q, start, stop, columns = (hbase_utils.
- make_sample_query_from_filter
- (sample_filter, require_meter=False))
- LOG.debug("Query Meter Table: %s", q)
- gen = meter_table.scan(filter=q, row_start=start, row_stop=stop,
- limit=limit, columns=columns)
- for ignored, meter in gen:
- d_meter = hbase_utils.deserialize_entry(meter)[0]
- d_meter['message']['counter_volume'] = (
- float(d_meter['message']['counter_volume']))
- d_meter['message']['recorded_at'] = d_meter['recorded_at']
- yield models.Sample(**d_meter['message'])
-
- @staticmethod
- def _update_meter_stats(stat, meter):
- """Do the stats calculation on a requested time bucket in stats dict
-
- :param stats: dict where aggregated stats are kept
- :param index: time bucket index in stats
- :param meter: meter record as returned from HBase
- :param start_time: query start time
- :param period: length of the time bucket
- """
- vol = meter['counter_volume']
- ts = meter['timestamp']
- stat.unit = meter['counter_unit']
- stat.min = min(vol, stat.min or vol)
- stat.max = max(vol, stat.max)
- stat.sum = vol + (stat.sum or 0)
- stat.count += 1
- stat.avg = (stat.sum / float(stat.count))
- stat.duration_start = min(ts, stat.duration_start or ts)
- stat.duration_end = max(ts, stat.duration_end or ts)
- stat.duration = (timeutils.delta_seconds(stat.duration_start,
- stat.duration_end))
-
- def get_meter_statistics(self, sample_filter, period=None, groupby=None,
- aggregate=None):
- """Return an iterable of models.Statistics instances.
-
- Items contain meter statistics described by the query
- parameters. The filter must have a meter value set.
-
- .. note::
-
- Due to HBase limitations the aggregations are implemented
- in the driver itself, therefore this method will be quite slow
- because of all the Thrift traffic it is going to create.
- """
- if groupby:
- raise ceilometer.NotImplementedError("Group by not implemented.")
-
- if aggregate:
- raise ceilometer.NotImplementedError(
- 'Selectable aggregates not implemented')
-
- with self.conn_pool.connection() as conn:
- meter_table = conn.table(self.METER_TABLE)
- q, start, stop, columns = (hbase_utils.
- make_sample_query_from_filter
- (sample_filter))
- # These fields are used for calculating statistics
- columns.extend(['f:timestamp', 'f:counter_volume',
- 'f:counter_unit'])
- meters = map(hbase_utils.deserialize_entry,
- list(meter for (ignored, meter) in
- meter_table.scan(
- filter=q, row_start=start,
- row_stop=stop, columns=columns)))
-
- if sample_filter.start_timestamp:
- start_time = sample_filter.start_timestamp
- elif meters:
- start_time = meters[-1][0]['timestamp']
- else:
- start_time = None
-
- if sample_filter.end_timestamp:
- end_time = sample_filter.end_timestamp
- elif meters:
- end_time = meters[0][0]['timestamp']
- else:
- end_time = None
-
- results = []
-
- if not period:
- period = 0
- period_start = start_time
- period_end = end_time
-
- # As our HBase meters are stored as newest-first, we need to iterate
- # in the reverse order
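- # E.g. with period=300 and a sample 11 minutes after start_time,
- # offset = int(660 / 300) * 300 = 600, so the sample falls into the
- # bucket starting at start_time + 600 seconds.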
- for meter in meters[::-1]:
- ts = meter[0]['timestamp']
- if period:
- offset = int(timeutils.delta_seconds(
- start_time, ts) / period) * period
- period_start = start_time + datetime.timedelta(0, offset)
-
- if not results or not results[-1].period_start == period_start:
- if period:
- period_end = period_start + datetime.timedelta(
- 0, period)
- results.append(
- models.Statistics(unit='',
- count=0,
- min=0,
- max=0,
- avg=0,
- sum=0,
- period=period,
- period_start=period_start,
- period_end=period_end,
- duration=None,
- duration_start=None,
- duration_end=None,
- groupby=None)
- )
- self._update_meter_stats(results[-1], meter[0])
- return results
diff --git a/ceilometer/storage/impl_log.py b/ceilometer/storage/impl_log.py
deleted file mode 100644
index f76b3262..00000000
--- a/ceilometer/storage/impl_log.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Simple logging storage backend.
-"""
-
-from oslo_log import log
-
-from ceilometer.storage import base
-
-LOG = log.getLogger(__name__)
-
-
-class Connection(base.Connection):
- """Log the data."""
-
- def upgrade(self):
- pass
-
- def clear(self):
- pass
-
- def record_metering_data(self, data):
- """Write the data to the backend storage system.
-
- :param data: a dictionary such as returned by
- ceilometer.meter.meter_message_from_counter.
- """
- LOG.info('metering data %(counter_name)s for %(resource_id)s: '
- '%(counter_volume)s'
- % ({'counter_name': data['counter_name'],
- 'resource_id': data['resource_id'],
- 'counter_volume': data['counter_volume']}))
-
- def clear_expired_metering_data(self, ttl):
- """Clear expired data from the backend storage system.
-
- Clearing occurs according to the time-to-live.
- :param ttl: Number of seconds to keep records for.
- """
- LOG.info("Dropping metering data with TTL %d", ttl)
-
- def get_resources(self, user=None, project=None, source=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- metaquery=None, resource=None, limit=None):
- """Return an iterable of dictionaries containing resource information.
-
- { 'resource_id': UUID of the resource,
- 'project_id': UUID of project owning the resource,
- 'user_id': UUID of user owning the resource,
- 'timestamp': UTC datetime of last update to the resource,
- 'metadata': most current metadata for the resource,
- 'meter': list of the meters reporting data for the resource,
- }
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param source: Optional source filter.
- :param start_timestamp: Optional modified timestamp start range.
- :param start_timestamp_op: Optional start time operator, like gt, ge.
- :param end_timestamp: Optional modified timestamp end range.
- :param end_timestamp_op: Optional end time operator, like lt, le.
- :param metaquery: Optional dict with metadata to match on.
- :param resource: Optional resource filter.
- :param limit: Maximum number of results to return.
- """
- return []
-
- def get_meters(self, user=None, project=None, resource=None, source=None,
- limit=None, metaquery=None, unique=False):
- """Return an iterable of dictionaries containing meter information.
-
- { 'name': name of the meter,
- 'type': type of the meter (gauge, delta, cumulative),
- 'resource_id': UUID of the resource,
- 'project_id': UUID of project owning the resource,
- 'user_id': UUID of user owning the resource,
- }
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param resource: Optional resource filter.
- :param source: Optional source filter.
- :param limit: Maximum number of results to return.
- :param metaquery: Optional dict with metadata to match on.
- :param unique: If set to true, return only unique meter information.
- """
- return []
-
- def get_samples(self, sample_filter, limit=None):
- """Return an iterable of samples.
-
- Items are created by
- ceilometer.publisher.utils.meter_message_from_counter.
- """
- return []
-
- def get_meter_statistics(self, sample_filter, period=None, groupby=None,
- aggregate=None):
- """Return a dictionary containing meter statistics.
-
- Meter statistics is described by the query parameters.
- The filter must have a meter value set.
-
- { 'min':
- 'max':
- 'avg':
- 'sum':
- 'count':
- 'period':
- 'period_start':
- 'period_end':
- 'duration':
- 'duration_start':
- 'duration_end':
- }
- """
- return []
diff --git a/ceilometer/storage/impl_mongodb.py b/ceilometer/storage/impl_mongodb.py
deleted file mode 100644
index 2c3353e7..00000000
--- a/ceilometer/storage/impl_mongodb.py
+++ /dev/null
@@ -1,710 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 eNovance
-# Copyright 2014 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""MongoDB storage backend"""
-
-import itertools
-import operator
-
-import copy
-import datetime
-import uuid
-
-import bson.code
-import bson.objectid
-from oslo_log import log
-from oslo_utils import timeutils
-import pymongo
-import six
-
-import ceilometer
-from ceilometer.i18n import _
-from ceilometer import storage
-from ceilometer.storage import base
-from ceilometer.storage import models
-from ceilometer.storage.mongo import utils as pymongo_utils
-from ceilometer.storage import pymongo_base
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-AVAILABLE_CAPABILITIES = {
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'statistics': {'groupby': True,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {'max': True,
- 'min': True,
- 'sum': True,
- 'avg': True,
- 'count': True,
- 'stddev': True,
- 'cardinality': True}}}
-}
-
-
-class Connection(pymongo_base.Connection):
- """Put the data into a MongoDB database
-
- Collections::
-
- - meter
- - the raw incoming data
- - resource
- - the metadata for resources
- - { _id: uuid of resource,
- metadata: metadata dictionaries
- user_id: uuid
- project_id: uuid
- meter: [ array of {counter_name: string, counter_type: string,
- counter_unit: string} ]
- }
- """
-
- CAPABILITIES = utils.update_nested(pymongo_base.Connection.CAPABILITIES,
- AVAILABLE_CAPABILITIES)
- CONNECTION_POOL = pymongo_utils.ConnectionPool()
-
- STANDARD_AGGREGATES = dict([(a.name, a) for a in [
- pymongo_utils.SUM_AGGREGATION, pymongo_utils.AVG_AGGREGATION,
- pymongo_utils.MIN_AGGREGATION, pymongo_utils.MAX_AGGREGATION,
- pymongo_utils.COUNT_AGGREGATION,
- ]])
-
- AGGREGATES = dict([(a.name, a) for a in [
- pymongo_utils.SUM_AGGREGATION,
- pymongo_utils.AVG_AGGREGATION,
- pymongo_utils.MIN_AGGREGATION,
- pymongo_utils.MAX_AGGREGATION,
- pymongo_utils.COUNT_AGGREGATION,
- pymongo_utils.STDDEV_AGGREGATION,
- pymongo_utils.CARDINALITY_AGGREGATION,
- ]])
-
- SORT_OPERATION_MAPPING = {'desc': (pymongo.DESCENDING, '$lt'),
- 'asc': (pymongo.ASCENDING, '$gt')}
-
- MAP_RESOURCES = bson.code.Code("""
- function () {
- emit(this.resource_id,
- {user_id: this.user_id,
- project_id: this.project_id,
- source: this.source,
- first_timestamp: this.timestamp,
- last_timestamp: this.timestamp,
- metadata: this.resource_metadata})
- }""")
-
- REDUCE_RESOURCES = bson.code.Code("""
- function (key, values) {
- var merge = {user_id: values[0].user_id,
- project_id: values[0].project_id,
- source: values[0].source,
- first_timestamp: values[0].first_timestamp,
- last_timestamp: values[0].last_timestamp,
- metadata: values[0].metadata}
- values.forEach(function(value) {
- if (merge.first_timestamp - value.first_timestamp > 0) {
- merge.first_timestamp = value.first_timestamp;
- merge.user_id = value.user_id;
- merge.project_id = value.project_id;
- merge.source = value.source;
- } else if (merge.last_timestamp - value.last_timestamp <= 0) {
- merge.last_timestamp = value.last_timestamp;
- merge.metadata = value.metadata;
- }
- });
- return merge;
- }""")
-
- _GENESIS = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
- _APOCALYPSE = datetime.datetime(year=datetime.MAXYEAR, month=12, day=31,
- hour=23, minute=59, second=59)
-
- def __init__(self, conf, url):
- super(Connection, self).__init__(conf, url)
-
- # NOTE(jd) Use our own connection pooling on top of the Pymongo one.
- # We need that, otherwise we overflow the MongoDB instance with new
- # connections, since we instantiate a Pymongo client each time someone
- # requires a new storage connection.
- self.conn = self.CONNECTION_POOL.connect(conf, url)
- self.version = self.conn.server_info()['versionArray']
- # Require MongoDB 2.4 to use $setOnInsert
- if self.version < pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION:
- raise storage.StorageBadVersion(
- "Need at least MongoDB %s" %
- pymongo_utils.MINIMUM_COMPATIBLE_MONGODB_VERSION)
-
- connection_options = pymongo.uri_parser.parse_uri(url)
- self.db = getattr(self.conn, connection_options['database'])
- if connection_options.get('username'):
- self.db.authenticate(connection_options['username'],
- connection_options['password'])
-
- # NOTE(jd) Upgrading is just about creating indexes, so let's do this
- # on connection to be sure at least the TTL is correctly updated if
- # needed.
- self.upgrade()
-
- @staticmethod
- def update_ttl(ttl, ttl_index_name, index_field, coll):
- """Update or create time_to_live indexes.
-
- :param ttl: time to live in seconds.
- :param ttl_index_name: name of the index we want to update or create.
- :param index_field: field with the index that we need to update.
- :param coll: collection which indexes need to be updated.
- """
- indexes = coll.index_information()
- if ttl <= 0:
- if ttl_index_name in indexes:
- coll.drop_index(ttl_index_name)
- return
-
- if ttl_index_name in indexes:
- return coll.database.command(
- 'collMod', coll.name,
- index={'keyPattern': {index_field: pymongo.ASCENDING},
- 'expireAfterSeconds': ttl})
-
- coll.create_index([(index_field, pymongo.ASCENDING)],
- expireAfterSeconds=ttl,
- name=ttl_index_name)
-
- def upgrade(self):
- # Establish indexes
- #
- # We need variations for user_id vs. project_id because of the
- # way the indexes are stored in b-trees. The user_id and
- # project_id values are usually mutually exclusive in the
- # queries, so the database won't take advantage of an index
- # including both.
-
- # create collection if not present
- if 'resource' not in self.db.conn.collection_names():
- self.db.conn.create_collection('resource')
- if 'meter' not in self.db.conn.collection_names():
- self.db.conn.create_collection('meter')
-
- name_qualifier = dict(user_id='', project_id='project_')
- background = dict(user_id=False, project_id=True)
- for primary in ['user_id', 'project_id']:
- name = 'meter_%sidx' % name_qualifier[primary]
- self.db.meter.create_index([
- ('resource_id', pymongo.ASCENDING),
- (primary, pymongo.ASCENDING),
- ('counter_name', pymongo.ASCENDING),
- ('timestamp', pymongo.ASCENDING),
- ], name=name, background=background[primary])
-
- self.db.meter.create_index([('timestamp', pymongo.DESCENDING)],
- name='timestamp_idx')
-
- # NOTE(ityaptin) This index covers sorting for get_resources requests,
- # and MongoDB uses parts of this compound index for different
- # queries based on any of the user_id, project_id and
- # last_sample_timestamp fields
- self.db.resource.create_index([('user_id', pymongo.DESCENDING),
- ('project_id', pymongo.DESCENDING),
- ('last_sample_timestamp',
- pymongo.DESCENDING)],
- name='resource_user_project_timestamp',)
- self.db.resource.create_index([('last_sample_timestamp',
- pymongo.DESCENDING)],
- name='last_sample_timestamp_idx')
-
- # update or create time_to_live index
- ttl = self.conf.database.metering_time_to_live
- self.update_ttl(ttl, 'meter_ttl', 'timestamp', self.db.meter)
- self.update_ttl(ttl, 'resource_ttl', 'last_sample_timestamp',
- self.db.resource)
-
- def clear(self):
- self.conn.drop_database(self.db.name)
- # Connection will be reopened automatically if needed
- self.conn.close()
-
- def record_metering_data(self, data):
- # TODO(liusheng): this is a workaround: the storage scenario tests
- # directly invoke this method and pass a single sample dict to all the
- # storage backends via conn.record_metering_data. Once all the
- # Ceilometer native storage backends support batch recording,
- # the scenario tests should be refactored.
- self.record_metering_data_batch([data])
-
- def record_metering_data_batch(self, samples):
- """Record the metering data in batch.
-
- :param samples: a list of samples dict.
- """
- # Record the updated resource metadata - we use $setOnInsert to
- # unconditionally insert sample timestamps and resource metadata
- # (in the update case, this must be conditional on the sample not
- # being out-of-order)
-
- # We must not store this
- samples = copy.deepcopy(samples)
-
- for sample in samples:
- sample.pop("monotonic_time", None)
-
- sorted_samples = sorted(
- copy.deepcopy(samples),
- key=lambda s: (s['resource_id'], s['timestamp']))
- res_grouped_samples = itertools.groupby(
- sorted_samples, key=operator.itemgetter('resource_id'))
- samples_to_update_resource = []
- for resource_id, g_samples in res_grouped_samples:
- g_samples = list(g_samples)
- g_samples[-1]['meter'] = [{'counter_name': s['counter_name'],
- 'counter_type': s['counter_type'],
- 'counter_unit': s['counter_unit'],
- } for s in g_samples]
- g_samples[-1]['last_sample_timestamp'] = g_samples[-1]['timestamp']
- g_samples[-1]['first_sample_timestamp'] = g_samples[0]['timestamp']
- samples_to_update_resource.append(g_samples[-1])
- for sample in samples_to_update_resource:
- sample['resource_metadata'] = pymongo_utils.improve_keys(
- sample.pop('resource_metadata'))
- resource = self.db.resource.find_one_and_update(
- {'_id': sample['resource_id']},
- {'$set': {'project_id': sample['project_id'],
- 'user_id': sample['user_id'],
- 'source': sample['source'],
- },
- '$setOnInsert': {
- 'metadata': sample['resource_metadata'],
- 'first_sample_timestamp': sample['timestamp'],
- 'last_sample_timestamp': sample['timestamp'],
- },
- '$addToSet': {
- 'meter': {'$each': sample['meter']},
- },
- },
- upsert=True,
- return_document=pymongo.ReturnDocument.AFTER,
- )
-
- # only update last sample timestamp if actually later (the usual
- # in-order case)
- last_sample_timestamp = resource.get('last_sample_timestamp')
- if (last_sample_timestamp is None or
- last_sample_timestamp <= sample['last_sample_timestamp']):
- self.db.resource.update_one(
- {'_id': sample['resource_id']},
- {'$set': {'metadata': sample['resource_metadata'],
- 'last_sample_timestamp':
- sample['last_sample_timestamp']}}
- )
-
- # only update first sample timestamp if actually earlier (
- # the unusual out-of-order case)
- # NOTE: a null first sample timestamp is not updated as this
- # indicates a pre-existing resource document dating from before
- # we started recording these timestamps in the resource collection
- first_sample_timestamp = resource.get('first_sample_timestamp')
- if (first_sample_timestamp is not None and
- first_sample_timestamp > sample['first_sample_timestamp']):
- self.db.resource.update_one(
- {'_id': sample['resource_id']},
- {'$set': {'first_sample_timestamp':
- sample['first_sample_timestamp']}}
- )
-
- # Record the raw data for the meter. Use a copy so we do not
- # modify a data structure owned by our caller (the driver adds
- # a new key '_id').
- record = copy.deepcopy(samples)
- for s in record:
- s['recorded_at'] = timeutils.utcnow()
- s['resource_metadata'] = pymongo_utils.improve_keys(
- s.pop('resource_metadata'))
- self.db.meter.insert_many(record)
-
- def clear_expired_metering_data(self, ttl):
- """Clear expired data from the backend storage system.
-
- Clearing occurs with native MongoDB time-to-live feature.
- """
- LOG.debug("Clearing expired metering data is based on native "
- "MongoDB time to live feature and going in background.")
-
- @classmethod
- def _build_sort_instructions(cls, sort_keys=None, sort_dir='desc'):
- """Returns a sort_instruction and paging operator.
-
- Sort instructions are used in the query to determine what attributes
- to sort on and what direction to use.
- :param sort_keys: array of attributes by which results should be sorted.
- :param sort_dir: direction in which results should be sorted (asc, desc).
- :return: sort instructions and paging operator
- """
- sort_keys = sort_keys or []
- sort_instructions = []
- _sort_dir, operation = cls.SORT_OPERATION_MAPPING.get(
- sort_dir, cls.SORT_OPERATION_MAPPING['desc'])
-
- for _sort_key in sort_keys:
- _instruction = (_sort_key, _sort_dir)
- sort_instructions.append(_instruction)
-
- return sort_instructions, operation
-
- def _get_time_constrained_resources(self, query,
- start_timestamp, start_timestamp_op,
- end_timestamp, end_timestamp_op,
- metaquery, resource, limit):
- """Return an iterable of models.Resource instances
-
- Items are constrained by sample timestamp.
- :param query: project/user/source query
- :param start_timestamp: modified timestamp start range.
- :param start_timestamp_op: start time operator, like gt, ge.
- :param end_timestamp: modified timestamp end range.
- :param end_timestamp_op: end time operator, like lt, le.
- :param metaquery: dict with metadata to match on.
- :param resource: resource filter.
- """
- if resource is not None:
- query['resource_id'] = resource
-
- # Add resource_ prefix so it matches the field in the db
- query.update(dict(('resource_' + k, v)
- for (k, v) in six.iteritems(metaquery)))
-
- # FIXME(dhellmann): This may not perform very well,
- # but doing any better will require changing the database
- # schema and that will need more thought than I have time
- # to put into it today.
- # Look for resources matching the above criteria and with
- # samples in the time range we care about, then change the
- # resource query to return just those resources by id.
- ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
- end_timestamp,
- start_timestamp_op,
- end_timestamp_op)
- if ts_range:
- query['timestamp'] = ts_range
-
- sort_keys = base._handle_sort_key('resource')
- sort_instructions = self._build_sort_instructions(sort_keys)[0]
-
- # use a unique collection name for the results collection,
- # as result post-sorting (as opposed to reduce pre-sorting)
- # is not possible on an inline M-R
- out = 'resource_list_%s' % uuid.uuid4()
- self.db.meter.map_reduce(self.MAP_RESOURCES,
- self.REDUCE_RESOURCES,
- out=out,
- sort={'resource_id': 1},
- query=query)
-
- try:
- if limit is not None:
- results = self.db[out].find(sort=sort_instructions,
- limit=limit)
- else:
- results = self.db[out].find(sort=sort_instructions)
- for r in results:
- resource = r['value']
- yield models.Resource(
- resource_id=r['_id'],
- user_id=resource['user_id'],
- project_id=resource['project_id'],
- first_sample_timestamp=resource['first_timestamp'],
- last_sample_timestamp=resource['last_timestamp'],
- source=resource['source'],
- metadata=pymongo_utils.unquote_keys(resource['metadata']))
- finally:
- self.db[out].drop()
-
- def _get_floating_resources(self, query, metaquery, resource, limit):
- """Return an iterable of models.Resource instances
-
- Items are unconstrained by timestamp.
- :param query: project/user/source query
- :param metaquery: dict with metadata to match on.
- :param resource: resource filter.
- """
- if resource is not None:
- query['_id'] = resource
-
- query.update(dict((k, v)
- for (k, v) in six.iteritems(metaquery)))
-
- keys = base._handle_sort_key('resource')
- sort_keys = ['last_sample_timestamp' if i == 'timestamp' else i
- for i in keys]
- sort_instructions = self._build_sort_instructions(sort_keys)[0]
-
- if limit is not None:
- results = self.db.resource.find(query, sort=sort_instructions,
- limit=limit)
- else:
- results = self.db.resource.find(query, sort=sort_instructions)
-
- for r in results:
- yield models.Resource(
- resource_id=r['_id'],
- user_id=r['user_id'],
- project_id=r['project_id'],
- first_sample_timestamp=r.get('first_sample_timestamp',
- self._GENESIS),
- last_sample_timestamp=r.get('last_sample_timestamp',
- self._APOCALYPSE),
- source=r['source'],
- metadata=pymongo_utils.unquote_keys(r['metadata']))
-
- def get_resources(self, user=None, project=None, source=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- metaquery=None, resource=None, limit=None):
- """Return an iterable of models.Resource instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param source: Optional source filter.
- :param start_timestamp: Optional modified timestamp start range.
- :param start_timestamp_op: Optional start time operator, like gt, ge.
- :param end_timestamp: Optional modified timestamp end range.
- :param end_timestamp_op: Optional end time operator, like lt, le.
- :param metaquery: Optional dict with metadata to match on.
- :param resource: Optional resource filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return
- metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
-
- query = {}
- if user is not None:
- query['user_id'] = user
- if project is not None:
- query['project_id'] = project
- if source is not None:
- query['source'] = source
-
- if start_timestamp or end_timestamp:
- return self._get_time_constrained_resources(query,
- start_timestamp,
- start_timestamp_op,
- end_timestamp,
- end_timestamp_op,
- metaquery, resource,
- limit)
- else:
- return self._get_floating_resources(query, metaquery, resource,
- limit)
-
- @staticmethod
- def _make_period_dict(period, first_ts):
- """Create a period field for _id of grouped fields.
-
- :param period: Period duration in seconds
- :param first_ts: First timestamp for first period
- :return: dict with the period_start expression for the group _id
- """
- if period >= 0:
- period_unique_dict = {
- "period_start":
- {
- "$divide": [
- {"$subtract": [
- {"$subtract": ["$timestamp",
- first_ts]},
- {"$mod": [{"$subtract": ["$timestamp",
- first_ts]},
- period * 1000]
- }
- ]},
- period * 1000
- ]
- }
-
- }
- else:
- # Note(ityaptin) Hack for older MongoDB versions (2.4.x and older).
- # Since 2.6+ we could use the $literal operator
- period_unique_dict = {"$period_start": {"$add": [0, 0]}}
- return period_unique_dict
-
- def get_meter_statistics(self, sample_filter, period=None, groupby=None,
- aggregate=None):
- """Return an iterable of models.Statistics instance.
-
- Items are containing meter statistics described by the query
- parameters. The filter must have a meter value set.
- """
- # NOTE(zqfan): We have already checked this at the API level, but
- # still leave it here in case storage is called directly.
- if aggregate:
- for a in aggregate:
- if a.func not in self.AGGREGATES:
- msg = _('Invalid aggregation function: %s') % a.func
- raise storage.StorageBadAggregate(msg)
-
- if (groupby and set(groupby) -
- set(['user_id', 'project_id', 'resource_id', 'source',
- 'resource_metadata.instance_type'])):
- raise ceilometer.NotImplementedError(
- "Unable to group by these fields")
- q = pymongo_utils.make_query_from_filter(sample_filter)
-
- group_stage = {}
- project_stage = {
- "unit": "$_id.unit",
- "name": "$_id.name",
- "first_timestamp": "$first_timestamp",
- "last_timestamp": "$last_timestamp",
- "period_start": "$_id.period_start",
- }
-
- # Add timestamps to $group stage
- group_stage.update({"first_timestamp": {"$min": "$timestamp"},
- "last_timestamp": {"$max": "$timestamp"}})
-
- # Define a _id field for grouped documents
- unique_group_field = {"name": "$counter_name",
- "unit": "$counter_unit"}
-
- # Define a first timestamp for periods
- if sample_filter.start_timestamp:
- first_timestamp = sample_filter.start_timestamp
- else:
- first_timestamp_cursor = self.db.meter.find(
- limit=1, sort=[('timestamp',
- pymongo.ASCENDING)])
- if first_timestamp_cursor.count():
- first_timestamp = first_timestamp_cursor[0]['timestamp']
- else:
- first_timestamp = utils.EPOCH_TIME
-
- # Add a start_period field to unique identifier of grouped documents
- if period:
- period_dict = self._make_period_dict(period,
- first_timestamp)
- unique_group_field.update(period_dict)
-
- # Add groupby fields to the unique identifier of grouped documents
- if groupby:
- unique_group_field.update(dict((field.replace(".", "/"),
- "$%s" % field)
- for field in groupby))
-
- group_stage.update({"_id": unique_group_field})
-
- self._compile_aggregate_stages(aggregate, group_stage, project_stage)
-
- # List of aggregation pipeline stages. They run one by one, each using
- # the documents produced by the previous stage.
- aggregation_query = [{'$match': q},
- {"$sort": {"timestamp": 1}},
- {"$group": group_stage},
- {"$sort": {"_id.period_start": 1}},
- {"$project": project_stage}]
-
- # results is dict in pymongo<=2.6.3 and CommandCursor in >=3.0
- results = self.db.meter.aggregate(aggregation_query,
- **self._make_aggregation_params())
- return [self._stats_result_to_model(point, groupby, aggregate,
- period, first_timestamp)
- for point in self._get_results(results)]
-
- def _stats_result_aggregates(self, result, aggregate):
- stats_args = {}
- for attr, func in Connection.STANDARD_AGGREGATES.items():
- if attr in result:
- stats_args.update(func.finalize(result,
- version_array=self.version))
-
- if aggregate:
- stats_args['aggregate'] = {}
- for agr in aggregate:
- stats_args['aggregate'].update(
- Connection.AGGREGATES[agr.func].finalize(
- result, agr.param, self.version))
- return stats_args
-
- def _stats_result_to_model(self, result, groupby, aggregate, period,
- first_timestamp):
- if period is None:
- period = 0
- first_timestamp = pymongo_utils.from_unix_timestamp(first_timestamp)
- stats_args = self._stats_result_aggregates(result, aggregate)
-
- stats_args['unit'] = result['unit']
- stats_args['duration'] = (result["last_timestamp"] -
- result["first_timestamp"]).total_seconds()
- stats_args['duration_start'] = result['first_timestamp']
- stats_args['duration_end'] = result['last_timestamp']
- stats_args['period'] = period
- start = result.get("period_start", 0) * period
-
- stats_args['period_start'] = (first_timestamp +
- datetime.timedelta(seconds=start))
- stats_args['period_end'] = (first_timestamp +
- datetime.timedelta(seconds=start + period)
- if period else result['last_timestamp'])
-
- stats_args['groupby'] = (
- dict((g, result['_id'].get(g.replace(".", "/")))
- for g in groupby) if groupby else None)
- return models.Statistics(**stats_args)
-
- def _compile_aggregate_stages(self, aggregate, group_stage, project_stage):
- if not aggregate:
- for aggregation in Connection.STANDARD_AGGREGATES.values():
- group_stage.update(
- aggregation.group(version_array=self.version)
- )
- project_stage.update(
- aggregation.project(
- version_array=self.version
- )
- )
- else:
- for description in aggregate:
- aggregation = Connection.AGGREGATES.get(description.func)
- if aggregation:
- if not aggregation.validate(description.param):
- raise storage.StorageBadAggregate(
- 'Bad aggregate: %s.%s' % (description.func,
- description.param))
- group_stage.update(
- aggregation.group(description.param,
- version_array=self.version)
- )
- project_stage.update(
- aggregation.project(description.param,
- version_array=self.version)
- )
-
- @staticmethod
- def _get_results(results):
- if isinstance(results, dict):
- return results.get('result', [])
- else:
- return results
-
- def _make_aggregation_params(self):
- if self.version >= pymongo_utils.COMPLETE_AGGREGATE_COMPATIBLE_VERSION:
- return {"allowDiskUse": True}
- return {}
diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py
deleted file mode 100644
index d9ea9c35..00000000
--- a/ceilometer/storage/impl_sqlalchemy.py
+++ /dev/null
@@ -1,838 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""SQLAlchemy storage backend."""
-
-from __future__ import absolute_import
-import datetime
-import hashlib
-import os
-
-from oslo_db import api
-from oslo_db import exception as dbexc
-from oslo_db.sqlalchemy import session as db_session
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_utils import timeutils
-import six
-import sqlalchemy as sa
-from sqlalchemy import and_
-from sqlalchemy import distinct
-from sqlalchemy import func
-from sqlalchemy.orm import aliased
-from sqlalchemy.sql.expression import cast
-
-import ceilometer
-from ceilometer.i18n import _
-from ceilometer import storage
-from ceilometer.storage import base
-from ceilometer.storage import models as api_models
-from ceilometer.storage.sqlalchemy import models
-from ceilometer.storage.sqlalchemy import utils as sql_utils
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-STANDARD_AGGREGATES = dict(
- avg=func.avg(models.Sample.volume).label('avg'),
- sum=func.sum(models.Sample.volume).label('sum'),
- min=func.min(models.Sample.volume).label('min'),
- max=func.max(models.Sample.volume).label('max'),
- count=func.count(models.Sample.volume).label('count')
-)
-
-UNPARAMETERIZED_AGGREGATES = dict(
- stddev=func.stddev_pop(models.Sample.volume).label('stddev')
-)
-
-PARAMETERIZED_AGGREGATES = dict(
- validate=dict(
- cardinality=lambda p: p in ['resource_id', 'user_id', 'project_id']
- ),
- compute=dict(
- cardinality=lambda p: func.count(
- distinct(getattr(models.Resource, p))
- ).label('cardinality/%s' % p)
- )
-)
-
-AVAILABLE_CAPABILITIES = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': True}},
- 'statistics': {'groupby': True,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {
- 'max': True,
- 'min': True,
- 'sum': True,
- 'avg': True,
- 'count': True,
- 'stddev': True,
- 'cardinality': True}}
- },
-}
-
-
-AVAILABLE_STORAGE_CAPABILITIES = {
- 'storage': {'production_ready': True},
-}
-
-
-def apply_metaquery_filter(session, query, metaquery):
- """Apply provided metaquery filter to existing query.
-
- :param session: session used for original query
- :param query: Query instance
- :param metaquery: dict with metadata to match on.
- """
- for k, value in six.iteritems(metaquery):
- key = k[9:] # strip out 'metadata.' prefix
- try:
- _model = sql_utils.META_TYPE_MAP[type(value)]
- except KeyError:
- raise ceilometer.NotImplementedError(
- 'Query on %(key)s is of %(value)s '
- 'type and is not supported' %
- {"key": k, "value": type(value)})
- else:
- meta_alias = aliased(_model)
- on_clause = and_(models.Resource.internal_id == meta_alias.id,
- meta_alias.meta_key == key)
-            # An outer join is needed to support metaqueries with the 'or'
-            # operator on a non-existent metadata field; see the
-            # test_query_non_existing_metadata_with_result test case.
- query = query.outerjoin(meta_alias, on_clause)
- query = query.filter(meta_alias.value == value)
-
- return query
-
-
-def make_query_from_filter(session, query, sample_filter, require_meter=True):
- """Return a query dictionary based on the settings in the filter.
-
- :param session: session used for original query
- :param query: Query instance
- :param sample_filter: SampleFilter instance
- :param require_meter: If true and the filter does not have a meter,
- raise an error.
- """
-
- if sample_filter.meter:
- query = query.filter(models.Meter.name == sample_filter.meter)
- elif require_meter:
- raise RuntimeError('Missing required meter specifier')
- if sample_filter.source:
- query = query.filter(
- models.Resource.source_id == sample_filter.source)
- if sample_filter.start_timestamp:
- ts_start = sample_filter.start_timestamp
- if sample_filter.start_timestamp_op == 'gt':
- query = query.filter(models.Sample.timestamp > ts_start)
- else:
- query = query.filter(models.Sample.timestamp >= ts_start)
- if sample_filter.end_timestamp:
- ts_end = sample_filter.end_timestamp
- if sample_filter.end_timestamp_op == 'le':
- query = query.filter(models.Sample.timestamp <= ts_end)
- else:
- query = query.filter(models.Sample.timestamp < ts_end)
- if sample_filter.user:
- if sample_filter.user == 'None':
- sample_filter.user = None
- query = query.filter(models.Resource.user_id == sample_filter.user)
- if sample_filter.project:
- if sample_filter.project == 'None':
- sample_filter.project = None
- query = query.filter(
- models.Resource.project_id == sample_filter.project)
- if sample_filter.resource:
- query = query.filter(
- models.Resource.resource_id == sample_filter.resource)
- if sample_filter.message_id:
- query = query.filter(
- models.Sample.message_id == sample_filter.message_id)
-
- if sample_filter.metaquery:
- query = apply_metaquery_filter(session, query,
- sample_filter.metaquery)
-
- return query
-
-
-class Connection(base.Connection):
- """Put the data into a SQLAlchemy database.
-
- Tables::
-
- - meter
- - meter definition
- - { id: meter id
- name: meter name
- type: meter type
- unit: meter unit
- }
- - resource
- - resource definition
- - { internal_id: resource id
- resource_id: resource uuid
- user_id: user uuid
- project_id: project uuid
- source_id: source id
- resource_metadata: metadata dictionary
- metadata_hash: metadata dictionary hash
- }
- - sample
- - the raw incoming data
- - { id: sample id
- meter_id: meter id (->meter.id)
- resource_id: resource id (->resource.internal_id)
- volume: sample volume
- timestamp: datetime
- recorded_at: datetime
- message_signature: message signature
- message_id: message uuid
- }
- """
- CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
- AVAILABLE_CAPABILITIES)
- STORAGE_CAPABILITIES = utils.update_nested(
- base.Connection.STORAGE_CAPABILITIES,
- AVAILABLE_STORAGE_CAPABILITIES,
- )
-
- def __init__(self, conf, url):
- super(Connection, self).__init__(conf, url)
-        # Set max_retries to 0, since oslo.db may otherwise retry the db
-        # connection up to max_retries ^ 2 times on failure, and db
-        # reconnection has already been implemented in the
-        # storage.__init__.get_connection_from_config function
- options = dict(self.conf.database.items())
- options['max_retries'] = 0
- # oslo.db doesn't support options defined by Ceilometer
- for opt in storage.OPTS:
- options.pop(opt.name, None)
- self._engine_facade = db_session.EngineFacade(url, **options)
-
- def upgrade(self):
- # NOTE(gordc): to minimise memory, only import migration when needed
- from oslo_db.sqlalchemy import migration
- path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
- 'sqlalchemy', 'migrate_repo')
- engine = self._engine_facade.get_engine()
-
- from migrate import exceptions as migrate_exc
- from migrate.versioning import api
- from migrate.versioning import repository
-
- repo = repository.Repository(path)
- try:
- api.db_version(engine, repo)
- except migrate_exc.DatabaseNotControlledError:
- models.Base.metadata.create_all(engine)
- api.version_control(engine, repo, repo.latest)
- else:
- migration.db_sync(engine, path)
-
- def clear(self):
- engine = self._engine_facade.get_engine()
- for table in reversed(models.Base.metadata.sorted_tables):
- engine.execute(table.delete())
- engine.dispose()
-
- @staticmethod
- def _create_meter(conn, name, type, unit):
- # TODO(gordc): implement lru_cache to improve performance
- try:
- meter = models.Meter.__table__
- trans = conn.begin_nested()
- if conn.dialect.name == 'sqlite':
- trans = conn.begin()
- with trans:
- meter_row = conn.execute(
- sa.select([meter.c.id])
- .where(sa.and_(meter.c.name == name,
- meter.c.type == type,
- meter.c.unit == unit))).first()
- meter_id = meter_row[0] if meter_row else None
- if meter_id is None:
- result = conn.execute(meter.insert(), name=name,
- type=type, unit=unit)
- meter_id = result.inserted_primary_key[0]
- except dbexc.DBDuplicateEntry:
- # retry function to pick up duplicate committed object
- meter_id = Connection._create_meter(conn, name, type, unit)
-
- return meter_id
-
- @staticmethod
- def _create_resource(conn, res_id, user_id, project_id, source_id,
- rmeta):
- # TODO(gordc): implement lru_cache to improve performance
- try:
- res = models.Resource.__table__
- m_hash = jsonutils.dumps(rmeta, sort_keys=True)
- if six.PY3:
- m_hash = m_hash.encode('utf-8')
- m_hash = hashlib.md5(m_hash).hexdigest()
- trans = conn.begin_nested()
- if conn.dialect.name == 'sqlite':
- trans = conn.begin()
- with trans:
- res_row = conn.execute(
- sa.select([res.c.internal_id])
- .where(sa.and_(res.c.resource_id == res_id,
- res.c.user_id == user_id,
- res.c.project_id == project_id,
- res.c.source_id == source_id,
- res.c.metadata_hash == m_hash))).first()
- internal_id = res_row[0] if res_row else None
- if internal_id is None:
- result = conn.execute(res.insert(), resource_id=res_id,
- user_id=user_id,
- project_id=project_id,
- source_id=source_id,
- resource_metadata=rmeta,
- metadata_hash=m_hash)
- internal_id = result.inserted_primary_key[0]
- if rmeta and isinstance(rmeta, dict):
- meta_map = {}
- for key, v in utils.dict_to_keyval(rmeta):
- try:
- _model = sql_utils.META_TYPE_MAP[type(v)]
- if meta_map.get(_model) is None:
- meta_map[_model] = []
- meta_map[_model].append(
- {'id': internal_id, 'meta_key': key,
- 'value': v})
- except KeyError:
- LOG.warning(_("Unknown metadata type. Key "
- "(%s) will not be queryable."),
- key)
- for _model in meta_map.keys():
- conn.execute(_model.__table__.insert(),
- meta_map[_model])
-
- except dbexc.DBDuplicateEntry:
- # retry function to pick up duplicate committed object
- internal_id = Connection._create_resource(
- conn, res_id, user_id, project_id, source_id, rmeta)
-
- return internal_id
-
-    # FIXME(sileht): use set_defaults to pass cfg.CONF.database.retry_interval
-    # and cfg.CONF.database.max_retries to this method when the global config
-    # has been removed (passing cfg.CONF directly doesn't work because it
-    # copies the default instead of the configured value)
- @api.wrap_db_retry(retry_interval=10, max_retries=10,
- retry_on_deadlock=True)
- def record_metering_data(self, data):
- """Write the data to the backend storage system.
-
- :param data: a dictionary such as returned by
- ceilometer.publisher.utils.meter_message_from_counter
- """
- engine = self._engine_facade.get_engine()
- with engine.begin() as conn:
- # Record the raw data for the sample.
- m_id = self._create_meter(conn,
- data['counter_name'],
- data['counter_type'],
- data['counter_unit'])
- res_id = self._create_resource(conn,
- data['resource_id'],
- data['user_id'],
- data['project_id'],
- data['source'],
- data['resource_metadata'])
- sample = models.Sample.__table__
- conn.execute(sample.insert(), meter_id=m_id,
- resource_id=res_id,
- timestamp=data['timestamp'],
- volume=data['counter_volume'],
- message_signature=data['message_signature'],
- message_id=data['message_id'])
-
- def clear_expired_metering_data(self, ttl):
- """Clear expired data from the backend storage system.
-
- Clearing occurs according to the time-to-live.
- :param ttl: Number of seconds to keep records for.
- """
-        # Prevent database deadlocks by using a separate
-        # transaction for each delete
- session = self._engine_facade.get_session()
- with session.begin():
- end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
- sample_q = (session.query(models.Sample)
- .filter(models.Sample.timestamp < end))
- rows = sample_q.delete()
- LOG.info("%d samples removed from database", rows)
-
- if not self.conf.database.sql_expire_samples_only:
- with session.begin():
- # remove Meter definitions with no matching samples
- (session.query(models.Meter)
- .filter(~models.Meter.samples.any())
- .delete(synchronize_session=False))
-
- with session.begin():
- resource_q = (session.query(models.Resource.internal_id)
- .filter(~models.Resource.samples.any()))
- # mark resource with no matching samples for delete
- resource_q.update({models.Resource.metadata_hash: "delete_"
- + cast(models.Resource.internal_id,
- sa.String)},
- synchronize_session=False)
-
- # remove metadata of resources marked for delete
- for table in [models.MetaText, models.MetaBigInt,
- models.MetaFloat, models.MetaBool]:
- with session.begin():
- resource_q = (session.query(models.Resource.internal_id)
- .filter(models.Resource.metadata_hash
- .like('delete_%')))
- resource_subq = resource_q.subquery()
- (session.query(table)
- .filter(table.id.in_(resource_subq))
- .delete(synchronize_session=False))
-
- # remove resource marked for delete
- with session.begin():
- resource_q = (session.query(models.Resource.internal_id)
- .filter(models.Resource.metadata_hash
- .like('delete_%')))
- resource_q.delete(synchronize_session=False)
- LOG.info("Expired residual resource and"
- " meter definition data")
-
- def get_resources(self, user=None, project=None, source=None,
- start_timestamp=None, start_timestamp_op=None,
- end_timestamp=None, end_timestamp_op=None,
- metaquery=None, resource=None, limit=None):
- """Return an iterable of api_models.Resource instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param source: Optional source filter.
- :param start_timestamp: Optional modified timestamp start range.
- :param start_timestamp_op: Optional start time operator, like gt, ge.
- :param end_timestamp: Optional modified timestamp end range.
- :param end_timestamp_op: Optional end time operator, like lt, le.
- :param metaquery: Optional dict with metadata to match on.
- :param resource: Optional resource filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return
- s_filter = storage.SampleFilter(user=user,
- project=project,
- source=source,
- start_timestamp=start_timestamp,
- start_timestamp_op=start_timestamp_op,
- end_timestamp=end_timestamp,
- end_timestamp_op=end_timestamp_op,
- metaquery=metaquery,
- resource=resource)
-
- session = self._engine_facade.get_session()
- # get list of resource_ids
- has_timestamp = start_timestamp or end_timestamp
-        # NOTE: When sql_expire_samples_only is enabled, there may be
-        # resources without any sample; in that case we must use an inner
-        # join on the sample table to avoid wrong results.
- if self.conf.database.sql_expire_samples_only or has_timestamp:
- res_q = session.query(distinct(models.Resource.resource_id)).join(
- models.Sample,
- models.Sample.resource_id == models.Resource.internal_id)
- else:
- res_q = session.query(distinct(models.Resource.resource_id))
- res_q = make_query_from_filter(session, res_q, s_filter,
- require_meter=False)
- res_q = res_q.limit(limit) if limit else res_q
- for res_id in res_q.all():
-
- # get max and min sample timestamp value
- min_max_q = (session.query(func.max(models.Sample.timestamp)
- .label('max_timestamp'),
- func.min(models.Sample.timestamp)
- .label('min_timestamp'))
- .join(models.Resource,
- models.Resource.internal_id ==
- models.Sample.resource_id)
- .filter(models.Resource.resource_id ==
- res_id[0]))
-
- min_max_q = make_query_from_filter(session, min_max_q, s_filter,
- require_meter=False)
-
- min_max = min_max_q.first()
-
- # get resource details for latest sample
- res_q = (session.query(models.Resource.resource_id,
- models.Resource.user_id,
- models.Resource.project_id,
- models.Resource.source_id,
- models.Resource.resource_metadata)
- .join(models.Sample,
- models.Sample.resource_id ==
- models.Resource.internal_id)
- .filter(models.Sample.timestamp ==
- min_max.max_timestamp)
- .filter(models.Resource.resource_id ==
- res_id[0])
- .order_by(models.Sample.id.desc()).limit(1))
-
- res = res_q.first()
-
- yield api_models.Resource(
- resource_id=res.resource_id,
- project_id=res.project_id,
- first_sample_timestamp=min_max.min_timestamp,
- last_sample_timestamp=min_max.max_timestamp,
- source=res.source_id,
- user_id=res.user_id,
- metadata=res.resource_metadata
- )
-
- def get_meters(self, user=None, project=None, resource=None, source=None,
- metaquery=None, limit=None, unique=False):
- """Return an iterable of api_models.Meter instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param resource: Optional ID of the resource.
- :param source: Optional source filter.
- :param metaquery: Optional dict with metadata to match on.
- :param limit: Maximum number of results to return.
- :param unique: If set to true, return only unique meter information.
- """
- if limit == 0:
- return
- s_filter = storage.SampleFilter(user=user,
- project=project,
- source=source,
- metaquery=metaquery,
- resource=resource)
-
-        # NOTE(gordc): get the latest sample of each meter/resource. We do
-        # not filter here as we want to filter only on the latest record.
- session = self._engine_facade.get_session()
-
- subq = session.query(func.max(models.Sample.id).label('id')).join(
- models.Resource,
- models.Resource.internal_id == models.Sample.resource_id)
-
- if unique:
- subq = subq.group_by(models.Sample.meter_id)
- else:
- subq = subq.group_by(models.Sample.meter_id,
- models.Resource.resource_id)
-
- if resource:
- subq = subq.filter(models.Resource.resource_id == resource)
- subq = subq.subquery()
-
- # get meter details for samples.
- query_sample = (session.query(models.Sample.meter_id,
- models.Meter.name, models.Meter.type,
- models.Meter.unit,
- models.Resource.resource_id,
- models.Resource.project_id,
- models.Resource.source_id,
- models.Resource.user_id).join(
- subq, subq.c.id == models.Sample.id)
- .join(models.Meter, models.Meter.id == models.Sample.meter_id)
- .join(models.Resource,
- models.Resource.internal_id == models.Sample.resource_id))
- query_sample = make_query_from_filter(session, query_sample, s_filter,
- require_meter=False)
-
- query_sample = query_sample.limit(limit) if limit else query_sample
-
- if unique:
- for row in query_sample.all():
- yield api_models.Meter(
- name=row.name,
- type=row.type,
- unit=row.unit,
- resource_id=None,
- project_id=None,
- source=None,
- user_id=None)
- else:
- for row in query_sample.all():
- yield api_models.Meter(
- name=row.name,
- type=row.type,
- unit=row.unit,
- resource_id=row.resource_id,
- project_id=row.project_id,
- source=row.source_id,
- user_id=row.user_id)
-
- @staticmethod
- def _retrieve_samples(query):
- samples = query.all()
-
- for s in samples:
- # Remove the id generated by the database when
- # the sample was inserted. It is an implementation
- # detail that should not leak outside of the driver.
- yield api_models.Sample(
- source=s.source_id,
- counter_name=s.counter_name,
- counter_type=s.counter_type,
- counter_unit=s.counter_unit,
- counter_volume=s.counter_volume,
- user_id=s.user_id,
- project_id=s.project_id,
- resource_id=s.resource_id,
- timestamp=s.timestamp,
- recorded_at=s.recorded_at,
- resource_metadata=s.resource_metadata,
- message_id=s.message_id,
- message_signature=s.message_signature,
- )
-
- def get_samples(self, sample_filter, limit=None):
- """Return an iterable of api_models.Samples.
-
- :param sample_filter: Filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return []
-
- session = self._engine_facade.get_session()
- query = session.query(models.Sample.timestamp,
- models.Sample.recorded_at,
- models.Sample.message_id,
- models.Sample.message_signature,
- models.Sample.volume.label('counter_volume'),
- models.Meter.name.label('counter_name'),
- models.Meter.type.label('counter_type'),
- models.Meter.unit.label('counter_unit'),
- models.Resource.source_id,
- models.Resource.user_id,
- models.Resource.project_id,
- models.Resource.resource_metadata,
- models.Resource.resource_id).join(
- models.Meter, models.Meter.id == models.Sample.meter_id).join(
- models.Resource,
- models.Resource.internal_id == models.Sample.resource_id).order_by(
- models.Sample.timestamp.desc())
- query = make_query_from_filter(session, query, sample_filter,
- require_meter=False)
- if limit:
- query = query.limit(limit)
- return self._retrieve_samples(query)
-
- def query_samples(self, filter_expr=None, orderby=None, limit=None):
- if limit == 0:
- return []
-
- session = self._engine_facade.get_session()
- engine = self._engine_facade.get_engine()
- query = session.query(models.Sample.timestamp,
- models.Sample.recorded_at,
- models.Sample.message_id,
- models.Sample.message_signature,
- models.Sample.volume.label('counter_volume'),
- models.Meter.name.label('counter_name'),
- models.Meter.type.label('counter_type'),
- models.Meter.unit.label('counter_unit'),
- models.Resource.source_id,
- models.Resource.user_id,
- models.Resource.project_id,
- models.Resource.resource_metadata,
- models.Resource.resource_id).join(
- models.Meter, models.Meter.id == models.Sample.meter_id).join(
- models.Resource,
- models.Resource.internal_id == models.Sample.resource_id)
- transformer = sql_utils.QueryTransformer(models.FullSample, query,
- dialect=engine.dialect.name)
- if filter_expr is not None:
- transformer.apply_filter(filter_expr)
-
- transformer.apply_options(orderby, limit)
- return self._retrieve_samples(transformer.get_query())
-
- @staticmethod
- def _get_aggregate_functions(aggregate):
- if not aggregate:
- return [f for f in STANDARD_AGGREGATES.values()]
-
- functions = []
-
- for a in aggregate:
- if a.func in STANDARD_AGGREGATES:
- functions.append(STANDARD_AGGREGATES[a.func])
- elif a.func in UNPARAMETERIZED_AGGREGATES:
- functions.append(UNPARAMETERIZED_AGGREGATES[a.func])
- elif a.func in PARAMETERIZED_AGGREGATES['compute']:
- validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func)
- if not (validate and validate(a.param)):
- raise storage.StorageBadAggregate('Bad aggregate: %s.%s'
- % (a.func, a.param))
- compute = PARAMETERIZED_AGGREGATES['compute'][a.func]
- functions.append(compute(a.param))
- else:
-                # NOTE(zqfan): We have already checked this at the API
-                # level, but keep it here in case of direct storage calls.
- msg = _('Invalid aggregation function: %s') % a.func
- raise storage.StorageBadAggregate(msg)
-
- return functions
-
- def _make_stats_query(self, sample_filter, groupby, aggregate):
-
- select = [
- func.min(models.Sample.timestamp).label('tsmin'),
- func.max(models.Sample.timestamp).label('tsmax'),
- models.Meter.unit
- ]
- select.extend(self._get_aggregate_functions(aggregate))
-
- session = self._engine_facade.get_session()
-
- if groupby:
- group_attributes = []
- for g in groupby:
- if g != 'resource_metadata.instance_type':
- group_attributes.append(getattr(models.Resource, g))
- else:
- group_attributes.append(
- getattr(models.MetaText, 'value')
- .label('resource_metadata.instance_type'))
-
- select.extend(group_attributes)
-
- query = (
- session.query(*select)
- .join(models.Meter,
- models.Meter.id == models.Sample.meter_id)
- .join(models.Resource,
- models.Resource.internal_id == models.Sample.resource_id)
- .group_by(models.Meter.unit))
-
- if groupby:
- for g in groupby:
- if g == 'resource_metadata.instance_type':
- query = query.join(
- models.MetaText,
- models.Resource.internal_id == models.MetaText.id)
- query = query.filter(
- models.MetaText.meta_key == 'instance_type')
- query = query.group_by(*group_attributes)
-
- return make_query_from_filter(session, query, sample_filter)
-
- @staticmethod
- def _stats_result_aggregates(result, aggregate):
- stats_args = {}
- if isinstance(result.count, six.integer_types):
- stats_args['count'] = result.count
- for attr in ['min', 'max', 'sum', 'avg']:
- if hasattr(result, attr):
- stats_args[attr] = getattr(result, attr)
- if aggregate:
- stats_args['aggregate'] = {}
- for a in aggregate:
- key = '%s%s' % (a.func, '/%s' % a.param if a.param else '')
- stats_args['aggregate'][key] = getattr(result, key)
- return stats_args
-
- @staticmethod
- def _stats_result_to_model(result, period, period_start,
- period_end, groupby, aggregate):
- stats_args = Connection._stats_result_aggregates(result, aggregate)
- stats_args['unit'] = result.unit
- duration = (timeutils.delta_seconds(result.tsmin, result.tsmax)
- if result.tsmin is not None and result.tsmax is not None
- else None)
- stats_args['duration'] = duration
- stats_args['duration_start'] = result.tsmin
- stats_args['duration_end'] = result.tsmax
- stats_args['period'] = period
- stats_args['period_start'] = period_start
- stats_args['period_end'] = period_end
- stats_args['groupby'] = (dict(
- (g, getattr(result, g)) for g in groupby) if groupby else None)
- return api_models.Statistics(**stats_args)
-
- def get_meter_statistics(self, sample_filter, period=None, groupby=None,
- aggregate=None):
- """Return an iterable of api_models.Statistics instances.
-
- Items are containing meter statistics described by the query
- parameters. The filter must have a meter value set.
- """
- if groupby:
- for group in groupby:
- if group not in ['user_id', 'project_id', 'resource_id',
- 'resource_metadata.instance_type']:
- raise ceilometer.NotImplementedError('Unable to group by '
- 'these fields')
-
- if not period:
- for res in self._make_stats_query(sample_filter,
- groupby,
- aggregate):
- if res.count:
- yield self._stats_result_to_model(res, 0,
- res.tsmin, res.tsmax,
- groupby,
- aggregate)
- return
-
- if not (sample_filter.start_timestamp and sample_filter.end_timestamp):
- res = self._make_stats_query(sample_filter,
- None,
- aggregate).first()
- if not res:
-                # NOTE(liusheng): 'res' may be None because no sample was
-                # found matching the sample filter(s).
- return
-
- query = self._make_stats_query(sample_filter, groupby, aggregate)
-        # HACK(jd) This is an awful way to compute stats by period, but
-        # since we're trying to be SQL agnostic we have to write portable
-        # code, so here it is, admire! We issue one request per period to
-        # get the stats. We would like to use GROUP BY, but there's no
-        # portable way to manipulate timestamps in SQL, so we can't.
- for period_start, period_end in base.iter_period(
- sample_filter.start_timestamp or res.tsmin,
- sample_filter.end_timestamp or res.tsmax,
- period):
- q = query.filter(models.Sample.timestamp >= period_start)
- q = q.filter(models.Sample.timestamp < period_end)
- for r in q.all():
- if r.count:
- yield self._stats_result_to_model(
- result=r,
- period=int(timeutils.delta_seconds(period_start,
- period_end)),
- period_start=period_start,
- period_end=period_end,
- groupby=groupby,
- aggregate=aggregate
- )
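The per-period loop at the end of the removed SQLAlchemy driver issues one query per time window rather than relying on a SQL GROUP BY over timestamps. The sketch below is a hypothetical stand-in for base.iter_period (the real helper lives in ceilometer/storage/base.py, removed elsewhere in this change) and only illustrates the shape of that iteration:

    import datetime

    def iter_period(start, end, period):
        # Hypothetical equivalent of ceilometer.storage.base.iter_period:
        # yield consecutive (period_start, period_end) windows of 'period'
        # seconds covering [start, end).
        window = datetime.timedelta(seconds=period)
        current = start
        while current < end:
            yield current, current + window
            current += window

    start = datetime.datetime(2017, 1, 1, 0, 0)
    end = datetime.datetime(2017, 1, 1, 1, 0)
    for period_start, period_end in iter_period(start, end, 900):
        # the driver filters Sample.timestamp >= period_start and
        # < period_end for each window and aggregates the matching rows
        print(period_start, period_end)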
diff --git a/ceilometer/storage/models.py b/ceilometer/storage/models.py
deleted file mode 100644
index 816a4c5d..00000000
--- a/ceilometer/storage/models.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Model classes for use in the storage API.
-"""
-from ceilometer.storage import base
-
-
-class Resource(base.Model):
- """Something for which sample data has been collected."""
-
- def __init__(self, resource_id, project_id,
- first_sample_timestamp,
- last_sample_timestamp,
- source, user_id, metadata):
- """Create a new resource.
-
- :param resource_id: UUID of the resource
- :param project_id: UUID of project owning the resource
- :param first_sample_timestamp: first sample timestamp captured
- :param last_sample_timestamp: last sample timestamp captured
- :param source: the identifier for the user/project id definition
- :param user_id: UUID of user owning the resource
- :param metadata: most current metadata for the resource (a dict)
- """
- base.Model.__init__(self,
- resource_id=resource_id,
- first_sample_timestamp=first_sample_timestamp,
- last_sample_timestamp=last_sample_timestamp,
- project_id=project_id,
- source=source,
- user_id=user_id,
- metadata=metadata,
- )
-
-
-class Meter(base.Model):
- """Definition of a meter for which sample data has been collected."""
-
- def __init__(self, name, type, unit, resource_id, project_id, source,
- user_id):
- """Create a new meter.
-
- :param name: name of the meter
- :param type: type of the meter (gauge, delta, cumulative)
- :param unit: unit of the meter
- :param resource_id: UUID of the resource
- :param project_id: UUID of project owning the resource
- :param source: the identifier for the user/project id definition
- :param user_id: UUID of user owning the resource
- """
- base.Model.__init__(self,
- name=name,
- type=type,
- unit=unit,
- resource_id=resource_id,
- project_id=project_id,
- source=source,
- user_id=user_id,
- )
-
-
-class Sample(base.Model):
- """One collected data point."""
- def __init__(self,
- source,
- counter_name, counter_type, counter_unit, counter_volume,
- user_id, project_id, resource_id,
- timestamp, resource_metadata,
- message_id,
- message_signature,
- recorded_at,
- ):
- """Create a new sample.
-
- :param source: the identifier for the user/project id definition
- :param counter_name: the name of the measurement being taken
- :param counter_type: the type of the measurement
- :param counter_unit: the units for the measurement
- :param counter_volume: the measured value
- :param user_id: the user that triggered the measurement
- :param project_id: the project that owns the resource
- :param resource_id: the thing on which the measurement was taken
- :param timestamp: the time of the measurement
- :param resource_metadata: extra details about the resource
- :param message_id: a message identifier
- :param recorded_at: sample record timestamp
- :param message_signature: a hash created from the rest of the
- message data
- """
- base.Model.__init__(self,
- source=source,
- counter_name=counter_name,
- counter_type=counter_type,
- counter_unit=counter_unit,
- counter_volume=counter_volume,
- user_id=user_id,
- project_id=project_id,
- resource_id=resource_id,
- timestamp=timestamp,
- resource_metadata=resource_metadata,
- message_id=message_id,
- message_signature=message_signature,
- recorded_at=recorded_at)
-
-
-class Statistics(base.Model):
- """Computed statistics based on a set of sample data."""
- def __init__(self, unit,
- period, period_start, period_end,
- duration, duration_start, duration_end,
- groupby, **data):
- """Create a new statistics object.
-
- :param unit: The unit type of the data set
- :param period: The length of the time range covered by these stats
- :param period_start: The timestamp for the start of the period
- :param period_end: The timestamp for the end of the period
- :param duration: The total time for the matching samples
- :param duration_start: The earliest time for the matching samples
- :param duration_end: The latest time for the matching samples
- :param groupby: The fields used to group the samples.
- :param data: some or all of the following aggregates
- min: The smallest volume found
- max: The largest volume found
- avg: The average of all volumes found
- sum: The total of all volumes found
- count: The number of samples found
- aggregate: name-value pairs for selectable aggregates
- """
- base.Model.__init__(self, unit=unit,
- period=period, period_start=period_start,
- period_end=period_end, duration=duration,
- duration_start=duration_start,
- duration_end=duration_end,
- groupby=groupby,
- **data)
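The model classes above only forward keyword arguments to base.Model (defined in ceilometer/storage/base.py, removed in the same change and not shown in this hunk). Assuming base.Model simply stores those keyword arguments as attributes, a toy equivalent for reading purposes would be:

    class ToyModel(object):
        # assumption: base.Model behaves roughly like this attribute bag
        def __init__(self, **kwargs):
            self.__dict__.update(kwargs)

    stats = ToyModel(unit='%', period=300, avg=42.0, count=10, groupby=None)
    print(stats.unit, stats.avg, stats.count)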
diff --git a/ceilometer/storage/mongo/__init__.py b/ceilometer/storage/mongo/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/storage/mongo/__init__.py
+++ /dev/null
diff --git a/ceilometer/storage/mongo/utils.py b/ceilometer/storage/mongo/utils.py
deleted file mode 100644
index b4f425ea..00000000
--- a/ceilometer/storage/mongo/utils.py
+++ /dev/null
@@ -1,590 +0,0 @@
-#
-# Copyright Ericsson AB 2013. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Common functions for MongoDB backend
-"""
-
-import datetime
-import time
-import weakref
-
-from oslo_log import log
-from oslo_utils import netutils
-import pymongo
-import pymongo.errors
-import six
-from six.moves.urllib import parse
-
-from ceilometer.i18n import _
-
-ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86
-
-LOG = log.getLogger(__name__)
-
-MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4]
-COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6]
-
-FINALIZE_FLOAT_LAMBDA = lambda result, param=None: float(result)
-FINALIZE_INT_LAMBDA = lambda result, param=None: int(result)
-CARDINALITY_VALIDATION = (lambda name, param: param in ['resource_id',
- 'user_id',
- 'project_id',
- 'source'])
-
-
-def make_timestamp_range(start, end,
- start_timestamp_op=None, end_timestamp_op=None):
-
- """Create the query document to find timestamps within that range.
-
-    This is done given two possible datetimes and their operators.
-    By default, $gte is used for the lower bound and $lt for the upper bound.
- """
- ts_range = {}
-
- if start:
- if start_timestamp_op == 'gt':
- start_timestamp_op = '$gt'
- else:
- start_timestamp_op = '$gte'
- ts_range[start_timestamp_op] = start
-
- if end:
- if end_timestamp_op == 'le':
- end_timestamp_op = '$lte'
- else:
- end_timestamp_op = '$lt'
- ts_range[end_timestamp_op] = end
- return ts_range
-
-
-def make_query_from_filter(sample_filter, require_meter=True):
- """Return a query dictionary based on the settings in the filter.
-
- :param sample_filter: SampleFilter instance
- :param require_meter: If true and the filter does not have a meter,
- raise an error.
- """
- q = {}
-
- if sample_filter.user:
- q['user_id'] = sample_filter.user
- if sample_filter.project:
- q['project_id'] = sample_filter.project
-
- if sample_filter.meter:
- q['counter_name'] = sample_filter.meter
- elif require_meter:
- raise RuntimeError('Missing required meter specifier')
-
- ts_range = make_timestamp_range(sample_filter.start_timestamp,
- sample_filter.end_timestamp,
- sample_filter.start_timestamp_op,
- sample_filter.end_timestamp_op)
-
- if ts_range:
- q['timestamp'] = ts_range
-
- if sample_filter.resource:
- q['resource_id'] = sample_filter.resource
- if sample_filter.source:
- q['source'] = sample_filter.source
- if sample_filter.message_id:
- q['message_id'] = sample_filter.message_id
-
-    # The samples call metadata 'resource_metadata', so we convert
-    # to that.
- q.update(dict(
- ('resource_%s' % k, v) for (k, v) in six.iteritems(
- improve_keys(sample_filter.metaquery, metaquery=True))))
- return q
-
-
-def quote_key(key, reverse=False):
- """Prepare key for storage data in MongoDB.
-
- :param key: key that should be quoted
-    :param reverse: boolean, True if the parts of the key should be
-                    returned in reverse order
- :return: iter of quoted part of the key
- """
- r = -1 if reverse else 1
-
- for k in key.split('.')[::r]:
- if k.startswith('$'):
- k = parse.quote(k)
- yield k
-
-
-def improve_keys(data, metaquery=False):
- """Improves keys in dict if they contained '.' or started with '$'.
-
- :param data: is a dictionary where keys need to be checked and improved
- :param metaquery: boolean, if True dots are not escaped from the keys
- :return: improved dictionary if keys contained dots or started with '$':
- {'a.b': 'v'} -> {'a': {'b': 'v'}}
- {'$ab': 'v'} -> {'%24ab': 'v'}
- """
- if not isinstance(data, dict):
- return data
-
- if metaquery:
- for key in six.iterkeys(data):
- if '.$' in key:
- key_list = []
- for k in quote_key(key):
- key_list.append(k)
- new_key = '.'.join(key_list)
- data[new_key] = data.pop(key)
- else:
- for key, value in data.items():
- if isinstance(value, dict):
- improve_keys(value)
- if '.' in key:
- new_dict = {}
- for k in quote_key(key, reverse=True):
- new = {}
- new[k] = new_dict if new_dict else data.pop(key)
- new_dict = new
- data.update(new_dict)
- else:
- if key.startswith('$'):
- new_key = parse.quote(key)
- data[new_key] = data.pop(key)
- return data
-
-
-def unquote_keys(data):
- """Restores initial view of 'quoted' keys in dictionary data
-
- :param data: is a dictionary
- :return: data with restored keys if they were 'quoted'.
- """
- if isinstance(data, dict):
- for key, value in data.items():
- if isinstance(value, dict):
- unquote_keys(value)
- if key.startswith('%24'):
- k = parse.unquote(key)
- data[k] = data.pop(key)
- return data
-
-
-class ConnectionPool(object):
-
- def __init__(self):
- self._pool = {}
-
- def connect(self, conf, url):
- connection_options = pymongo.uri_parser.parse_uri(url)
- del connection_options['database']
- del connection_options['username']
- del connection_options['password']
- del connection_options['collection']
- pool_key = tuple(connection_options)
-
- if pool_key in self._pool:
- client = self._pool.get(pool_key)()
- if client:
- return client
- splitted_url = netutils.urlsplit(url)
- log_data = {'db': splitted_url.scheme,
- 'nodelist': connection_options['nodelist']}
- LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data)
- client = self._mongo_connect(conf, url)
- self._pool[pool_key] = weakref.ref(client)
- return client
-
- @staticmethod
- def _mongo_connect(conf, url):
- try:
- return MongoProxy(conf, pymongo.MongoClient(url))
- except pymongo.errors.ConnectionFailure as e:
- LOG.warning(_('Unable to connect to the database server: '
- '%(errmsg)s.') % {'errmsg': e})
- raise
-
-
-class QueryTransformer(object):
-
- operators = {"<": "$lt",
- ">": "$gt",
- "<=": "$lte",
- "=<": "$lte",
- ">=": "$gte",
- "=>": "$gte",
- "!=": "$ne",
- "in": "$in",
- "=~": "$regex"}
-
- complex_operators = {"or": "$or",
- "and": "$and"}
-
- ordering_functions = {"asc": pymongo.ASCENDING,
- "desc": pymongo.DESCENDING}
-
- def transform_orderby(self, orderby):
- orderby_filter = []
-
- for field in orderby:
- field_name = list(field.keys())[0]
- ordering = self.ordering_functions[list(field.values())[0]]
- orderby_filter.append((field_name, ordering))
- return orderby_filter
-
- @staticmethod
- def _move_negation_to_leaf(condition):
- """Moves every not operator to the leafs.
-
-        Moving is done by applying De Morgan's rules and annihilating
-        double negations.
- """
- def _apply_de_morgan(tree, negated_subtree, negated_op):
- if negated_op == "and":
- new_op = "or"
- else:
- new_op = "and"
-
- tree[new_op] = [{"not": child}
- for child in negated_subtree[negated_op]]
- del tree["not"]
-
- def transform(subtree):
- op = list(subtree.keys())[0]
- if op in ["and", "or"]:
- [transform(child) for child in subtree[op]]
- elif op == "not":
- negated_tree = subtree[op]
- negated_op = list(negated_tree.keys())[0]
- if negated_op == "and":
- _apply_de_morgan(subtree, negated_tree, negated_op)
- transform(subtree)
- elif negated_op == "or":
- _apply_de_morgan(subtree, negated_tree, negated_op)
- transform(subtree)
- elif negated_op == "not":
-                    # two consecutive 'not's annihilate each other
- value = list(negated_tree.values())[0]
- new_op = list(value.keys())[0]
- subtree[new_op] = negated_tree[negated_op][new_op]
- del subtree["not"]
- transform(subtree)
-
- transform(condition)
-
- def transform_filter(self, condition):
-        # In Mongo the not operator can only be applied to simple
-        # expressions, so we have to move every not operator to the
-        # leaves of the expression tree
- self._move_negation_to_leaf(condition)
- return self._process_json_tree(condition)
-
- def _handle_complex_op(self, complex_op, nodes):
- element_list = []
- for node in nodes:
- element = self._process_json_tree(node)
- element_list.append(element)
- complex_operator = self.complex_operators[complex_op]
- op = {complex_operator: element_list}
- return op
-
- def _handle_not_op(self, negated_tree):
-        # assumes that 'not' has already been moved to a leaf,
-        # so we are next to a leaf
- negated_op = list(negated_tree.keys())[0]
- negated_field = list(negated_tree[negated_op].keys())[0]
- value = negated_tree[negated_op][negated_field]
- if negated_op == "=":
- return {negated_field: {"$ne": value}}
- elif negated_op == "!=":
- return {negated_field: value}
- else:
- return {negated_field: {"$not":
- {self.operators[negated_op]: value}}}
-
- def _handle_simple_op(self, simple_op, nodes):
- field_name = list(nodes.keys())[0]
- field_value = list(nodes.values())[0]
-
- # no operator for equal in Mongo
- if simple_op == "=":
- op = {field_name: field_value}
- return op
-
- operator = self.operators[simple_op]
- op = {field_name: {operator: field_value}}
- return op
-
- def _process_json_tree(self, condition_tree):
- operator_node = list(condition_tree.keys())[0]
- nodes = list(condition_tree.values())[0]
-
- if operator_node in self.complex_operators:
- return self._handle_complex_op(operator_node, nodes)
-
- if operator_node == "not":
- negated_tree = condition_tree[operator_node]
- return self._handle_not_op(negated_tree)
-
- return self._handle_simple_op(operator_node, nodes)
-
-
-def safe_mongo_call(call):
- def closure(self, *args, **kwargs):
-        # NOTE(idegtiarov): the options max_retries and retry_interval have
-        # been registered in storage.__init__ in oslo_db.options.set_defaults.
-        # The default value for both options is 10.
- max_retries = self.conf.database.max_retries
- retry_interval = self.conf.database.retry_interval
- attempts = 0
- while True:
- try:
- return call(self, *args, **kwargs)
- except pymongo.errors.AutoReconnect as err:
- if 0 <= max_retries <= attempts:
- LOG.error('Unable to reconnect to the primary mongodb '
- 'after %(retries)d retries. Giving up.' %
- {'retries': max_retries})
- raise
- LOG.warning(_('Unable to reconnect to the primary '
- 'mongodb: %(errmsg)s. Trying again in '
- '%(retry_interval)d seconds.') %
- {'errmsg': err, 'retry_interval': retry_interval})
- attempts += 1
- time.sleep(retry_interval)
- return closure
-
-
-class MongoConn(object):
- def __init__(self, conf, method):
- self.conf = conf
- self.method = method
-
- @safe_mongo_call
- def __call__(self, *args, **kwargs):
- return self.method(*args, **kwargs)
-
-MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection)
- if not typ.startswith('_')])
-MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient)
- if not typ.startswith('_')]))
-MONGO_METHODS.update(set([typ for typ in dir(pymongo)
- if not typ.startswith('_')]))
-
-
-class MongoProxy(object):
- def __init__(self, conf, conn):
- self.conn = conn
- self.conf = conf
-
- def __getitem__(self, item):
- """Create and return proxy around the method in the connection.
-
- :param item: name of the connection
- """
- return MongoProxy(self.conf, self.conn[item])
-
- def find(self, *args, **kwargs):
-        # We need this method to return a CursorProxy object so that we
-        # can wrap the Cursor next function and catch the AutoReconnect
-        # exception.
- return CursorProxy(self.conf, self.conn.find(*args, **kwargs))
-
- def create_index(self, keys, name=None, *args, **kwargs):
- try:
- self.conn.create_index(keys, name=name, *args, **kwargs)
- except pymongo.errors.OperationFailure as e:
-            if e.code == ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS:
-                LOG.info("Index %s will be recreated.", name)
- self._recreate_index(keys, name, *args, **kwargs)
-
- @safe_mongo_call
- def _recreate_index(self, keys, name, *args, **kwargs):
- self.conn.drop_index(name)
- self.conn.create_index(keys, name=name, *args, **kwargs)
-
- def __getattr__(self, item):
- """Wrap MongoDB connection.
-
-        If item is the name of an executable method, for example find or
-        insert, wrap this method in a MongoConn.
-        Otherwise, wrap the attribute access in a MongoProxy.
- """
- if item in ("conf",):
- return super(MongoProxy, self).__getattr__(item)
- elif item in ('name', 'database'):
- return getattr(self.conn, item)
- elif item in MONGO_METHODS:
- return MongoConn(self.conf, getattr(self.conn, item))
- return MongoProxy(self.conf, getattr(self.conn, item))
-
- def __call__(self, *args, **kwargs):
- return self.conn(*args, **kwargs)
-
-
-class CursorProxy(pymongo.cursor.Cursor):
- def __init__(self, conf, cursor):
- self.cursor = cursor
- self.conf = conf
-
- def __getitem__(self, item):
- return self.cursor[item]
-
- @safe_mongo_call
- def next(self):
- """Wrap Cursor next method.
-
- This method will be executed before each Cursor next method call.
- """
- try:
- save_cursor = self.cursor.clone()
- return self.cursor.next()
- except pymongo.errors.AutoReconnect:
- self.cursor = save_cursor
- raise
-
- def __getattr__(self, item):
- return getattr(self.cursor, item)
-
-
-class AggregationFields(object):
- def __init__(self, version,
- group,
- project,
- finalize=None,
- parametrized=False,
- validate=None):
- self._finalize = finalize or FINALIZE_FLOAT_LAMBDA
- self.group = lambda *args: group(*args) if parametrized else group
- self.project = (lambda *args: project(*args)
- if parametrized else project)
- self.version = version
- self.validate = validate or (lambda name, param: True)
-
- def finalize(self, name, data, param=None):
- field = ("%s" % name) + ("/%s" % param if param else "")
- return {field: (self._finalize(data.get(field))
- if self._finalize else data.get(field))}
-
-
-class Aggregation(object):
- def __init__(self, name, aggregation_fields):
- self.name = name
- aggregation_fields = (aggregation_fields
- if isinstance(aggregation_fields, list)
- else [aggregation_fields])
- self.aggregation_fields = sorted(aggregation_fields,
- key=lambda af: getattr(af, "version"),
- reverse=True)
-
- def _get_compatible_aggregation_field(self, version_array):
- if version_array:
- version_array = version_array[0:2]
- else:
- version_array = MINIMUM_COMPATIBLE_MONGODB_VERSION
- for aggregation_field in self.aggregation_fields:
- if version_array >= aggregation_field.version:
- return aggregation_field
-
- def group(self, param=None, version_array=None):
- af = self._get_compatible_aggregation_field(version_array)
- return af.group(param)
-
- def project(self, param=None, version_array=None):
- af = self._get_compatible_aggregation_field(version_array)
- return af.project(param)
-
- def finalize(self, data, param=None, version_array=None):
- af = self._get_compatible_aggregation_field(version_array)
- return af.finalize(self.name, data, param)
-
- def validate(self, param=None, version_array=None):
- af = self._get_compatible_aggregation_field(version_array)
- return af.validate(self.name, param)
-
-SUM_AGGREGATION = Aggregation(
- "sum", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"sum": {"$sum": "$counter_volume"}},
- {"sum": "$sum"},
- ))
-AVG_AGGREGATION = Aggregation(
- "avg", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"avg": {"$avg": "$counter_volume"}},
- {"avg": "$avg"},
- ))
-MIN_AGGREGATION = Aggregation(
- "min", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"min": {"$min": "$counter_volume"}},
- {"min": "$min"},
- ))
-MAX_AGGREGATION = Aggregation(
- "max", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"max": {"$max": "$counter_volume"}},
- {"max": "$max"},
- ))
-COUNT_AGGREGATION = Aggregation(
- "count", AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"count": {"$sum": 1}},
- {"count": "$count"},
- FINALIZE_INT_LAMBDA))
-STDDEV_AGGREGATION = Aggregation(
- "stddev",
- AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- {"std_square": {
- "$sum": {
- "$multiply": ["$counter_volume",
- "$counter_volume"]
- }},
- "std_count": {"$sum": 1},
- "std_sum": {"$sum": "$counter_volume"}},
- {"stddev": {
- "count": "$std_count",
- "sum": "$std_sum",
- "square_sum": "$std_square"}},
- lambda stddev: ((stddev['square_sum']
- * stddev['count']
- - stddev["sum"] ** 2) ** 0.5
- / stddev['count'])))
-
-CARDINALITY_AGGREGATION = Aggregation(
- "cardinality",
- # $cond operator available only in MongoDB 2.6+
- [AggregationFields(COMPLETE_AGGREGATE_COMPATIBLE_VERSION,
- lambda field: ({"cardinality/%s" % field:
- {"$addToSet": "$%s" % field}}),
- lambda field: {
- "cardinality/%s" % field: {
- "$cond": [
- {"$eq": ["$cardinality/%s" % field, None]},
- 0,
- {"$size": "$cardinality/%s" % field}]
- }},
- validate=CARDINALITY_VALIDATION,
- parametrized=True),
- AggregationFields(MINIMUM_COMPATIBLE_MONGODB_VERSION,
- lambda field: ({"cardinality/%s" % field:
- {"$addToSet": "$%s" % field}}),
- lambda field: ({"cardinality/%s" % field:
- "$cardinality/%s" % field}),
- finalize=len,
- validate=CARDINALITY_VALIDATION,
- parametrized=True)]
-)
-
-
-def from_unix_timestamp(timestamp):
- if (isinstance(timestamp, six.integer_types) or
- isinstance(timestamp, float)):
- return datetime.datetime.fromtimestamp(timestamp)
- return timestamp
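The quoting helpers above exist because MongoDB rejects keys containing '.' or starting with '$'. The following standalone sketch of what improve_keys and unquote_keys amount to is based on their docstrings; it is illustrative code, not the removed module:

    from six.moves.urllib import parse

    # keys starting with '$' are percent-quoted so MongoDB accepts them
    assert parse.quote('$ab') == '%24ab'       # improve_keys({'$ab': 'v'})
    assert parse.unquote('%24ab') == '$ab'     # unquote_keys({'%24ab': 'v'})

    # dotted keys are expanded into nested documents
    def nest(key, value):
        node = value
        for part in reversed(key.split('.')):
            node = {part: node}
        return node

    assert nest('a.b', 'v') == {'a': {'b': 'v'}}  # improve_keys({'a.b': 'v'})
    print('ok')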
diff --git a/ceilometer/storage/pymongo_base.py b/ceilometer/storage/pymongo_base.py
deleted file mode 100644
index 8b4b2407..00000000
--- a/ceilometer/storage/pymongo_base.py
+++ /dev/null
@@ -1,175 +0,0 @@
-#
-# Copyright Ericsson AB 2013. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Common functions for MongoDB backend."""
-import pymongo
-
-from ceilometer.storage import base
-from ceilometer.storage import models
-from ceilometer.storage.mongo import utils as pymongo_utils
-from ceilometer import utils
-
-
-COMMON_AVAILABLE_CAPABILITIES = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': True}},
-}
-
-
-AVAILABLE_STORAGE_CAPABILITIES = {
- 'storage': {'production_ready': True},
-}
-
-
-class Connection(base.Connection):
- """Base Connection class for MongoDB driver."""
- CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
- COMMON_AVAILABLE_CAPABILITIES)
-
- STORAGE_CAPABILITIES = utils.update_nested(
- base.Connection.STORAGE_CAPABILITIES,
- AVAILABLE_STORAGE_CAPABILITIES,
- )
-
- def get_meters(self, user=None, project=None, resource=None, source=None,
- metaquery=None, limit=None, unique=False):
- """Return an iterable of models.Meter instances
-
- :param user: Optional ID for user that owns the resource.
- :param project: Optional ID for project that owns the resource.
- :param resource: Optional resource filter.
- :param source: Optional source filter.
- :param metaquery: Optional dict with metadata to match on.
- :param limit: Maximum number of results to return.
- :param unique: If set to true, return only unique meter information.
- """
- if limit == 0:
- return
-
- metaquery = pymongo_utils.improve_keys(metaquery, metaquery=True) or {}
-
- q = {}
- if user == 'None':
- q['user_id'] = None
- elif user is not None:
- q['user_id'] = user
- if project == 'None':
- q['project_id'] = None
- elif project is not None:
- q['project_id'] = project
- if resource == 'None':
- q['_id'] = None
- elif resource is not None:
- q['_id'] = resource
- if source is not None:
- q['source'] = source
- q.update(metaquery)
-
- count = 0
- if unique:
- meter_names = set()
-
- for r in self.db.resource.find(q):
- for r_meter in r['meter']:
- if unique:
- if r_meter['counter_name'] in meter_names:
- continue
- else:
- meter_names.add(r_meter['counter_name'])
-
- if limit and count >= limit:
- return
- else:
- count += 1
-
- if unique:
- yield models.Meter(
- name=r_meter['counter_name'],
- type=r_meter['counter_type'],
- # Return empty string if 'counter_unit' is not valid
- # for backward compatibility.
- unit=r_meter.get('counter_unit', ''),
- resource_id=None,
- project_id=None,
- source=None,
- user_id=None)
- else:
- yield models.Meter(
- name=r_meter['counter_name'],
- type=r_meter['counter_type'],
- # Return empty string if 'counter_unit' is not valid
- # for backward compatibility.
- unit=r_meter.get('counter_unit', ''),
- resource_id=r['_id'],
- project_id=r['project_id'],
- source=r['source'],
- user_id=r['user_id'])
-
- def get_samples(self, sample_filter, limit=None):
- """Return an iterable of model.Sample instances.
-
- :param sample_filter: Filter.
- :param limit: Maximum number of results to return.
- """
- if limit == 0:
- return []
- q = pymongo_utils.make_query_from_filter(sample_filter,
- require_meter=False)
-
- return self._retrieve_samples(q,
- [("timestamp", pymongo.DESCENDING)],
- limit)
-
- def query_samples(self, filter_expr=None, orderby=None, limit=None):
- if limit == 0:
- return []
- query_filter = {}
- orderby_filter = [("timestamp", pymongo.DESCENDING)]
- transformer = pymongo_utils.QueryTransformer()
- if orderby is not None:
- orderby_filter = transformer.transform_orderby(orderby)
- if filter_expr is not None:
- query_filter = transformer.transform_filter(filter_expr)
-
- return self._retrieve_samples(query_filter, orderby_filter, limit)
-
- def _retrieve_samples(self, query, orderby, limit):
- if limit is not None:
- samples = self.db.meter.find(query,
- limit=limit,
- sort=orderby)
- else:
- samples = self.db.meter.find(query,
- sort=orderby)
-
- for s in samples:
- # Remove the ObjectId generated by the database when
- # the sample was inserted. It is an implementation
- # detail that should not leak outside of the driver.
- del s['_id']
- # Backward compatibility for samples without units
- s['counter_unit'] = s.get('counter_unit', '')
- # Compatibility with MongoDB 3.+
- s['counter_volume'] = float(s.get('counter_volume'))
- # Tolerate absence of recorded_at in older datapoints
- s['recorded_at'] = s.get('recorded_at')
- # Check samples for metadata and "unquote" key if initially it
- # was started with '$'.
- if s.get('resource_metadata'):
- s['resource_metadata'] = pymongo_utils.unquote_keys(
- s.get('resource_metadata'))
- yield models.Sample(**s)
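query_samples above accepts the rich-query filter syntax and hands it to the QueryTransformer from ceilometer/storage/mongo/utils.py (also removed in this change). The mapping it performs, sketched with a hypothetical meter name and field values:

    # complex filter expression as accepted by query_samples()
    filter_expr = {"and": [{"=": {"counter_name": "cpu_util"}},
                           {">": {"counter_volume": 0.5}}]}

    # MongoDB query document the transformer builds from it:
    # "=" maps to plain equality, ">" to "$gt", "and" to "$and"
    expected = {"$and": [{"counter_name": "cpu_util"},
                         {"counter_volume": {"$gt": 0.5}}]}
    print(expected)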
diff --git a/ceilometer/storage/sqlalchemy/__init__.py b/ceilometer/storage/sqlalchemy/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/storage/sqlalchemy/__init__.py
+++ /dev/null
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/README b/ceilometer/storage/sqlalchemy/migrate_repo/README
deleted file mode 100644
index 42bddd18..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/README
+++ /dev/null
@@ -1,4 +0,0 @@
-sqlalchemy-migrate is DEPRECATED.
-
-All new migrations should be written using alembic.
-Please see ceilometer/storage/sqlalchemy/alembic/README
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/__init__.py
+++ /dev/null
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py b/ceilometer/storage/sqlalchemy/migrate_repo/manage.py
deleted file mode 100644
index 39fa3892..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/manage.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python
-from migrate.versioning.shell import main
-
-if __name__ == '__main__':
- main(debug='False')
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg b/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg
deleted file mode 100644
index cd16764f..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/migrate.cfg
+++ /dev/null
@@ -1,25 +0,0 @@
-[db_settings]
-# Used to identify which repository this database is versioned under.
-# You can use the name of your project.
-repository_id=ceilometer
-
-# The name of the database table used to track the schema version.
-# This name shouldn't already be used by your project.
-# If this is changed once a database is under version control, you'll need to
-# change the table name in each database too.
-version_table=migrate_version
-
-# When committing a change script, Migrate will attempt to generate the
-# sql for all supported databases; normally, if one of them fails - probably
-# because you don't have that database installed - it is ignored and the
-# commit continues, perhaps ending successfully.
-# Databases in this list MUST compile successfully during a commit, or the
-# entire commit will fail. List the databases your application will actually
-# be using to ensure your updates to that database work properly.
-# This must be a list; example: ['postgres','sqlite']
-required_dbs=[]
-
-# When creating new change scripts, Migrate will stamp the new script with
-# a version number. By default this is latest_version + 1. You can set this
-# to 'true' to tell Migrate to use the UTC timestamp instead.
-use_timestamp_numbering=False
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py
deleted file mode 100644
index 1032cb40..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/001_add_meter_table.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import DateTime
-from sqlalchemy import Index
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-from sqlalchemy import UniqueConstraint
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
-
- meter = Table(
- 'meter', meta,
- Column('id', Integer, primary_key=True, index=True),
- Column('counter_name', String(255)),
- Column('user_id', String(255), index=True),
- Column('project_id', String(255), index=True),
- Column('resource_id', String(255)),
- Column('resource_metadata', String(5000)),
- Column('counter_type', String(255)),
- Column('counter_volume', Integer),
- Column('counter_duration', Integer),
- Column('timestamp', DateTime(timezone=False), index=True),
- Column('message_signature', String(1000)),
- Column('message_id', String(1000)),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- resource = Table(
- 'resource', meta,
- Column('id', String(255), primary_key=True, index=True),
- Column('resource_metadata', String(5000)),
- Column('project_id', String(255), index=True),
- Column('received_timestamp', DateTime(timezone=False)),
- Column('timestamp', DateTime(timezone=False), index=True),
- Column('user_id', String(255), index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- user = Table(
- 'user', meta,
- Column('id', String(255), primary_key=True, index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- project = Table(
- 'project', meta,
- Column('id', String(255), primary_key=True, index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- sourceassoc = Table(
- 'sourceassoc', meta,
- Column('source_id', String(255), index=True),
- Column('user_id', String(255)),
- Column('project_id', String(255)),
- Column('resource_id', String(255)),
- Column('meter_id', Integer),
- Index('idx_su', 'source_id', 'user_id'),
- Index('idx_sp', 'source_id', 'project_id'),
- Index('idx_sr', 'source_id', 'resource_id'),
- Index('idx_sm', 'source_id', 'meter_id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- source = Table(
- 'source', meta,
- Column('id', String(255), primary_key=True, index=True),
- UniqueConstraint('id'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
-
- tables = [meter, project, resource, user, source, sourceassoc]
- for i in sorted(tables, key=lambda table: table.fullname):
- i.create()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py
deleted file mode 100644
index 667654ef..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/002_remove_duration.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- meter = Table('meter', meta, autoload=True)
- duration = Column('counter_duration', Integer)
- meter.drop_column(duration)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py
deleted file mode 100644
index fecd65c5..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/003_set_utf8_charset.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2012 Canonical.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
-
- if migrate_engine.name == "mysql":
- tables = ['meter', 'user', 'resource', 'project', 'source',
- 'sourceassoc']
- migrate_engine.execute("SET foreign_key_checks = 0")
-
- for table in tables:
- migrate_engine.execute(
- "ALTER TABLE %s CONVERT TO CHARACTER SET utf8" % table)
- migrate_engine.execute("SET foreign_key_checks = 1")
- migrate_engine.execute(
- "ALTER DATABASE %s DEFAULT CHARACTER SET utf8" %
- migrate_engine.url.database)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py
deleted file mode 100644
index ac4b1cb6..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/004_add_counter_unit.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- meter = Table('meter', meta, autoload=True)
- unit = Column('counter_unit', String(255))
- meter.create_column(unit)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py
deleted file mode 100644
index d85c7d73..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/005_remove_resource_timestamp.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright 2013 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from sqlalchemy import MetaData, Table, Column, DateTime
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource = Table('resource', meta, autoload=True)
- timestamp = Column('timestamp', DateTime)
- resource.drop_column(timestamp)
- received_timestamp = Column('received_timestamp', DateTime)
- resource.drop_column(received_timestamp)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py
deleted file mode 100644
index 36a44846..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/006_counter_volume_is_float.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Float
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- meter = Table('meter', meta, autoload=True)
- meter.c.counter_volume.alter(type=Float(53))
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py
deleted file mode 100644
index 55f7f820..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/007_add_alarm_table.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table, Column, Text
-from sqlalchemy import Boolean, Integer, String, DateTime, Float
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- alarm = Table(
- 'alarm', meta,
- Column('id', String(255), primary_key=True, index=True),
- Column('enabled', Boolean),
- Column('name', Text()),
- Column('description', Text()),
- Column('timestamp', DateTime(timezone=False)),
- Column('counter_name', String(255), index=True),
- Column('user_id', String(255), index=True),
- Column('project_id', String(255), index=True),
- Column('comparison_operator', String(2)),
- Column('threshold', Float),
- Column('statistic', String(255)),
- Column('evaluation_periods', Integer),
- Column('period', Integer),
- Column('state', String(255)),
- Column('state_timestamp', DateTime(timezone=False)),
- Column('ok_actions', Text()),
- Column('alarm_actions', Text()),
- Column('insufficient_data_actions', Text()),
- Column('matching_metadata', Text()),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- alarm.create()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py
deleted file mode 100644
index 68119f4a..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/008_add_events.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import Float
-from sqlalchemy import ForeignKey
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
-
- unique_name = Table(
- 'unique_name', meta,
- Column('id', Integer, primary_key=True),
- Column('key', String(32), index=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- unique_name.create()
-
- event = Table(
- 'event', meta,
- Column('id', Integer, primary_key=True),
- Column('generated', Float(asdecimal=True), index=True),
- Column('unique_name_id', Integer, ForeignKey('unique_name.id')),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- event.create()
-
- trait = Table(
- 'trait', meta,
- Column('id', Integer, primary_key=True),
- Column('name_id', Integer, ForeignKey('unique_name.id')),
- Column('t_type', Integer, index=True),
- Column('t_string', String(32), nullable=True, default=None,
- index=True),
- Column('t_float', Float, nullable=True, default=None, index=True),
- Column('t_int', Integer, nullable=True, default=None, index=True),
- Column('t_datetime', Float(asdecimal=True), nullable=True,
- default=None, index=True),
- Column('event_id', Integer, ForeignKey('event.id')),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- trait.create()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py
deleted file mode 100644
index b02f781a..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/009_event_strings.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-from sqlalchemy import VARCHAR
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- name = Table('unique_name', meta, autoload=True)
- name.c.key.alter(type=VARCHAR(length=255))
- trait = Table('trait', meta, autoload=True)
- trait.c.t_string.alter(type=VARCHAR(length=255))
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py
deleted file mode 100644
index 1ca58c6f..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/010_add_index_to_meter.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- meter = sa.Table('meter', meta, autoload=True)
- index = sa.Index('idx_meter_rid_cname', meter.c.resource_id,
- meter.c.counter_name)
- index.create(bind=migrate_engine)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py
deleted file mode 100644
index f5f2728a..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Index, MetaData, Table
-
-
-INDEXES = {
- # `table_name`: ((`index_name`, `column`),)
- "user": (('ix_user_id', 'id'),),
- "source": (('ix_source_id', 'id'),),
- "project": (('ix_project_id', 'id'),),
- "meter": (('ix_meter_id', 'id'),),
- "alarm": (('ix_alarm_id', 'id'),),
- "resource": (('ix_resource_id', 'id'),)
-}
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- load_tables = dict((table_name, Table(table_name, meta, autoload=True))
- for table_name in INDEXES.keys())
- for table_name, indexes in INDEXES.items():
- table = load_tables[table_name]
- for index_name, column in indexes:
- index = Index(index_name, table.c[column])
- index.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py
deleted file mode 100644
index fa77c311..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/012_add_missing_foreign_keys.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table
-from sqlalchemy.sql.expression import select
-
-TABLES = ['resource', 'sourceassoc', 'user',
- 'project', 'meter', 'source', 'alarm']
-
-INDEXES = {
- "resource": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
- "sourceassoc": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id'),
- ('resource_id', 'resource', 'id'),
- ('meter_id', 'meter', 'id'),
- ('source_id', 'source', 'id')),
- "alarm": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
- "meter": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id'),
- ('resource_id', 'resource', 'id'),)
-}
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'sqlite':
- return
- meta = MetaData(bind=migrate_engine)
- load_tables = dict((table_name, Table(table_name, meta, autoload=True))
- for table_name in TABLES)
- for table_name, indexes in INDEXES.items():
- table = load_tables[table_name]
- for column, ref_table_name, ref_column_name in indexes:
- ref_table = load_tables[ref_table_name]
- subq = select([getattr(ref_table.c, ref_column_name)])
- sql_del = table.delete().where(
- ~ getattr(table.c, column).in_(subq))
- migrate_engine.execute(sql_del)
-
- params = {'columns': [table.c[column]],
- 'refcolumns': [ref_table.c[ref_column_name]]}
- if migrate_engine.name == 'mysql':
- params['name'] = "_".join(('fk', table_name, column))
- fkey = ForeignKeyConstraint(**params)
- fkey.create()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py
deleted file mode 100644
index c35ba173..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/013_rename_counter_to_meter_alarm.py
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright 2013 eNovance <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData, Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- alarm = Table('alarm', meta, autoload=True)
- alarm.c.counter_name.alter(name='meter_name')
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py
deleted file mode 100644
index f3c0c09f..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/014_add_event_message_id.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate.changeset.constraint import UniqueConstraint
-import sqlalchemy
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
-
- event = sqlalchemy.Table('event', meta, autoload=True)
- message_id = sqlalchemy.Column('message_id', sqlalchemy.String(50))
- event.create_column(message_id)
-
- cons = UniqueConstraint('message_id', table=event)
- cons.create()
-
- index = sqlalchemy.Index('idx_event_message_id', event.c.message_id)
- index.create(bind=migrate_engine)
-
- # Populate the new column ...
- trait = sqlalchemy.Table('trait', meta, autoload=True)
- unique_name = sqlalchemy.Table('unique_name', meta, autoload=True)
- join = trait.join(unique_name, unique_name.c.id == trait.c.name_id)
- traits = sqlalchemy.select([trait.c.event_id, trait.c.t_string],
- whereclause=(unique_name.c.key == 'message_id'),
- from_obj=join)
-
- for event_id, value in traits.execute():
- (event.update().where(event.c.id == event_id).values(message_id=value).
- execute())
-
- # Leave the Trait, makes the rollback easier and won't really hurt anyone.
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py
deleted file mode 100644
index 9a9f07ad..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/015_add_alarm_history_table.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table, Column, Index
-from sqlalchemy import String, DateTime
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
-
- project = Table('project', meta, autoload=True)
- user = Table('user', meta, autoload=True)
-
- alarm_history = Table(
- 'alarm_history', meta,
- Column('event_id', String(255), primary_key=True, index=True),
- Column('alarm_id', String(255)),
- Column('on_behalf_of', String(255)),
- Column('project_id', String(255)),
- Column('user_id', String(255)),
- Column('type', String(20)),
- Column('detail', String(255)),
- Column('timestamp', DateTime(timezone=False)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
-
- alarm_history.create()
-
- if migrate_engine.name in ['mysql', 'postgresql']:
- indices = [Index('ix_alarm_history_alarm_id',
- alarm_history.c.alarm_id),
- Index('ix_alarm_history_on_behalf_of',
- alarm_history.c.on_behalf_of),
- Index('ix_alarm_history_project_id',
- alarm_history.c.project_id),
- Index('ix_alarm_history_on_user_id',
- alarm_history.c.user_id)]
-
- for index in indices:
- index.create(migrate_engine)
-
- fkeys = [ForeignKeyConstraint(columns=[alarm_history.c.on_behalf_of],
- refcolumns=[project.c.id]),
- ForeignKeyConstraint(columns=[alarm_history.c.project_id],
- refcolumns=[project.c.id]),
- ForeignKeyConstraint(columns=[alarm_history.c.user_id],
- refcolumns=[user.c.id])]
- for fkey in fkeys:
- fkey.create(engine=migrate_engine)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py
deleted file mode 100644
index f82ab5ec..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/016_simpler_alarm.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#
-# Copyright 2013 eNovance <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-from sqlalchemy import MetaData, Table, Column, Index
-from sqlalchemy import String, Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData()
- meta.bind = migrate_engine
- table = Table('alarm', meta, autoload=True)
-
- type = Column('type', String(50), default='threshold')
- type.create(table, populate_default=True)
-
- rule = Column('rule', Text())
- rule.create(table)
-
- for row in table.select().execute().fetchall():
- query = []
- if row.matching_metadata is not None:
- matching_metadata = json.loads(row.matching_metadata)
- for key in matching_metadata:
- query.append({'field': key,
- 'op': 'eq',
- 'value': matching_metadata[key]})
- rule = {
- 'meter_name': row.meter_name,
- 'comparison_operator': row.comparison_operator,
- 'threshold': row.threshold,
- 'statistic': row.statistic,
- 'evaluation_periods': row.evaluation_periods,
- 'period': row.period,
- 'query': query
- }
- table.update().where(table.c.id == row.id).values(rule=rule).execute()
-
- index = Index('ix_alarm_counter_name', table.c.meter_name)
- index.drop(bind=migrate_engine)
- table.c.meter_name.drop()
- table.c.comparison_operator.drop()
- table.c.threshold.drop()
- table.c.statistic.drop()
- table.c.evaluation_periods.drop()
- table.c.period.drop()
- table.c.matching_metadata.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py
deleted file mode 100644
index f5e58d94..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/017_convert_timestamp_as_datetime_to_decimal.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Copyright 2013 Rackspace Hosting
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-_col = 'timestamp'
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
- temp_col_n = 'convert_data_type_temp_col'
- # Override column we're going to convert with from_t, since the type we're
- # replacing could be custom and we need to tell SQLALchemy how to perform
- # CRUD operations with it.
- table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
- extend_existing=True)
- sa.Column(temp_col_n, to_t).create(table)
-
- key_attr = getattr(table.c, pk_attr)
- orig_col = getattr(table.c, col)
- new_col = getattr(table.c, temp_col_n)
-
- query = sa.select([key_attr, orig_col])
- for key, value in migration.paged(query):
- (table.update().where(key_attr == key).values({temp_col_n: value}).
- execute())
-
- orig_col.drop()
- new_col.alter(name=col)
- if index:
- sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- meter = sa.Table('meter', meta, autoload=True)
- _convert_data_type(meter, _col, sa.DateTime(),
- models.PreciseTimestamp(),
- pk_attr='id', index=True)
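The _convert_data_type helper above copies values through migration.paged so the table is processed in batches rather than loaded in a single query. That helper is not part of this diff; a rough equivalent, written here as an assumption about its behaviour rather than the actual ceilometer.storage.sqlalchemy.migration code, might be:

    # Assumed shape of the paged-select helper used by the migrations above.
    def paged(query, size=1000):
        """Yield rows of a bound two-column select in fixed-size batches."""
        offset = 0
        while True:
            rows = query.offset(offset).limit(size).execute().fetchall()
            if not rows:
                break
            for row in rows:      # each row unpacks as (pk, value)
                yield row
            offset += size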
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py
deleted file mode 100644
index 76c1fa2a..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/018_resource_resource_metadata_is_text.py
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-from sqlalchemy import Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource = Table('resource', meta, autoload=True)
- resource.c.resource_metadata.alter(type=Text)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py
deleted file mode 100644
index 539d02fa..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/019_alarm_history_detail_is_text.py
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-from sqlalchemy import Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- alm_hist = Table('alarm_history', meta, autoload=True)
- alm_hist.c.detail.alter(type=Text)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py
deleted file mode 100644
index 0748dcff..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/020_add_metadata_tables.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import json
-
-import six
-from sqlalchemy import Boolean
-from sqlalchemy import Column
-from sqlalchemy import Float
-from sqlalchemy import ForeignKey
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy.sql import select
-from sqlalchemy import String
-from sqlalchemy import Table
-from sqlalchemy import Text
-
-from ceilometer import utils
-
-tables = [('metadata_text', Text, True),
- ('metadata_bool', Boolean, False),
- ('metadata_int', Integer, False),
- ('metadata_float', Float, False)]
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- meter = Table('meter', meta, autoload=True)
- meta_tables = {}
- for t_name, t_type, t_nullable in tables:
- meta_tables[t_name] = Table(
- t_name, meta,
- Column('id', Integer, ForeignKey('meter.id'), primary_key=True),
- Column('meta_key', String(255), index=True, primary_key=True),
- Column('value', t_type, nullable=t_nullable),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- meta_tables[t_name].create()
-
- for row in select([meter]).execute():
- if row['resource_metadata']:
- meter_id = row['id']
- rmeta = json.loads(row['resource_metadata'])
- for key, v in utils.dict_to_keyval(rmeta):
- ins = None
- if isinstance(v, six.string_types) or v is None:
- ins = meta_tables['metadata_text'].insert()
- elif isinstance(v, bool):
- ins = meta_tables['metadata_bool'].insert()
- elif isinstance(v, six.integer_types):
- ins = meta_tables['metadata_int'].insert()
- elif isinstance(v, float):
- ins = meta_tables['metadata_float'].insert()
- if ins is not None:
- ins.values(id=meter_id, meta_key=key, value=v).execute()
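The loop above relies on utils.dict_to_keyval to flatten nested resource_metadata into dotted key/value pairs before each value is routed to a type-specific metadata table. A simplified flattener with that behaviour is sketched below as an assumption; it is not the actual ceilometer.utils implementation.

    # Illustrative flattener: {'a': {'b': 1}, 'c': 'x'} yields
    # ('a.b', 1) and ('c', 'x').
    def dict_to_keyval(value, key_base=''):
        for key, val in value.items():
            name = '%s.%s' % (key_base, key) if key_base else key
            if isinstance(val, dict):
                for item in dict_to_keyval(val, name):
                    yield item
            else:
                yield name, val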
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py
deleted file mode 100644
index 056f3f5c..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_add_event_types.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Column
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import select
-from sqlalchemy import String
-from sqlalchemy import Table
-
-from ceilometer.storage.sqlalchemy import migration
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- event_type = Table(
- 'event_type', meta,
- Column('id', Integer, primary_key=True),
- Column('desc', String(255), unique=True),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- event_type.create()
- event = Table('event', meta, autoload=True)
- unique_name = Table('unique_name', meta, autoload=True)
-
- # Event type is a specialization of Unique name, so
- # we insert into the event_type table all the distinct
- # unique names from the event.unique_name field along
- # with the key from the unique_name table, and
- # then rename the event.unique_name field to event.event_type
- conn = migrate_engine.connect()
- sql = ("INSERT INTO event_type "
- "SELECT unique_name.id, unique_name.key FROM event "
- "INNER JOIN unique_name "
- "ON event.unique_name_id = unique_name.id "
- "GROUP BY unique_name.id")
- conn.execute(sql)
- conn.close()
- # Now we need to drop the foreign key constraint, rename
- # the event.unique_name column, and re-add a new foreign
- # key constraint
- params = {'columns': [event.c.unique_name_id],
- 'refcolumns': [unique_name.c.id]}
- if migrate_engine.name == 'mysql':
- params['name'] = "event_ibfk_1"
- fkey = ForeignKeyConstraint(**params)
- fkey.drop()
-
- Column('event_type_id', Integer).create(event)
-
- # Move data from unique_name_id column into event_type_id column
- # and delete the entry from the unique_name table
- query = select([event.c.id, event.c.unique_name_id])
- for key, value in migration.paged(query):
- (event.update().where(event.c.id == key).
- values({"event_type_id": value}).execute())
- unique_name.delete().where(unique_name.c.id == key).execute()
-
- params = {'columns': [event.c.event_type_id],
- 'refcolumns': [event_type.c.id]}
- if migrate_engine.name == 'mysql':
- params['name'] = "_".join(('fk', 'event_type', 'id'))
- fkey = ForeignKeyConstraint(**params)
- fkey.create()
-
- event.c.unique_name_id.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql
deleted file mode 100644
index 19030113..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/021_sqlite_upgrade.sql
+++ /dev/null
@@ -1,29 +0,0 @@
-CREATE TABLE event_type (
- id INTEGER PRIMARY KEY ASC,
- desc STRING NOT NULL
-);
-
-INSERT INTO event_type
-SELECT un.id, un.key
-FROM unique_name un
-JOIN event e ON un.id = e.unique_name_id
-GROUP BY un.id;
-
-ALTER TABLE event RENAME TO event_orig;
-
-CREATE TABLE event (
- id INTEGER PRIMARY KEY ASC,
- generated FLOAT NOT NULL,
- message_id VARCHAR(50) UNIQUE,
- event_type_id INTEGER NOT NULL,
- FOREIGN KEY (event_type_id) REFERENCES event_type (id)
-);
-
-INSERT INTO event
-SELECT id, generated, message_id, unique_name_id
-FROM event_orig;
-
-DROP TABLE event_orig;
-
-DELETE FROM unique_name
-WHERE id IN (SELECT id FROM event_type);
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py
deleted file mode 100644
index ebbb6e0c..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/022_metadata_int_is_bigint.py
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import BigInteger
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource = Table('metadata_int', meta, autoload=True)
- resource.c.value.alter(type=BigInteger)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py
deleted file mode 100644
index 23c864bc..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_add_trait_types.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from migrate import ForeignKeyConstraint
-from sqlalchemy import Column
-from sqlalchemy import Integer
-from sqlalchemy import MetaData
-from sqlalchemy import select
-from sqlalchemy import String
-from sqlalchemy import Table
-from sqlalchemy import UniqueConstraint
-
-from ceilometer.storage.sqlalchemy import migration
-
-
-def upgrade(migrate_engine):
- meta = MetaData(migrate_engine)
- trait_type = Table(
- 'trait_type', meta,
- Column('id', Integer, primary_key=True),
- Column('desc', String(255)),
- Column('data_type', Integer),
- UniqueConstraint('desc', 'data_type', name="tt_unique"),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- trait = Table('trait', meta, autoload=True)
- unique_name = Table('unique_name', meta, autoload=True)
- trait_type.create(migrate_engine)
-    # Trait type extracts data from Trait and Unique name.
-    # We take all trait names from Unique Name and the data types
-    # from Trait, then drop the now-redundant t_type and name_id
-    # columns from trait.
-
- conn = migrate_engine.connect()
- sql = ("INSERT INTO trait_type "
- "SELECT unique_name.id, unique_name.key, trait.t_type FROM trait "
- "INNER JOIN unique_name "
- "ON trait.name_id = unique_name.id "
- "GROUP BY unique_name.id, unique_name.key, trait.t_type")
- conn.execute(sql)
- conn.close()
-
- # Now we need to drop the foreign key constraint, rename
- # the trait.name column, and re-add a new foreign
- # key constraint
- params = {'columns': [trait.c.name_id],
- 'refcolumns': [unique_name.c.id]}
- if migrate_engine.name == 'mysql':
- params['name'] = "trait_ibfk_1" # foreign key to the unique name table
- fkey = ForeignKeyConstraint(**params)
- fkey.drop()
-
- Column('trait_type_id', Integer).create(trait)
-
- # Move data from name_id column into trait_type_id column
- query = select([trait.c.id, trait.c.name_id])
- for key, value in migration.paged(query):
- (trait.update().where(trait.c.id == key).
- values({"trait_type_id": value}).execute())
-
- trait.c.name_id.drop()
-
- params = {'columns': [trait.c.trait_type_id],
- 'refcolumns': [trait_type.c.id]}
- if migrate_engine.name == 'mysql':
- params['name'] = "_".join(('fk', 'trait_type', 'id'))
-
- fkey = ForeignKeyConstraint(**params)
- fkey.create()
-
-    # Drop the t_type column; its data now lives in trait_type.data_type.
- trait.c.t_type.drop()
-
- # Finally, drop the unique_name table - we don't need it
- # anymore.
- unique_name.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql b/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql
deleted file mode 100644
index ac4dfc7f..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/023_sqlite_upgrade.sql
+++ /dev/null
@@ -1,34 +0,0 @@
-ALTER TABLE trait RENAME TO trait_orig;
-
-CREATE TABLE trait_type (
- id INTEGER PRIMARY KEY ASC,
- 'desc' STRING NOT NULL,
- data_type INTEGER NOT NULL,
- UNIQUE ('desc', data_type)
-);
-
-INSERT INTO trait_type
-SELECT un.id, un.key, t.t_type
-FROM unique_name un
-JOIN trait_orig t ON un.id = t.name_id
-GROUP BY un.id;
-
-CREATE TABLE trait (
- id INTEGER PRIMARY KEY ASC,
- t_string VARCHAR(255),
- t_int INTEGER,
- t_float FLOAT,
- t_datetime FLOAT,
- trait_type_id INTEGER NOT NULL,
- event_id INTEGER NOT NULL,
- FOREIGN KEY (trait_type_id) REFERENCES trait_type (id)
- FOREIGN KEY (event_id) REFERENCES event (id)
-);
-
-INSERT INTO trait
-SELECT t.id, t.t_string, t.t_int, t.t_float, t.t_datetime, t.name_id,
- t.event_id
-FROM trait_orig t;
-
-DROP TABLE trait_orig;
-DROP TABLE unique_name;
\ No newline at end of file
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
deleted file mode 100644
index e97f24bb..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/024_event_use_floatingprecision.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Copyright 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
- temp_col_n = 'convert_data_type_temp_col'
- # Override column we're going to convert with from_t, since the type we're
- # replacing could be custom and we need to tell SQLALchemy how to perform
- # CRUD operations with it.
- table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
- extend_existing=True)
- sa.Column(temp_col_n, to_t).create(table)
-
- key_attr = getattr(table.c, pk_attr)
- orig_col = getattr(table.c, col)
- new_col = getattr(table.c, temp_col_n)
-
- query = sa.select([key_attr, orig_col])
- for key, value in migration.paged(query):
- (table.update().where(key_attr == key).values({temp_col_n: value}).
- execute())
-
- orig_col.drop()
- new_col.alter(name=col)
- if index:
- sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- event = sa.Table('event', meta, autoload=True)
- _convert_data_type(event, 'generated', sa.Float(),
- models.PreciseTimestamp(),
- pk_attr='id', index=True)
- trait = sa.Table('trait', meta, autoload=True)
- _convert_data_type(trait, 't_datetime', sa.Float(),
- models.PreciseTimestamp(),
- pk_attr='id', index=True)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py
deleted file mode 100644
index 457a9fd5..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/025_alarm_use_floatingprecision.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright 2013 eNovance SAS <licensing@enovance.com>
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id'):
- temp_col_n = 'convert_data_type_temp_col'
- # Override column we're going to convert with from_t, since the type we're
- # replacing could be custom and we need to tell SQLALchemy how to perform
- # CRUD operations with it.
- table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
- extend_existing=True)
- sa.Column(temp_col_n, to_t).create(table)
-
- key_attr = getattr(table.c, pk_attr)
- orig_col = getattr(table.c, col)
- new_col = getattr(table.c, temp_col_n)
-
- query = sa.select([key_attr, orig_col])
- for key, value in migration.paged(query):
- (table.update().where(key_attr == key).values({temp_col_n: value}).
- execute())
-
- orig_col.drop()
- new_col.alter(name=col)
-
-
-to_convert = [
- ('alarm', 'timestamp', 'id'),
- ('alarm', 'state_timestamp', 'id'),
- ('alarm_history', 'timestamp', 'alarm_id'),
-]
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'mysql':
- meta = sa.MetaData(bind=migrate_engine)
- for table_name, col_name, pk_attr in to_convert:
- table = sa.Table(table_name, meta, autoload=True)
- _convert_data_type(table, col_name, sa.DateTime(),
- models.PreciseTimestamp(),
- pk_attr=pk_attr)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py
deleted file mode 100644
index 959c1fb6..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/026_float_size.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Float
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- metadata_float = Table('metadata_float', meta, autoload=True)
- metadata_float.c.value.alter(type=Float(53))
- trait = Table('trait', meta, autoload=True)
- trait.c.t_float.alter(type=Float(53))
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py
deleted file mode 100644
index 98377628..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/027_remove_alarm_fk_constraints.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Copyright 2014 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-from sqlalchemy import MetaData, Table
-
-TABLES = ['user', 'project', 'alarm']
-
-INDEXES = {
- "alarm": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
-}
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'sqlite':
- return
- meta = MetaData(bind=migrate_engine)
- load_tables = dict((table_name, Table(table_name, meta, autoload=True))
- for table_name in TABLES)
- for table_name, indexes in INDEXES.items():
- table = load_tables[table_name]
- for column, ref_table_name, ref_column_name in indexes:
- ref_table = load_tables[ref_table_name]
- params = {'columns': [table.c[column]],
- 'refcolumns': [ref_table.c[ref_column_name]]}
- if migrate_engine.name == 'mysql':
- params['name'] = "_".join(('fk', table_name, column))
- fkey = ForeignKeyConstraint(**params)
- fkey.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py
deleted file mode 100644
index 1778a0b2..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/028_alembic_migrations.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import migrate
-import sqlalchemy as sa
-
-
-def get_alembic_version(meta):
- """Return Alembic version or None if no Alembic table exists."""
- try:
- a_ver = sa.Table(
- 'alembic_version',
- meta,
- autoload=True)
- return sa.select([a_ver.c.version_num]).scalar()
- except sa.exc.NoSuchTableError:
- return None
-
-
-def delete_alembic(meta):
- try:
- sa.Table(
- 'alembic_version',
- meta,
- autoload=True).drop(checkfirst=True)
- except sa.exc.NoSuchTableError:
- pass
-
-
-INDEXES = (
-    # ([dialects], table_name, index_name, columns, create, unique, limited)
- (['mysql', 'sqlite', 'postgresql'],
- 'resource',
- 'resource_user_id_project_id_key',
- ('user_id', 'project_id'), True, False, True),
- (['mysql'], 'source', 'id', ('id',), False, True, False))
-
-
-def index_cleanup(meta, table_name, uniq_name, columns,
- create, unique, limited):
- table = sa.Table(table_name, meta, autoload=True)
- if create:
- if limited and meta.bind.engine.name == 'mysql':
- # For some versions of mysql we can get an error
- # "Specified key was too long; max key length is 1000 bytes".
- # We should create an index by hand in this case with limited
- # length of columns.
- columns_mysql = ",".join((c + "(100)" for c in columns))
- sql = ("create index %s ON %s (%s)" % (uniq_name, table,
- columns_mysql))
- meta.bind.engine.execute(sql)
- else:
- cols = [table.c[col] for col in columns]
- sa.Index(uniq_name, *cols, unique=unique).create()
- else:
- if unique:
- migrate.UniqueConstraint(*columns, table=table,
- name=uniq_name).drop()
- else:
- cols = [table.c[col] for col in columns]
- sa.Index(uniq_name, *cols).drop()
-
-
-def change_uniq(meta):
- uniq_name = 'uniq_sourceassoc0meter_id0user_id'
- columns = ('meter_id', 'user_id')
-
- if meta.bind.engine.name == 'sqlite':
- return
-
- sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
- meter = sa.Table('meter', meta, autoload=True)
- user = sa.Table('user', meta, autoload=True)
- if meta.bind.engine.name == 'mysql':
- # For mysql dialect all dependent FK should be removed
- # before renaming of constraint.
- params = {'columns': [sourceassoc.c.meter_id],
- 'refcolumns': [meter.c.id],
- 'name': 'fk_sourceassoc_meter_id'}
- migrate.ForeignKeyConstraint(**params).drop()
- params = {'columns': [sourceassoc.c.user_id],
- 'refcolumns': [user.c.id],
- 'name': 'fk_sourceassoc_user_id'}
- migrate.ForeignKeyConstraint(**params).drop()
-
- migrate.UniqueConstraint(*columns, table=sourceassoc,
- name=uniq_name).create()
- if meta.bind.engine.name == 'mysql':
- params = {'columns': [sourceassoc.c.meter_id],
- 'refcolumns': [meter.c.id],
- 'name': 'fk_sourceassoc_meter_id'}
- migrate.ForeignKeyConstraint(**params).create()
- params = {'columns': [sourceassoc.c.user_id],
- 'refcolumns': [user.c.id],
- 'name': 'fk_sourceassoc_user_id'}
- migrate.ForeignKeyConstraint(**params).create()
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- a_ver = get_alembic_version(meta)
-
- if not a_ver:
- alarm = sa.Table('alarm', meta, autoload=True)
- repeat_act = sa.Column('repeat_actions', sa.Boolean,
- server_default=sa.sql.expression.false())
- alarm.create_column(repeat_act)
- a_ver = '43b1a023dfaa'
-
- if a_ver == '43b1a023dfaa':
- meter = sa.Table('meter', meta, autoload=True)
- meter.c.resource_metadata.alter(type=sa.Text)
- a_ver = '17738166b91'
-
- if a_ver == '17738166b91':
- for (engine_names, table_name, uniq_name,
- columns, create, uniq, limited) in INDEXES:
- if migrate_engine.name in engine_names:
- index_cleanup(meta, table_name, uniq_name,
- columns, create, uniq, limited)
- a_ver = 'b6ae66d05e3'
-
- if a_ver == 'b6ae66d05e3':
- change_uniq(meta)
-
- delete_alembic(meta)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py
deleted file mode 100644
index 0c692bfa..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/029_sample_recorded_at.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from oslo_utils import timeutils
-import sqlalchemy
-
-from ceilometer.storage.sqlalchemy import models
-
-
-def upgrade(migrate_engine):
- meta = sqlalchemy.MetaData(bind=migrate_engine)
- meter = sqlalchemy.Table('meter', meta, autoload=True)
- c = sqlalchemy.Column('recorded_at', models.PreciseTimestamp(),
- default=timeutils.utcnow)
- meter.create_column(c)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py
deleted file mode 100644
index 39ecf057..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/030_rename_meter_table.py
+++ /dev/null
@@ -1,110 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import migrate
-import sqlalchemy as sa
-
-
-def _handle_meter_indices(meta):
- if meta.bind.engine.name == 'sqlite':
- return
-
- resource = sa.Table('resource', meta, autoload=True)
- project = sa.Table('project', meta, autoload=True)
- user = sa.Table('user', meta, autoload=True)
- meter = sa.Table('meter', meta, autoload=True)
-
- indices = [(sa.Index('ix_meter_timestamp', meter.c.timestamp),
- sa.Index('ix_sample_timestamp', meter.c.timestamp)),
- (sa.Index('ix_meter_user_id', meter.c.user_id),
- sa.Index('ix_sample_user_id', meter.c.user_id)),
- (sa.Index('ix_meter_project_id', meter.c.project_id),
- sa.Index('ix_sample_project_id', meter.c.project_id)),
- (sa.Index('idx_meter_rid_cname', meter.c.resource_id,
- meter.c.counter_name),
- sa.Index('idx_sample_rid_cname', meter.c.resource_id,
- meter.c.counter_name))]
-
- fk_params = [({'columns': [meter.c.resource_id],
- 'refcolumns': [resource.c.id]},
- 'fk_meter_resource_id',
- 'fk_sample_resource_id'),
- ({'columns': [meter.c.project_id],
- 'refcolumns': [project.c.id]},
- 'fk_meter_project_id',
- 'fk_sample_project_id'),
- ({'columns': [meter.c.user_id],
- 'refcolumns': [user.c.id]},
- 'fk_meter_user_id',
- 'fk_sample_user_id')]
-
- for fk in fk_params:
- params = fk[0]
- if meta.bind.engine.name == 'mysql':
- params['name'] = fk[1]
- migrate.ForeignKeyConstraint(**params).drop()
-
- for meter_ix, sample_ix in indices:
- meter_ix.drop()
- sample_ix.create()
-
- for fk in fk_params:
- params = fk[0]
- if meta.bind.engine.name == 'mysql':
- params['name'] = fk[2]
- migrate.ForeignKeyConstraint(**params).create()
-
-
-def _alter_sourceassoc(meta, t_name, ix_name, post_action=False):
- if meta.bind.engine.name == 'sqlite':
- return
-
- sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
- table = sa.Table(t_name, meta, autoload=True)
- user = sa.Table('user', meta, autoload=True)
-
- c_name = '%s_id' % t_name
- col = getattr(sourceassoc.c, c_name)
- uniq_name = 'uniq_sourceassoc0%s0user_id' % c_name
-
- uniq_cols = (c_name, 'user_id')
- param = {'columns': [col],
- 'refcolumns': [table.c.id]}
- user_param = {'columns': [sourceassoc.c.user_id],
- 'refcolumns': [user.c.id]}
- if meta.bind.engine.name == 'mysql':
- param['name'] = 'fk_sourceassoc_%s' % c_name
- user_param['name'] = 'fk_sourceassoc_user_id'
-
- actions = [migrate.ForeignKeyConstraint(**user_param),
- migrate.ForeignKeyConstraint(**param),
- sa.Index(ix_name, sourceassoc.c.source_id, col),
- migrate.UniqueConstraint(*uniq_cols, table=sourceassoc,
- name=uniq_name)]
- for action in actions:
- action.create() if post_action else action.drop()
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
-
- _handle_meter_indices(meta)
- meter = sa.Table('meter', meta, autoload=True)
- meter.rename('sample')
-
- _alter_sourceassoc(meta, 'meter', 'idx_sm')
- sourceassoc = sa.Table('sourceassoc', meta, autoload=True)
- sourceassoc.c.meter_id.alter(name='sample_id')
- # re-bind metadata to pick up alter name change
- meta = sa.MetaData(bind=migrate_engine)
- _alter_sourceassoc(meta, 'sample', 'idx_ss', True)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py
deleted file mode 100644
index 3dd8e469..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/031_add_new_meter_table.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#
-# Copyright 2013 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import migrate
-import sqlalchemy as sa
-
-
-def handle_rid_index(meta):
- if meta.bind.engine.name == 'sqlite':
- return
-
- resource = sa.Table('resource', meta, autoload=True)
- sample = sa.Table('sample', meta, autoload=True)
- params = {'columns': [sample.c.resource_id],
- 'refcolumns': [resource.c.id],
- 'name': 'fk_sample_resource_id'}
- if meta.bind.engine.name == 'mysql':
- # For the MySQL dialect all dependent FKs have to be dropped
- # before the index is created or dropped.
- migrate.ForeignKeyConstraint(**params).drop()
-
- index = sa.Index('idx_sample_rid_cname', sample.c.resource_id,
- sample.c.counter_name)
- index.drop()
-
- if meta.bind.engine.name == 'mysql':
- migrate.ForeignKeyConstraint(**params).create()
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- meter = sa.Table(
- 'meter', meta,
- sa.Column('id', sa.Integer, primary_key=True),
- sa.Column('name', sa.String(255), nullable=False),
- sa.Column('type', sa.String(255)),
- sa.Column('unit', sa.String(255)),
- sa.UniqueConstraint('name', 'type', 'unit', name='def_unique'),
- mysql_engine='InnoDB',
- mysql_charset='utf8'
- )
- meter.create()
- sample = sa.Table('sample', meta, autoload=True)
- query = sa.select([sample.c.counter_name, sample.c.counter_type,
- sample.c.counter_unit]).distinct()
- for row in query.execute():
- meter.insert().values(name=row['counter_name'],
- type=row['counter_type'],
- unit=row['counter_unit']).execute()
-
- meter_id = sa.Column('meter_id', sa.Integer)
- meter_id.create(sample)
- params = {'columns': [sample.c.meter_id],
- 'refcolumns': [meter.c.id]}
- if migrate_engine.name == 'mysql':
- params['name'] = 'fk_sample_meter_id'
- if migrate_engine.name != 'sqlite':
- migrate.ForeignKeyConstraint(**params).create()
-
- index = sa.Index('ix_meter_name', meter.c.name)
- index.create(bind=migrate_engine)
-
- for row in sa.select([meter]).execute():
- (sample.update().
- where(sa.and_(sample.c.counter_name == row['name'],
- sample.c.counter_type == row['type'],
- sample.c.counter_unit == row['unit'])).
- values({sample.c.meter_id: row['id']}).execute())
-
- handle_rid_index(meta)
-
- sample.c.counter_name.drop()
- sample.c.counter_type.drop()
- sample.c.counter_unit.drop()
- sample.c.counter_volume.alter(name='volume')
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py
deleted file mode 100644
index ec0b537c..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/032_add_alarm_time_constraints.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-from sqlalchemy import Text
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- alarm = Table('alarm', meta, autoload=True)
- time_constraints = Column('time_constraints', Text())
- alarm.create_column(time_constraints)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py
deleted file mode 100644
index bb0264eb..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/033_alarm_id_rename.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- users = Table('alarm', meta, autoload=True)
- users.c.id.alter(name='alarm_id')
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py
deleted file mode 100644
index ba4e3160..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/034_drop_dump_tables.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import sqlalchemy as sa
-
-TABLES_012 = ['resource', 'sourceassoc', 'user',
- 'project', 'meter', 'source', 'alarm']
-TABLES_027 = ['user', 'project', 'alarm']
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- for table_name in TABLES_027:
- try:
- (sa.Table('dump027_' + table_name, meta, autoload=True).
- drop(checkfirst=True))
- except sa.exc.NoSuchTableError:
- pass
- for table_name in TABLES_012:
- try:
- (sa.Table('dump_' + table_name, meta, autoload=True).
- drop(checkfirst=True))
- except sa.exc.NoSuchTableError:
- pass
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py
deleted file mode 100644
index e58915af..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/035_drop_user_project_tables.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint, UniqueConstraint
-import sqlalchemy as sa
-
-TABLES_DROP = ['user', 'project']
-TABLES = ['user', 'project', 'sourceassoc', 'sample',
- 'resource', 'alarm_history']
-
-INDEXES = {
- "sample": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
- "sourceassoc": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
- "resource": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id')),
- "alarm_history": (('user_id', 'user', 'id'),
- ('project_id', 'project', 'id'),
- ('on_behalf_of', 'project', 'id')),
-}
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- load_tables = dict((table_name, sa.Table(table_name, meta,
- autoload=True))
- for table_name in TABLES)
-
- if migrate_engine.name != 'sqlite':
- for table_name, indexes in INDEXES.items():
- table = load_tables[table_name]
- for column, ref_table_name, ref_column_name in indexes:
- ref_table = load_tables[ref_table_name]
- params = {'columns': [table.c[column]],
- 'refcolumns': [ref_table.c[ref_column_name]]}
-
- if (migrate_engine.name == "mysql" and
- table_name != 'alarm_history'):
- params['name'] = "_".join(('fk', table_name, column))
- elif (migrate_engine.name == "postgresql" and
- table_name == "sample"):
- # The fk contains the old table name
- params['name'] = "_".join(('meter', column, 'fkey'))
-
- fkey = ForeignKeyConstraint(**params)
- fkey.drop()
-
- sourceassoc = load_tables['sourceassoc']
- if migrate_engine.name != 'sqlite':
- idx = sa.Index('idx_su', sourceassoc.c.source_id,
- sourceassoc.c.user_id)
- idx.drop(bind=migrate_engine)
- idx = sa.Index('idx_sp', sourceassoc.c.source_id,
- sourceassoc.c.project_id)
- idx.drop(bind=migrate_engine)
-
- params = {}
- if migrate_engine.name == "mysql":
- params = {'name': 'uniq_sourceassoc0sample_id'}
- uc = UniqueConstraint('sample_id', table=sourceassoc, **params)
- uc.create()
-
- params = {}
- if migrate_engine.name == "mysql":
- params = {'name': 'uniq_sourceassoc0sample_id0user_id'}
- uc = UniqueConstraint('sample_id', 'user_id',
- table=sourceassoc, **params)
- uc.drop()
- sourceassoc.c.user_id.drop()
- sourceassoc.c.project_id.drop()
-
- for table_name in TABLES_DROP:
- sa.Table(table_name, meta, autoload=True).drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py
deleted file mode 100644
index b8a1a3db..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/036_drop_sourceassoc_resource_tables.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from migrate import ForeignKeyConstraint
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-
-
-TABLES = ['sample', 'resource', 'source', 'sourceassoc']
-DROP_TABLES = ['resource', 'source', 'sourceassoc']
-
-INDEXES = {
- "sample": (('resource_id', 'resource', 'id'),),
- "sourceassoc": (('sample_id', 'sample', 'id'),
- ('resource_id', 'resource', 'id'),
- ('source_id', 'source', 'id'))
-}
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- load_tables = dict((table_name, sa.Table(table_name, meta,
- autoload=True))
- for table_name in TABLES)
-
- # drop foreign keys
- if migrate_engine.name != 'sqlite':
- for table_name, indexes in INDEXES.items():
- table = load_tables[table_name]
- for column, ref_table_name, ref_column_name in indexes:
- ref_table = load_tables[ref_table_name]
- params = {'columns': [table.c[column]],
- 'refcolumns': [ref_table.c[ref_column_name]]}
- fk_table_name = table_name
- if migrate_engine.name == "mysql":
- params['name'] = "_".join(('fk', fk_table_name, column))
- elif (migrate_engine.name == "postgresql" and
- table_name == 'sample'):
- # fk was not renamed in script 030
- params['name'] = "_".join(('meter', column, 'fkey'))
- fkey = ForeignKeyConstraint(**params)
- fkey.drop()
-
- # create source field in sample
- sample = load_tables['sample']
- sample.create_column(sa.Column('source_id', sa.String(255)))
-
- # move source values to samples
- sourceassoc = load_tables['sourceassoc']
- query = (sa.select([sourceassoc.c.sample_id, sourceassoc.c.source_id]).
- where(sourceassoc.c.sample_id.isnot(None)))
- for sample_id, source_id in migration.paged(query):
- (sample.update().where(sample_id == sample.c.id).
- values({'source_id': source_id}).execute())
-
- # drop tables
- for table_name in DROP_TABLES:
- sa.Table(table_name, meta, autoload=True).drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py
deleted file mode 100644
index 18ee7a67..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/037_sample_index_cleanup.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from migrate import ForeignKeyConstraint
-import sqlalchemy as sa
-
-
-class ForeignKeyHandle(object):
- def __init__(self, meta):
- sample = sa.Table('sample', meta, autoload=True)
- meter = sa.Table('meter', meta, autoload=True)
- self.sample_params = {'columns': [sample.c.meter_id],
- 'refcolumns': [meter.c.id]}
- if meta.bind.engine.name == 'mysql':
- self.sample_params['name'] = "fk_sample_meter_id"
-
- def __enter__(self):
- ForeignKeyConstraint(**self.sample_params).drop()
-
- def __exit__(self, type, value, traceback):
- ForeignKeyConstraint(**self.sample_params).create()
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'sqlite':
- return
- meta = sa.MetaData(bind=migrate_engine)
- sample = sa.Table('sample', meta, autoload=True)
-
- with ForeignKeyHandle(meta):
- # remove stray indexes implicitly created by InnoDB
- for index in sample.indexes:
- if index.name in ['fk_sample_meter_id', 'fk_sample_resource_id']:
- index.drop()
- sa.Index('ix_sample_meter_id', sample.c.meter_id).create()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py
deleted file mode 100644
index 2fb7b47b..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/038_normalise_tables.py
+++ /dev/null
@@ -1,131 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import hashlib
-
-import migrate
-from oslo_serialization import jsonutils
-import sqlalchemy as sa
-
-
-m_tables = [('metadata_text', sa.Text, True),
- ('metadata_bool', sa.Boolean, False),
- ('metadata_int', sa.BigInteger, False),
- ('metadata_float', sa.Float(53), False)]
-
-
-def _migrate_meta_tables(meta, col, new_col, new_fk):
- for t_name, t_type, t_nullable in m_tables:
- m_table = sa.Table(t_name, meta, autoload=True)
- m_table_new = sa.Table(
- '%s_new' % t_name, meta,
- sa.Column('id', sa.Integer, sa.ForeignKey(new_fk),
- primary_key=True),
- sa.Column('meta_key', sa.String(255),
- primary_key=True),
- sa.Column('value', t_type, nullable=t_nullable),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- m_table_new.create()
-
- if m_table.select().scalar() is not None:
- m_table_new.insert().from_select(
- ['id', 'meta_key', 'value'],
- sa.select([new_col, m_table.c.meta_key,
- m_table.c.value]).where(
- col == m_table.c.id).group_by(
- new_col, m_table.c.meta_key, m_table.c.value)).execute()
-
- m_table.drop()
- if meta.bind.engine.name != 'sqlite':
- sa.Index('ix_%s_meta_key' % t_name,
- m_table_new.c.meta_key).create()
- m_table_new.rename(t_name)
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- resource = sa.Table(
- 'resource', meta,
- sa.Column('internal_id', sa.Integer, primary_key=True),
- sa.Column('resource_id', sa.String(255)),
- sa.Column('user_id', sa.String(255)),
- sa.Column('project_id', sa.String(255)),
- sa.Column('source_id', sa.String(255)),
- sa.Column('resource_metadata', sa.Text),
- sa.Column('metadata_hash', sa.String(32)),
- mysql_engine='InnoDB',
- mysql_charset='utf8')
- resource.create()
-
- # copy resource data into the resource table
- sample = sa.Table('sample', meta, autoload=True)
- sa.Column('metadata_hash', sa.String(32)).create(sample)
- for row in sa.select([sample.c.id, sample.c.resource_metadata]).execute():
- sample.update().where(sample.c.id == row['id']).values(
- {sample.c.metadata_hash:
- hashlib.md5(jsonutils.dumps(
- row['resource_metadata'],
- sort_keys=True)).hexdigest()}).execute()
- query = sa.select([sample.c.resource_id, sample.c.user_id,
- sample.c.project_id, sample.c.source_id,
- sample.c.resource_metadata,
- sample.c.metadata_hash]).distinct()
- for row in query.execute():
- resource.insert().values(
- resource_id=row['resource_id'],
- user_id=row['user_id'],
- project_id=row['project_id'],
- source_id=row['source_id'],
- resource_metadata=row['resource_metadata'],
- metadata_hash=row['metadata_hash']).execute()
- # link sample records to new resource records
- sa.Column('resource_id_new', sa.Integer).create(sample)
- for row in sa.select([resource]).execute():
- (sample.update().
- where(sa.and_(
- sample.c.resource_id == row['resource_id'],
- sample.c.user_id == row['user_id'],
- sample.c.project_id == row['project_id'],
- sample.c.source_id == row['source_id'],
- sample.c.metadata_hash == row['metadata_hash'])).
- values({sample.c.resource_id_new: row['internal_id']}).execute())
-
- sample.c.resource_id.drop()
- sample.c.metadata_hash.drop()
- sample.c.resource_id_new.alter(name='resource_id')
- # re-bind metadata to pick up alter name change
- meta = sa.MetaData(bind=migrate_engine)
- sample = sa.Table('sample', meta, autoload=True)
- resource = sa.Table('resource', meta, autoload=True)
- if migrate_engine.name != 'sqlite':
- sa.Index('ix_resource_resource_id', resource.c.resource_id).create()
- sa.Index('ix_sample_user_id', sample.c.user_id).drop()
- sa.Index('ix_sample_project_id', sample.c.project_id).drop()
- sa.Index('ix_sample_resource_id', sample.c.resource_id).create()
- sa.Index('ix_sample_meter_id_resource_id',
- sample.c.meter_id, sample.c.resource_id).create()
-
- params = {'columns': [sample.c.resource_id],
- 'refcolumns': [resource.c.internal_id]}
- if migrate_engine.name == 'mysql':
- params['name'] = 'fk_sample_resource_internal_id'
- migrate.ForeignKeyConstraint(**params).create()
-
- sample.c.user_id.drop()
- sample.c.project_id.drop()
- sample.c.source_id.drop()
- sample.c.resource_metadata.drop()
-
- _migrate_meta_tables(meta, sample.c.id, sample.c.resource_id,
- 'resource.internal_id')
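
For reference, a minimal sketch of the metadata-hash computation that the 038 migration above and the Resource before_insert listener (in models.py, further down) both rely on: the metadata dict is serialized with sorted keys and MD5-hashed so that identical metadata always maps to the same resource row. The .encode() call is an assumption needed on Python 3; the original code hashed the str directly:

    import hashlib
    import json

    def metadata_hash(resource_metadata):
        # Sorted keys make the serialization, and therefore the hash,
        # independent of dict ordering.
        encoded = json.dumps(resource_metadata, sort_keys=True)
        return hashlib.md5(encoded.encode('utf-8')).hexdigest()

    # Two samples with the same metadata collapse onto one resource row.
    assert metadata_hash({'a': 1, 'b': 2}) == metadata_hash({'b': 2, 'a': 1})
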
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py
deleted file mode 100644
index 055f2ee6..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/039_event_floatingprecision_pgsql.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# NOTE(gordc): this is a copy of the 024 migration script, which missed pgsql
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import migration
-from ceilometer.storage.sqlalchemy import models
-
-
-def _convert_data_type(table, col, from_t, to_t, pk_attr='id', index=False):
- temp_col_n = 'convert_data_type_temp_col'
- # Override the column we're going to convert with from_t, since the type
- # we're replacing could be custom and we need to tell SQLAlchemy how to
- # perform CRUD operations with it.
- table = sa.Table(table.name, table.metadata, sa.Column(col, from_t),
- extend_existing=True)
- sa.Column(temp_col_n, to_t).create(table)
-
- key_attr = getattr(table.c, pk_attr)
- orig_col = getattr(table.c, col)
- new_col = getattr(table.c, temp_col_n)
-
- query = sa.select([key_attr, orig_col])
- for key, value in migration.paged(query):
- (table.update().where(key_attr == key).values({temp_col_n: value}).
- execute())
-
- orig_col.drop()
- new_col.alter(name=col)
- if index:
- sa.Index('ix_%s_%s' % (table.name, col), new_col).create()
-
-
-def upgrade(migrate_engine):
- if migrate_engine.name == 'postgresql':
- meta = sa.MetaData(bind=migrate_engine)
- event = sa.Table('event', meta, autoload=True)
- _convert_data_type(event, 'generated', sa.Float(),
- models.PreciseTimestamp(),
- pk_attr='id', index=True)
- trait = sa.Table('trait', meta, autoload=True)
- _convert_data_type(trait, 't_datetime', sa.Float(),
- models.PreciseTimestamp(),
- pk_attr='id', index=True)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py
deleted file mode 100644
index 07a94deb..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/040_add_alarm_severity.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import Column
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- alarm = Table('alarm', meta, autoload=True)
- severity = Column('severity', String(50))
- alarm.create_column(severity)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py
deleted file mode 100644
index a9492381..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/041_expand_event_traits.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-from ceilometer.storage.sqlalchemy import models
-
-tables = [('trait_text', sa.String(255), True, 't_string', 1),
- ('trait_int', sa.Integer, False, 't_int', 2),
- ('trait_float', sa.Float(53), False, 't_float', 3),
- ('trait_datetime', models.PreciseTimestamp(),
- False, 't_datetime', 4)]
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- trait = sa.Table('trait', meta, autoload=True)
- event = sa.Table('event', meta, autoload=True)
- trait_type = sa.Table('trait_type', meta, autoload=True)
- for t_name, t_type, t_nullable, col_name, __ in tables:
- t_table = sa.Table(
- t_name, meta,
- sa.Column('event_id', sa.Integer,
- sa.ForeignKey(event.c.id), primary_key=True),
- sa.Column('key', sa.String(255), primary_key=True),
- sa.Column('value', t_type, nullable=t_nullable),
- sa.Index('ix_%s_event_id_key' % t_name,
- 'event_id', 'key'),
- mysql_engine='InnoDB',
- mysql_charset='utf8',
- )
- t_table.create()
- query = sa.select(
- [trait.c.event_id,
- trait_type.c.desc,
- trait.c[col_name]]).select_from(
- trait.join(trait_type,
- trait.c.trait_type_id == trait_type.c.id)).where(
- trait.c[col_name] != sa.null())
- if query.alias().select().scalar() is not None:
- t_table.insert().from_select(
- ['event_id', 'key', 'value'], query).execute()
- trait.drop()
- trait_type.drop()
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py
deleted file mode 100644
index 1e8b4614..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/042_add_raw_column.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- event = sa.Table('event', meta, autoload=True)
- raw = sa.Column('raw', sa.Text)
- event.create_column(raw)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py
deleted file mode 100644
index 03a5525b..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/043_reduce_uuid_data_types.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def upgrade(migrate_engine):
- # NOTE(gordc): this is a noop script to handle bug 1468916.
- # The previous lowering of id lengths fails if the db already contains
- # longer data, so the migration is skipped for those deployments. The
- # next script resizes the columns if this original migration passed.
- pass
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py
deleted file mode 100644
index a7db70cb..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/044_restore_long_uuid_data_types.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from sqlalchemy import MetaData
-from sqlalchemy import String
-from sqlalchemy import Table
-
-
-def upgrade(migrate_engine):
- meta = MetaData(bind=migrate_engine)
- resource = Table('resource', meta, autoload=True)
- resource.c.user_id.alter(type=String(255))
- resource.c.project_id.alter(type=String(255))
- resource.c.resource_id.alter(type=String(255))
- resource.c.source_id.alter(type=String(255))
- sample = Table('sample', meta, autoload=True)
- sample.c.message_signature.alter(type=String(64))
- sample.c.message_id.alter(type=String(128))
- alarm = Table('alarm', meta, autoload=True)
- alarm.c.alarm_id.alter(type=String(128))
- alarm.c.user_id.alter(type=String(255))
- alarm.c.project_id.alter(type=String(255))
- alarm_history = Table('alarm_history', meta, autoload=True)
- alarm_history.c.alarm_id.alter(type=String(128))
- alarm_history.c.user_id.alter(type=String(255))
- alarm_history.c.project_id.alter(type=String(255))
- alarm_history.c.event_id.alter(type=String(128))
- alarm_history.c.on_behalf_of.alter(type=String(255))
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py
deleted file mode 100644
index ac59595d..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/045_add_resource_metadatahash_index.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sqlalchemy as sa
-
-
-# Add index on metadata_hash column of resource
-def upgrade(migrate_engine):
- meta = sa.MetaData(bind=migrate_engine)
- resource = sa.Table('resource', meta, autoload=True)
- index = sa.Index('ix_resource_metadata_hash', resource.c.metadata_hash)
- index.create(bind=migrate_engine)
diff --git a/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py b/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/storage/sqlalchemy/migrate_repo/versions/__init__.py
+++ /dev/null
diff --git a/ceilometer/storage/sqlalchemy/migration.py b/ceilometer/storage/sqlalchemy/migration.py
deleted file mode 100644
index 160e68e5..00000000
--- a/ceilometer/storage/sqlalchemy/migration.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-def paged(query, size=1000):
- """Page query results
-
- :param query: the SQLAlchemy query to execute
- :param size: the max page size
- :return: generator with query data
- """
- offset = 0
- while True:
- page = query.offset(offset).limit(size).execute()
- if page.rowcount <= 0:
- # There are no more rows
- break
- for row in page:
- yield row
- offset += size
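
For reference, a minimal sketch of how the paged() helper above is consumed by the migration scripts (036 and 039 call it to walk large tables in chunks); the table and column names are illustrative, and the old bound-MetaData/select([...]) API used throughout these scripts is assumed:

    import sqlalchemy as sa

    from ceilometer.storage.sqlalchemy.migration import paged

    def dump_sample_sources(migrate_engine):
        meta = sa.MetaData(bind=migrate_engine)
        sample = sa.Table('sample', meta, autoload=True)
        # paged() re-executes the query with offset/limit (1000 rows per
        # page by default), so very large tables never sit in memory whole.
        query = sa.select([sample.c.id, sample.c.source_id])
        for sample_id, source_id in paged(query, size=500):
            print(sample_id, source_id)
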
diff --git a/ceilometer/storage/sqlalchemy/models.py b/ceilometer/storage/sqlalchemy/models.py
deleted file mode 100644
index 386d7361..00000000
--- a/ceilometer/storage/sqlalchemy/models.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-SQLAlchemy models for Ceilometer data.
-"""
-import hashlib
-import json
-
-from oslo_utils import timeutils
-import six
-from sqlalchemy import (Column, Integer, String, ForeignKey, Index,
- UniqueConstraint, BigInteger)
-from sqlalchemy import event
-from sqlalchemy import Float, Boolean, Text, DateTime
-from sqlalchemy.dialects.mysql import DECIMAL
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy.orm import deferred
-from sqlalchemy.orm import relationship
-from sqlalchemy.types import TypeDecorator
-
-from ceilometer import utils
-
-
-class JSONEncodedDict(TypeDecorator):
- """Represents an immutable structure as a json-encoded string."""
-
- impl = Text
-
- @staticmethod
- def process_bind_param(value, dialect):
- if value is not None:
- value = json.dumps(value)
- return value
-
- @staticmethod
- def process_result_value(value, dialect):
- if value is not None:
- value = json.loads(value)
- return value
-
-
-class PreciseTimestamp(TypeDecorator):
- """Represents a timestamp precise to the microsecond."""
-
- impl = DateTime
-
- def load_dialect_impl(self, dialect):
- if dialect.name == 'mysql':
- return dialect.type_descriptor(DECIMAL(precision=20,
- scale=6,
- asdecimal=True))
- return self.impl
-
- @staticmethod
- def process_bind_param(value, dialect):
- if value is None:
- return value
- elif dialect.name == 'mysql':
- return utils.dt_to_decimal(value)
- return value
-
- @staticmethod
- def process_result_value(value, dialect):
- if value is None:
- return value
- elif dialect.name == 'mysql':
- return utils.decimal_to_dt(value)
- return value
-
-
-_COMMON_TABLE_ARGS = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"}
-
-
-class CeilometerBase(object):
- """Base class for Ceilometer Models."""
- __table_args__ = _COMMON_TABLE_ARGS
- __table_initialized__ = False
-
- def __setitem__(self, key, value):
- setattr(self, key, value)
-
- def __getitem__(self, key):
- return getattr(self, key)
-
- def update(self, values):
- """Make the model object behave like a dict."""
- for k, v in six.iteritems(values):
- setattr(self, k, v)
-
-
-Base = declarative_base(cls=CeilometerBase)
-
-
-class MetaText(Base):
- """Metering text metadata."""
-
- __tablename__ = 'metadata_text'
- __table_args__ = (
- Index('ix_meta_text_key', 'meta_key'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
- meta_key = Column(String(255), primary_key=True)
- value = Column(Text)
-
-
-class MetaBool(Base):
- """Metering boolean metadata."""
-
- __tablename__ = 'metadata_bool'
- __table_args__ = (
- Index('ix_meta_bool_key', 'meta_key'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
- meta_key = Column(String(255), primary_key=True)
- value = Column(Boolean)
-
-
-class MetaBigInt(Base):
- """Metering integer metadata."""
-
- __tablename__ = 'metadata_int'
- __table_args__ = (
- Index('ix_meta_int_key', 'meta_key'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
- meta_key = Column(String(255), primary_key=True)
- value = Column(BigInteger, default=False)
-
-
-class MetaFloat(Base):
- """Metering float metadata."""
-
- __tablename__ = 'metadata_float'
- __table_args__ = (
- Index('ix_meta_float_key', 'meta_key'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, ForeignKey('resource.internal_id'), primary_key=True)
- meta_key = Column(String(255), primary_key=True)
- value = Column(Float(53), default=False)
-
-
-class Meter(Base):
- """Meter definition data."""
-
- __tablename__ = 'meter'
- __table_args__ = (
- UniqueConstraint('name', 'type', 'unit', name='def_unique'),
- Index('ix_meter_name', 'name'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, primary_key=True)
- name = Column(String(255), nullable=False)
- type = Column(String(255))
- unit = Column(String(255))
- samples = relationship("Sample", backref="meter")
-
-
-class Resource(Base):
- """Resource data."""
-
- __tablename__ = 'resource'
- __table_args__ = (
- # TODO(gordc): this should exist but the attribute values we set
- # for user/project/source/resource ids are too large
- # for a uuid.
- # UniqueConstraint('resource_id', 'user_id', 'project_id',
- # 'source_id', 'metadata_hash',
- # name='res_def_unique'),
- Index('ix_resource_resource_id', 'resource_id'),
- Index('ix_resource_metadata_hash', 'metadata_hash'),
- _COMMON_TABLE_ARGS,
- )
-
- internal_id = Column(Integer, primary_key=True)
- user_id = Column(String(255))
- project_id = Column(String(255))
- source_id = Column(String(255))
- resource_id = Column(String(255), nullable=False)
- resource_metadata = deferred(Column(JSONEncodedDict()))
- metadata_hash = deferred(Column(String(32)))
- samples = relationship("Sample", backref="resource")
- meta_text = relationship("MetaText", backref="resource",
- cascade="all, delete-orphan")
- meta_float = relationship("MetaFloat", backref="resource",
- cascade="all, delete-orphan")
- meta_int = relationship("MetaBigInt", backref="resource",
- cascade="all, delete-orphan")
- meta_bool = relationship("MetaBool", backref="resource",
- cascade="all, delete-orphan")
-
-
-@event.listens_for(Resource, "before_insert")
-def before_insert(mapper, connection, target):
- metadata = json.dumps(target.resource_metadata, sort_keys=True)
- target.metadata_hash = hashlib.md5(metadata).hexdigest()
-
-
-class Sample(Base):
- """Metering data."""
-
- __tablename__ = 'sample'
- __table_args__ = (
- Index('ix_sample_timestamp', 'timestamp'),
- Index('ix_sample_resource_id', 'resource_id'),
- Index('ix_sample_meter_id', 'meter_id'),
- Index('ix_sample_meter_id_resource_id', 'meter_id', 'resource_id'),
- _COMMON_TABLE_ARGS,
- )
- id = Column(Integer, primary_key=True)
- meter_id = Column(Integer, ForeignKey('meter.id'))
- resource_id = Column(Integer, ForeignKey('resource.internal_id'))
- volume = Column(Float(53))
- timestamp = Column(PreciseTimestamp(), default=lambda: timeutils.utcnow())
- recorded_at = Column(PreciseTimestamp(),
- default=lambda: timeutils.utcnow())
- message_signature = Column(String(64))
- message_id = Column(String(128))
-
-
-class FullSample(object):
- """A fake model for query samples."""
- id = Sample.id
- timestamp = Sample.timestamp
- message_id = Sample.message_id
- message_signature = Sample.message_signature
- recorded_at = Sample.recorded_at
- counter_name = Meter.name
- counter_type = Meter.type
- counter_unit = Meter.unit
- counter_volume = Sample.volume
- resource_id = Resource.resource_id
- source_id = Resource.source_id
- user_id = Resource.user_id
- project_id = Resource.project_id
- resource_metadata = Resource.resource_metadata
- internal_id = Resource.internal_id
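
For reference, a minimal usage sketch of the PreciseTimestamp type defined above: on MySQL it is rendered as DECIMAL(20, 6) so microseconds survive the round trip, on other dialects it falls back to a plain DateTime, and callers always read and write ordinary datetime objects. The table name and SQLite URL are illustrative, and the pre-1.4 SQLAlchemy API used throughout this module is assumed:

    import datetime

    import sqlalchemy as sa

    from ceilometer.storage.sqlalchemy.models import PreciseTimestamp

    metadata = sa.MetaData()
    event_log = sa.Table(
        'event_log', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        # DECIMAL(20, 6) on MySQL, DATETIME elsewhere.
        sa.Column('generated', PreciseTimestamp(),
                  default=lambda: datetime.datetime.utcnow()))

    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)
    engine.execute(event_log.insert().values(
        generated=datetime.datetime(2017, 1, 1, 12, 0, 0, 123456)))
    print(engine.execute(sa.select([event_log.c.generated])).scalar())
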
diff --git a/ceilometer/storage/sqlalchemy/utils.py b/ceilometer/storage/sqlalchemy/utils.py
deleted file mode 100644
index 2003c24c..00000000
--- a/ceilometer/storage/sqlalchemy/utils.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-
-import operator
-
-import six
-from sqlalchemy import and_
-from sqlalchemy import asc
-from sqlalchemy import desc
-from sqlalchemy import not_
-from sqlalchemy import or_
-from sqlalchemy.orm import aliased
-
-import ceilometer
-from ceilometer.storage.sqlalchemy import models
-
-
-META_TYPE_MAP = {bool: models.MetaBool,
- str: models.MetaText,
- six.text_type: models.MetaText,
- type(None): models.MetaText,
- int: models.MetaBigInt,
- float: models.MetaFloat}
-if six.PY2:
- META_TYPE_MAP[long] = models.MetaBigInt
-
-
-class QueryTransformer(object):
- operators = {"=": operator.eq,
- "<": operator.lt,
- ">": operator.gt,
- "<=": operator.le,
- "=<": operator.le,
- ">=": operator.ge,
- "=>": operator.ge,
- "!=": operator.ne,
- "in": lambda field_name, values: field_name.in_(values),
- "=~": lambda field, value: field.op("regexp")(value)}
-
- # operators which are different for different dialects
- dialect_operators = {'postgresql': {'=~': (lambda field, value:
- field.op("~")(value))}}
-
- complex_operators = {"or": or_,
- "and": and_,
- "not": not_}
-
- ordering_functions = {"asc": asc,
- "desc": desc}
-
- def __init__(self, table, query, dialect='mysql'):
- self.table = table
- self.query = query
- self.dialect_name = dialect
-
- def _get_operator(self, op):
- return (self.dialect_operators.get(self.dialect_name, {}).get(op)
- or self.operators[op])
-
- def _handle_complex_op(self, complex_op, nodes):
- op = self.complex_operators[complex_op]
- if op == not_:
- nodes = [nodes]
- element_list = []
- for node in nodes:
- element = self._transform(node)
- element_list.append(element)
- return op(*element_list)
-
- def _handle_simple_op(self, simple_op, nodes):
- op = self._get_operator(simple_op)
- field_name, value = list(nodes.items())[0]
- if field_name.startswith('resource_metadata.'):
- return self._handle_metadata(op, field_name, value)
- else:
- return op(getattr(self.table, field_name), value)
-
- def _handle_metadata(self, op, field_name, value):
- if op == self.operators["in"]:
- raise ceilometer.NotImplementedError('Metadata query with in '
- 'operator is not implemented')
- field_name = field_name[len('resource_metadata.'):]
- meta_table = META_TYPE_MAP[type(value)]
- meta_alias = aliased(meta_table)
- on_clause = and_(self.table.internal_id == meta_alias.id,
- meta_alias.meta_key == field_name)
- # An outer join is needed to support a metaquery that uses the
- # "or" operator on a non-existent metadata field; see the
- # test_query_non_existing_metadata_with_result test case.
- self.query = self.query.outerjoin(meta_alias, on_clause)
- return op(meta_alias.value, value)
-
- def _transform(self, sub_tree):
- operator, nodes = list(sub_tree.items())[0]
- if operator in self.complex_operators:
- return self._handle_complex_op(operator, nodes)
- else:
- return self._handle_simple_op(operator, nodes)
-
- def apply_filter(self, expression_tree):
- condition = self._transform(expression_tree)
- self.query = self.query.filter(condition)
-
- def apply_options(self, orderby, limit):
- self._apply_order_by(orderby)
- if limit is not None:
- self.query = self.query.limit(limit)
-
- def _apply_order_by(self, orderby):
- if orderby is not None:
- for field in orderby:
- attr, order = list(field.items())[0]
- ordering_function = self.ordering_functions[order]
- self.query = self.query.order_by(ordering_function(
- getattr(self.table, attr)))
- else:
- self.query = self.query.order_by(desc(self.table.timestamp))
-
- def get_query(self):
- return self.query
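
For reference, a minimal sketch of how the QueryTransformer above is driven: the filter is a nested dict of complex ("and"/"or"/"not") and simple ("=", "<", ">=", "in", "=~", ...) operators, and apply_filter/apply_options mutate the wrapped query in place. The engine/session setup is omitted and the field values are illustrative:

    from sqlalchemy.orm import sessionmaker

    from ceilometer.storage.sqlalchemy import models
    from ceilometer.storage.sqlalchemy import utils

    session = sessionmaker(bind=engine)()  # engine creation omitted

    transformer = utils.QueryTransformer(models.Sample,
                                         session.query(models.Sample),
                                         dialect=engine.dialect.name)
    # Complex operators take a list of sub-filters; a simple operator maps
    # a single field name to the value it is compared against.
    transformer.apply_filter({"and": [{">=": {"volume": 1.0}},
                                      {"=": {"meter_id": 42}}]})
    transformer.apply_options(orderby=[{"timestamp": "desc"}], limit=10)
    samples = transformer.get_query().all()
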
diff --git a/ceilometer/tests/db.py b/ceilometer/tests/db.py
deleted file mode 100644
index b5acfe09..00000000
--- a/ceilometer/tests/db.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Base classes for API tests."""
-import os
-import uuid
-import warnings
-
-import fixtures
-import mock
-import six
-from six.moves.urllib import parse as urlparse
-import sqlalchemy
-from testtools import testcase
-
-from ceilometer import service
-from ceilometer import storage
-from ceilometer.tests import base as test_base
-try:
- from ceilometer.tests import mocks
-except ImportError:
- mocks = None # happybase module is not Python 3 compatible yet
-
-
-class DBManager(fixtures.Fixture):
- def __init__(self, conf, url):
- self._url = url
- self._conf = conf
-
-
-class MongoDbManager(DBManager):
-
- def setUp(self):
- super(MongoDbManager, self).setUp()
- with warnings.catch_warnings():
- warnings.filterwarnings(
- action='ignore',
- message='.*you must provide a username and password.*')
- try:
- self.connection = storage.get_connection(self._conf, self.url)
- except storage.StorageBadVersion as e:
- raise testcase.TestSkipped(six.text_type(e))
-
- @property
- def url(self):
- return '%(url)s_%(db)s' % {
- 'url': self._url,
- 'db': uuid.uuid4().hex
- }
-
-
-class SQLManager(DBManager):
- def __init__(self, conf, url):
- super(SQLManager, self).__init__(conf, url)
- db_name = 'ceilometer_%s' % uuid.uuid4().hex
- engine = sqlalchemy.create_engine(url)
- conn = engine.connect()
- self._create_database(conn, db_name)
- conn.close()
- engine.dispose()
- parsed = list(urlparse.urlparse(url))
- parsed[2] = '/' + db_name
- self.url = urlparse.urlunparse(parsed)
-
- def setUp(self):
- super(SQLManager, self).setUp()
- self.connection = storage.get_connection(self._conf, self.url)
-
-
-class PgSQLManager(SQLManager):
- @staticmethod
- def _create_database(conn, db_name):
- conn.connection.set_isolation_level(0)
- conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name)
- conn.connection.set_isolation_level(1)
-
-
-class MySQLManager(SQLManager):
- @staticmethod
- def _create_database(conn, db_name):
- conn.execute('CREATE DATABASE %s;' % db_name)
-
-
-class HBaseManager(DBManager):
- def setUp(self):
- super(HBaseManager, self).setUp()
- self.connection = storage.get_connection(self._conf, self.url)
- # Use a unique prefix for each test so its data can be distinguished,
- # because all test data is stored in one table.
- data_prefix = str(uuid.uuid4().hex)
-
- def table(conn, name):
- return mocks.MockHBaseTable(name, conn, data_prefix)
-
- # Mock only the real HBase connection; the MConnection "table" method
- # stays unchanged.
- mock.patch('happybase.Connection.table', new=table).start()
- # We shouldn't delete data and tables after each test,
- # because it last for too long.
- # All tests tables will be deleted in setup-test-env.sh
- mock.patch("happybase.Connection.disable_table",
- new=mock.MagicMock()).start()
- mock.patch("happybase.Connection.delete_table",
- new=mock.MagicMock()).start()
- mock.patch("happybase.Connection.create_table",
- new=mock.MagicMock()).start()
-
- @property
- def url(self):
- return '%s?table_prefix=%s&table_prefix_separator=%s' % (
- self._url,
- os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", "test"),
- os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_")
- )
-
-
-class SQLiteManager(DBManager):
- def setUp(self):
- super(SQLiteManager, self).setUp()
- self.url = self._url
- self.connection = storage.get_connection(self._conf, self._url)
-
-
-@six.add_metaclass(test_base.SkipNotImplementedMeta)
-class TestBase(test_base.BaseTestCase):
-
- DRIVER_MANAGERS = {
- 'mongodb': MongoDbManager,
- 'mysql': MySQLManager,
- 'postgresql': PgSQLManager,
- 'sqlite': SQLiteManager,
- }
- if mocks is not None:
- DRIVER_MANAGERS['hbase'] = HBaseManager
-
- def setUp(self):
- super(TestBase, self).setUp()
- db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace(
- "mysql://", "mysql+pymysql://")
-
- engine = urlparse.urlparse(db_url).scheme
- # in case some drivers carry an additional specification, for example
- # PyMySQL uses the scheme mysql+pymysql
- engine = engine.split('+')[0]
-
- # NOTE(Alexei_987) Shortcut to skip expensive db setUp
- test_method = self._get_test_method()
- if (hasattr(test_method, '_run_with')
- and engine not in test_method._run_with):
- raise testcase.TestSkipped(
- 'Test is not applicable for %s' % engine)
-
- self.CONF = service.prepare_service([], [])
-
- manager = self.DRIVER_MANAGERS.get(engine)
- if not manager:
- self.skipTest("missing driver manager: %s" % engine)
-
- self.db_manager = manager(self.CONF, db_url)
-
- self.useFixture(self.db_manager)
-
- self.conn = self.db_manager.connection
- self.conn.upgrade()
-
- self.useFixture(fixtures.MockPatch('ceilometer.storage.get_connection',
- side_effect=self._get_connection))
-
- # Set a default location for the pipeline config file so the
- # tests work even if ceilometer is not installed globally on
- # the system.
- self.CONF.import_opt('pipeline_cfg_file', 'ceilometer.pipeline')
-
- def tearDown(self):
- self.conn.clear()
- self.conn = None
- super(TestBase, self).tearDown()
-
- def _get_connection(self, conf, url):
- return self.conn
-
-
-def run_with(*drivers):
- """Used to mark tests that are only applicable for certain db driver.
-
- Skips test if driver is not available.
- """
- def decorator(test):
- if isinstance(test, type) and issubclass(test, TestBase):
- # Decorate all test methods
- for attr in dir(test):
- value = getattr(test, attr)
- if callable(value) and attr.startswith('test_'):
- if six.PY3:
- value._run_with = drivers
- else:
- value.__func__._run_with = drivers
- else:
- test._run_with = drivers
- return test
- return decorator
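
For reference, the decorator above is applied both to whole TestBase subclasses and to individual test methods in the tests below; a minimal usage sketch (the class and method names here are illustrative only):

    from ceilometer.tests import db as tests_db


    @tests_db.run_with('mongodb')
    class MongoOnlyScenarios(tests_db.TestBase):
        # Every test here is skipped unless the engine derived from PIFPAF_URL
        # is MongoDB.
        def test_mongo_specific_behaviour(self):
            self.assertIsNotNone(self.conn)


    class MixedScenarios(tests_db.TestBase):
        @tests_db.run_with('sqlite', 'mysql', 'pgsql')
        def test_sql_only_behaviour(self):
            # Runs only for the SQL drivers; other engines raise TestSkipped.
            self.assertIsNotNone(self.conn)
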
diff --git a/ceilometer/tests/functional/__init__.py b/ceilometer/tests/functional/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/functional/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/functional/hooks/post_test_hook.sh b/ceilometer/tests/functional/hooks/post_test_hook.sh
deleted file mode 100755
index 2ca732ee..00000000
--- a/ceilometer/tests/functional/hooks/post_test_hook.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash -xe
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
- # This script is executed inside the post_test_hook function in the devstack gate.
-
-function generate_testr_results {
- if [ -f .testrepository/0 ]; then
- sudo .tox/py-functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
- sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
- sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
- sudo gzip -9 $BASE/logs/testrepository.subunit
- sudo gzip -9 $BASE/logs/testr_results.html
- sudo chown $USER:$USER $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
- sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
- fi
-}
-
-export CEILOMETER_DIR="$BASE/new/ceilometer"
-
-# Go to the ceilometer dir
-cd $CEILOMETER_DIR
-
-if [[ -z "$STACK_USER" ]]; then
- export STACK_USER=stack
-fi
-
-sudo chown -R $STACK_USER:stack $CEILOMETER_DIR
-
-# Run tests
-echo "Running ceilometer functional test suite"
-set +e
-
- # NOTE(ityaptin) Expects a script parameter containing the backend name
-CEILOMETER_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -epy-functional
-EXIT_CODE=$?
-set -e
-
-# Collect and parse result
-generate_testr_results
-exit $EXIT_CODE
diff --git a/ceilometer/tests/functional/publisher/__init__.py b/ceilometer/tests/functional/publisher/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/functional/publisher/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/functional/publisher/test_direct.py b/ceilometer/tests/functional/publisher/test_direct.py
deleted file mode 100644
index ce1cbffb..00000000
--- a/ceilometer/tests/functional/publisher/test_direct.py
+++ /dev/null
@@ -1,80 +0,0 @@
-#
-# Copyright 2015 Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/publisher/direct.py
-"""
-
-import datetime
-import uuid
-
-from oslo_utils import netutils
-
-from ceilometer.publisher import direct
-from ceilometer import sample
-from ceilometer.tests import db as tests_db
-
-
-class TestDirectPublisher(tests_db.TestBase):
-
- resource_id = str(uuid.uuid4())
-
- test_data = [
- sample.Sample(
- name='alpha',
- type=sample.TYPE_CUMULATIVE,
- unit='',
- volume=1,
- user_id='test',
- project_id='test',
- resource_id=resource_id,
- timestamp=datetime.datetime.utcnow().isoformat(),
- resource_metadata={'name': 'TestPublish'},
- ),
- sample.Sample(
- name='beta',
- type=sample.TYPE_CUMULATIVE,
- unit='',
- volume=1,
- user_id='test',
- project_id='test',
- resource_id=resource_id,
- timestamp=datetime.datetime.utcnow().isoformat(),
- resource_metadata={'name': 'TestPublish'},
- ),
- sample.Sample(
- name='gamma',
- type=sample.TYPE_CUMULATIVE,
- unit='',
- volume=1,
- user_id='test',
- project_id='test',
- resource_id=resource_id,
- timestamp=datetime.datetime.now().isoformat(),
- resource_metadata={'name': 'TestPublish'},
- ),
- ]
-
- def test_direct_publisher(self):
- """Test samples are saved."""
- self.CONF.set_override('connection', self.db_manager.url,
- group='database')
- parsed_url = netutils.urlsplit('direct://')
- publisher = direct.DirectPublisher(self.CONF, parsed_url)
- publisher.publish_samples(self.test_data)
-
- meters = list(self.conn.get_meters(resource=self.resource_id))
- names = sorted([meter.name for meter in meters])
-
- self.assertEqual(3, len(meters), 'There should be 3 samples')
- self.assertEqual(['alpha', 'beta', 'gamma'], names)
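
A minimal sketch of the flow this test exercised, assuming an in-memory SQLite database; the connection string and sample values are illustrative, not part of the original test:

    import datetime
    import uuid

    from oslo_utils import netutils

    from ceilometer.publisher import direct
    from ceilometer import sample
    from ceilometer import service

    conf = service.prepare_service([], [])
    # Illustrative target database; the test pointed this at its per-run test DB.
    conf.set_override('connection', 'sqlite://', group='database')

    samples = [sample.Sample(
        name='alpha', type=sample.TYPE_CUMULATIVE, unit='', volume=1,
        user_id='test', project_id='test', resource_id=str(uuid.uuid4()),
        timestamp=datetime.datetime.utcnow().isoformat(),
        resource_metadata={'name': 'TestPublish'})]

    # 'direct://' routes samples straight into the configured metering database.
    publisher = direct.DirectPublisher(conf, netutils.urlsplit('direct://'))
    publisher.publish_samples(samples)
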
diff --git a/ceilometer/tests/functional/storage/__init__.py b/ceilometer/tests/functional/storage/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/functional/storage/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/functional/storage/test_impl_hbase.py b/ceilometer/tests/functional/storage/test_impl_hbase.py
deleted file mode 100644
index 09a30737..00000000
--- a/ceilometer/tests/functional/storage/test_impl_hbase.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Copyright 2012, 2013 Dell Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/impl_hbase.py
-
-.. note::
- In order to run the tests against a real HBase server, set the environment
- variable CEILOMETER_TEST_HBASE_URL to point to that HBase instance before
- running the tests. Make sure the Thrift server is running on that server.
-
-"""
-import mock
-
-
-try:
- import happybase # noqa
-except ImportError:
- import testtools.testcase
- raise testtools.testcase.TestSkipped("happybase is needed")
-
-from ceilometer.storage import impl_hbase as hbase
-from ceilometer.tests import base as test_base
-from ceilometer.tests import db as tests_db
-
-
-class ConnectionTest(tests_db.TestBase):
-
- @tests_db.run_with('hbase')
- def test_hbase_connection(self):
-
- class TestConn(object):
- def __init__(self, host, port):
- self.netloc = '%s:%s' % (host, port)
-
- def open(self):
- pass
-
- def get_connection_pool(conf):
- return TestConn(conf['host'], conf['port'])
-
- with mock.patch.object(hbase.Connection, '_get_connection_pool',
- side_effect=get_connection_pool):
- conn = hbase.Connection(self.CONF, 'hbase://test_hbase:9090')
- self.assertIsInstance(conn.conn_pool, TestConn)
-
-
-class CapabilitiesTest(test_base.BaseTestCase):
- # Check the returned capabilities list, which is specific to each DB
- # driver
-
- def test_capabilities(self):
- expected_capabilities = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': False}},
- 'statistics': {'groupby': False,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {
- 'max': False,
- 'min': False,
- 'sum': False,
- 'avg': False,
- 'count': False,
- 'stddev': False,
- 'cardinality': False}}
- },
- }
-
- actual_capabilities = hbase.Connection.get_capabilities()
- self.assertEqual(expected_capabilities, actual_capabilities)
-
- def test_storage_capabilities(self):
- expected_capabilities = {
- 'storage': {'production_ready': True},
- }
- actual_capabilities = hbase.Connection.get_storage_capabilities()
- self.assertEqual(expected_capabilities, actual_capabilities)
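
A small sketch of how the HBase fixture shown earlier assembles its connection URL; the base URL below is an illustrative placeholder for a real Thrift endpoint:

    import os

    base_url = 'hbase://hbase-host:9090'  # illustrative HBase Thrift server

    url = '%s?table_prefix=%s&table_prefix_separator=%s' % (
        base_url,
        os.getenv('CEILOMETER_TEST_HBASE_TABLE_PREFIX', 'test'),
        os.getenv('CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR', '_'))
    # hbase://hbase-host:9090?table_prefix=test&table_prefix_separator=_
    print(url)
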
diff --git a/ceilometer/tests/functional/storage/test_impl_log.py b/ceilometer/tests/functional/storage/test_impl_log.py
deleted file mode 100644
index b80f9105..00000000
--- a/ceilometer/tests/functional/storage/test_impl_log.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/impl_log.py
-"""
-from oslotest import base
-
-from ceilometer.storage import impl_log
-
-
-class ConnectionTest(base.BaseTestCase):
- @staticmethod
- def test_get_connection():
- conn = impl_log.Connection(None, None)
- conn.record_metering_data({'counter_name': 'test',
- 'resource_id': __name__,
- 'counter_volume': 1,
- })
diff --git a/ceilometer/tests/functional/storage/test_impl_mongodb.py b/ceilometer/tests/functional/storage/test_impl_mongodb.py
deleted file mode 100644
index d96257fd..00000000
--- a/ceilometer/tests/functional/storage/test_impl_mongodb.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/impl_mongodb.py
-
-.. note::
- In order to run the tests against another MongoDB server, set the
- environment variable CEILOMETER_TEST_MONGODB_URL to point to a MongoDB
- server before running the tests.
-
-"""
-
-from ceilometer.storage import impl_mongodb
-from ceilometer.tests import base as test_base
-from ceilometer.tests import db as tests_db
-
-
-@tests_db.run_with('mongodb')
-class MongoDBConnection(tests_db.TestBase):
- def test_connection_pooling(self):
- test_conn = impl_mongodb.Connection(self.CONF, self.db_manager.url)
- self.assertEqual(self.conn.conn, test_conn.conn)
-
- def test_replica_set(self):
- url = self.db_manager._url + '?replicaSet=foobar'
- conn = impl_mongodb.Connection(self.CONF, url)
- self.assertTrue(conn.conn)
-
-
-@tests_db.run_with('mongodb')
-class IndexTest(tests_db.TestBase):
-
- def _test_ttl_index_absent(self, conn, coll_name, ttl_opt):
- # create a fake index and check it is deleted
- coll = getattr(conn.db, coll_name)
- index_name = '%s_ttl' % coll_name
- self.CONF.set_override(ttl_opt, -1, group='database')
- conn.upgrade()
- self.assertNotIn(index_name, coll.index_information())
-
- self.CONF.set_override(ttl_opt, 456789, group='database')
- conn.upgrade()
- self.assertEqual(456789,
- coll.index_information()
- [index_name]['expireAfterSeconds'])
-
- def test_meter_ttl_index_absent(self):
- self._test_ttl_index_absent(self.conn, 'meter',
- 'metering_time_to_live')
-
- def _test_ttl_index_present(self, conn, coll_name, ttl_opt):
- coll = getattr(conn.db, coll_name)
- self.CONF.set_override(ttl_opt, 456789, group='database')
- conn.upgrade()
- index_name = '%s_ttl' % coll_name
- self.assertEqual(456789,
- coll.index_information()
- [index_name]['expireAfterSeconds'])
-
- self.CONF.set_override(ttl_opt, -1, group='database')
- conn.upgrade()
- self.assertNotIn(index_name, coll.index_information())
-
- def test_meter_ttl_index_present(self):
- self._test_ttl_index_present(self.conn, 'meter',
- 'metering_time_to_live')
-
-
-class CapabilitiesTest(test_base.BaseTestCase):
- # Check the returned capabilities list, which is specific to each DB
- # driver
-
- def test_capabilities(self):
- expected_capabilities = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': True}},
- 'statistics': {'groupby': True,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {
- 'max': True,
- 'min': True,
- 'sum': True,
- 'avg': True,
- 'count': True,
- 'stddev': True,
- 'cardinality': True}}
- },
- }
-
- actual_capabilities = impl_mongodb.Connection.get_capabilities()
- self.assertEqual(expected_capabilities, actual_capabilities)
-
- def test_storage_capabilities(self):
- expected_capabilities = {
- 'storage': {'production_ready': True},
- }
- actual_capabilities = (impl_mongodb.Connection.
- get_storage_capabilities())
- self.assertEqual(expected_capabilities, actual_capabilities)
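
The IndexTest cases above rely on how MongoDB exposes a TTL index; a minimal standalone pymongo sketch of the same check, assuming a local test instance and an illustrative database name:

    import pymongo

    client = pymongo.MongoClient('mongodb://localhost:27017')
    meter = client['ceilometer_test']['meter']

    # The storage driver names its TTL index '<collection>_ttl' and stores the
    # configured metering_time_to_live as 'expireAfterSeconds'.
    meter.create_index('timestamp', name='meter_ttl', expireAfterSeconds=456789)
    print(meter.index_information()['meter_ttl']['expireAfterSeconds'])  # 456789

    # Setting the option to -1 makes conn.upgrade() drop the index, which is
    # what the *_ttl_index_absent assertions verify; dropping it by hand here.
    meter.drop_index('meter_ttl')
    assert 'meter_ttl' not in meter.index_information()
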
diff --git a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py b/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py
deleted file mode 100644
index 026345f8..00000000
--- a/ceilometer/tests/functional/storage/test_impl_sqlalchemy.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/impl_sqlalchemy.py
-
-.. note::
- In order to run the tests against a real SQL server, set the environment
- variable CEILOMETER_TEST_SQL_URL to point to a SQL server before running
- the tests.
-
-"""
-
-import datetime
-import warnings
-
-import mock
-from oslo_db import exception
-from oslo_utils import timeutils
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.storage import impl_sqlalchemy
-from ceilometer.storage.sqlalchemy import models as sql_models
-from ceilometer.tests import base as test_base
-from ceilometer.tests import db as tests_db
-from ceilometer.tests.functional.storage \
- import test_storage_scenarios as scenarios
-
-
-@tests_db.run_with('sqlite', 'mysql', 'pgsql')
-class CeilometerBaseTest(tests_db.TestBase):
-
- def test_ceilometer_base(self):
- base = sql_models.CeilometerBase()
- base['key'] = 'value'
- self.assertEqual('value', base['key'])
-
-
-@tests_db.run_with('sqlite')
-class EngineFacadeTest(tests_db.TestBase):
-
- @mock.patch.object(warnings, 'warn')
- def test_no_not_supported_warning(self, mocked):
- impl_sqlalchemy.Connection(self.CONF, 'sqlite://')
- self.assertNotIn(mock.call(mock.ANY, exception.NotSupportedWarning),
- mocked.call_args_list)
-
-
-@tests_db.run_with('sqlite', 'mysql', 'pgsql')
-class RelationshipTest(scenarios.DBTestBase):
- # Note: Do not derive from SQLAlchemyEngineTestBase, since we
- # don't want to automatically inherit all the Meter setup.
-
- @mock.patch.object(timeutils, 'utcnow')
- def test_clear_metering_data_meta_tables(self, mock_utcnow):
- mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
- self.conn.clear_expired_metering_data(3 * 60)
-
- session = self.conn._engine_facade.get_session()
- self.assertEqual(5, session.query(sql_models.Sample).count())
-
- resource_ids = (session.query(sql_models.Resource.internal_id)
- .group_by(sql_models.Resource.internal_id))
- meta_tables = [sql_models.MetaText, sql_models.MetaFloat,
- sql_models.MetaBigInt, sql_models.MetaBool]
- s = set()
- for table in meta_tables:
- self.assertEqual(0, (session.query(table)
- .filter(~table.id.in_(resource_ids)).count()
- ))
- s.update(session.query(table.id).all())
- self.assertEqual(set(resource_ids.all()), s)
-
-
-class CapabilitiesTest(test_base.BaseTestCase):
- # Check the returned capabilities list, which is specific to each DB
- # driver
-
- def test_capabilities(self):
- expected_capabilities = {
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': True}},
- 'statistics': {'groupby': True,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {
- 'max': True,
- 'min': True,
- 'sum': True,
- 'avg': True,
- 'count': True,
- 'stddev': True,
- 'cardinality': True}}
- },
- }
-
- actual_capabilities = impl_sqlalchemy.Connection.get_capabilities()
- self.assertEqual(expected_capabilities, actual_capabilities)
-
- def test_storage_capabilities(self):
- expected_capabilities = {
- 'storage': {'production_ready': True},
- }
- actual_capabilities = (impl_sqlalchemy.
- Connection.get_storage_capabilities())
- self.assertEqual(expected_capabilities, actual_capabilities)
-
-
-@tests_db.run_with('sqlite', 'mysql', 'pgsql')
-class FilterQueryTestForMeters(scenarios.DBTestBase):
- def prepare_data(self):
- self.counters = []
- c = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5,
- user_id=None,
- project_id=None,
- resource_id='fake_id',
- timestamp=datetime.datetime(2012, 9, 25, 10, 30),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.counter',
- },
- source='test',
- )
-
- self.counters.append(c)
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret')
- self.conn.record_metering_data(msg)
-
- def test_get_meters_by_user(self):
- meters = list(self.conn.get_meters(user='None'))
- self.assertEqual(1, len(meters))
-
- def test_get_meters_by_project(self):
- meters = list(self.conn.get_meters(project='None'))
- self.assertEqual(1, len(meters))
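
A minimal sketch of the lookup encoded by the two filter tests above, assuming an in-memory SQLite backend: samples recorded with user_id and project_id of None are retrieved by passing the literal string 'None':

    from ceilometer import service
    from ceilometer import storage

    conf = service.prepare_service([], [])
    conn = storage.get_connection(conf, 'sqlite://')  # illustrative in-memory DB
    conn.upgrade()

    # After recording samples whose user_id/project_id are None (as in
    # prepare_data() above), they match the string 'None', not Python's None.
    meters_without_user = list(conn.get_meters(user='None'))
    meters_without_project = list(conn.get_meters(project='None'))
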
diff --git a/ceilometer/tests/functional/storage/test_pymongo_base.py b/ceilometer/tests/functional/storage/test_pymongo_base.py
deleted file mode 100644
index 6dadffad..00000000
--- a/ceilometer/tests/functional/storage/test_pymongo_base.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests the mongodb functionality
-"""
-
-import copy
-import datetime
-
-import mock
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests import db as tests_db
-from ceilometer.tests.functional.storage import test_storage_scenarios
-
-
-@tests_db.run_with('mongodb')
-class CompatibilityTest(test_storage_scenarios.DBTestBase):
-
- def prepare_data(self):
- def old_record_metering_data(self, data):
- received_timestamp = datetime.datetime.utcnow()
- self.db.resource.update(
- {'_id': data['resource_id']},
- {'$set': {'project_id': data['project_id'],
- 'user_id': data['user_id'],
- # Current metadata being used and when it was
- # last updated.
- 'timestamp': data['timestamp'],
- 'received_timestamp': received_timestamp,
- 'metadata': data['resource_metadata'],
- 'source': data['source'],
- },
- '$addToSet': {'meter': {'counter_name': data['counter_name'],
- 'counter_type': data['counter_type'],
- },
- },
- },
- upsert=True,
- )
-
- record = copy.copy(data)
- self.db.meter.insert(record)
-
- # Stubout with the old version DB schema, the one w/o 'counter_unit'
- with mock.patch.object(self.conn, 'record_metering_data',
- side_effect=old_record_metering_data):
- self.counters = []
- c = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 9, 25, 10, 30),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.counter',
- },
- source='test',
- )
- self.counters.append(c)
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret')
- self.conn.record_metering_data(self.conn, msg)
-
- def test_counter_unit(self):
- meters = list(self.conn.get_meters())
- self.assertEqual(1, len(meters))
-
-
-@tests_db.run_with('mongodb')
-class FilterQueryTestForMeters(test_storage_scenarios.DBTestBase):
- def prepare_data(self):
- def old_record_metering_data(self, data):
- received_timestamp = datetime.datetime.utcnow()
- self.db.resource.update(
- {'_id': data['resource_id']},
- {'$set': {'project_id': data['project_id'],
- 'user_id': data['user_id'],
- # Current metadata being used and when it was
- # last updated.
- 'timestamp': data['timestamp'],
- 'received_timestamp': received_timestamp,
- 'metadata': data['resource_metadata'],
- 'source': data['source'],
- },
- '$addToSet': {'meter': {'counter_name': data['counter_name'],
- 'counter_type': data['counter_type'],
- },
- },
- },
- upsert=True,
- )
-
- record = copy.copy(data)
- self.db.meter.insert(record)
-
- # Stubout with the old version DB schema, the one w/o 'counter_unit'
- with mock.patch.object(self.conn, 'record_metering_data',
- side_effect=old_record_metering_data):
- self.counters = []
- c = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5,
- None,
- None,
- None,
- timestamp=datetime.datetime(2012, 9, 25, 10, 30),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.counter',
- },
- source='test',
- )
-
- self.counters.append(c)
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret')
- self.conn.record_metering_data(self.conn, msg)
-
- def test_get_meters_by_user(self):
- meters = list(self.conn.get_meters(user='None'))
- self.assertEqual(1, len(meters))
-
- def test_get_meters_by_resource(self):
- meters = list(self.conn.get_meters(resource='None'))
- self.assertEqual(1, len(meters))
-
- def test_get_meters_by_project(self):
- meters = list(self.conn.get_meters(project='None'))
- self.assertEqual(1, len(meters))
diff --git a/ceilometer/tests/functional/storage/test_storage_scenarios.py b/ceilometer/tests/functional/storage/test_storage_scenarios.py
deleted file mode 100644
index 4c795235..00000000
--- a/ceilometer/tests/functional/storage/test_storage_scenarios.py
+++ /dev/null
@@ -1,2805 +0,0 @@
-#
-# Copyright 2013 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Base classes for DB backend implementation test"""
-
-import datetime
-
-import mock
-from oslo_db import api
-from oslo_db import exception as dbexc
-from oslo_utils import timeutils
-import pymongo
-
-import ceilometer
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer import storage
-from ceilometer.tests import db as tests_db
-
-
-class DBTestBase(tests_db.TestBase):
- @staticmethod
- def create_side_effect(method, exception_type, test_exception):
- def side_effect(*args, **kwargs):
- if test_exception.pop():
- raise exception_type
- else:
- return method(*args, **kwargs)
- return side_effect
-
- def create_and_store_sample(self, timestamp=datetime.datetime.utcnow(),
- metadata=None,
- name='instance',
- sample_type=sample.TYPE_CUMULATIVE, unit='',
- volume=1, user_id='user-id',
- project_id='project-id',
- resource_id='resource-id', source=None):
- metadata = metadata or {'display_name': 'test-server',
- 'tag': 'self.counter'}
- s = sample.Sample(
- name, sample_type, unit=unit, volume=volume, user_id=user_id,
- project_id=project_id, resource_id=resource_id,
- timestamp=timestamp,
- resource_metadata=metadata, source=source
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret
- )
- self.conn.record_metering_data(msg)
- return msg
-
- def setUp(self):
- super(DBTestBase, self).setUp()
- patcher = mock.patch.object(timeutils, 'utcnow')
- self.addCleanup(patcher.stop)
- self.mock_utcnow = patcher.start()
- self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39)
- self.prepare_data()
-
- def prepare_data(self):
- original_timestamps = [(2012, 7, 2, 10, 40), (2012, 7, 2, 10, 41),
- (2012, 7, 2, 10, 41), (2012, 7, 2, 10, 42),
- (2012, 7, 2, 10, 43)]
-
- timestamps_for_test_samples_default_order = [(2012, 7, 2, 10, 44),
- (2011, 5, 30, 18, 3),
- (2012, 12, 1, 1, 25),
- (2012, 2, 29, 6, 59),
- (2013, 5, 31, 23, 7)]
- timestamp_list = (original_timestamps +
- timestamps_for_test_samples_default_order)
-
- self.msgs = []
-
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(2012, 7, 2, 10, 39),
- source='test-1')
- )
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(*timestamp_list[0]),
- source='test-1')
- )
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(*timestamp_list[1]),
- resource_id='resource-id-alternate',
- metadata={'display_name': 'test-server', 'tag': 'self.counter2'},
- source='test-2')
- )
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(*timestamp_list[2]),
- resource_id='resource-id-alternate',
- user_id='user-id-alternate',
- metadata={'display_name': 'test-server', 'tag': 'self.counter3'},
- source='test-3')
- )
-
- start_idx = 3
- end_idx = len(timestamp_list)
-
- for i, ts in zip(range(start_idx - 1, end_idx - 1),
- timestamp_list[start_idx:end_idx]):
- self.msgs.append(
- self.create_and_store_sample(
- timestamp=datetime.datetime(*ts),
- user_id='user-id-%s' % i,
- project_id='project-id-%s' % i,
- resource_id='resource-id-%s' % i,
- metadata={
- 'display_name': 'test-server',
- 'tag': 'counter-%s' % i
- },
- source='test')
- )
-
-
-class ResourceTest(DBTestBase):
- def prepare_data(self):
- super(ResourceTest, self).prepare_data()
-
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(2012, 7, 2, 10, 39),
- user_id='mongodb_test',
- resource_id='resource-id-mongo_bad_key',
- project_id='project-id-test',
- metadata={'display.name': {'name.$1': 'test-server1',
- '$name_2': 'test-server2'},
- 'tag': 'self.counter'},
- source='test-4'
- ))
-
- def test_get_resources(self):
- expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39)
- expected_last_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 40)
- msgs_sources = [msg['source'] for msg in self.msgs]
- resources = list(self.conn.get_resources())
- self.assertEqual(10, len(resources))
- for resource in resources:
- if resource.resource_id != 'resource-id':
- continue
- self.assertEqual(expected_first_sample_timestamp,
- resource.first_sample_timestamp)
- self.assertEqual(expected_last_sample_timestamp,
- resource.last_sample_timestamp)
- self.assertEqual('resource-id', resource.resource_id)
- self.assertEqual('project-id', resource.project_id)
- self.assertIn(resource.source, msgs_sources)
- self.assertEqual('user-id', resource.user_id)
- self.assertEqual('test-server', resource.metadata['display_name'])
- break
- else:
- self.fail('Never found resource-id')
-
- def test_get_resources_start_timestamp(self):
- timestamp = datetime.datetime(2012, 7, 2, 10, 42)
- expected = set(['resource-id-2', 'resource-id-3', 'resource-id-4',
- 'resource-id-6', 'resource-id-8'])
-
- resources = list(self.conn.get_resources(start_timestamp=timestamp))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(expected, set(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=timestamp,
- start_timestamp_op='ge'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(expected, set(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=timestamp,
- start_timestamp_op='gt'))
- resource_ids = [r.resource_id for r in resources]
- expected.remove('resource-id-2')
- self.assertEqual(expected, set(resource_ids))
-
- def test_get_resources_end_timestamp(self):
- timestamp = datetime.datetime(2012, 7, 2, 10, 42)
- expected = set(['resource-id', 'resource-id-alternate',
- 'resource-id-5', 'resource-id-7',
- 'resource-id-mongo_bad_key'])
-
- resources = list(self.conn.get_resources(end_timestamp=timestamp))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(expected, set(resource_ids))
-
- resources = list(self.conn.get_resources(end_timestamp=timestamp,
- end_timestamp_op='lt'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(expected, set(resource_ids))
-
- resources = list(self.conn.get_resources(end_timestamp=timestamp,
- end_timestamp_op='le'))
- resource_ids = [r.resource_id for r in resources]
- expected.add('resource-id-2')
- self.assertEqual(expected, set(resource_ids))
-
- def test_get_resources_both_timestamps(self):
- start_ts = datetime.datetime(2012, 7, 2, 10, 42)
- end_ts = datetime.datetime(2012, 7, 2, 10, 43)
-
- resources = list(self.conn.get_resources(start_timestamp=start_ts,
- end_timestamp=end_ts))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(set(['resource-id-2']), set(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=start_ts,
- end_timestamp=end_ts,
- start_timestamp_op='ge',
- end_timestamp_op='lt'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(set(['resource-id-2']), set(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=start_ts,
- end_timestamp=end_ts,
- start_timestamp_op='gt',
- end_timestamp_op='lt'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(0, len(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=start_ts,
- end_timestamp=end_ts,
- start_timestamp_op='gt',
- end_timestamp_op='le'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(set(['resource-id-3']), set(resource_ids))
-
- resources = list(self.conn.get_resources(start_timestamp=start_ts,
- end_timestamp=end_ts,
- start_timestamp_op='ge',
- end_timestamp_op='le'))
- resource_ids = [r.resource_id for r in resources]
- self.assertEqual(set(['resource-id-2', 'resource-id-3']),
- set(resource_ids))
-
- def test_get_resources_by_source(self):
- resources = list(self.conn.get_resources(source='test-1'))
- self.assertEqual(1, len(resources))
- ids = set(r.resource_id for r in resources)
- self.assertEqual(set(['resource-id']), ids)
-
- def test_get_resources_by_user(self):
- resources = list(self.conn.get_resources(user='user-id'))
- self.assertTrue(len(resources) == 2 or len(resources) == 1)
- ids = set(r.resource_id for r in resources)
- # tolerate storage driver only reporting latest owner of resource
- resources_ever_owned_by = set(['resource-id',
- 'resource-id-alternate'])
- resources_now_owned_by = set(['resource-id'])
- self.assertTrue(ids == resources_ever_owned_by or
- ids == resources_now_owned_by,
- 'unexpected resources: %s' % ids)
-
- def test_get_resources_by_alternate_user(self):
- resources = list(self.conn.get_resources(user='user-id-alternate'))
- self.assertEqual(1, len(resources))
- # only a single resource owned by this user ever
- self.assertEqual('resource-id-alternate', resources[0].resource_id)
-
- def test_get_resources_by_project(self):
- resources = list(self.conn.get_resources(project='project-id'))
- self.assertEqual(2, len(resources))
- ids = set(r.resource_id for r in resources)
- self.assertEqual(set(['resource-id', 'resource-id-alternate']), ids)
-
- def test_get_resources_by_metaquery(self):
- q = {'metadata.display_name': 'test-server'}
- resources = list(self.conn.get_resources(metaquery=q))
- self.assertEqual(9, len(resources))
-
- def test_get_resources_by_metaquery_key_with_dot_in_metadata(self):
- q = {'metadata.display.name.$name_2': 'test-server2',
- 'metadata.display.name.name.$1': 'test-server1'}
- resources = list(self.conn.get_resources(metaquery=q))
- self.assertEqual(1, len(resources))
-
- def test_get_resources_by_empty_metaquery(self):
- resources = list(self.conn.get_resources(metaquery={}))
- self.assertEqual(10, len(resources))
-
- def test_get_resources_most_recent_metadata_all(self):
- resources = self.conn.get_resources()
- expected_tags = ['self.counter', 'self.counter3', 'counter-2',
- 'counter-3', 'counter-4', 'counter-5', 'counter-6',
- 'counter-7', 'counter-8']
-
- for resource in resources:
- self.assertIn(resource.metadata['tag'], expected_tags)
-
- def test_get_resources_most_recent_metadata_single(self):
- resource = list(
- self.conn.get_resources(resource='resource-id-alternate')
- )[0]
- expected_tag = 'self.counter3'
- self.assertEqual(expected_tag, resource.metadata['tag'])
-
-
-class ResourceTestOrdering(DBTestBase):
- def prepare_data(self):
- sample_timings = [('resource-id-1', [(2013, 8, 10, 10, 43),
- (2013, 8, 10, 10, 44),
- (2013, 8, 10, 10, 42),
- (2013, 8, 10, 10, 49),
- (2013, 8, 10, 10, 47)]),
- ('resource-id-2', [(2013, 8, 10, 10, 43),
- (2013, 8, 10, 10, 48),
- (2013, 8, 10, 10, 42),
- (2013, 8, 10, 10, 48),
- (2013, 8, 10, 10, 47)]),
- ('resource-id-3', [(2013, 8, 10, 10, 43),
- (2013, 8, 10, 10, 44),
- (2013, 8, 10, 10, 50),
- (2013, 8, 10, 10, 49),
- (2013, 8, 10, 10, 47)])]
-
- counter = 0
- for resource, timestamps in sample_timings:
- for timestamp in timestamps:
- self.create_and_store_sample(
- timestamp=datetime.datetime(*timestamp),
- resource_id=resource,
- user_id=str(counter % 2),
- project_id=str(counter % 3),
- metadata={
- 'display_name': 'test-server',
- 'tag': 'sample-%s' % counter
- },
- source='test'
- )
- counter += 1
-
- def test_get_resources_ordering_all(self):
- resources = list(self.conn.get_resources())
- expected = set([
- ('resource-id-1', 'sample-3'),
- ('resource-id-2', 'sample-8'),
- ('resource-id-3', 'sample-12')
- ])
- received = set([(r.resource_id, r.metadata['tag']) for r in resources])
- self.assertEqual(expected, received)
-
- def test_get_resources_ordering_single(self):
- resource = list(self.conn.get_resources(resource='resource-id-2'))[0]
- self.assertEqual('resource-id-2', resource.resource_id)
- self.assertEqual('sample-8', resource.metadata['tag'])
-
-
-class MeterTest(DBTestBase):
- def test_get_meters(self):
- msgs_sources = [msg['source'] for msg in self.msgs]
- results = list(self.conn.get_meters())
- self.assertEqual(9, len(results))
- for meter in results:
- self.assertIn(meter.source, msgs_sources)
-
- def test_get_meters_by_user(self):
- results = list(self.conn.get_meters(user='user-id'))
- self.assertEqual(1, len(results))
-
- def test_get_meters_by_project(self):
- results = list(self.conn.get_meters(project='project-id'))
- self.assertEqual(2, len(results))
-
- def test_get_meters_by_metaquery(self):
- q = {'metadata.display_name': 'test-server'}
- results = list(self.conn.get_meters(metaquery=q))
- self.assertIsNotEmpty(results)
- self.assertEqual(9, len(results))
-
- def test_get_meters_by_empty_metaquery(self):
- results = list(self.conn.get_meters(metaquery={}))
- self.assertEqual(9, len(results))
-
-
-class RawSampleTest(DBTestBase):
-
- def prepare_data(self):
- super(RawSampleTest, self).prepare_data()
-
- self.msgs.append(self.create_and_store_sample(
- timestamp=datetime.datetime(2012, 7, 2, 10, 39),
- user_id='mongodb_test',
- resource_id='resource-id-mongo_bad_key',
- project_id='project-id-test',
- metadata={'display.name': {'name.$1': 'test-server1',
- '$name_2': 'test-server2'},
- 'tag': 'self.counter'},
- source='test-4'
- ))
-
- def test_get_sample_counter_volume(self):
- # NOTE(idegtiarov) Because wsme expects a float type of data, this test
- # checks the type of counter_volume received from the database.
- f = storage.SampleFilter()
- result = next(self.conn.get_samples(f, limit=1))
- self.assertIsInstance(result.counter_volume, float)
-
- def test_get_samples_limit_zero(self):
- f = storage.SampleFilter()
- results = list(self.conn.get_samples(f, limit=0))
- self.assertEqual(0, len(results))
-
- def test_get_samples_limit(self):
- f = storage.SampleFilter()
- results = list(self.conn.get_samples(f, limit=3))
- self.assertEqual(3, len(results))
- for result in results:
- self.assertTimestampEqual(timeutils.utcnow(), result.recorded_at)
-
- def test_get_samples_in_default_order(self):
- f = storage.SampleFilter()
- prev_timestamp = None
- for sample_item in self.conn.get_samples(f):
- if prev_timestamp is not None:
- self.assertGreaterEqual(prev_timestamp, sample_item.timestamp)
- prev_timestamp = sample_item.timestamp
-
- def test_get_samples_by_user(self):
- f = storage.SampleFilter(user='user-id')
- results = list(self.conn.get_samples(f))
- self.assertEqual(3, len(results))
- for meter in results:
- d = meter.as_dict()
- self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertIn(d, self.msgs[:3])
-
- def test_get_samples_by_user_limit(self):
- f = storage.SampleFilter(user='user-id')
- results = list(self.conn.get_samples(f, limit=1))
- self.assertEqual(1, len(results))
-
- def test_get_samples_by_user_limit_bigger(self):
- f = storage.SampleFilter(user='user-id')
- results = list(self.conn.get_samples(f, limit=42))
- self.assertEqual(3, len(results))
-
- def test_get_samples_by_project(self):
- f = storage.SampleFilter(project='project-id')
- results = list(self.conn.get_samples(f))
- self.assertIsNotNone(results)
- for meter in results:
- d = meter.as_dict()
- self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertIn(d, self.msgs[:4])
-
- def test_get_samples_by_resource(self):
- f = storage.SampleFilter(user='user-id', resource='resource-id')
- results = list(self.conn.get_samples(f))
- self.assertEqual(2, len(results))
- d = results[1].as_dict()
- self.assertEqual(timeutils.utcnow(), d['recorded_at'])
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertEqual(self.msgs[0], d)
-
- def test_get_samples_by_metaquery(self):
- q = {'metadata.display_name': 'test-server'}
- f = storage.SampleFilter(metaquery=q)
- results = list(self.conn.get_samples(f))
- self.assertIsNotNone(results)
- for meter in results:
- d = meter.as_dict()
- self.assertTimestampEqual(timeutils.utcnow(), d['recorded_at'])
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertIn(d, self.msgs)
-
- def test_get_samples_by_metaquery_key_with_dot_in_metadata(self):
- q = {'metadata.display.name.name.$1': 'test-server1',
- 'metadata.display.name.$name_2': 'test-server2'}
- f = storage.SampleFilter(metaquery=q)
- results = list(self.conn.get_samples(f))
- self.assertIsNotNone(results)
- self.assertEqual(1, len(results))
-
- def test_get_samples_by_start_time(self):
- timestamp = datetime.datetime(2012, 7, 2, 10, 41)
- f = storage.SampleFilter(
- user='user-id',
- start_timestamp=timestamp,
- )
-
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- self.assertEqual(timestamp, results[0].timestamp)
-
- f.start_timestamp_op = 'ge'
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- self.assertEqual(timestamp, results[0].timestamp)
-
- f.start_timestamp_op = 'gt'
- results = list(self.conn.get_samples(f))
- self.assertEqual(0, len(results))
-
- def test_get_samples_by_end_time(self):
- timestamp = datetime.datetime(2012, 7, 2, 10, 40)
- f = storage.SampleFilter(
- user='user-id',
- end_timestamp=timestamp,
- )
-
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
-
- f.end_timestamp_op = 'lt'
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
-
- f.end_timestamp_op = 'le'
- results = list(self.conn.get_samples(f))
- self.assertEqual(2, len(results))
- self.assertEqual(datetime.datetime(2012, 7, 2, 10, 39),
- results[1].timestamp)
-
- def test_get_samples_by_both_times(self):
- start_ts = datetime.datetime(2012, 7, 2, 10, 42)
- end_ts = datetime.datetime(2012, 7, 2, 10, 43)
- f = storage.SampleFilter(
- start_timestamp=start_ts,
- end_timestamp=end_ts,
- )
-
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- self.assertEqual(start_ts, results[0].timestamp)
-
- f.start_timestamp_op = 'gt'
- f.end_timestamp_op = 'lt'
- results = list(self.conn.get_samples(f))
- self.assertEqual(0, len(results))
-
- f.start_timestamp_op = 'ge'
- f.end_timestamp_op = 'lt'
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- self.assertEqual(start_ts, results[0].timestamp)
-
- f.start_timestamp_op = 'gt'
- f.end_timestamp_op = 'le'
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- self.assertEqual(end_ts, results[0].timestamp)
-
- f.start_timestamp_op = 'ge'
- f.end_timestamp_op = 'le'
- results = list(self.conn.get_samples(f))
- self.assertEqual(2, len(results))
- self.assertEqual(end_ts, results[0].timestamp)
- self.assertEqual(start_ts, results[1].timestamp)
-
- def test_get_samples_by_name(self):
- f = storage.SampleFilter(user='user-id', meter='no-such-meter')
- results = list(self.conn.get_samples(f))
- self.assertIsEmpty(results)
-
- def test_get_samples_by_name2(self):
- f = storage.SampleFilter(user='user-id', meter='instance')
- results = list(self.conn.get_samples(f))
- self.assertIsNotEmpty(results)
-
- def test_get_samples_by_source(self):
- f = storage.SampleFilter(source='test-1')
- results = list(self.conn.get_samples(f))
- self.assertEqual(2, len(results))
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase')
- def test_clear_metering_data(self):
- # NOTE(jd) Override this test in MongoDB because our code doesn't clear
- # the collections; that is handled by the MongoDB TTL feature.
-
- self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
- self.conn.clear_expired_metering_data(3 * 60)
- f = storage.SampleFilter(meter='instance')
- results = list(self.conn.get_samples(f))
- self.assertEqual(5, len(results))
- results = list(self.conn.get_resources())
- self.assertEqual(5, len(results))
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase')
- def test_clear_metering_data_no_data_to_remove(self):
- # NOTE(jd) Override this test in MongoDB because our code doesn't clear
- # the collections; that is handled by the MongoDB TTL feature.
-
- self.mock_utcnow.return_value = datetime.datetime(2010, 7, 2, 10, 45)
- self.conn.clear_expired_metering_data(3 * 60)
- f = storage.SampleFilter(meter='instance')
- results = list(self.conn.get_samples(f))
- self.assertEqual(12, len(results))
- results = list(self.conn.get_resources())
- self.assertEqual(10, len(results))
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql')
- def test_clear_metering_data_expire_samples_only(self):
-
- self.CONF.set_override('sql_expire_samples_only', True, 'database')
- self.mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45)
- self.conn.clear_expired_metering_data(4 * 60)
- f = storage.SampleFilter(meter='instance')
- results = list(self.conn.get_samples(f))
- self.assertEqual(7, len(results))
- results = list(self.conn.get_resources())
- self.assertEqual(6, len(results))
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql')
- def test_record_metering_data_retry_success_on_deadlock(self):
- raise_deadlock = [False, True]
- self.CONF.set_override('max_retries', 2, group='database')
-
- s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='',
- volume=1, user_id='user_id',
- project_id='project_id',
- resource_id='resource_id',
- timestamp=datetime.datetime.utcnow(),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.counter'},
- source=None)
-
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret
- )
-
- mock_resource_create = mock.patch.object(self.conn, "_create_resource")
-
- mock_resource_create.side_effect = self.create_side_effect(
- self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock)
- with mock.patch.object(api.time, 'sleep') as retry_sleep:
- self.conn.record_metering_data(msg)
- self.assertEqual(1, retry_sleep.call_count)
-
- f = storage.SampleFilter(meter='instance')
- results = list(self.conn.get_samples(f))
- self.assertEqual(13, len(results))
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql')
- def test_record_metering_data_retry_failure_on_deadlock(self):
- raise_deadlock = [True, True, True]
- self.CONF.set_override('max_retries', 3, group='database')
-
- s = sample.Sample('instance', sample.TYPE_CUMULATIVE, unit='',
- volume=1, user_id='user_id',
- project_id='project_id',
- resource_id='resource_id',
- timestamp=datetime.datetime.utcnow(),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.counter'},
- source=None)
-
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret
- )
-
- mock_resource_create = mock.patch.object(self.conn, "_create_resource")
-
- mock_resource_create.side_effect = self.create_side_effect(
- self.conn._create_resource, dbexc.DBDeadlock, raise_deadlock)
- with mock.patch.object(api.time, 'sleep') as retry_sleep:
- try:
- self.conn.record_metering_data(msg)
- except dbexc.DBError as err:
- self.assertIn('DBDeadlock', str(type(err)))
- self.assertEqual(3, retry_sleep.call_count)
-
-
-class ComplexSampleQueryTest(DBTestBase):
- def setUp(self):
- super(ComplexSampleQueryTest, self).setUp()
- self.complex_filter = {
- "and":
- [{"or":
- [{"=": {"resource_id": "resource-id-42"}},
- {"=": {"resource_id": "resource-id-44"}}]},
- {"and":
- [{"=": {"counter_name": "cpu_util"}},
- {"and":
- [{">": {"counter_volume": 0.4}},
- {"not": {">": {"counter_volume": 0.8}}}]}]}]}
- or_expression = [{"=": {"resource_id": "resource-id-42"}},
- {"=": {"resource_id": "resource-id-43"}},
- {"=": {"resource_id": "resource-id-44"}}]
- and_expression = [{">": {"counter_volume": 0.4}},
- {"not": {">": {"counter_volume": 0.8}}}]
- self.complex_filter_list = {"and":
- [{"or": or_expression},
- {"and":
- [{"=": {"counter_name": "cpu_util"}},
- {"and": and_expression}]}]}
- in_expression = {"in": {"resource_id": ["resource-id-42",
- "resource-id-43",
- "resource-id-44"]}}
- self.complex_filter_in = {"and":
- [in_expression,
- {"and":
- [{"=": {"counter_name": "cpu_util"}},
- {"and": and_expression}]}]}
-
- def _create_samples(self):
- for resource in range(42, 45):
- for volume in [0.79, 0.41, 0.4, 0.8, 0.39, 0.81]:
- metadata = {'a_string_key': "meta-value" + str(volume),
- 'a_float_key': volume,
- 'an_int_key': resource,
- 'a_bool_key': (resource == 43)}
-
- self.create_and_store_sample(resource_id="resource-id-%s"
- % resource,
- metadata=metadata,
- name="cpu_util",
- volume=volume)
-
- def test_no_filter(self):
- results = list(self.conn.query_samples())
- self.assertEqual(len(self.msgs), len(results))
- for sample_item in results:
- d = sample_item.as_dict()
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertIn(d, self.msgs)
-
- def test_query_complex_filter_with_regexp(self):
- self._create_samples()
- complex_regex_filter = {"and": [
- {"=~": {"resource_id": "resource-id.*"}},
- {"=": {"counter_volume": 0.4}}]}
- results = list(
- self.conn.query_samples(filter_expr=complex_regex_filter))
- self.assertEqual(3, len(results))
- for sample_item in results:
- self.assertIn(sample_item.resource_id,
- set(["resource-id-42",
- "resource-id-43",
- "resource-id-44"]))
-
- def test_query_complex_filter_with_regexp_metadata(self):
- self._create_samples()
- complex_regex_filter = {"and": [
- {"=~": {"resource_metadata.a_string_key": "meta-value.*"}},
- {"=": {"counter_volume": 0.4}}]}
- results = list(
- self.conn.query_samples(filter_expr=complex_regex_filter))
- self.assertEqual(3, len(results))
- for sample_item in results:
- self.assertEqual("meta-value0.4",
- sample_item.resource_metadata['a_string_key'])
-
- def test_no_filter_with_zero_limit(self):
- limit = 0
- results = list(self.conn.query_samples(limit=limit))
- self.assertEqual(limit, len(results))
-
- def test_no_filter_with_limit(self):
- limit = 3
- results = list(self.conn.query_samples(limit=limit))
- self.assertEqual(limit, len(results))
-
- def test_query_simple_filter(self):
- simple_filter = {"=": {"resource_id": "resource-id-8"}}
- results = list(self.conn.query_samples(filter_expr=simple_filter))
- self.assertEqual(1, len(results))
- for sample_item in results:
- self.assertEqual("resource-id-8", sample_item.resource_id)
-
- def test_query_simple_filter_with_not_equal_relation(self):
- simple_filter = {"!=": {"resource_id": "resource-id-8"}}
- results = list(self.conn.query_samples(filter_expr=simple_filter))
- self.assertEqual(len(self.msgs) - 1, len(results))
- for sample_item in results:
- self.assertNotEqual("resource-id-8", sample_item.resource_id)
-
- def test_query_complex_filter(self):
- self._create_samples()
- results = list(self.conn.query_samples(filter_expr=(
- self.complex_filter)))
- self.assertEqual(6, len(results))
- for sample_item in results:
- self.assertIn(sample_item.resource_id,
- set(["resource-id-42", "resource-id-44"]))
- self.assertEqual("cpu_util", sample_item.counter_name)
- self.assertGreater(sample_item.counter_volume, 0.4)
- self.assertLessEqual(sample_item.counter_volume, 0.8)
-
- def test_query_complex_filter_with_limit(self):
- self._create_samples()
- limit = 3
- results = list(self.conn.query_samples(filter_expr=self.complex_filter,
- limit=limit))
- self.assertEqual(limit, len(results))
-
- def test_query_complex_filter_with_simple_orderby(self):
- self._create_samples()
- expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8]
- orderby = [{"counter_volume": "asc"}]
- results = list(self.conn.query_samples(filter_expr=self.complex_filter,
- orderby=orderby))
- self.assertEqual(expected_volume_order,
- [s.counter_volume for s in results])
-
- def test_query_complex_filter_with_complex_orderby(self):
- self._create_samples()
- expected_volume_order = [0.41, 0.41, 0.79, 0.79, 0.8, 0.8]
- expected_resource_id_order = ["resource-id-44", "resource-id-42",
- "resource-id-44", "resource-id-42",
- "resource-id-44", "resource-id-42"]
-
- orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}]
-
- results = list(self.conn.query_samples(filter_expr=self.complex_filter,
- orderby=orderby))
-
- self.assertEqual(expected_volume_order,
- [s.counter_volume for s in results])
- self.assertEqual(expected_resource_id_order,
- [s.resource_id for s in results])
-
- def test_query_complex_filter_with_list(self):
- self._create_samples()
- results = list(
- self.conn.query_samples(filter_expr=self.complex_filter_list))
- self.assertEqual(9, len(results))
- for sample_item in results:
- self.assertIn(sample_item.resource_id,
- set(["resource-id-42",
- "resource-id-43",
- "resource-id-44"]))
- self.assertEqual("cpu_util", sample_item.counter_name)
- self.assertGreater(sample_item.counter_volume, 0.4)
- self.assertLessEqual(sample_item.counter_volume, 0.8)
-
- def test_query_complex_filter_with_list_with_limit(self):
- self._create_samples()
- limit = 3
- results = list(
- self.conn.query_samples(filter_expr=self.complex_filter_list,
- limit=limit))
- self.assertEqual(limit, len(results))
-
- def test_query_complex_filter_with_list_with_simple_orderby(self):
- self._create_samples()
- expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79,
- 0.79, 0.8, 0.8, 0.8]
- orderby = [{"counter_volume": "asc"}]
- results = list(
- self.conn.query_samples(filter_expr=self.complex_filter_list,
- orderby=orderby))
- self.assertEqual(expected_volume_order,
- [s.counter_volume for s in results])
-
- def test_query_complex_filterwith_list_with_complex_orderby(self):
- self._create_samples()
- expected_volume_order = [0.41, 0.41, 0.41, 0.79, 0.79,
- 0.79, 0.8, 0.8, 0.8]
- expected_resource_id_order = ["resource-id-44", "resource-id-43",
- "resource-id-42", "resource-id-44",
- "resource-id-43", "resource-id-42",
- "resource-id-44", "resource-id-43",
- "resource-id-42"]
-
- orderby = [{"counter_volume": "asc"}, {"resource_id": "desc"}]
-
- results = list(
- self.conn.query_samples(filter_expr=self.complex_filter_list,
- orderby=orderby))
-
- self.assertEqual(expected_volume_order,
- [s.counter_volume for s in results])
- self.assertEqual(expected_resource_id_order,
- [s.resource_id for s in results])
-
- def test_query_complex_filter_with_wrong_order_in_orderby(self):
- self._create_samples()
-
- orderby = [{"counter_volume": "not valid order"},
- {"resource_id": "desc"}]
-
- query = lambda: list(self.conn.query_samples(filter_expr=(
- self.complex_filter),
- orderby=orderby))
- self.assertRaises(KeyError, query)
-
- def test_query_complex_filter_with_in(self):
- self._create_samples()
- results = list(
- self.conn.query_samples(filter_expr=self.complex_filter_in))
- self.assertEqual(9, len(results))
- for sample_item in results:
- self.assertIn(sample_item.resource_id,
- set(["resource-id-42",
- "resource-id-43",
- "resource-id-44"]))
- self.assertEqual("cpu_util", sample_item.counter_name)
- self.assertGreater(sample_item.counter_volume, 0.4)
- self.assertLessEqual(sample_item.counter_volume, 0.8)
-
- def test_query_simple_metadata_filter(self):
- self._create_samples()
-
- filter_expr = {"=": {"resource_metadata.a_bool_key": True}}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(6, len(results))
- for sample_item in results:
- self.assertTrue(sample_item.resource_metadata["a_bool_key"])
-
- def test_query_simple_metadata_with_in_op(self):
- self._create_samples()
-
- filter_expr = {"in": {"resource_metadata.an_int_key": [42, 43]}}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(12, len(results))
- for sample_item in results:
- self.assertIn(sample_item.resource_metadata["an_int_key"],
- [42, 43])
-
- def test_query_complex_metadata_filter(self):
- self._create_samples()
- subfilter = {"or": [{"=": {"resource_metadata.a_string_key":
- "meta-value0.81"}},
- {"<=": {"resource_metadata.a_float_key": 0.41}}]}
- filter_expr = {"and": [{">": {"resource_metadata.an_int_key": 42}},
- subfilter]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(8, len(results))
- for sample_item in results:
- self.assertTrue((sample_item.resource_metadata["a_string_key"] ==
- "meta-value0.81" or
- sample_item.resource_metadata["a_float_key"] <=
- 0.41))
- self.assertGreater(sample_item.resource_metadata["an_int_key"],
- 42)
-
- def test_query_mixed_data_and_metadata_filter(self):
- self._create_samples()
- subfilter = {"or": [{"=": {"resource_metadata.a_string_key":
- "meta-value0.81"}},
- {"<=": {"resource_metadata.a_float_key": 0.41}}]}
-
- filter_expr = {"and": [{"=": {"resource_id": "resource-id-42"}},
- subfilter]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(4, len(results))
- for sample_item in results:
- self.assertTrue((sample_item.resource_metadata["a_string_key"] ==
- "meta-value0.81" or
- sample_item.resource_metadata["a_float_key"] <=
- 0.41))
- self.assertEqual("resource-id-42", sample_item.resource_id)
-
- def test_query_non_existing_metadata_with_result(self):
- self._create_samples()
-
- filter_expr = {
- "or": [{"=": {"resource_metadata.a_string_key":
- "meta-value0.81"}},
- {"<=": {"resource_metadata.key_not_exists": 0.41}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(3, len(results))
- for sample_item in results:
- self.assertEqual("meta-value0.81",
- sample_item.resource_metadata["a_string_key"])
-
- def test_query_non_existing_metadata_without_result(self):
- self._create_samples()
-
- filter_expr = {
- "or": [{"=": {"resource_metadata.key_not_exists":
- "meta-value0.81"}},
- {"<=": {"resource_metadata.key_not_exists": 0.41}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
- self.assertEqual(0, len(results))
-
- def test_query_negated_metadata(self):
- self._create_samples()
-
- filter_expr = {
- "and": [{"=": {"resource_id": "resource-id-42"}},
- {"not": {"or": [{">": {"resource_metadata.an_int_key":
- 43}},
- {"<=": {"resource_metadata.a_float_key":
- 0.41}}]}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(3, len(results))
- for sample_item in results:
- self.assertEqual("resource-id-42", sample_item.resource_id)
- self.assertLessEqual(sample_item.resource_metadata["an_int_key"],
- 43)
- self.assertGreater(sample_item.resource_metadata["a_float_key"],
- 0.41)
-
- def test_query_negated_complex_expression(self):
- self._create_samples()
- filter_expr = {
- "and":
- [{"=": {"counter_name": "cpu_util"}},
- {"not":
- {"or":
- [{"or":
- [{"=": {"resource_id": "resource-id-42"}},
- {"=": {"resource_id": "resource-id-44"}}]},
- {"and":
- [{">": {"counter_volume": 0.4}},
- {"<": {"counter_volume": 0.8}}]}]}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(4, len(results))
- for sample_item in results:
- self.assertEqual("resource-id-43", sample_item.resource_id)
- self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81])
- self.assertEqual("cpu_util", sample_item.counter_name)
-
- def test_query_with_double_negation(self):
- self._create_samples()
- filter_expr = {
- "and":
- [{"=": {"counter_name": "cpu_util"}},
- {"not":
- {"or":
- [{"or":
- [{"=": {"resource_id": "resource-id-42"}},
- {"=": {"resource_id": "resource-id-44"}}]},
- {"and": [{"not": {"<=": {"counter_volume": 0.4}}},
- {"<": {"counter_volume": 0.8}}]}]}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(4, len(results))
- for sample_item in results:
- self.assertEqual("resource-id-43", sample_item.resource_id)
- self.assertIn(sample_item.counter_volume, [0.39, 0.4, 0.8, 0.81])
- self.assertEqual("cpu_util", sample_item.counter_name)
-
- def test_query_negate_not_equal(self):
- self._create_samples()
- filter_expr = {"not": {"!=": {"resource_id": "resource-id-43"}}}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(6, len(results))
- for sample_item in results:
- self.assertEqual("resource-id-43", sample_item.resource_id)
-
- def test_query_negated_in_op(self):
- self._create_samples()
- filter_expr = {
- "and": [{"not": {"in": {"counter_volume": [0.39, 0.4, 0.79]}}},
- {"=": {"resource_id": "resource-id-42"}}]}
-
- results = list(self.conn.query_samples(filter_expr=filter_expr))
-
- self.assertEqual(3, len(results))
- for sample_item in results:
- self.assertIn(sample_item.counter_volume,
- [0.41, 0.8, 0.81])
-
-
-class StatisticsTest(DBTestBase):
- def prepare_data(self):
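- # Three batches of three samples each, all timestamped 2012-09-25:
- # volume.size (GiB) for user-id/project1/resource-id, volume.size
- # (GiB) for user-5/project2/resource-6, and memory (MB) for that
- # same user-5/project2/resource-6.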
- for i in range(3):
- c = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.counter',
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret',
- )
- self.conn.record_metering_data(msg)
- for i in range(3):
- c = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 8 + i,
- 'user-5',
- 'project2',
- 'resource-6',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.counter',
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret',
- )
- self.conn.record_metering_data(msg)
- for i in range(3):
- c = sample.Sample(
- 'memory',
- 'gauge',
- 'MB',
- 8 + i,
- 'user-5',
- 'project2',
- 'resource-6',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={},
- source='test',
- )
- msg = utils.meter_message_from_counter(
- c,
- secret='not-so-secret',
- )
- self.conn.record_metering_data(msg)
-
- def test_by_meter(self):
- f = storage.SampleFilter(
- meter='memory'
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32)
- - datetime.datetime(2012, 9, 25, 10, 30)).seconds,
- results.duration)
- self.assertEqual(3, results.count)
- self.assertEqual('MB', results.unit)
- self.assertEqual(8, results.min)
- self.assertEqual(10, results.max)
- self.assertEqual(27, results.sum)
- self.assertEqual(9, results.avg)
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30),
- results.period_start)
- self.assertEqual(datetime.datetime(2012, 9, 25, 12, 32),
- results.period_end)
-
- def test_by_user(self):
- f = storage.SampleFilter(
- user='user-5',
- meter='volume.size',
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32)
- - datetime.datetime(2012, 9, 25, 10, 30)).seconds,
- results.duration)
- self.assertEqual(3, results.count)
- self.assertEqual('GiB', results.unit)
- self.assertEqual(8, results.min)
- self.assertEqual(10, results.max)
- self.assertEqual(27, results.sum)
- self.assertEqual(9, results.avg)
-
- def test_no_period_in_query(self):
- f = storage.SampleFilter(
- user='user-5',
- meter='volume.size',
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertEqual(0, results.period)
-
- def test_period_is_int(self):
- f = storage.SampleFilter(
- meter='volume.size',
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertIs(int, type(results.period))
- self.assertEqual(6, results.count)
-
- def test_by_user_period(self):
- f = storage.SampleFilter(
- user='user-5',
- meter='volume.size',
- start_timestamp='2012-09-25T10:28:00',
- )
- results = list(self.conn.get_meter_statistics(f, period=7200))
- self.assertEqual(2, len(results))
- self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28),
- datetime.datetime(2012, 9, 25, 12, 28)]),
- set(r.period_start for r in results))
- self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28),
- datetime.datetime(2012, 9, 25, 14, 28)]),
- set(r.period_end for r in results))
- r = results[0]
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28),
- r.period_start)
- self.assertEqual(2, r.count)
- self.assertEqual('GiB', r.unit)
- self.assertEqual(8.5, r.avg)
- self.assertEqual(8, r.min)
- self.assertEqual(9, r.max)
- self.assertEqual(17, r.sum)
- self.assertEqual(7200, r.period)
- self.assertIsInstance(r.period, int)
- expected_end = r.period_start + datetime.timedelta(seconds=7200)
- self.assertEqual(expected_end, r.period_end)
- self.assertEqual(3660, r.duration)
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30),
- r.duration_start)
- self.assertEqual(datetime.datetime(2012, 9, 25, 11, 31),
- r.duration_end)
-
- def test_by_user_period_with_timezone(self):
- dates = [
- '2012-09-25T00:28:00-10:00',
- '2012-09-25T01:28:00-09:00',
- '2012-09-25T02:28:00-08:00',
- '2012-09-25T03:28:00-07:00',
- '2012-09-25T04:28:00-06:00',
- '2012-09-25T05:28:00-05:00',
- '2012-09-25T06:28:00-04:00',
- '2012-09-25T07:28:00-03:00',
- '2012-09-25T08:28:00-02:00',
- '2012-09-25T09:28:00-01:00',
- '2012-09-25T10:28:00Z',
- '2012-09-25T11:28:00+01:00',
- '2012-09-25T12:28:00+02:00',
- '2012-09-25T13:28:00+03:00',
- '2012-09-25T14:28:00+04:00',
- '2012-09-25T15:28:00+05:00',
- '2012-09-25T16:28:00+06:00',
- '2012-09-25T17:28:00+07:00',
- '2012-09-25T18:28:00+08:00',
- '2012-09-25T19:28:00+09:00',
- '2012-09-25T20:28:00+10:00',
- '2012-09-25T21:28:00+11:00',
- '2012-09-25T22:28:00+12:00',
- ]
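- # Every entry above is the same instant, 2012-09-25T10:28:00 UTC,
- # expressed with a different UTC offset, so each iteration must
- # yield identical statistics.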
- for date in dates:
- f = storage.SampleFilter(
- user='user-5',
- meter='volume.size',
- start_timestamp=date
- )
- results = list(self.conn.get_meter_statistics(f, period=7200))
- self.assertEqual(2, len(results))
- self.assertEqual(set([datetime.datetime(2012, 9, 25, 10, 28),
- datetime.datetime(2012, 9, 25, 12, 28)]),
- set(r.period_start for r in results))
- self.assertEqual(set([datetime.datetime(2012, 9, 25, 12, 28),
- datetime.datetime(2012, 9, 25, 14, 28)]),
- set(r.period_end for r in results))
-
- def test_by_user_period_start_end(self):
- f = storage.SampleFilter(
- user='user-5',
- meter='volume.size',
- start_timestamp='2012-09-25T10:28:00',
- end_timestamp='2012-09-25T11:28:00',
- )
- results = list(self.conn.get_meter_statistics(f, period=1800))
- self.assertEqual(1, len(results))
- r = results[0]
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 28),
- r.period_start)
- self.assertEqual(1, r.count)
- self.assertEqual('GiB', r.unit)
- self.assertEqual(8, r.avg)
- self.assertEqual(8, r.min)
- self.assertEqual(8, r.max)
- self.assertEqual(8, r.sum)
- self.assertEqual(1800, r.period)
- self.assertEqual(r.period_start + datetime.timedelta(seconds=1800),
- r.period_end)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30),
- r.duration_start)
- self.assertEqual(datetime.datetime(2012, 9, 25, 10, 30),
- r.duration_end)
-
- def test_by_project(self):
- f = storage.SampleFilter(
- meter='volume.size',
- resource='resource-id',
- start_timestamp='2012-09-25T11:30:00',
- end_timestamp='2012-09-25T11:32:00',
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertEqual(0, results.duration)
- self.assertEqual(1, results.count)
- self.assertEqual('GiB', results.unit)
- self.assertEqual(6, results.min)
- self.assertEqual(6, results.max)
- self.assertEqual(6, results.sum)
- self.assertEqual(6, results.avg)
-
- def test_one_resource(self):
- f = storage.SampleFilter(
- user='user-id',
- meter='volume.size',
- )
- results = list(self.conn.get_meter_statistics(f))[0]
- self.assertEqual((datetime.datetime(2012, 9, 25, 12, 32)
- - datetime.datetime(2012, 9, 25, 10, 30)).seconds,
- results.duration)
- self.assertEqual(3, results.count)
- self.assertEqual('GiB', results.unit)
- self.assertEqual(5, results.min)
- self.assertEqual(7, results.max)
- self.assertEqual(18, results.sum)
- self.assertEqual(6, results.avg)
-
- def test_with_no_sample(self):
- f = storage.SampleFilter(
- user='user-not-exists',
- meter='volume.size',
- )
- results = list(self.conn.get_meter_statistics(f, period=1800))
- self.assertEqual([], results)
-
-
-class StatisticsGroupByTest(DBTestBase):
- def prepare_data(self):
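- # Seven cumulative 'instance' samples (unit 's') spread across three
- # users, two projects, three resources and three sources, each tagged
- # with flavor/event/instance_type metadata, all on 2013-08-01.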
- test_sample_data = (
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
- 'source': 'source-2', 'metadata_instance_type': '84'},
- {'volume': 2, 'user': 'user-1', 'project': 'project-2',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
- 'source': 'source-2', 'metadata_instance_type': '83'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-1', 'metadata_instance_type': '82'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1', 'metadata_instance_type': '82'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1', 'metadata_instance_type': '84'},
- {'volume': 4, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1', 'metadata_instance_type': '82'},
- {'volume': 4, 'user': 'user-3', 'project': 'project-1',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-3', 'metadata_instance_type': '83'},
- )
-
- for test_sample in test_sample_data:
- c = sample.Sample(
- 'instance',
- sample.TYPE_CUMULATIVE,
- unit='s',
- volume=test_sample['volume'],
- user_id=test_sample['user'],
- project_id=test_sample['project'],
- resource_id=test_sample['resource'],
- timestamp=datetime.datetime(*test_sample['timestamp']),
- resource_metadata={'flavor': test_sample['metadata_flavor'],
- 'event': test_sample['metadata_event'],
- 'instance_type':
- test_sample['metadata_instance_type']},
- source=test_sample['source'],
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_group_by_user(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f, groupby=['user_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['user_id']), groupby_keys_set)
- self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'user_id': 'user-1'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-2'}:
- self.assertEqual(4, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(8, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
-
- def test_group_by_resource(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['resource_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
- groupby_vals_set)
- for r in results:
- if r.groupby == {'resource_id': 'resource-1'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'resource_id': 'resource-2'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
-
- def test_group_by_project(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
- self.assertEqual(2, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'project_id': 'project-1'}:
- self.assertEqual(5, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(10, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'project_id': 'project-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(3, r.avg)
-
- def test_group_by_source(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f, groupby=['source']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['source']), groupby_keys_set)
- self.assertEqual(set(['source-1', 'source-2', 'source-3']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'source': 'source-1'}:
- self.assertEqual(4, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(8, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'source': 'source-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'source': 'source-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
-
- def test_group_by_unknown_field(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- # NOTE(terriyu): The MongoDB get_meter_statistics() returns a list
- # whereas the SQLAlchemy get_meter_statistics() returns a generator.
- # You have to apply list() to the SQLAlchemy generator to get it to
- # throw an error. The MongoDB get_meter_statistics() will throw an
- # error before list() is called. By using lambda, we can cover both
- # MongoDB and SQLAlchemy in a single test.
- self.assertRaises(
- ceilometer.NotImplementedError,
- lambda: list(self.conn.get_meter_statistics(f, groupby=['wtf']))
- )
-
- def test_group_by_metadata(self):
- # This test checks grouping by a single metadata field (currently
- # resource_metadata.instance_type is the only one used for grouping).
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(
- self.conn.get_meter_statistics(
- f, groupby=['resource_metadata.instance_type']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_metadata.instance_type']),
- groupby_keys_set)
- self.assertEqual(set(['82', '83', '84']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'resource_metadata.instance_type': '82'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'resource_metadata.instance_type': '83'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(3, r.avg)
- elif r.groupby == {'resource_metadata.instance_type': '84'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
-
- def test_group_by_multiple_regular(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['user_id',
- 'resource_id']))
- self.assertEqual(4, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set)
- self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1',
- 'resource-2', 'resource-3']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-2',
- 'resource_id': 'resource-1'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-2',
- 'resource_id': 'resource-2'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-3',
- 'resource_id': 'resource-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- else:
- self.assertNotEqual({'user_id': 'user-1',
- 'resource_id': 'resource-2'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-1',
- 'resource_id': 'resource-3'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-2',
- 'resource_id': 'resource-3'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-3',
- 'resource_id': 'resource-1'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-3',
- 'resource_id': 'resource-2'},
- r.groupby)
-
- def test_group_by_multiple_metadata(self):
- # TODO(terriyu): test_group_by_multiple_metadata needs to be
- # implemented.
- # This test should check grouping by multiple metadata fields.
- pass
-
- def test_group_by_multiple_regular_metadata(self):
- # This test checks grouping by a combination of regular and
- # metadata fields.
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(
- self.conn.get_meter_statistics(
- f, groupby=['user_id', 'resource_metadata.instance_type']))
- self.assertEqual(5, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['user_id', 'resource_metadata.instance_type']),
- groupby_keys_set)
- self.assertEqual(set(['user-1', 'user-2', 'user-3', '82',
- '83', '84']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'user_id': 'user-1',
- 'resource_metadata.instance_type': '83'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-1',
- 'resource_metadata.instance_type': '84'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-2',
- 'resource_metadata.instance_type': '82'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-2',
- 'resource_metadata.instance_type': '84'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'user_id': 'user-3',
- 'resource_metadata.instance_type': '83'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- else:
- self.assertNotEqual({'user_id': 'user-1',
- 'resource_metadata.instance_type': '82'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-2',
- 'resource_metadata.instance_type': '83'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-3',
- 'resource_metadata.instance_type': '82'},
- r.groupby)
- self.assertNotEqual({'user_id': 'user-3',
- 'resource_metadata.instance_type': '84'},
- r.groupby)
-
- def test_group_by_with_query_filter(self):
- f = storage.SampleFilter(
- meter='instance',
- project='project-1',
- )
- results = list(self.conn.get_meter_statistics(
- f,
- groupby=['resource_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'resource_id': 'resource-1'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'resource_id': 'resource-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- elif r.groupby == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
-
- def test_group_by_metadata_with_query_filter(self):
- # This test checks grouping by a metadata field in combination
- # with a query filter.
- f = storage.SampleFilter(
- meter='instance',
- project='project-1',
- )
- results = list(self.conn.get_meter_statistics(
- f,
- groupby=['resource_metadata.instance_type']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_metadata.instance_type']),
- groupby_keys_set)
- self.assertEqual(set(['82', '83', '84']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'resource_metadata.instance_type': '82'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- elif r.groupby == {'resource_metadata.instance_type': '83'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- elif r.groupby == {'resource_metadata.instance_type': '84'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
-
- def test_group_by_with_query_filter_multiple(self):
- f = storage.SampleFilter(
- meter='instance',
- user='user-2',
- source='source-1',
- )
- results = list(self.conn.get_meter_statistics(
- f,
- groupby=['project_id', 'resource_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2',
- 'resource-1', 'resource-2']),
- groupby_vals_set)
-
- for r in results:
- if r.groupby == {'project_id': 'project-1',
- 'resource_id': 'resource-1'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'project_id': 'project-1',
- 'resource_id': 'resource-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- elif r.groupby == {'project_id': 'project-2',
- 'resource_id': 'resource-2'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- else:
- self.assertNotEqual({'project_id': 'project-2',
- 'resource_id': 'resource-1'},
- r.groupby)
-
- def test_group_by_metadata_with_query_filter_multiple(self):
- # TODO(terriyu): test_group_by_metadata_with_query_filter_multiple
- # needs to be implemented.
- # This test should check grouping by multiple metadata fields in
- # combination with a query filter.
- pass
-
- def test_group_by_with_period(self):
- f = storage.SampleFilter(
- meter='instance',
- )
- results = list(self.conn.get_meter_statistics(f,
- period=7200,
- groupby=['project_id']))
- self.assertEqual(4, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
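- # With no start_timestamp, period boundaries are anchored at the
- # earliest matching sample (10:11), giving 2-hour buckets at 10:11,
- # 12:11, 14:11 and 16:11; empty buckets (12:11) are not returned.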
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
- datetime.datetime(2013, 8, 1, 14, 11),
- datetime.datetime(2013, 8, 1, 16, 11)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(4260, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(4260, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-2'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-2'} and
- r.period_start == datetime.datetime(2013, 8, 1, 16, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11),
- r.period_end)
- else:
- self.assertNotEqual([{'project_id': 'project-1'},
- datetime.datetime(2013, 8, 1, 16, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 10, 11)],
- [r.groupby, r.period_start])
-
- def test_group_by_metadata_with_period(self):
- # This test checks grouping by metadata fields in combination
- # with period grouping.
- f = storage.SampleFilter(
- meter='instance')
-
- results = list(self.conn.get_meter_statistics(f, period=7200,
- groupby=['resource_metadata.instance_type']))
- self.assertEqual(5, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_metadata.instance_type']),
- groupby_keys_set)
- self.assertEqual(set(['82', '83', '84']), groupby_vals_set)
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
- datetime.datetime(2013, 8, 1, 14, 11),
- datetime.datetime(2013, 8, 1, 16, 11)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'resource_metadata.instance_type': '82'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- self.assertEqual(1740, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '82'} and
- r.period_start == datetime.datetime(2013, 8, 1, 16, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '83'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '83'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '84'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(4260, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- else:
- self.assertNotEqual([{'resource_metadata.instance_type': '82'},
- datetime.datetime(2013, 8, 1, 14, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'resource_metadata.instance_type': '83'},
- datetime.datetime(2013, 8, 1, 16, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'resource_metadata.instance_type': '84'},
- datetime.datetime(2013, 8, 1, 10, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'resource_metadata.instance_type': '84'},
- datetime.datetime(2013, 8, 1, 16, 11)],
- [r.groupby, r.period_start])
-
- def test_group_by_with_query_filter_and_period(self):
- f = storage.SampleFilter(
- meter='instance',
- source='source-1',
- )
- results = list(self.conn.get_meter_statistics(f,
- period=7200,
- groupby=['project_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
- datetime.datetime(2013, 8, 1, 14, 11),
- datetime.datetime(2013, 8, 1, 16, 11)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- self.assertEqual(1740, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-2'} and
- r.period_start == datetime.datetime(2013, 8, 1, 16, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 18, 11),
- r.period_end)
- else:
- self.assertNotEqual([{'project_id': 'project-1'},
- datetime.datetime(2013, 8, 1, 16, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 10, 11)],
- [r.groupby, r.period_start])
-
- def test_group_by_metadata_with_query_filter_and_period(self):
- # This test checks grouping with metadata fields in combination
- # with a query filter and period grouping.
- f = storage.SampleFilter(
- meter='instance',
- project='project-1',
- )
- results = list(
- self.conn.get_meter_statistics(
- f, period=7200, groupby=['resource_metadata.instance_type']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_metadata.instance_type']),
- groupby_keys_set)
- self.assertEqual(set(['82', '83', '84']), groupby_vals_set)
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11),
- datetime.datetime(2013, 8, 1, 14, 11)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'resource_metadata.instance_type': '82'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- self.assertEqual(1740, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '83'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 11)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 11, 22),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 11),
- r.period_end)
- elif (r.groupby == {'resource_metadata.instance_type': '84'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 11)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(4260, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 11),
- r.period_end)
- else:
- self.assertNotEqual([{'resource_metadata.instance_type': '82'},
- datetime.datetime(2013, 8, 1, 14, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'resource_metadata.instance_type': '83'},
- datetime.datetime(2013, 8, 1, 14, 11)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'resource_metadata.instance_type': '84'},
- datetime.datetime(2013, 8, 1, 10, 11)],
- [r.groupby, r.period_start])
-
- def test_group_by_start_timestamp_after(self):
- f = storage.SampleFilter(
- meter='instance',
- start_timestamp=datetime.datetime(2013, 8, 1, 17, 28, 1),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
-
- self.assertEqual([], results)
-
- def test_group_by_end_timestamp_before(self):
- f = storage.SampleFilter(
- meter='instance',
- end_timestamp=datetime.datetime(2013, 8, 1, 10, 10, 59),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
-
- self.assertEqual([], results)
-
- def test_group_by_start_timestamp(self):
- f = storage.SampleFilter(
- meter='instance',
- start_timestamp=datetime.datetime(2013, 8, 1, 14, 58),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
- self.assertEqual(2, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'project_id': 'project-1'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'project_id': 'project-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(3, r.avg)
-
- def test_group_by_end_timestamp(self):
- f = storage.SampleFilter(
- meter='instance',
- end_timestamp=datetime.datetime(2013, 8, 1, 11, 45),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
- self.assertEqual(1, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'project_id': 'project-1'}:
- self.assertEqual(3, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(2, r.avg)
-
- def test_group_by_start_end_timestamp(self):
- f = storage.SampleFilter(
- meter='instance',
- start_timestamp=datetime.datetime(2013, 8, 1, 8, 17, 3),
- end_timestamp=datetime.datetime(2013, 8, 1, 23, 59, 59),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['project_id']))
- self.assertEqual(2, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'project_id': 'project-1'}:
- self.assertEqual(5, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(10, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'project_id': 'project-2'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(6, r.sum)
- self.assertEqual(3, r.avg)
-
- def test_group_by_start_end_timestamp_with_query_filter(self):
- f = storage.SampleFilter(
- meter='instance',
- project='project-1',
- start_timestamp=datetime.datetime(2013, 8, 1, 11, 1),
- end_timestamp=datetime.datetime(2013, 8, 1, 20, 0),
- )
- results = list(self.conn.get_meter_statistics(f,
- groupby=['resource_id']))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set)
-
- for r in results:
- if r.groupby == {'resource_id': 'resource-1'}:
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(2, r.avg)
- elif r.groupby == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
-
- def test_group_by_start_end_timestamp_with_period(self):
- f = storage.SampleFilter(
- meter='instance',
- start_timestamp=datetime.datetime(2013, 8, 1, 14, 0),
- end_timestamp=datetime.datetime(2013, 8, 1, 17, 0),
- )
- results = list(self.conn.get_meter_statistics(f,
- period=3600,
- groupby=['project_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 14, 0),
- datetime.datetime(2013, 8, 1, 15, 0),
- datetime.datetime(2013, 8, 1, 16, 0)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 0)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_end)
- self.assertEqual(3600, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 0),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 16, 0)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 10),
- r.duration_end)
- self.assertEqual(3600, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 0),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-2'} and
- r.period_start == datetime.datetime(2013, 8, 1, 15, 0)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 15, 37),
- r.duration_end)
- self.assertEqual(3600, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0),
- r.period_end)
- else:
- self.assertNotEqual([{'project_id': 'project-1'},
- datetime.datetime(2013, 8, 1, 15, 0)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 14, 0)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 16, 0)],
- [r.groupby, r.period_start])
-
- def test_group_by_start_end_timestamp_with_query_filter_and_period(self):
- f = storage.SampleFilter(
- meter='instance',
- source='source-1',
- start_timestamp=datetime.datetime(2013, 8, 1, 10, 0),
- end_timestamp=datetime.datetime(2013, 8, 1, 18, 0),
- )
- results = list(self.conn.get_meter_statistics(f,
- period=7200,
- groupby=['project_id']))
- self.assertEqual(3, len(results))
- groupby_list = [r.groupby for r in results]
- groupby_keys_set = set(x for sub_dict in groupby_list
- for x in sub_dict.keys())
- groupby_vals_set = set(x for sub_dict in groupby_list
- for x in sub_dict.values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set([r.period_start for r in results])
- period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 0),
- datetime.datetime(2013, 8, 1, 14, 0),
- datetime.datetime(2013, 8, 1, 16, 0)])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in results:
- if (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 10, 0)):
- self.assertEqual(2, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(1, r.min)
- self.assertEqual(1, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(1, r.avg)
- self.assertEqual(1740, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 11),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 10, 40),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 12, 0),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-1'} and
- r.period_start == datetime.datetime(2013, 8, 1, 14, 0)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(2, r.min)
- self.assertEqual(2, r.max)
- self.assertEqual(2, r.sum)
- self.assertEqual(2, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 14, 59),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 16, 0),
- r.period_end)
- elif (r.groupby == {'project_id': 'project-2'} and
- r.period_start == datetime.datetime(2013, 8, 1, 16, 0)):
- self.assertEqual(1, r.count)
- self.assertEqual('s', r.unit)
- self.assertEqual(4, r.min)
- self.assertEqual(4, r.max)
- self.assertEqual(4, r.sum)
- self.assertEqual(4, r.avg)
- self.assertEqual(0, r.duration)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_start)
- self.assertEqual(datetime.datetime(2013, 8, 1, 17, 28),
- r.duration_end)
- self.assertEqual(7200, r.period)
- self.assertEqual(datetime.datetime(2013, 8, 1, 18, 0),
- r.period_end)
- else:
- self.assertNotEqual([{'project_id': 'project-1'},
- datetime.datetime(2013, 8, 1, 16, 0)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 10, 0)],
- [r.groupby, r.period_start])
- self.assertNotEqual([{'project_id': 'project-2'},
- datetime.datetime(2013, 8, 1, 14, 0)],
- [r.groupby, r.period_start])
-
-
-class CounterDataTypeTest(DBTestBase):
- def prepare_data(self):
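- # Three cumulative samples exercising counter_volume storage limits:
- # a large positive integer, the matching negative integer, and a
- # high-precision float.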
- c = sample.Sample(
- 'dummyBigCounter',
- sample.TYPE_CUMULATIVE,
- unit='',
- volume=337203685477580,
- user_id='user-id',
- project_id='project-id',
- resource_id='resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={},
- source='test-1',
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
-
- self.conn.record_metering_data(msg)
-
- c = sample.Sample(
- 'dummySmallCounter',
- sample.TYPE_CUMULATIVE,
- unit='',
- volume=-337203685477580,
- user_id='user-id',
- project_id='project-id',
- resource_id='resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={},
- source='test-1',
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- c = sample.Sample(
- 'floatCounter',
- sample.TYPE_CUMULATIVE,
- unit='',
- volume=1938495037.53697,
- user_id='user-id',
- project_id='project-id',
- resource_id='resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={},
- source='test-1',
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_storage_can_handle_large_values(self):
- f = storage.SampleFilter(
- meter='dummyBigCounter',
- )
- results = list(self.conn.get_samples(f))
- self.assertEqual(337203685477580, results[0].counter_volume)
- f = storage.SampleFilter(
- meter='dummySmallCounter',
- )
- results = list(self.conn.get_samples(f))
- observed_num = int(results[0].counter_volume)
- self.assertEqual(-337203685477580, observed_num)
-
- def test_storage_can_handle_float_values(self):
- f = storage.SampleFilter(
- meter='floatCounter',
- )
- results = list(self.conn.get_samples(f))
- self.assertEqual(1938495037.53697, results[0].counter_volume)
-
-
-class BigIntegerTest(tests_db.TestBase):
- def test_metadata_bigint(self):
- metadata = {'bigint': 99999999999999}
- s = sample.Sample(name='name',
- type=sample.TYPE_GAUGE,
- unit='B',
- volume=1,
- user_id='user-id',
- project_id='project-id',
- resource_id='resource-id',
- timestamp=datetime.datetime.utcnow(),
- resource_metadata=metadata)
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret)
- self.conn.record_metering_data(msg)
-
-
-@tests_db.run_with('mongodb')
-class MongoAutoReconnectTest(DBTestBase):
- def setUp(self):
- super(MongoAutoReconnectTest, self).setUp()
- self.CONF.set_override('retry_interval', 0, group='database')
-
- def test_mongo_client(self):
- self.assertIsInstance(self.conn.conn.conn,
- pymongo.MongoClient)
-
- def test_mongo_cursor_next(self):
- expected_first_sample_timestamp = datetime.datetime(2012, 7, 2, 10, 39)
- raise_exc = [False, True]
- method = self.conn.db.resource.find().cursor.next
- with mock.patch('pymongo.cursor.Cursor.next',
- mock.Mock()) as mock_next:
- mock_next.side_effect = self.create_side_effect(
- method, pymongo.errors.AutoReconnect, raise_exc)
- resource = self.conn.db.resource.find().next()
- self.assertEqual(expected_first_sample_timestamp,
- resource['first_sample_timestamp'])
-
- def test_mongo_insert(self):
- raise_exc = [False, True]
- method = self.conn.db.meter.insert
-
- with mock.patch('pymongo.collection.Collection.insert',
- mock.Mock(return_value=method)) as mock_insert:
- mock_insert.side_effect = self.create_side_effect(
- method, pymongo.errors.AutoReconnect, raise_exc)
- mock_insert.__name__ = 'insert'
- self.create_and_store_sample(
- timestamp=datetime.datetime(2014, 10, 15, 14, 39),
- source='test-proxy')
- meters = list(self.conn.db.meter.find())
- self.assertEqual(12, len(meters))
-
- def test_mongo_find_and_modify(self):
- raise_exc = [False, True]
- method = self.conn.db.resource.find_and_modify
-
- with mock.patch('pymongo.collection.Collection.find_and_modify',
- mock.Mock()) as mock_fam:
- mock_fam.side_effect = self.create_side_effect(
- method, pymongo.errors.AutoReconnect, raise_exc)
- mock_fam.__name__ = 'find_and_modify'
- self.create_and_store_sample(
- timestamp=datetime.datetime(2014, 10, 15, 14, 39),
- source='test-proxy')
- data = self.conn.db.resource.find(
- {'last_sample_timestamp':
- datetime.datetime(2014, 10, 15, 14, 39)})[0]['source']
- self.assertEqual('test-proxy', data)
-
- def test_mongo_update(self):
- raise_exc = [False, True]
- method = self.conn.db.resource.update
-
- with mock.patch('pymongo.collection.Collection.update',
- mock.Mock()) as mock_update:
- mock_update.side_effect = self.create_side_effect(
- method, pymongo.errors.AutoReconnect, raise_exc)
- mock_update.__name__ = 'update'
- self.create_and_store_sample(
- timestamp=datetime.datetime(2014, 10, 15, 17, 39),
- source='test-proxy-update')
- data = self.conn.db.resource.find(
- {'last_sample_timestamp':
- datetime.datetime(2014, 10, 15, 17, 39)})[0]['source']
- self.assertEqual('test-proxy-update', data)
-
-
-@tests_db.run_with('mongodb')
-class MongoTimeToLiveTest(DBTestBase):
-
- def test_ensure_index(self):
- self.CONF.set_override('metering_time_to_live', 5, group='database')
- self.conn.upgrade()
- self.assertEqual(5, self.conn.db.resource.index_information()
- ['resource_ttl']['expireAfterSeconds'])
- self.assertEqual(5, self.conn.db.meter.index_information()
- ['meter_ttl']['expireAfterSeconds'])
-
- def test_modification_of_index(self):
- self.CONF.set_override('metering_time_to_live', 5, group='database')
- self.conn.upgrade()
- self.CONF.set_override('metering_time_to_live', 15, group='database')
- self.conn.upgrade()
- self.assertEqual(15, self.conn.db.resource.index_information()
- ['resource_ttl']['expireAfterSeconds'])
- self.assertEqual(15, self.conn.db.meter.index_information()
- ['meter_ttl']['expireAfterSeconds'])
-
-
-class TestRecordUnicodeSamples(DBTestBase):
- def prepare_data(self):
- self.msgs = []
- self.msgs.append(self.create_and_store_sample(
- name=u'meter.accent\xe9\u0437',
- metadata={u"metadata_key\xe9\u0437": "test",
- u"metadata_key": u"test\xe9\u0437"},
- ))
-
- def test_unicode_sample(self):
- f = storage.SampleFilter()
- results = list(self.conn.get_samples(f))
- self.assertEqual(1, len(results))
- expected = self.msgs[0]
- actual = results[0].as_dict()
- self.assertEqual(expected['counter_name'], actual['counter_name'])
- self.assertEqual(expected['resource_metadata'],
- actual['resource_metadata'])
-
-
-@tests_db.run_with('mongodb')
-class TestBatchRecordingMetering(tests_db.TestBase):
- def test_batch_recording_metering_data(self):
- self.sample_dicts = []
- for i in range(1, 10):
- s = sample.Sample(name='sample-%s' % i,
- type=sample.TYPE_CUMULATIVE,
- unit='',
- volume=i * 0.1,
- user_id='user-id',
- project_id='project-id',
- resource_id='resource-%s' % str(i % 3),
- timestamp=datetime.datetime(2016, 6, 1, 15, i),
- resource_metadata={'fake_meta': i},
- source=None)
- s_dict = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret)
- self.sample_dicts.append(s_dict)
- self.conn.record_metering_data_batch(self.sample_dicts)
- results = list(self.conn.query_samples())
- self.assertEqual(len(self.sample_dicts), len(results))
- for sample_item in results:
- d = sample_item.as_dict()
- del d['recorded_at']
- d['monotonic_time'] = None
- self.assertIn(d, self.sample_dicts)
-
- resources = list(self.conn.get_resources())
- self.assertEqual(3, len(resources))
- self.assertEqual('resource-0', resources[0].resource_id)
- self.assertEqual({'fake_meta': 9}, resources[0].metadata)
- self.assertEqual('resource-2', resources[1].resource_id)
- self.assertEqual({'fake_meta': 8}, resources[1].metadata)
- self.assertEqual('resource-1', resources[2].resource_id)
- self.assertEqual({'fake_meta': 7}, resources[2].metadata)
diff --git a/ceilometer/tests/mocks.py b/ceilometer/tests/mocks.py
deleted file mode 100644
index 5e33b08f..00000000
--- a/ceilometer/tests/mocks.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-
-import happybase
-
-
-class MockHBaseTable(happybase.Table):
-
- def __init__(self, name, connection, data_prefix):
- # data_prefix is added to all rows which are written
- # in this test. It allows to divide data from different tests
- self.data_prefix = data_prefix
- # We create happybase Table with prefix from
- # CEILOMETER_TEST_HBASE_TABLE_PREFIX
- prefix = os.getenv("CEILOMETER_TEST_HBASE_TABLE_PREFIX", 'test')
- separator = os.getenv(
- "CEILOMETER_TEST_HBASE_TABLE_PREFIX_SEPARATOR", '_')
- super(MockHBaseTable, self).__init__(
- "%s%s%s" % (prefix, separator, name),
- connection)
-
- def put(self, row, *args, **kwargs):
- row = self.data_prefix + row
- return super(MockHBaseTable, self).put(row, *args,
- **kwargs)
-
- def scan(self, row_start=None, row_stop=None, row_prefix=None,
- columns=None, filter=None, timestamp=None,
- include_timestamp=False, batch_size=10, scan_batching=None,
- limit=None, sorted_columns=False):
- # Add data prefix for row parameters
- # row_prefix could not be combined with row_start or row_stop
- if not row_start and not row_stop:
- row_prefix = self.data_prefix + (row_prefix or "")
- row_start = None
- row_stop = None
- elif row_start and not row_stop:
- # Adding data_prefix to row_start and row_stop does not work
- # if it looks like row_start = %data_prefix%foo,
- # row_stop = %data_prefix, because row_start > row_stop
- filter = self._update_filter_row(filter)
- row_start = self.data_prefix + row_start
- else:
- row_start = self.data_prefix + (row_start or "")
- row_stop = self.data_prefix + (row_stop or "")
- gen = super(MockHBaseTable, self).scan(row_start, row_stop,
- row_prefix, columns,
- filter, timestamp,
- include_timestamp, batch_size,
- scan_batching, limit,
- sorted_columns)
- data_prefix_len = len(self.data_prefix)
- # Restore original row format
- for row, data in gen:
- yield (row[data_prefix_len:], data)
-
- def row(self, row, *args, **kwargs):
- row = self.data_prefix + row
- return super(MockHBaseTable, self).row(row, *args, **kwargs)
-
- def delete(self, row, *args, **kwargs):
- row = self.data_prefix + row
- return super(MockHBaseTable, self).delete(row, *args, **kwargs)
-
- def _update_filter_row(self, filter):
- if filter:
- return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter)
- else:
- return "PrefixFilter(%s)" % self.data_prefix
diff --git a/ceilometer/tests/unit/dispatcher/__init__.py b/ceilometer/tests/unit/dispatcher/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/dispatcher/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/dispatcher/test_db.py b/ceilometer/tests/unit/dispatcher/test_db.py
deleted file mode 100644
index 2b69120f..00000000
--- a/ceilometer/tests/unit/dispatcher/test_db.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#
-# Copyright 2013 IBM Corp
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import datetime
-
-import mock
-from oslotest import base
-
-from ceilometer.dispatcher import database
-from ceilometer.publisher import utils
-from ceilometer import service
-
-
-class TestDispatcherDB(base.BaseTestCase):
-
- def setUp(self):
- super(TestDispatcherDB, self).setUp()
- self.CONF = service.prepare_service([], [])
- self.CONF.set_override('connection', 'sqlite://', group='database')
- self.meter_dispatcher = database.MeterDatabaseDispatcher(self.CONF)
-
- def test_valid_message(self):
- msg = {'counter_name': 'test',
- 'resource_id': self.id(),
- 'counter_volume': 1,
- }
- msg['message_signature'] = utils.compute_signature(
- msg, self.CONF.publisher.telemetry_secret,
- )
-
- with mock.patch.object(self.meter_dispatcher.conn,
- 'record_metering_data') as record_metering_data:
- self.meter_dispatcher.record_metering_data(msg)
-
- record_metering_data.assert_called_once_with(msg)
-
- def test_timestamp_conversion(self):
- msg = {'counter_name': 'test',
- 'resource_id': self.id(),
- 'counter_volume': 1,
- 'timestamp': '2012-07-02T13:53:40Z',
- }
- msg['message_signature'] = utils.compute_signature(
- msg, self.CONF.publisher.telemetry_secret,
- )
-
- expected = msg.copy()
- expected['timestamp'] = datetime.datetime(2012, 7, 2, 13, 53, 40)
-
- with mock.patch.object(self.meter_dispatcher.conn,
- 'record_metering_data') as record_metering_data:
- self.meter_dispatcher.record_metering_data(msg)
-
- record_metering_data.assert_called_once_with(expected)
-
- def test_timestamp_tzinfo_conversion(self):
- msg = {'counter_name': 'test',
- 'resource_id': self.id(),
- 'counter_volume': 1,
- 'timestamp': '2012-09-30T15:31:50.262-08:00',
- }
- msg['message_signature'] = utils.compute_signature(
- msg, self.CONF.publisher.telemetry_secret,
- )
-
- expected = msg.copy()
- expected['timestamp'] = datetime.datetime(2012, 9, 30, 23,
- 31, 50, 262000)
-
- with mock.patch.object(self.meter_dispatcher.conn,
- 'record_metering_data') as record_metering_data:
- self.meter_dispatcher.record_metering_data(msg)
-
- record_metering_data.assert_called_once_with(expected)
diff --git a/ceilometer/tests/unit/dispatcher/test_dispatcher.py b/ceilometer/tests/unit/dispatcher/test_dispatcher.py
deleted file mode 100644
index fb9b0414..00000000
--- a/ceilometer/tests/unit/dispatcher/test_dispatcher.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2015 Intel Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-from oslo_config import fixture
-
-from ceilometer import dispatcher
-from ceilometer import service
-from ceilometer.tests import base
-
-
-class FakeMeterDispatcher(dispatcher.MeterDispatcherBase):
- def record_metering_data(self, data):
- pass
-
-
-class TestDispatchManager(base.BaseTestCase):
- def setUp(self):
- super(TestDispatchManager, self).setUp()
- conf = service.prepare_service([], [])
- self.conf = self.useFixture(fixture.Config(conf))
- self.conf.config(meter_dispatchers=['database'],
- event_dispatchers=['database'])
- self.CONF = self.conf.conf
- self.useFixture(fixtures.MockPatch(
- 'ceilometer.dispatcher.database.MeterDatabaseDispatcher',
- new=FakeMeterDispatcher))
-
- def test_load(self):
- sample_mg, event_mg = dispatcher.load_dispatcher_manager(self.CONF)
- self.assertEqual(1, len(list(sample_mg)))
diff --git a/ceilometer/tests/unit/event/test_endpoint.py b/ceilometer/tests/unit/event/test_endpoint.py
index 66c3454c..cedc47a5 100644
--- a/ceilometer/tests/unit/event/test_endpoint.py
+++ b/ceilometer/tests/unit/event/test_endpoint.py
@@ -124,7 +124,6 @@ class TestEventEndpoint(tests_base.BaseTestCase):
def setUp(self):
super(TestEventEndpoint, self).setUp()
self.CONF = service.prepare_service([], [])
- self.CONF.set_override("connection", "log://", group='database')
self.setup_messaging(self.CONF)
self.useFixture(fixtures.MockPatchObject(
diff --git a/ceilometer/tests/unit/storage/__init__.py b/ceilometer/tests/unit/storage/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/storage/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/storage/sqlalchemy/__init__.py b/ceilometer/tests/unit/storage/sqlalchemy/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/storage/sqlalchemy/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/storage/sqlalchemy/test_models.py b/ceilometer/tests/unit/storage/sqlalchemy/test_models.py
deleted file mode 100644
index 18cbd768..00000000
--- a/ceilometer/tests/unit/storage/sqlalchemy/test_models.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#
-# Copyright 2013 Rackspace Hosting
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-
-import mock
-from oslotest import base
-import sqlalchemy
-from sqlalchemy.dialects.mysql import DECIMAL
-from sqlalchemy.types import NUMERIC
-
-from ceilometer.storage.sqlalchemy import models
-from ceilometer import utils
-
-
-class PreciseTimestampTest(base.BaseTestCase):
-
- @staticmethod
- def fake_dialect(name):
- def _type_descriptor_mock(desc):
- if type(desc) == DECIMAL:
- return NUMERIC(precision=desc.precision, scale=desc.scale)
- dialect = mock.MagicMock()
- dialect.name = name
- dialect.type_descriptor = _type_descriptor_mock
- return dialect
-
- def setUp(self):
- super(PreciseTimestampTest, self).setUp()
- self._mysql_dialect = self.fake_dialect('mysql')
- self._postgres_dialect = self.fake_dialect('postgres')
- self._type = models.PreciseTimestamp()
- self._date = datetime.datetime(2012, 7, 2, 10, 44)
-
- def test_load_dialect_impl_mysql(self):
- result = self._type.load_dialect_impl(self._mysql_dialect)
- self.assertEqual(NUMERIC, type(result))
- self.assertEqual(20, result.precision)
- self.assertEqual(6, result.scale)
- self.assertTrue(result.asdecimal)
-
- def test_load_dialect_impl_postgres(self):
- result = self._type.load_dialect_impl(self._postgres_dialect)
- self.assertEqual(sqlalchemy.DateTime, type(result))
-
- def test_process_bind_param_store_decimal_mysql(self):
- expected = utils.dt_to_decimal(self._date)
- result = self._type.process_bind_param(self._date, self._mysql_dialect)
- self.assertEqual(expected, result)
-
- def test_process_bind_param_store_datetime_postgres(self):
- result = self._type.process_bind_param(self._date,
- self._postgres_dialect)
- self.assertEqual(self._date, result)
-
- def test_process_bind_param_store_none_mysql(self):
- result = self._type.process_bind_param(None, self._mysql_dialect)
- self.assertIsNone(result)
-
- def test_process_bind_param_store_none_postgres(self):
- result = self._type.process_bind_param(None,
- self._postgres_dialect)
- self.assertIsNone(result)
-
- def test_process_result_value_datetime_mysql(self):
- dec_value = utils.dt_to_decimal(self._date)
- result = self._type.process_result_value(dec_value,
- self._mysql_dialect)
- self.assertEqual(self._date, result)
-
- def test_process_result_value_datetime_postgres(self):
- result = self._type.process_result_value(self._date,
- self._postgres_dialect)
- self.assertEqual(self._date, result)
-
- def test_process_result_value_none_mysql(self):
- result = self._type.process_result_value(None,
- self._mysql_dialect)
- self.assertIsNone(result)
-
- def test_process_result_value_none_postgres(self):
- result = self._type.process_result_value(None,
- self._postgres_dialect)
- self.assertIsNone(result)
diff --git a/ceilometer/tests/unit/storage/test_base.py b/ceilometer/tests/unit/storage/test_base.py
deleted file mode 100644
index f6b3e989..00000000
--- a/ceilometer/tests/unit/storage/test_base.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright 2013 eNovance
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-import datetime
-import math
-
-from oslotest import base as testbase
-
-from ceilometer.storage import base
-
-
-class BaseTest(testbase.BaseTestCase):
-
- def test_iter_period(self):
- times = list(base.iter_period(
- datetime.datetime(2013, 1, 1, 12, 0),
- datetime.datetime(2013, 1, 1, 13, 0),
- 60))
- self.assertEqual(60, len(times))
- self.assertEqual((datetime.datetime(2013, 1, 1, 12, 10),
- datetime.datetime(2013, 1, 1, 12, 11)), times[10])
- self.assertEqual((datetime.datetime(2013, 1, 1, 12, 21),
- datetime.datetime(2013, 1, 1, 12, 22)), times[21])
-
- def test_iter_period_bis(self):
- times = list(base.iter_period(
- datetime.datetime(2013, 1, 2, 13, 0),
- datetime.datetime(2013, 1, 2, 14, 0),
- 55))
- self.assertEqual(math.ceil(3600 / 55.0), len(times))
- self.assertEqual((datetime.datetime(2013, 1, 2, 13, 9, 10),
- datetime.datetime(2013, 1, 2, 13, 10, 5)),
- times[10])
- self.assertEqual((datetime.datetime(2013, 1, 2, 13, 19, 15),
- datetime.datetime(2013, 1, 2, 13, 20, 10)),
- times[21])
-
- def test_handle_sort_key(self):
- sort_keys_meter = base._handle_sort_key('meter', 'foo')
- self.assertEqual(['foo', 'user_id', 'project_id'], sort_keys_meter)
-
- sort_keys_resource = base._handle_sort_key('resource', 'project_id')
- self.assertEqual(['project_id', 'user_id', 'timestamp'],
- sort_keys_resource)
diff --git a/ceilometer/tests/unit/storage/test_get_connection.py b/ceilometer/tests/unit/storage/test_get_connection.py
deleted file mode 100644
index b44ed652..00000000
--- a/ceilometer/tests/unit/storage/test_get_connection.py
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests for ceilometer/storage/
-"""
-import mock
-from oslotest import base
-
-from ceilometer import service
-from ceilometer import storage
-from ceilometer.storage import impl_log
-from ceilometer.storage import impl_sqlalchemy
-
-import six
-
-
-class EngineTest(base.BaseTestCase):
- def setUp(self):
- super(EngineTest, self).setUp()
- self.CONF = service.prepare_service([], [])
-
- def test_get_connection(self):
- engine = storage.get_connection(self.CONF,
- 'log://localhost')
- self.assertIsInstance(engine, impl_log.Connection)
-
- def test_get_connection_no_such_engine(self):
- try:
- storage.get_connection(self.CONF,
- 'no-such-engine://localhost')
- except RuntimeError as err:
- self.assertIn('no-such-engine', six.text_type(err))
-
-
-class ConnectionRetryTest(base.BaseTestCase):
- def setUp(self):
- super(ConnectionRetryTest, self).setUp()
- self.CONF = service.prepare_service([], [])
-
- def test_retries(self):
- with mock.patch.object(storage, 'get_connection') as retries:
- try:
- self.CONF.set_override("connection", "no-such-engine://",
- group="database")
- self.CONF.set_override("retry_interval", 0.00001,
- group="database")
- storage.get_connection_from_config(self.CONF)
- except RuntimeError:
- self.assertEqual(10, retries.call_count)
-
-
-class ConnectionConfigTest(base.BaseTestCase):
- def setUp(self):
- super(ConnectionConfigTest, self).setUp()
- self.CONF = service.prepare_service([], [])
-
- def test_only_default_url(self):
- self.CONF.set_override("connection", "log://", group="database")
- conn = storage.get_connection_from_config(self.CONF)
- self.assertIsInstance(conn, impl_log.Connection)
-
- def test_two_urls(self):
- self.CONF.set_override("connection", "log://", group="database")
- conn = storage.get_connection_from_config(self.CONF)
- self.assertIsInstance(conn, impl_log.Connection)
-
- def test_sqlalchemy_driver(self):
- self.CONF.set_override("connection", "sqlite+pysqlite://",
- group="database")
- conn = storage.get_connection_from_config(self.CONF)
- self.assertIsInstance(conn, impl_sqlalchemy.Connection)
diff --git a/ceilometer/tests/unit/test_bin.py b/ceilometer/tests/unit/test_bin.py
index 712c7c30..433ab1a5 100644
--- a/ceilometer/tests/unit/test_bin.py
+++ b/ceilometer/tests/unit/test_bin.py
@@ -26,9 +26,7 @@ class BinTestCase(base.BaseTestCase):
def setUp(self):
super(BinTestCase, self).setUp()
content = ("[DEFAULT]\n"
- "transport_url = fake://\n"
- "[database]\n"
- "connection=log://localhost\n")
+ "transport_url = fake://\n")
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
@@ -45,43 +43,6 @@ class BinTestCase(base.BaseTestCase):
"--config-file=%s" % self.tempfile])
self.assertEqual(0, subp.wait())
- def test_run_expirer_ttl_disabled(self):
- subp = subprocess.Popen(['ceilometer-expirer',
- '-d',
- "--config-file=%s" % self.tempfile],
- stdout=subprocess.PIPE)
- stdout, __ = subp.communicate()
- self.assertEqual(0, subp.poll())
- self.assertIn(b"Nothing to clean, database metering "
- b"time to live is disabled", stdout)
-
- def _test_run_expirer_ttl_enabled(self, ttl_name, data_name):
- content = ("[DEFAULT]\n"
- "transport_url = fake://\n"
- "[database]\n"
- "%s=1\n"
- "connection=log://localhost\n" % ttl_name)
- if six.PY3:
- content = content.encode('utf-8')
- self.tempfile = fileutils.write_to_tempfile(content=content,
- prefix='ceilometer',
- suffix='.conf')
- subp = subprocess.Popen(['ceilometer-expirer',
- '-d',
- "--config-file=%s" % self.tempfile],
- stdout=subprocess.PIPE)
- stdout, __ = subp.communicate()
- self.assertEqual(0, subp.poll())
- msg = "Dropping %s data with TTL 1" % data_name
- if six.PY3:
- msg = msg.encode('utf-8')
- self.assertIn(msg, stdout)
-
- def test_run_expirer_ttl_enabled(self):
- self._test_run_expirer_ttl_enabled('metering_time_to_live',
- 'metering')
- self._test_run_expirer_ttl_enabled('time_to_live', 'metering')
-
class BinSendSampleTestCase(base.BaseTestCase):
def setUp(self):
@@ -127,9 +88,7 @@ class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
def test_starting_with_duplication_namespaces(self):
content = ("[DEFAULT]\n"
- "transport_url = fake://\n"
- "[database]\n"
- "connection=log://localhost\n")
+ "transport_url = fake://\n")
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
@@ -155,9 +114,7 @@ class BinCeilometerPollingServiceTestCase(base.BaseTestCase):
def test_polling_namespaces_invalid_value_in_config(self):
content = ("[DEFAULT]\n"
"transport_url = fake://\n"
- "polling_namespaces = ['central']\n"
- "[database]\n"
- "connection=log://localhost\n")
+ "polling_namespaces = ['central']\n")
if six.PY3:
content = content.encode('utf-8')
self.tempfile = fileutils.write_to_tempfile(content=content,
diff --git a/ceilometer/tests/unit/test_notification.py b/ceilometer/tests/unit/test_notification.py
index 993eda7b..064fb447 100644
--- a/ceilometer/tests/unit/test_notification.py
+++ b/ceilometer/tests/unit/test_notification.py
@@ -102,7 +102,6 @@ class TestNotification(tests_base.BaseTestCase):
def setUp(self):
super(TestNotification, self).setUp()
self.CONF = service.prepare_service([], [])
- self.CONF.set_override("connection", "log://", group='database')
self.CONF.set_override("backend_url", "zake://", group="coordination")
self.CONF.set_override("workload_partitioning", True,
group='notification')
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index c3e9acc3..5f62ea74 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -34,7 +34,7 @@
# of Ceilometer (see within for additional settings):
#
# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600.
-# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'gnocchi', 'none')
+# CEILOMETER_BACKEND: Database backend (e.g. 'gnocchi', 'none')
# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz.
# CEILOMETER_EVENT_ALARM: Set to True to enable publisher for event alarming
@@ -62,27 +62,6 @@ function gnocchi_service_url {
echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST/metric"
}
-# _ceilometer_install_mongdb - Install mongodb and python lib.
-function _ceilometer_install_mongodb {
- # Server package is the same on all
- local packages=mongodb-server
-
- if is_fedora; then
- # mongodb client
- packages="${packages} mongodb"
- fi
-
- install_package ${packages}
-
- if is_fedora; then
- restart_service mongod
- else
- restart_service mongodb
- fi
-
- # give time for service to restart
- sleep 5
-}
# _ceilometer_install_redis() - Install the redis server and python lib.
function _ceilometer_install_redis {
@@ -129,15 +108,6 @@ function _ceilometer_prepare_coordination {
fi
}
-# Install required services for storage backends
-function _ceilometer_prepare_storage_backend {
- if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- pip_install_gr pymongo
- _ceilometer_install_mongodb
- fi
-}
-
-
# Install the python modules for inspecting nova virt instances
function _ceilometer_prepare_virt_drivers {
# Only install virt drivers if we're running nova compute
@@ -246,9 +216,6 @@ function _ceilometer_configure_cache_backend {
# Set configuration for storage backend.
function _ceilometer_configure_storage_backend {
-
- inidelete $CEILOMETER_CONF database metering_connection
-
if [ "$CEILOMETER_BACKEND" = 'none' ] ; then
# It's ok for the backend to be 'none', if panko is enabled. We do not
# combine this condition with the outer if statement, so that the else
@@ -256,20 +223,12 @@ function _ceilometer_configure_storage_backend {
if ! is_service_enabled panko-api; then
echo_summary "All Ceilometer backends seems disabled, set \$CEILOMETER_BACKEND to select one."
fi
- elif [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
- iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer)
- elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer
elif [ "$CEILOMETER_BACKEND" = 'gnocchi' ] ; then
sed -i "s/gnocchi:\/\//gnocchi:\/\/?archive_policy=${GNOCCHI_ARCHIVE_POLICY}\&filter_project=gnocchi_swift/" $CEILOMETER_CONF_DIR/event_pipeline.yaml $CEILOMETER_CONF_DIR/pipeline.yaml
else
die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND"
fi
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'mongodb' ]; then
- sed -i 's/gnocchi:\/\//database:\/\//g' $CEILOMETER_CONF_DIR/event_pipeline.yaml $CEILOMETER_CONF_DIR/pipeline.yaml
- fi
-
# configure panko
if is_service_enabled panko-api; then
if ! grep -q 'panko' $CEILOMETER_CONF_DIR/event_pipeline.yaml ; then
@@ -372,10 +331,7 @@ function install_ceilometer {
install_ceilometerclient
case $CEILOMETER_BACKEND in
- mongodb) extra=mongo;;
gnocchi) extra=gnocchi;;
- mysql) extra=mysql;;
- postgresql) extra=postgresql;;
esac
setup_develop $CEILOMETER_DIR $extra
sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR
@@ -399,7 +355,7 @@ function start_ceilometer {
run_process gnocchi-api "$CEILOMETER_BIN_DIR/uwsgi --ini $GNOCCHI_UWSGI_FILE" ""
run_process gnocchi-metricd "$CEILOMETER_BIN_DIR/gnocchi-metricd --config-file $GNOCCHI_CONF"
wait_for_service 30 "$(gnocchi_service_url)"
- $CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-metering-database
+ $CEILOMETER_BIN_DIR/ceilometer-upgrade
fi
run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF"
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index e6ff2b59..b5aa6753 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -33,13 +33,6 @@ source $GRENADE_DIR/functions
# only the first error that occurred.
set -o errexit
-# Save mongodb state (replace with snapshot)
-# TODO(chdent): There used to be a 'register_db_to_save ceilometer'
-# which may wish to consider putting back in.
-if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
- mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$BASE_RELEASE
-fi
-
# Upgrade Ceilometer
# ==================
# Locate ceilometer devstack plugin, the directory above the
@@ -81,12 +74,6 @@ ensure_services_started "ceilometer-polling --polling-namespaces compute" \
"ceilometer-polling --polling-namespaces central" \
ceilometer-agent-notification
-# Save mongodb state (replace with snapshot)
-if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
- mongodump --db ceilometer --out $SAVE_DIR/ceilometer-dump.$TARGET_RELEASE
-fi
-
-
set +o xtrace
echo "*********************************************************************"
echo "SUCCESS: End $0"
diff --git a/doc/source/admin/telemetry-data-pipelines.rst b/doc/source/admin/telemetry-data-pipelines.rst
index 6bf35d3d..b70b05a3 100644
--- a/doc/source/admin/telemetry-data-pipelines.rst
+++ b/doc/source/admin/telemetry-data-pipelines.rst
@@ -405,8 +405,7 @@ panko
Event data in Ceilometer can be stored in panko which provides an HTTP REST
interface to query system events in OpenStack. To push data to panko,
-set the publisher to ``direct://?dispatcher=panko``. Beginning in panko's
-Pike release, the publisher can be set as ``panko://``
+set the publisher to ``panko://``.
notifier
````````
@@ -531,16 +530,6 @@ Deprecated publishers
The following publishers are deprecated as of Ocata and may be removed in
subsequent releases.
-direct
-``````
-
-This publisher can be specified in the form of ``direct://?dispatcher=http``.
-The dispatcher's options include: ``database``, ``file``, ``http``, and
-``gnocchi``. It emits data in the configured dispatcher directly, default
-configuration (the form is ``direct://``) is database dispatcher.
-In the Mitaka release, this method can only emit data to the database
-dispatcher, and the form is ``direct://``.
-
kafka
`````
@@ -562,57 +551,3 @@ offers similar options as ``notifier`` publisher.
metering data under a topic name, ``ceilometer``. When the port
number is not specified, this publisher uses 9092 as the
broker's port.
-
-
-.. _telemetry-expiry:
-
-database
-````````
-
-.. note::
-
- This functionality was replaced by ``gnocchi`` and ``panko`` publishers.
-
-When the database dispatcher is configured as a data store, you have the
-option to set a ``time_to_live`` option (ttl) for samples. By default
-the ttl value for samples is set to -1, which means that they
-are kept in the database forever.
-
-The time to live value is specified in seconds. Each sample has a time
-stamp, and the ``ttl`` value indicates that a sample will be deleted
-from the database when the number of seconds has elapsed since that
-sample reading was stamped. For example, if the time to live is set to
-600, all samples older than 600 seconds will be purged from the
-database.
-
-Certain databases support native TTL expiration. In cases where this is
-not possible, a command-line script, which you can use for this purpose
-is ``ceilometer-expirer``. You can run it in a cron job, which helps to keep
-your database in a consistent state.
-
-The level of support differs in case of the configured back end:
-
-.. list-table::
- :widths: 33 33 33
- :header-rows: 1
-
- * - Database
- - TTL value support
- - Note
- * - MongoDB
- - Yes
- - MongoDB has native TTL support for deleting samples
- that are older than the configured ttl value.
- * - SQL-based back ends
- - Yes
- - ``ceilometer-expirer`` has to be used for deleting
- samples and its related data from the database.
- * - HBase
- - No
- - Telemetry's HBase support does not include native TTL
- nor ``ceilometer-expirer`` support.
- * - DB2 NoSQL
- - No
- - DB2 NoSQL does not have native TTL
- nor ``ceilometer-expirer`` support.
-
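For reference, a minimal sketch of an ``event_pipeline.yaml`` sink using the ``panko://`` publisher described in the hunk above (source/sink names are illustrative defaults, not mandated by this patch):

    ---
    sources:
        - name: event_source
          events:
              - "*"        # forward all event types
          sinks:
              - event_sink
    sinks:
        - name: event_sink
          publishers:
              - panko://   # push events to panko's REST API
              - gnocchi:// # optional: keep metric publishing alongside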
diff --git a/doc/source/contributor/install/custom.rst b/doc/source/contributor/install/custom.rst
index 1dfcc994..a98ae8d4 100644
--- a/doc/source/contributor/install/custom.rst
+++ b/doc/source/contributor/install/custom.rst
@@ -110,7 +110,6 @@ To use multiple publishers, add multiple publisher lines in ``pipeline.yaml`` an
- name: sink_name
transformers:
publishers:
- - database://
- gnocchi://?archive_policy=low
- file://
diff --git a/doc/source/contributor/install/manual.rst b/doc/source/contributor/install/manual.rst
index f631e23d..ba5651e6 100644
--- a/doc/source/contributor/install/manual.rst
+++ b/doc/source/contributor/install/manual.rst
@@ -20,12 +20,6 @@
Installing Manually
=====================
-.. note::
-
- Ceilometer collector service is deprecated. Configure dispatchers under publisher
- in pipeline to push data instead. For more details about how to configure
- publishers in the :ref:`publisher-configuration`.
-
Storage Backend Installation
============================
@@ -68,7 +62,7 @@ Gnocchi
#. Initialize Gnocchi database by creating ceilometer resources::
- ceilometer-upgrade --skip-metering-database
+ ceilometer-upgrade
#. To minimize data requests, caching and batch processing should be enabled:
diff --git a/doc/source/contributor/new_resource_types.rst b/doc/source/contributor/new_resource_types.rst
index dcd15ff6..405fc5af 100644
--- a/doc/source/contributor/new_resource_types.rst
+++ b/doc/source/contributor/new_resource_types.rst
@@ -82,7 +82,7 @@ The following operations are supported:
.. note::
Do not modify the existing change steps when making changes. Each modification
- requires a new step to be added and for `ceilometer-upgrade --skip-metering-database`
+ requires a new step to be added and for `ceilometer-upgrade`
to be run to apply the change to Gnocchi.
With accomplishing sections above, don't forget to add a new resource type or attributes of
diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst
index af793c16..da92eba9 100644
--- a/doc/source/contributor/testing.rst
+++ b/doc/source/contributor/testing.rst
@@ -24,22 +24,11 @@ run through tox_.
$ sudo pip install tox
-2. On Ubuntu install ``mongodb`` and ``libmysqlclient-dev`` packages::
-
- $ sudo apt-get install mongodb
- $ sudo apt-get install libmysqlclient-dev
-
- For Fedora20 there is no ``libmysqlclient-dev`` package, so you’ll need
- to install ``mariadb-devel.x86-64`` (or ``mariadb-devel.i386``) instead::
-
- $ sudo yum install mongodb
- $ sudo yum install mariadb-devel.x86_64
-
-3. Install the test dependencies::
+2. Install the test dependencies::
$ sudo pip install -r /opt/stack/ceilometer/test-requirements.txt
-4. Run the unit and code-style tests::
+3. Run the unit and code-style tests::
$ cd /opt/stack/ceilometer
$ tox -e py27,pep8
diff --git a/doc/source/install/install-base-config-common.inc b/doc/source/install/install-base-config-common.inc
index ba9c30ea..9323ed1d 100644
--- a/doc/source/install/install-base-config-common.inc
+++ b/doc/source/install/install-base-config-common.inc
@@ -76,4 +76,4 @@
.. code-block:: console
- # ceilometer-upgrade --skip-metering-database
+ # ceilometer-upgrade
diff --git a/etc/ceilometer/ceilometer-config-generator.conf b/etc/ceilometer/ceilometer-config-generator.conf
index 205c093d..dbecc5dd 100644
--- a/etc/ceilometer/ceilometer-config-generator.conf
+++ b/etc/ceilometer/ceilometer-config-generator.conf
@@ -4,7 +4,6 @@ wrap_width = 79
namespace = ceilometer
namespace = ceilometer-auth
namespace = oslo.concurrency
-namespace = oslo.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.service.service
diff --git a/playbooks/legacy/ceilometer-dsvm-functional-mongodb/post.yaml b/playbooks/legacy/ceilometer-dsvm-functional-mongodb/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/legacy/ceilometer-dsvm-functional-mongodb/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-dsvm-functional-mongodb/run.yaml b/playbooks/legacy/ceilometer-dsvm-functional-mongodb/run.yaml
deleted file mode 100644
index e86fa719..00000000
--- a/playbooks/legacy/ceilometer-dsvm-functional-mongodb/run.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-dsvm-functional-mongodb from old job gate-ceilometer-dsvm-functional-mongodb-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_CEILOMETER_BACKEND=mongodb
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- if [ "mongodb" = "postgresql" ] ; then
- export DEVSTACK_GATE_POSTGRES=1
- fi
- if [ "" == "-identity-v3-only" ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_IDENTITY_V2=False"
- fi
-
- function post_test_hook {
- # Configure and run functional tests
- $BASE/new/ceilometer/ceilometer/tests/functional/hooks/post_test_hook.sh "mongodb"
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-dsvm-functional-mysql/post.yaml b/playbooks/legacy/ceilometer-dsvm-functional-mysql/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/legacy/ceilometer-dsvm-functional-mysql/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-dsvm-functional-mysql/run.yaml b/playbooks/legacy/ceilometer-dsvm-functional-mysql/run.yaml
deleted file mode 100644
index 69a9c756..00000000
--- a/playbooks/legacy/ceilometer-dsvm-functional-mysql/run.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-dsvm-functional-mysql from old job gate-ceilometer-dsvm-functional-mysql-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_CEILOMETER_BACKEND=mysql
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- if [ "mysql" = "postgresql" ] ; then
- export DEVSTACK_GATE_POSTGRES=1
- fi
- if [ "" == "-identity-v3-only" ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_IDENTITY_V2=False"
- fi
-
- function post_test_hook {
- # Configure and run functional tests
- $BASE/new/ceilometer/ceilometer/tests/functional/hooks/post_test_hook.sh "mysql"
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-tox-py27-mongodb/post.yaml b/playbooks/legacy/ceilometer-tox-py27-mongodb/post.yaml
deleted file mode 100644
index 68fbdf81..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-mongodb/post.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-tox-py27-mongodb/run.yaml b/playbooks/legacy/ceilometer-tox-py27-mongodb/run.yaml
deleted file mode 100644
index 42391ed7..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-mongodb/run.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-tox-py27-mongodb from old job gate-ceilometer-tox-py27-mongodb-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- CLONEMAP=`mktemp`
- REQS_DIR=`mktemp -d`
- function cleanup {
- mkdir -p $WORKSPACE
- rm -rf $CLONEMAP $REQS_DIR
- }
- trap cleanup EXIT
- cat > $CLONEMAP << EOF
- clonemap:
- - name: $ZUUL_PROJECT
- dest: .
- EOF
- # zuul cloner works poorly if there are 2 names that are the
- # same in here.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cat >> $CLONEMAP << EOF
- - name: openstack/requirements
- dest: $REQS_DIR
- EOF
- fi
- /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP --cache-dir /opt/git \
- git://git.openstack.org $ZUUL_PROJECT openstack/requirements
- # REQS_DIR is not set for openstack/requirements and there is also
- # no need to copy in this case.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cp $REQS_DIR/upper-constraints.txt ./
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/install-distro-packages.sh
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- if [ -x tools/test-setup.sh ] ; then
- tools/test-setup.sh
- fi
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -x
- sudo rm -f /etc/sudoers.d/zuul
- # Prove that general sudo access is actually revoked
- ! sudo -n true
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/run-tox.sh py27-mongodb
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- OUT=`git ls-files --other --exclude-standard --directory`
- if [ -z "$OUT" ]; then
- echo "No extra files created during test."
- exit 0
- else
- echo "The following un-ignored files were created during the test:"
- echo "$OUT"
- exit 0 # TODO: change to 1 to fail tests.
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-tox-py27-mysql/post.yaml b/playbooks/legacy/ceilometer-tox-py27-mysql/post.yaml
deleted file mode 100644
index 68fbdf81..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-mysql/post.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-tox-py27-mysql/run.yaml b/playbooks/legacy/ceilometer-tox-py27-mysql/run.yaml
deleted file mode 100644
index 181119fb..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-mysql/run.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-tox-py27-mysql from old job gate-ceilometer-tox-py27-mysql-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- CLONEMAP=`mktemp`
- REQS_DIR=`mktemp -d`
- function cleanup {
- mkdir -p $WORKSPACE
- rm -rf $CLONEMAP $REQS_DIR
- }
- trap cleanup EXIT
- cat > $CLONEMAP << EOF
- clonemap:
- - name: $ZUUL_PROJECT
- dest: .
- EOF
- # zuul cloner works poorly if there are 2 names that are the
- # same in here.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cat >> $CLONEMAP << EOF
- - name: openstack/requirements
- dest: $REQS_DIR
- EOF
- fi
- /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP --cache-dir /opt/git \
- git://git.openstack.org $ZUUL_PROJECT openstack/requirements
- # REQS_DIR is not set for openstack/requirements and there is also
- # no need to copy in this case.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cp $REQS_DIR/upper-constraints.txt ./
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/install-distro-packages.sh
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- if [ -x tools/test-setup.sh ] ; then
- tools/test-setup.sh
- fi
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -x
- sudo rm -f /etc/sudoers.d/zuul
- # Prove that general sudo access is actually revoked
- ! sudo -n true
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/run-tox.sh py27-mysql
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- OUT=`git ls-files --other --exclude-standard --directory`
- if [ -z "$OUT" ]; then
- echo "No extra files created during test."
- exit 0
- else
- echo "The following un-ignored files were created during the test:"
- echo "$OUT"
- exit 0 # TODO: change to 1 to fail tests.
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-tox-py27-postgresql/post.yaml b/playbooks/legacy/ceilometer-tox-py27-postgresql/post.yaml
deleted file mode 100644
index 68fbdf81..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-postgresql/post.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-tox-py27-postgresql/run.yaml b/playbooks/legacy/ceilometer-tox-py27-postgresql/run.yaml
deleted file mode 100644
index 69b20fd7..00000000
--- a/playbooks/legacy/ceilometer-tox-py27-postgresql/run.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-tox-py27-postgresql from old job gate-ceilometer-tox-py27-postgresql-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- CLONEMAP=`mktemp`
- REQS_DIR=`mktemp -d`
- function cleanup {
- mkdir -p $WORKSPACE
- rm -rf $CLONEMAP $REQS_DIR
- }
- trap cleanup EXIT
- cat > $CLONEMAP << EOF
- clonemap:
- - name: $ZUUL_PROJECT
- dest: .
- EOF
- # zuul cloner works poorly if there are 2 names that are the
- # same in here.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cat >> $CLONEMAP << EOF
- - name: openstack/requirements
- dest: $REQS_DIR
- EOF
- fi
- /usr/zuul-env/bin/zuul-cloner -m $CLONEMAP --cache-dir /opt/git \
- git://git.openstack.org $ZUUL_PROJECT openstack/requirements
- # REQS_DIR is not set for openstack/requirements and there is also
- # no need to copy in this case.
- if [[ "$ZUUL_PROJECT" != "openstack/requirements" ]]; then
- cp $REQS_DIR/upper-constraints.txt ./
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/install-distro-packages.sh
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- if [ -x tools/test-setup.sh ] ; then
- tools/test-setup.sh
- fi
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -x
- sudo rm -f /etc/sudoers.d/zuul
- # Prove that general sudo access is actually revoked
- ! sudo -n true
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: /usr/local/jenkins/slave_scripts/run-tox.sh py27-postgresql
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- OUT=`git ls-files --other --exclude-standard --directory`
- if [ -z "$OUT" ]; then
- echo "No extra files created during test."
- exit 0
- else
- echo "The following un-ignored files were created during the test:"
- echo "$OUT"
- exit 0 # TODO: change to 1 to fail tests.
- fi
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/requirements.txt b/requirements.txt
index faa05260..adaf1a32 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,7 +15,6 @@ monotonic
msgpack-python>=0.4.0 # Apache-2.0
oslo.concurrency>=3.5.0 # Apache-2.0
oslo.config>=3.22.0 # Apache-2.0
-oslo.db>=4.1.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.reports>=0.6.0 # Apache-2.0
@@ -35,8 +34,6 @@ python-cinderclient>=1.6.0,!=1.7.0,!=1.7.1 # Apache-2.0
PyYAML>=3.1.0 # MIT
requests!=2.9.0,>=2.8.1 # Apache-2.0
six>=1.9.0 # MIT
-SQLAlchemy>=1.0.10 # MIT
-sqlalchemy-migrate>=0.9.6 # Apache-2.0
stevedore>=1.9.0 # Apache-2.0
tenacity>=3.2.1 # Apache-2.0
tooz[zake]>=1.47.0 # Apache-2.0
diff --git a/run-tests.sh b/run-tests.sh
index 732302b6..b6539316 100755
--- a/run-tests.sh
+++ b/run-tests.sh
@@ -2,29 +2,13 @@
set -e
set -x
-# Use a mongodb backend by default
-if [ -z $CEILOMETER_TEST_BACKEND ]; then
- CEILOMETER_TEST_BACKEND="mongodb"
-fi
-
echo
echo "OS_TEST_PATH: $OS_TEST_PATH"
-echo "CEILOMETER_TEST_BACKEND: $CEILOMETER_TEST_BACKEND"
echo "CEILOMETER_TEST_DEBUG: $CEILOMETER_TEST_DEBUG"
echo
-if [ "$CEILOMETER_TEST_BACKEND" == "none" ]; then
- if [ "$CEILOMETER_TEST_DEBUG" == "True" ]; then
- oslo_debug_helper $*
- else
- ./tools/pretty_tox.sh $*
- fi
+if [ "$CEILOMETER_TEST_DEBUG" == "True" ]; then
+ oslo_debug_helper $*
else
- for backend in $CEILOMETER_TEST_BACKEND; do
- if [ "$CEILOMETER_TEST_DEBUG" == "True" ]; then
- pifpaf --debug run $backend oslo_debug_helper $*
- else
- pifpaf run $backend ./tools/pretty_tox.sh $*
- fi
- done
+ ./tools/pretty_tox.sh $*
fi
diff --git a/setup.cfg b/setup.cfg
index 02c46b77..a0630a06 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -34,12 +34,6 @@ data_files =
gnocchi =
oslo.cache>=1.5.0 # Apache-2.0
gnocchiclient>=4.0.0 # Apache-2.0
-mongo =
- pymongo!=3.1,>=3.0.2 # Apache-2.0
-postgresql =
- psycopg2>=2.5 # LGPL/ZPL
-mysql =
- PyMySQL>=0.6.2 # MIT License
zaqar =
python-zaqarclient>=1.0.0 # Apache-2.0
@@ -215,14 +209,6 @@ ceilometer.poll.central =
ceilometer.builder.poll.central =
hardware.snmp = ceilometer.hardware.pollsters.generic:GenericHardwareDeclarativePollster
-ceilometer.metering.storage =
- log = ceilometer.storage.impl_log:Connection
- mongodb = ceilometer.storage.impl_mongodb:Connection
- mysql = ceilometer.storage.impl_sqlalchemy:Connection
- postgresql = ceilometer.storage.impl_sqlalchemy:Connection
- sqlite = ceilometer.storage.impl_sqlalchemy:Connection
- hbase = ceilometer.storage.impl_hbase:Connection
-
ceilometer.compute.virt =
libvirt = ceilometer.compute.virt.libvirt.inspector:LibvirtInspector
hyperv = ceilometer.compute.virt.hyperv.inspector:HyperVInspector
@@ -245,25 +231,17 @@ ceilometer.sample.publisher =
notifier = ceilometer.publisher.messaging:SampleNotifierPublisher
udp = ceilometer.publisher.udp:UDPPublisher
file = ceilometer.publisher.file:FilePublisher
- direct = ceilometer.publisher.direct:DirectPublisher
http = ceilometer.publisher.http:HttpPublisher
https = ceilometer.publisher.http:HttpPublisher
gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher
- database = ceilometer.publisher.direct:DirectPublisher
- file_alt = ceilometer.publisher.direct:DirectPublisher
- http_alt = ceilometer.publisher.direct:DirectPublisher
zaqar = ceilometer.publisher.zaqar:ZaqarPublisher
ceilometer.event.publisher =
test = ceilometer.publisher.test:TestPublisher
- direct = ceilometer.publisher.direct:DirectPublisher
notifier = ceilometer.publisher.messaging:EventNotifierPublisher
http = ceilometer.publisher.http:HttpPublisher
https = ceilometer.publisher.http:HttpPublisher
gnocchi = ceilometer.publisher.gnocchi:GnocchiPublisher
- database = ceilometer.publisher.direct:DirectPublisher
- file_alt = ceilometer.publisher.direct:DirectPublisher
- http_alt = ceilometer.publisher.direct:DirectPublisher
zaqar = ceilometer.publisher.zaqar:ZaqarPublisher
ceilometer.event.trait_plugin =
@@ -276,13 +254,8 @@ console_scripts =
ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
ceilometer-send-sample = ceilometer.cmd.sample:send_sample
ceilometer-upgrade = ceilometer.cmd.storage:upgrade
- ceilometer-db-legacy-clean = ceilometer.cmd.storage:db_clean_legacy
- ceilometer-expirer = ceilometer.cmd.storage:expirer
ceilometer-rootwrap = oslo_rootwrap.cmd:main
-ceilometer.dispatcher.meter =
- database = ceilometer.dispatcher.database:MeterDatabaseDispatcher
-
network.statistics.drivers =
opendaylight = ceilometer.network.statistics.opendaylight.driver:OpenDayLightDriver
opencontrail = ceilometer.network.statistics.opencontrail.driver:OpencontrailDriver
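For context: entry points in the ceilometer.metering.storage namespace removed above are resolved at runtime through stevedore, so deleting them unregisters every native metering backend in one place. Below is a minimal sketch of such a lookup, assuming stevedore's DriverManager API; load_metering_backend is a hypothetical helper for illustration, not Ceilometer's actual loader.

# Hypothetical illustration: how a stevedore DriverManager lookup against the
# removed "ceilometer.metering.storage" namespace would have resolved a
# backend name to its Connection class. Not Ceilometer's actual loader.
from stevedore import driver


def load_metering_backend(engine_name):
    # e.g. "mongodb" used to map to ceilometer.storage.impl_mongodb:Connection
    mgr = driver.DriverManager(
        namespace='ceilometer.metering.storage',
        name=engine_name,
        invoke_on_load=False,
    )
    return mgr.driver


if __name__ == '__main__':
    try:
        print(load_metering_backend('mongodb'))
    except Exception as exc:
        # With the entry points deleted, stevedore reports that no plugin
        # matches the requested name.
        print('backend no longer registered: %s' % exc)

With the setup.cfg entries gone, the same lookup fails cleanly, which is why the impl_* modules can be dropped without leaving dangling plugin references.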
diff --git a/test-requirements.txt b/test-requirements.txt
index 69ecdfc6..f516136d 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -4,7 +4,6 @@
coverage>=3.6 # Apache-2.0
fixtures<2.0,>=1.3.1 # Apache-2.0/BSD
-happybase!=0.7,>=0.5,<1.0.0;python_version=='2.7' # MIT
mock>=1.2 # BSD
os-win>=0.2.3 # Apache-2.0
# Docs Requirements
@@ -21,4 +20,3 @@ gabbi>=1.30.0 # Apache-2.0
requests-aws>=0.1.4 # BSD License (3 clause)
os-testr>=0.4.1 # Apache-2.0
tempest>=14.0.0 # Apache-2.0
-pifpaf>=0.0.11 # Apache-2.0
diff --git a/tools/make_test_data.py b/tools/make_test_data.py
deleted file mode 100755
index 00715e7d..00000000
--- a/tools/make_test_data.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command line tool for creating test data for Ceilometer.
-
-Usage:
-
-Generate testing data, e.g. for the default time span:
-
-source .tox/py27/bin/activate
-./tools/make_test_data.py --user 1 --project 1 --resource 1 --counter cpu_util
---volume 20
-"""
-import argparse
-import datetime
-import random
-import uuid
-
-from oslo_utils import timeutils
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer import service
-from ceilometer import storage
-
-
-def make_test_data(conf, name, meter_type, unit, volume, random_min,
- random_max, user_id, project_id, resource_id, start,
- end, interval, resource_metadata=None, source='artificial'):
- resource_metadata = resource_metadata or {'display_name': 'toto',
- 'host': 'tata',
- 'image_ref': 'test',
- 'instance_flavor_id': 'toto',
- 'server_group': 'toto',
- }
- # Compute start and end timestamps for the new data.
- if isinstance(start, datetime.datetime):
- timestamp = start
- else:
- timestamp = timeutils.parse_strtime(start)
-
- if not isinstance(end, datetime.datetime):
- end = timeutils.parse_strtime(end)
-
- increment = datetime.timedelta(minutes=interval)
-
- print('Adding new samples for meter %s.' % (name))
- # Generate samples
- n = 0
- total_volume = volume
- while timestamp <= end:
- if (random_min >= 0 and random_max >= 0):
- # If there is a random element defined, we will add it to
- # user given volume.
- if isinstance(random_min, int) and isinstance(random_max, int):
- total_volume += random.randint(random_min, random_max)
- else:
- total_volume += random.uniform(random_min, random_max)
-
- c = sample.Sample(name=name,
- type=meter_type,
- unit=unit,
- volume=total_volume,
- user_id=user_id,
- project_id=project_id,
- resource_id=resource_id,
- timestamp=timestamp.isoformat(),
- resource_metadata=resource_metadata,
- source=source,
- )
- data = utils.meter_message_from_counter(
- c, conf.publisher.telemetry_secret)
- # timestamp should be string when calculating signature, but should be
- # datetime object when calling record_metering_data.
- data['timestamp'] = timestamp
- yield data
- n += 1
- timestamp = timestamp + increment
-
- if (meter_type == 'gauge' or meter_type == 'delta'):
- # For delta and gauge, we don't want to increase the value
- # in time by random element. So we always set it back to
- # volume.
- total_volume = volume
-
- print('Added %d new samples for meter %s.' % (n, name))
-
-
-def record_test_data(conf, conn, *args, **kwargs):
- for data in make_test_data(conf, *args, **kwargs):
- conn.record_metering_data(data)
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description='generate metering data',
- )
- parser.add_argument(
- '--interval',
- default=10,
- type=int,
- help='The period between samples, in minutes.',
- )
- parser.add_argument(
- '--start',
- default=31,
- help='Number of days to step back from now, or a date in the past '
- '("YYYY-MM-DDTHH:MM:SS" format), to define the start of the timestamp range.',
- )
- parser.add_argument(
- '--end',
- default=2,
- help='Number of days to step forward from now, or a date in the '
- 'future ("YYYY-MM-DDTHH:MM:SS" format), to define the end of the '
- 'timestamp range.',
- )
- parser.add_argument(
- '--type',
- choices=('gauge', 'cumulative'),
- default='gauge',
- dest='meter_type',
- help='Counter type.',
- )
- parser.add_argument(
- '--unit',
- default=None,
- help='Counter unit.',
- )
- parser.add_argument(
- '--project',
- dest='project_id',
- help='Project id of owner.',
- )
- parser.add_argument(
- '--user',
- dest='user_id',
- help='User id of owner.',
- )
- parser.add_argument(
- '--random_min',
- help='The minimum random amount to add to the given volume.',
- type=int,
- default=0,
- )
- parser.add_argument(
- '--random_max',
- help='The maximum random amount to add to the given volume.',
- type=int,
- default=0,
- )
- parser.add_argument(
- '--resource',
- dest='resource_id',
- default=str(uuid.uuid4()),
- help='The resource id for the meter data.',
- )
- parser.add_argument(
- '--counter',
- default='instance',
- dest='name',
- help='The counter name for the meter data.',
- )
- parser.add_argument(
- '--volume',
- help='The amount to attach to the meter.',
- type=int,
- default=1,
- )
- return parser
-
-
-def main():
-
- args = get_parser().parse_args()
- conf = service.prepare_service([])
-
- # Connect to the metering database
- conn = storage.get_connection_from_config(conf)
-
- # Find the user and/or project for a real resource
- if not (args.user_id or args.project_id):
- for r in conn.get_resources():
- if r.resource_id == args.resource_id:
- args.user_id = r.user_id
- args.project_id = r.project_id
- break
-
- # Compute the correct time span
- format = '%Y-%m-%dT%H:%M:%S'
-
- try:
- start = datetime.datetime.utcnow() - datetime.timedelta(
- days=int(args.start))
- except ValueError:
- try:
- start = datetime.datetime.strptime(args.start, format)
- except ValueError:
- raise
-
- try:
- end = datetime.datetime.utcnow() + datetime.timedelta(
- days=int(args.end))
- except ValueError:
- try:
- end = datetime.datetime.strptime(args.end, format)
- except ValueError:
- raise
- args.start = start
- args.end = end
- record_test_data(conf, conn=conn, **args.__dict__)
-
- return 0
-
-
-if __name__ == '__main__':
- main()
diff --git a/tools/make_test_data.sh b/tools/make_test_data.sh
deleted file mode 100755
index 23a93e88..00000000
--- a/tools/make_test_data.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-
-bindir=$(dirname $0)
-
-project_name="$1"
-if [ -z "$project_name" ]
-then
- project_name=demo
-fi
-
-if [ -z "$OS_USERNAME" ]
-then
- user=demo
-else
- user=$OS_USERNAME
-fi
-
-# Convert a possible project name to an id, if we have
-# openstack cli installed.
-if which openstack >/dev/null
-then
- project=$(openstack project show "$project_name" -c id -f value)
-else
- # Assume they gave us the project id as argument.
- project="$project_name"
-fi
-
-if [ -z "$project" ]
-then
- echo "Could not determine project id for \"$project_name\"" 1>&2
- exit 1
-fi
-
-early1="2012-08-27T07:00:00"
-early2="2012-08-27T17:00:00"
-
-start="2012-08-28T00:00:00"
-
-middle1="2012-08-28T08:00:00"
-middle2="2012-08-28T18:00:00"
-middle3="2012-08-29T09:00:00"
-middle4="2012-08-29T19:00:00"
-
-end="2012-08-31T23:59:00"
-
-late1="2012-08-31T10:00:00"
-late2="2012-08-31T20:00:00"
-
-mkdata() {
- ${bindir}/make_test_data.py --project "$project" \
- --user "$user" --start "$2" --end "$3" \
- --resource "$1" --counter instance --volume 1
-}
-
-dates=(early1 early2 start middle1 middle2 middle3 middle4 end late1 late2)
-
-echo $project
-
-for i in $(seq 0 $((${#dates[@]} - 2)) )
-do
-
- iname=${dates[$i]}
- eval "ivalue=\$$iname"
-
- for j in $(seq $((i + 1)) $((${#dates[@]} - 1)) )
- do
- jname=${dates[$j]}
- eval "jvalue=\$$jname"
-
- resource_id="${project_name}-$iname-$jname"
- echo "$resource_id"
-
- mkdata "$resource_id" "$ivalue" "$jvalue"
- [ $? -eq 0 ] || exit $?
- done
- echo
-done
diff --git a/tools/migrate_data_to_gnocchi.py b/tools/migrate_data_to_gnocchi.py
deleted file mode 100755
index 27c87d97..00000000
--- a/tools/migrate_data_to_gnocchi.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# Copyright 2017 Huawei Technologies Co.,LTD.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Command line tool for migrating metrics data from ceilometer native
-storage backend to Gnocchi.
-
-Usage:
-python migrate_data_to_gnocchi.py --native-metering-connection "mysql+pymysql:
-//root:password@127.0.0.1/ceilometer?charset=utf8"
-
-NOTE:
-You may need to install the *tqdm* Python library to display a progress bar during migration.
-
-"""
-
-import sys
-
-try:
- from tqdm import tqdm as progress_bar
-except ImportError:
- progress_bar = None
-
-import argparse
-from oslo_config import cfg
-from oslo_db import options as db_options
-from oslo_log import log
-
-from ceilometer.publisher import gnocchi
-from ceilometer import service
-from ceilometer import storage
-from ceilometer.storage import impl_mongodb
-from ceilometer.storage import impl_sqlalchemy
-from ceilometer.storage.mongo import utils as pymongo_utils
-from ceilometer import utils
-
-
-def get_parser():
- parser = argparse.ArgumentParser(
- description='For migrating metrics data from ceilometer built-in '
- 'storage backends to Gnocchi.')
- parser.add_argument(
- '--native-metering-connection',
- required=True,
- help='The database connection URL of the native storage backend, '
- 'e.g. mysql+pymysql://root:password@127.0.0.1/ceilometer?charset'
- '=utf8',
- )
- parser.add_argument(
- '--ceilometer-config-file',
- help="The config file of ceilometer, it is main used for gnocchi "
- "publisher to init gnocchiclient with the service credentials "
- "defined in the ceilometer config file. Default as "
- "/etc/ceilometer/ceilometer.conf",
- )
- parser.add_argument(
- '--log-file',
- default='gnocchi-data-migration.log',
- help="The log file to record messages during migration.", )
- parser.add_argument(
- '--batch-migration-size',
- default=300,
- help="The amount of samples that will be posted to gnocchi per batch",)
- parser.add_argument(
- '--start-timestamp',
- default=None,
- help="The stat timestamp of metrics data to be migrated, with ISO8601 "
- "format, e.g. 2016-01-25 11:58:00",
- )
- parser.add_argument(
- '--end-timestamp',
- default=None,
- help="The end timestamp of metrics data to be migrated, with ISO8601 "
- "format, e.g. 2017-01-25 11:58:00",
- )
- return parser
-
-
-def count_samples(storage_conn, start_timestamp=None, end_timestamp=None):
- if start_timestamp:
- start_timestamp = utils.sanitize_timestamp(start_timestamp)
- if end_timestamp:
- end_timestamp = utils.sanitize_timestamp(end_timestamp)
- if isinstance(storage_conn, impl_sqlalchemy.Connection):
- from ceilometer.storage.sqlalchemy import models
- session = storage_conn._engine_facade.get_session()
- query = session.query(models.Sample.id)
- if start_timestamp:
- query = query.filter(models.Sample.timestamp >= start_timestamp)
- if end_timestamp:
- query = query.filter(models.Sample.timestamp < end_timestamp)
- return query.count()
- elif isinstance(storage_conn, impl_mongodb.Connection):
- ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
- end_timestamp)
- return storage_conn.db.meter.count(ts_range)
- else:
- print("Unsupported type of storage connection: %s" % storage_conn)
- sys.exit(1)
-
-
-def get_native_storage_conn(metering_connection):
- storage_conf = cfg.ConfigOpts()
- db_options.set_defaults(storage_conf)
- storage_conf.register_opts(storage.OPTS, 'database')
- storage_conf.set_override('metering_connection',
- metering_connection, 'database')
- storage_conn = storage.get_connection_from_config(storage_conf)
- return storage_conn
-
-
-def main():
- args = get_parser().parse_args()
-
- storage_conn = get_native_storage_conn(args.native_metering_connection)
- total_amount = count_samples(storage_conn, args.start_timestamp,
- args.end_timestamp)
- print('%s samples will be migrated to Gnocchi.' % total_amount)
-
- # NOTE: we need service credentials to init gnocchiclient
- config_file = ([args.ceilometer_config_file] if
- args.ceilometer_config_file else None)
- gnocchi_conf = service.prepare_service([], config_file)
- logger = log.getLogger()
- log_conf = cfg.ConfigOpts()
- log.register_options(log_conf)
- log_conf.set_override('log_file', args.log_file)
- log_conf.set_override('debug', True)
- log.setup(log_conf, 'ceilometer_migration')
- time_filters = []
- if args.start_timestamp:
- time_filters.append({">=": {'timestamp': args.start_timestamp}})
- if args.end_timestamp:
- time_filters.append({"<": {'timestamp': args.end_timestamp}})
-
- gnocchi_publisher = gnocchi.GnocchiPublisher(gnocchi_conf, "gnocchi://")
-
- batch_size = args.batch_migration_size
- if total_amount == 'Unknown':
- total_amount = None
- orderby = [{"message_id": "asc"}]
- last_message_id = None
- migrated_amount = 0
- if progress_bar:
- pbar = progress_bar(total=total_amount, ncols=100, unit='samples')
- else:
- pbar = None
- while migrated_amount < total_amount:
- if time_filters and last_message_id:
- filter_expr = {
- 'and': time_filters + [{">": {"message_id": last_message_id}}]}
- elif time_filters and not last_message_id:
- if len(time_filters) == 1:
- filter_expr = time_filters[0]
- else:
- filter_expr = {'and': time_filters}
- elif not time_filters and last_message_id:
- filter_expr = {">": {"message_id": last_message_id}}
- else:
- filter_expr = None
- samples = storage_conn.query_samples(
- filter_expr=filter_expr, orderby=orderby, limit=batch_size)
- samples = list(samples)
- if not samples:
- break
- last_message_id = samples[-1].message_id
- for sample in samples:
- logger.info('Migrating sample with message_id: %s, meter: %s, '
- 'resource_id: %s' % (sample.message_id,
- sample.counter_name,
- sample.resource_id))
- samples_dict = [sample.as_dict() for sample in samples]
- gnocchi_publisher.publish_samples(samples_dict)
- length = len(samples)
- migrated_amount += length
- if pbar:
- pbar.update(length)
- logger.info("=========== %s metrics data migration done ============" %
- total_amount)
-
-if __name__ == '__main__':
- sys.exit(main())
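For reference, the deleted migration tool above pages through samples with keyset pagination: each batch is ordered by message_id and the next query filters on message_id greater than the last one seen. Below is a minimal standalone sketch of that loop; fetch_batch and publish are hypothetical stand-ins for the old storage_conn.query_samples and gnocchi_publisher.publish_samples calls.

# A standalone sketch of the keyset-pagination loop used by the removed
# migration tool: fetch batches ordered by a unique id and resume each batch
# strictly after the last id already processed. fetch_batch and publish are
# hypothetical stand-ins, not Ceilometer or Gnocchi APIs.
def migrate_in_batches(fetch_batch, publish, batch_size=300):
    last_id = None
    migrated = 0
    while True:
        batch = list(fetch_batch(after_id=last_id, limit=batch_size))
        if not batch:
            break
        last_id = batch[-1]['message_id']
        publish(batch)
        migrated += len(batch)
    return migrated


if __name__ == '__main__':
    data = [{'message_id': i, 'counter_name': 'cpu'} for i in range(10)]

    def fetch_batch(after_id, limit):
        start = 0 if after_id is None else after_id + 1
        return data[start:start + limit]

    # Prints 10: every sample is visited exactly once, four per batch.
    print(migrate_in_batches(fetch_batch, lambda batch: None, batch_size=4))

Ordering on a unique column and resuming strictly after the last id avoids the skipped or duplicated rows that offset-based paging can produce while new samples keep arriving.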
diff --git a/tox.ini b/tox.ini
index ce95a831..6159a33f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,10 +1,10 @@
[tox]
minversion = 1.8
skipsdist = True
-envlist = py{27,35},{debug,py,py27,py35}-{mongodb,mysql,postgresql,functional},pep8
+envlist = py{27,35},pep8
[testenv]
-deps = .[mongo,mysql,postgresql,gnocchi,zaqar]
+deps = .[gnocchi,zaqar]
-r{toxinidir}/test-requirements.txt
# NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt
install_command = pip install -U {opts} {packages}
@@ -14,11 +14,6 @@ setenv = VIRTUAL_ENV={envdir}
CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:none}
CEILOMETER_TEST_DEBUG={env:CEILOMETER_TEST_DEBUG:}
debug: CEILOMETER_TEST_DEBUG=True
- {mongodb,mysql,postgresql,functional}: OS_TEST_PATH=ceilometer/tests/functional/
- mongodb: CEILOMETER_TEST_BACKEND=mongodb
- mysql: CEILOMETER_TEST_BACKEND=mysql
- postgresql: CEILOMETER_TEST_BACKEND=postgresql
- functional: CEILOMETER_TEST_BACKEND={env:CEILOMETER_TEST_BACKEND:mongodb}
passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE CEILOMETER_*
commands =
bash -x {toxinidir}/run-tests.sh "{posargs}"