author     Julien Danjou <julien@danjou.info>  2017-10-16 11:31:51 +0200
committer  Julien Danjou <julien@danjou.info>  2017-10-25 14:38:43 +0200
commit     d881dd52289d453b9f9d94c7c32c0672a70a8064 (patch)
tree       3e8dba427d47ccaf28c5c16bfc20956d09e6a042
parent     4b420650697c9c19029152ebce24e0c591f2d9c3 (diff)
download   ceilometer-d881dd52289d453b9f9d94c7c32c0672a70a8064.tar.gz
Remove Ceilometer API
This removes the deprecated Ceilometer API.

Change-Id: I752b36b3dfe8f935b68c4d3d59ccb5b8b60c582f
-rw-r--r--  .gitignore  4
-rw-r--r--  .zuul.yaml  39
-rw-r--r--  README.rst  3
-rw-r--r--  api-ref/source/alarms.inc  336
-rw-r--r--  api-ref/source/capabilities.inc  92
-rw-r--r--  api-ref/source/conf.py  273
-rw-r--r--  api-ref/source/events.inc  93
-rw-r--r--  api-ref/source/index.rst  8
-rw-r--r--  api-ref/source/meters.inc  386
-rw-r--r--  api-ref/source/parameters.yaml  768
-rw-r--r--  api-ref/source/resources.inc  95
-rw-r--r--  api-ref/source/samples.inc  111
-rw-r--r--  api-ref/source/samples/alarm-show-response.json  24
-rw-r--r--  api-ref/source/samples/alarm-show-response.xml  25
-rw-r--r--  api-ref/source/samples/alarms-list-response.json  26
-rw-r--r--  api-ref/source/samples/alarms-list-response.xml  27
-rw-r--r--  api-ref/source/samples/capabilities-list-response.json  40
-rw-r--r--  api-ref/source/samples/capabilities-list-response.xml  131
-rw-r--r--  api-ref/source/samples/event-show-response.json  18
-rw-r--r--  api-ref/source/samples/events-list-response.json  20
-rw-r--r--  api-ref/source/samples/meters-list-response.json  12
-rw-r--r--  api-ref/source/samples/meters-list-response.xml  13
-rw-r--r--  api-ref/source/samples/resource-show-response.json  20
-rw-r--r--  api-ref/source/samples/resource-show-response.xml  27
-rw-r--r--  api-ref/source/samples/resources-list-response.json  22
-rw-r--r--  api-ref/source/samples/resources-list-response.xml  29
-rw-r--r--  api-ref/source/samples/sample-create-request.json  17
-rw-r--r--  api-ref/source/samples/sample-create-request.xml  23
-rw-r--r--  api-ref/source/samples/sample-show-response.json  17
-rw-r--r--  api-ref/source/samples/sample-show-response.xml  24
-rw-r--r--  api-ref/source/samples/samples-list-response.json  19
-rw-r--r--  api-ref/source/samples/samples-list-response.xml  26
-rw-r--r--  api-ref/source/samples/statistics-list-response.json  16
-rw-r--r--  api-ref/source/samples/statistics-list-response.xml  17
-rw-r--r--  ceilometer/__init__.py  5
-rw-r--r--  ceilometer/api/__init__.py  0
-rw-r--r--  ceilometer/api/app.py  112
-rw-r--r--  ceilometer/api/app.wsgi  25
-rw-r--r--  ceilometer/api/controllers/__init__.py  0
-rw-r--r--  ceilometer/api/controllers/root.py  56
-rw-r--r--  ceilometer/api/controllers/v2/__init__.py  0
-rw-r--r--  ceilometer/api/controllers/v2/base.py  222
-rw-r--r--  ceilometer/api/controllers/v2/capabilities.py  90
-rw-r--r--  ceilometer/api/controllers/v2/meters.py  505
-rw-r--r--  ceilometer/api/controllers/v2/query.py  359
-rw-r--r--  ceilometer/api/controllers/v2/resources.py  158
-rw-r--r--  ceilometer/api/controllers/v2/root.py  218
-rw-r--r--  ceilometer/api/controllers/v2/samples.py  145
-rw-r--r--  ceilometer/api/controllers/v2/utils.py  316
-rw-r--r--  ceilometer/api/hooks.py  91
-rw-r--r--  ceilometer/api/middleware.py  127
-rw-r--r--  ceilometer/api/rbac.py  86
-rw-r--r--  ceilometer/cmd/api.py  34
-rw-r--r--  ceilometer/conf/__init__.py  0
-rw-r--r--  ceilometer/conf/defaults.py  37
-rw-r--r--  ceilometer/opts.py  5
-rw-r--r--  ceilometer/service.py  10
-rw-r--r--  ceilometer/telemetry/notifications.py  4
-rw-r--r--  ceilometer/tests/base.py  5
-rw-r--r--  ceilometer/tests/functional/api/__init__.py  177
-rw-r--r--  ceilometer/tests/functional/api/v2/__init__.py  20
-rw-r--r--  ceilometer/tests/functional/api/v2/test_acl_scenarios.py  180
-rw-r--r--  ceilometer/tests/functional/api/v2/test_api_upgrade.py  183
-rw-r--r--  ceilometer/tests/functional/api/v2/test_app.py  98
-rw-r--r--  ceilometer/tests/functional/api/v2/test_capabilities.py  30
-rw-r--r--  ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py  312
-rw-r--r--  ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py  193
-rw-r--r--  ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py  797
-rw-r--r--  ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py  586
-rw-r--r--  ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py  156
-rw-r--r--  ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py  374
-rw-r--r--  ceilometer/tests/functional/api/v2/test_statistics_scenarios.py  1693
-rw-r--r--  ceilometer/tests/functional/api/v2/test_versions.py  65
-rw-r--r--  ceilometer/tests/functional/gabbi/__init__.py  0
-rw-r--r--  ceilometer/tests/functional/gabbi/fixtures.py  170
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbi_paste.ini  24
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml  19
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/basic.yaml  24
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml  13
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml  102
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml  18
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/meters.yaml  384
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/middleware.yaml  44
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml  59
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml  86
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits/samples.yaml  154
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml  20
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml  50
-rw-r--r--  ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml  24
-rw-r--r--  ceilometer/tests/functional/gabbi/test_gabbi.py  35
-rw-r--r--  ceilometer/tests/functional/gabbi/test_gabbi_prefix.py  33
-rw-r--r--  ceilometer/tests/tempest/api/__init__.py  0
-rw-r--r--  ceilometer/tests/tempest/api/base.py  145
-rw-r--r--  ceilometer/tests/tempest/api/test_telemetry_notification_api.py  87
-rw-r--r--  ceilometer/tests/tempest/config.py  14
-rw-r--r--  ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py  146
-rw-r--r--  ceilometer/tests/tempest/service/__init__.py  0
-rw-r--r--  ceilometer/tests/tempest/service/client.py  110
-rw-r--r--  ceilometer/tests/unit/api/__init__.py  0
-rw-r--r--  ceilometer/tests/unit/api/test_app.py  35
-rw-r--r--  ceilometer/tests/unit/api/test_hooks.py  33
-rw-r--r--  ceilometer/tests/unit/api/v2/__init__.py  0
-rw-r--r--  ceilometer/tests/unit/api/v2/test_complex_query.py  339
-rw-r--r--  ceilometer/tests/unit/api/v2/test_query.py  387
-rw-r--r--  ceilometer/tests/unit/api/v2/test_statistics.py  105
-rw-r--r--  ceilometer/tests/unit/telemetry/__init__.py  0
-rw-r--r--  ceilometer/tests/unit/telemetry/test_notifications.py  81
-rw-r--r--  ceilometer/tests/unit/test_utils.py  62
-rw-r--r--  ceilometer/utils.py  40
-rw-r--r--  devstack/apache-ceilometer.template  15
-rw-r--r--  devstack/plugin.sh  79
-rw-r--r--  devstack/settings  11
-rw-r--r--  devstack/upgrade/settings  4
-rwxr-xr-x  devstack/upgrade/shutdown.sh  2
-rwxr-xr-x  devstack/upgrade/upgrade.sh  3
-rw-r--r--  doc/source/admin/index.rst  1
-rw-r--r--  doc/source/admin/telemetry-best-practices.rst  82
-rw-r--r--  doc/source/admin/telemetry-data-collection.rst  77
-rw-r--r--  doc/source/admin/telemetry-data-retrieval.rst  493
-rw-r--r--  doc/source/admin/telemetry-system-architecture.rst  10
-rw-r--r--  doc/source/conf.py  5
-rw-r--r--  doc/source/contributor/install/manual.rst  12
-rw-r--r--  doc/source/contributor/install/upgrade.rst  12
-rw-r--r--  doc/source/contributor/plugins.rst  10
-rw-r--r--  doc/source/contributor/testing.rst  17
-rw-r--r--  doc/source/glossary.rst  3
-rw-r--r--  doc/source/index.rst  1
-rw-r--r--  doc/source/webapi/index.rst  55
-rw-r--r--  doc/source/webapi/v2.rst  655
-rw-r--r--  etc/apache2/ceilometer  39
-rw-r--r--  etc/ceilometer/api_paste.ini  27
-rw-r--r--  etc/ceilometer/ceilometer-config-generator.conf  3
-rw-r--r--  etc/ceilometer/policy.json  15
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/post.yaml  80
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/run.yaml  51
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/post.yaml  80
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/run.yaml  51
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/post.yaml  80
-rw-r--r--  playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/run.yaml  51
-rw-r--r--  rally-jobs/README.rst  29
-rw-r--r--  rally-jobs/ceilometer.yaml  69
-rw-r--r--  rally-jobs/extra/README.rst  6
-rw-r--r--  rally-jobs/extra/fake.img  0
-rw-r--r--  rally-jobs/plugins/README.rst  9
-rw-r--r--  rally-jobs/plugins/plugin_sample.py  27
-rw-r--r--  releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml  4
-rw-r--r--  requirements.txt  10
-rw-r--r--  setup.cfg  6
-rw-r--r--  test-requirements.txt  4
-rw-r--r--  tox.ini  16
150 files changed, 23 insertions, 15299 deletions
diff --git a/.gitignore b/.gitignore
index e4cc07eb..65a52d27 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,13 +10,9 @@ ChangeLog
cover/*
dist/*
doc/build
-doc/source/api/
doc/source/_static/
etc/ceilometer/ceilometer.conf
subunit.log
# Files created by releasenotes build
releasenotes/build
-
-# Files created by api-ref build
-api-ref/build
diff --git a/.zuul.yaml b/.zuul.yaml
index 6e342ceb..1c1521b1 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -19,39 +19,6 @@
- openstack/ceilometer
- job:
- name: ceilometer-dsvm-tempest-plugin-mongodb
- parent: legacy-dsvm-base
- run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/run
- post-run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/post
- timeout: 7800
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/ceilometer
- - openstack/tempest
-
-- job:
- name: ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only
- parent: legacy-dsvm-base
- run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/run
- post-run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/post
- timeout: 7800
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/ceilometer
- - openstack/tempest
-
-- job:
- name: ceilometer-dsvm-tempest-plugin-mysql
- parent: legacy-dsvm-base
- run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/run
- post-run: playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/post
- timeout: 7800
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/ceilometer
- - openstack/tempest
-
-- job:
name: ceilometer-tox-py27-mongodb
parent: legacy-base
run: playbooks/legacy/ceilometer-tox-py27-mongodb/run
@@ -113,8 +80,6 @@
branches: ^stable/newton$
- ceilometer-dsvm-functional-mysql:
branches: ^stable/newton$
- - ceilometer-dsvm-tempest-plugin-mongodb
- - ceilometer-dsvm-tempest-plugin-mysql
- ceilometer-tox-py27-mongodb:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-mysql:
@@ -127,15 +92,12 @@
- ^(test-|)requirements.txt$
- ^setup.cfg$
- telemetry-dsvm-integration-ceilometer
- - ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only
gate:
jobs:
- ceilometer-dsvm-functional-mongodb:
branches: ^stable/newton$
- ceilometer-dsvm-functional-mysql:
branches: ^stable/newton$
- - ceilometer-dsvm-tempest-plugin-mongodb
- - ceilometer-dsvm-tempest-plugin-mysql
- ceilometer-tox-py27-mongodb:
branches: ^(?!stable/newton)
- ceilometer-tox-py27-mysql:
@@ -148,4 +110,3 @@
- ^(test-|)requirements.txt$
- ^setup.cfg$
- telemetry-dsvm-integration-ceilometer
- - ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only
diff --git a/README.rst b/README.rst
index 6fafac56..9ff58dd7 100644
--- a/README.rst
+++ b/README.rst
@@ -11,9 +11,6 @@ Ceilometer is distributed under the terms of the Apache
License, Version 2.0. The full terms and conditions of this
license are detailed in the LICENSE file.
-For more information about Ceilometer APIs, see
-https://developer.openstack.org/api-ref-telemetry-v2.html
-
Release notes are available at
https://releases.openstack.org/teams/telemetry.html
diff --git a/api-ref/source/alarms.inc b/api-ref/source/alarms.inc
deleted file mode 100644
index 592e134f..00000000
--- a/api-ref/source/alarms.inc
+++ /dev/null
@@ -1,336 +0,0 @@
-.. -*- rst -*-
-
-======
-Alarms
-======
-
-Lists, creates, gets details for, updates, and deletes alarms.
-
-
-Show alarm details
-==================
-
-.. rest_method:: GET /v2/alarms/{alarm_id}
-
-Shows details for an alarm, by alarm ID.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm: alarm_response
- - alarm_actions: alarm_actions
- - alarm_id: alarm_id
- - combination_rule: alarm_combination_rule
- - description: alarm_description
- - enabled: alarm_enabled
- - insufficient_data_actions: alarm_insufficient_data_actions
- - timestamp: alarm_timestamp
- - name: alarm_name
- - ok_actions: alarm_ok_actions
- - project_id: alarm_project_id
- - state_timestamp: alarm_state_timestamp
- - threshold_rule: alarm_threshold_rule
- - repeat_actions: alarm_repeat_actions
- - state: alarm_state
- - type: alarm_type
- - user_id: user_id
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/alarm-show-response.json
- :language: javascript
-
-
-
-
-Update alarm
-============
-
-.. rest_method:: PUT /v2/alarms/{alarm_id}
-
-Updates an alarm.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
- - alarm: alarm_request
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm: alarm_response
- - alarm_actions: alarm_actions
- - alarm_id: alarm_id
- - combination_rule: alarm_combination_rule
- - description: alarm_description
- - enabled: alarm_enabled
- - insufficient_data_actions: alarm_insufficient_data_actions
- - timestamp: alarm_timestamp
- - name: alarm_name
- - ok_actions: alarm_ok_actions
- - project_id: alarm_project_id
- - state_timestamp: alarm_state_timestamp
- - threshold_rule: alarm_threshold_rule
- - repeat_actions: alarm_repeat_actions
- - state: alarm_state
- - type: alarm_type
- - user_id: user_id
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/alarm-show-response.json
- :language: javascript
-
-
-
-
-Delete alarm
-============
-
-.. rest_method:: DELETE /v2/alarms/{alarm_id}
-
-Deletes an alarm, by alarm ID.
-
-Normal response codes:204
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
-
-
-
-
-
-
-Update alarm state
-==================
-
-.. rest_method:: PUT /v2/alarms/{alarm_id}/state
-
-Sets the state of an alarm.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
- - state: alarm_state
-
-
-
-
-Response Example
-----------------
-
-.. literalinclude::
- :language: javascript
-
-
-
-
-Show alarm state
-================
-
-.. rest_method:: GET /v2/alarms/{alarm_id}/state
-
-Shows the state for an alarm, by alarm ID.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
-
-
-
-
-Response Example
-----------------
-
-.. literalinclude::
- :language: javascript
-
-
-
-
-List alarms
-===========
-
-.. rest_method:: GET /v2/alarms
-
-Lists alarms, based on a query.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - q: q
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_actions: alarm_actions
- - ok_actions: ok_actions
- - description: description
- - timestamp: timestamp
- - enabled: enabled
- - combination_rule: combination_rule
- - state_timestamp: state_timestamp
- - threshold_rule: threshold_rule
- - alarm_id: alarm_id
- - state: state
- - insufficient_data_actions: alarm_insufficient_data_actions
- - repeat_actions: repeat_actions
- - user_id: user_id
- - project_id: project_id
- - type: type
- - name: name
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/alarms-list-response.json
- :language: javascript
-
-
-
-
-Create alarm
-============
-
-.. rest_method:: POST /v2/alarms
-
-Creates an alarm.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - data: data
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm: alarm_response
- - alarm_actions: alarm_actions
- - alarm_id: alarm_id
- - combination_rule: alarm_combination_rule
- - description: alarm_description
- - enabled: alarm_enabled
- - insufficient_data_actions: alarm_insufficient_data_actions
- - timestamp: alarm_timestamp
- - name: alarm_name
- - ok_actions: alarm_ok_actions
- - project_id: alarm_project_id
- - state_timestamp: alarm_state_timestamp
- - threshold_rule: alarm_threshold_rule
- - repeat_actions: alarm_repeat_actions
- - state: alarm_state
- - type: alarm_type
- - user_id: user_id
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/alarm-show-response.json
- :language: javascript
-
-
-
-
-Show alarm history
-==================
-
-.. rest_method:: GET /v2/alarms/{alarm_id}/history
-
-Assembles and shows the history for an alarm, by alarm ID.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - alarm_id: alarm_id_path
- - q: q
-
-
-
-
-Response Example
-----------------
-
-.. literalinclude::
- :language: javascript
diff --git a/api-ref/source/capabilities.inc b/api-ref/source/capabilities.inc
deleted file mode 100644
index e3461b28..00000000
--- a/api-ref/source/capabilities.inc
+++ /dev/null
@@ -1,92 +0,0 @@
-.. -*- rst -*-
-
-============
-Capabilities
-============
-
-Gets information for API and storage capabilities.
-
-The Telemetry service enables you to store samples, events, and
-alarm definitions in supported database back ends. The
-``capabilities`` resource enables you to list the capabilities that
-a database supports.
-
-The ``capabilities`` resource returns a flattened dictionary of
-capability properties, each with an associated boolean value. A
-value of ``true`` indicates that the corresponding capability is
-available in the back end.
-
-You can optionally configure separate database back ends for
-samples, events, and alarms definitions. The ``capabilities``
-response shows a value of ``true`` to indicate that the definitions
-database for samples, events, or alarms is ready to use in a
-production environment.
-
-
-List capabilities
-=================
-
-.. rest_method:: GET /v2/capabilities
-
-A representation of the API and storage capabilities. Usually, the storage driver imposes constraints.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - statistics:query:complex: statistics:query:complex
- - alarms:history:query:simple: alarms:history:query:simple
- - meters:query:metadata: meters:query:metadata
- - alarms:query:simple: alarms:query:simple
- - resources:query:simple: resources:query:simple
- - api: api
- - statistics:aggregation:selectable:quartile: statistics:aggregation:selectable:quartile
- - statistics:query:simple: statistics:query:simple
- - statistics:aggregation:selectable:count: statistics:aggregation:selectable:count
- - statistics:aggregation:selectable:min: statistics:aggregation:selectable:min
- - statistics:aggregation:selectable:sum: statistics:aggregation:selectable:sum
- - storage: storage
- - alarm_storage: alarm_storage
- - statistics:aggregation:selectable:avg: statistics:aggregation:selectable:avg
- - meters:query:complex: meters:query:complex
- - statistics:groupby: statistics:groupby
- - alarms:history:query:complex: alarms:history:query:complex
- - meters:query:simple: meters:query:simple
- - samples:query:metadata: samples:query:metadata
- - statistics:query:metadata: statistics:query:metadata
- - storage:production_ready: storage:production_ready
- - samples:query:simple: samples:query:simple
- - resources:query:metadata: resources:query:metadata
- - statistics:aggregation:selectable:max: statistics:aggregation:selectable:max
- - samples:query:complex: samples:query:complex
- - statistics:aggregation:standard: statistics:aggregation:standard
- - events:query:simple: events:query:simple
- - statistics:aggregation:selectable:stddev: statistics:aggregation:selectable:stddev
- - alarms:query:complex: alarms:query:complex
- - statistics:aggregation:selectable:cardinality: statistics:aggregation:selectable:cardinality
- - event_storage: event_storage
- - resources:query:complex: resources:query:complex
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/capabilities-list-response.json
- :language: javascript
-
-
-
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
deleted file mode 100644
index 1514779d..00000000
--- a/api-ref/source/conf.py
+++ /dev/null
@@ -1,273 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-#
-# ceilometer documentation build configuration file, created by
-# sphinx-quickstart on Sat May 1 15:17:47 2010.
-#
-# This file is execfile()d with the current directory set to
-# its containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import os
-import subprocess
-import sys
-import warnings
-
-import openstackdocstheme
-
-html_theme = 'openstackdocs'
-html_theme_path = [openstackdocstheme.get_html_theme_path()]
-html_theme_options = {
- "sidebar_mode": "toc",
-}
-
-extensions = [
- 'os_api_ref',
-]
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath('../../'))
-sys.path.insert(0, os.path.abspath('../'))
-sys.path.insert(0, os.path.abspath('./'))
-
-# -- General configuration ----------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#
-# source_encoding = 'utf-8'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Compute API Reference'
-copyright = u'2010-present, OpenStack Foundation'
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-from ceilometer.version import version_info as ceilometer_version
-# The full version, including alpha/beta/rc tags.
-release = ceilometer_version.version_string_with_vcs()
-# The short X.Y version.
-version = ceilometer_version.canonical_version_string()
-
-# Config logABug feature
-giturl = (
- u'https://git.openstack.org/cgit/openstack/ceilometer/tree/api-ref/source')
-# source tree
-# html_context allows us to pass arbitrary values into the html template
-html_context = {'bug_tag': 'api-ref',
- 'giturl': giturl,
- 'bug_project': 'ceilometer'}
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-# today = ''
-# Else, today_fmt is used as the format for a strftime call.
-# today_fmt = '%B %d, %Y'
-
-# The reST default role (used for this markup: `text`) to use
-# for all documents.
-# default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-# add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-add_module_names = False
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# -- Options for man page output ----------------------------------------------
-
-# Grouping the document tree for man pages.
-# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
-
-
-# -- Options for HTML output --------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. Major themes that come with
-# Sphinx are currently 'default' and 'sphinxdoc'.
-# html_theme_path = ["."]
-# html_theme = '_theme'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-# html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-# html_theme_path = []
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-# html_title = None
-
-# A shorter title for the navigation bar. Default is the same as html_title.
-# html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-# html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-# html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-# html_last_updated_fmt = '%b %d, %Y'
-git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local",
- "-n1"]
-try:
- html_last_updated_fmt = subprocess.check_output(git_cmd).decode('utf-8')
-except Exception:
- warnings.warn('Cannot get last updated time from git repository. '
- 'Not setting "html_last_updated_fmt".')
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-# html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-# html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-# html_additional_pages = {}
-
-# If false, no module index is generated.
-# html_use_modindex = True
-
-# If false, no index is generated.
-# html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-# html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-# html_show_sourcelink = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-# html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
-# html_file_suffix = ''
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'ceilometerdoc'
-
-
-# -- Options for LaTeX output -------------------------------------------------
-
-# The paper size ('letter' or 'a4').
-# latex_paper_size = 'letter'
-
-# The font size ('10pt', '11pt' or '12pt').
-# latex_font_size = '10pt'
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title, author, documentclass
-# [howto/manual]).
-latex_documents = [
- ('index', 'CeilometerReleaseNotes.tex',
- u'Ceilometer Release Notes Documentation',
- u'Ceilometer Developers', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-# latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-# latex_use_parts = False
-
-
-# Additional stuff for the LaTeX preamble.
-# latex_preamble = ''
-
-# Documents to append as an appendix to all manuals.
-# latex_appendices = []
-
-# If false, no module index is generated.
-# latex_use_modindex = True
-# (source start file, name, description, authors, manual section).
-man_pages = [
- ('index', 'ceilometerreleasenotes',
- u'Ceilometer Release Notes Documentation', [u'Ceilometer Developers'], 1)
-]
-
-# If true, show URL addresses after external links.
-# man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- ('index', 'CeilometerReleaseNotes',
- u'Ceilometer Release Notes Documentation',
- u'Ceilometer Developers', 'CeilometerReleaseNotes',
- 'One line description of project.',
- 'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-# texinfo_appendices = []
-
-# If false, no module index is generated.
-# texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-# texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-# texinfo_no_detailmenu = False
diff --git a/api-ref/source/events.inc b/api-ref/source/events.inc
deleted file mode 100644
index f72a8283..00000000
--- a/api-ref/source/events.inc
+++ /dev/null
@@ -1,93 +0,0 @@
-.. -*- rst -*-
-
-======
-Events
-======
-
-Lists all events and shows details for an event.
-
-
-Show event details
-==================
-
-.. rest_method:: GET /v2/events/{message_id}
-
-Shows details for an event.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - message_id: message_id_path
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - events: events
- - raw: event_raw
- - generated: event_generated
- - event_type: event_type
- - message_id: message_id
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/event-show-response.json
- :language: javascript
-
-
-
-
-List events
-===========
-
-.. rest_method:: GET /v2/events
-
-Lists all events.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - q: q
- - limit: limit
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - events: events
- - raw: event_raw
- - generated: generated
- - event_type: event_type
- - message_id: message_id
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/events-list-response.json
- :language: javascript
-
-
-
diff --git a/api-ref/source/index.rst b/api-ref/source/index.rst
deleted file mode 100644
index 18f7994f..00000000
--- a/api-ref/source/index.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-=========================
- Ceilometer Release Notes
-=========================
-
-.. toctree::
- :maxdepth: 1
-
-
\ No newline at end of file
diff --git a/api-ref/source/meters.inc b/api-ref/source/meters.inc
deleted file mode 100644
index 292fbaad..00000000
--- a/api-ref/source/meters.inc
+++ /dev/null
@@ -1,386 +0,0 @@
-.. -*- rst -*-
-
-======
-Meters
-======
-
-Lists all meters, adds samples to meters, and lists samples for
-meters. For list operations, if you do not explicitly set the
-``limit`` query parameter, a default limit is applied. The default
-limit is the ``default_api_return_limit`` configuration option
-value.
-
-Also, computes and lists statistics for samples in a time range.
-You can use the ``aggregate`` query parameter in the ``statistics``
-URI to explicitly select the ``stddev``, ``cardinality``, or any
-other standard function. For example:
-
-::
-
- GET /v2/meters/METER_NAME/statistics?aggregate.func=NAME
- &
- aggregate.param=VALUE
-
-The ``aggregate.param`` parameter value is optional for all
-functions except the ``cardinality`` function.
-
-The API silently ignores any duplicate aggregate function and
-parameter pairs.
-
-The API accepts and storage drivers support duplicate functions
-with different parameter values. In this example, the
-``cardinality`` function is accepted twice with two different
-parameter values:
-
-::
-
- GET /v2/meters/METER_NAME/statistics?aggregate.func=cardinality
- &
- aggregate.param=resource_id
- &
- aggregate.func=cardinality
- &
- aggregate.param=project_id
-
-**Examples:**
-
-Use the ``stddev`` function to request the standard deviation of
-CPU utilization:
-
-::
-
- GET /v2/meters/cpu_util/statistics?aggregate.func=stddev
-
-The response looks like this:
-
-.. code-block:: json
-
- [
- {
- "aggregate": {
- "stddev": 0.6858829
- },
- "duration_start": "2014-01-30T11:13:23",
- "duration_end": "2014-01-31T16:07:13",
- "duration": 104030,
- "period": 0,
- "period_start": "2014-01-30T11:13:23",
- "period_end": "2014-01-31T16:07:13",
- "groupby": null,
- "unit": "%"
- }
- ]
-
-Use the ``cardinality`` function with the project ID to return the
-number of distinct tenants with images:
-
-::
-
- GET /v2/meters/image/statistics?aggregate.func=cardinality
- &
- aggregate.param=project_id
-
-The following, more complex, example determines:
-
-- The number of distinct instances (``cardinality``)
-
-- The total number of instance samples (``count``) for a tenant in
- 15-minute intervals (``period`` and ``groupby`` options)
-
-::
-
- GET /v2/meters/instance/statistics?aggregate.func=cardinality
- &
- aggregate.param=resource_id
- &
- aggregate.func=count
- &
- groupby=project_id
- &
- period=900
-
-The response looks like this:
-
-.. code-block:: json
-
- [
- {
- "count": 19,
- "aggregate": {
- "count": 19,
- "cardinality/resource_id": 3
- },
- "duration": 328.47803,
- "duration_start": "2014-01-31T10:00:41.823919",
- "duration_end": "2014-01-31T10:06:10.301948",
- "period": 900,
- "period_start": "2014-01-31T10:00:00",
- "period_end": "2014-01-31T10:15:00",
- "groupby": {
- "project_id": "061a5c91811e4044b7dc86c6136c4f99"
- },
- "unit": "instance"
- },
- {
- "count": 22,
- "aggregate": {
- "count": 22,
- "cardinality/resource_id": 4
- },
- "duration": 808.00385,
- "duration_start": "2014-01-31T10:15:15",
- "duration_end": "2014-01-31T10:28:43.003840",
- "period": 900,
- "period_start": "2014-01-31T10:15:00",
- "period_end": "2014-01-31T10:30:00",
- "groupby": {
- "project_id": "061a5c91811e4044b7dc86c6136c4f99"
- },
- "unit": "instance"
- },
- {
- "count": 2,
- "aggregate": {
- "count": 2,
- "cardinality/resource_id": 2
- },
- "duration": 0,
- "duration_start": "2014-01-31T10:35:15",
- "duration_end": "2014-01-31T10:35:15",
- "period": 900,
- "period_start": "2014-01-31T10:30:00",
- "period_end": "2014-01-31T10:45:00",
- "groupby": {
- "project_id": "061a5c91811e4044b7dc86c6136c4f99"
- },
- "unit": "instance"
- }
- ]
-
-
-Show meter statistics
-=====================
-
-.. rest_method:: GET /v2/meters/{meter_name}/statistics
-
-Computes and lists statistics for samples in a time range.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - meter_name: meter_name
- - q: q
- - groupby: groupby
- - period: period
- - aggregate: aggregate
- - limit: limit
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - count: count
- - duration_start: duration_start
- - min: min
- - max: max
- - duration_end: duration_end
- - period: period
- - sum: sum
- - duration: duration
- - period_end: period_end
- - aggregate: aggregate
- - period_start: period_start
- - avg: avg
- - groupby: groupby
- - unit: unit
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/statistics-list-response.json
- :language: javascript
-
-
-
-
-List meters
-===========
-
-.. rest_method:: GET /v2/meters
-
-Lists meters, based on the data recorded so far.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - q: q
- - limit: limit
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - name: name
- - resource_id: resource_id
- - source: source
- - meter_id: meter_id
- - project_id: project_id
- - type: type
- - unit: unit
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/meters-list-response.json
- :language: javascript
-
-
-
-
-List samples for meter
-======================
-
-.. rest_method:: GET /v2/meters/{meter_name}
-
-Lists samples for a meter, by meter name.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - meter_name: meter_name
- - q: q
- - limit: limit
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - resource_id: resource_id
- - timestamp: timestamp
- - meter: meter
- - volume: volume
- - source: source
- - recorded_at: recorded_at
- - project_id: project_id
- - type: type
- - id: id
- - unit: unit
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/samples-list-response.json
- :language: javascript
-
-
-
-
-Add samples to meter
-====================
-
-.. rest_method:: POST /v2/meters/{meter_name}
-
-Adds samples to a meter, by meter name.
-
-If you attempt to add a sample that is not supported, this call
-returns the ``409`` response code.
-
-
-Normal response codes: 200
-Error response codes:409,
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - resource_id: resource_id
- - timestamp: timestamp
- - meter: meter
- - volume: volume
- - source: source
- - recorded_at: recorded_at
- - project_id: project_id
- - type: type
- - id: id
- - unit: unit
- - metadata: metadata
- - meter_name: meter_name
- - direct: direct
- - samples: samples
-
-Request Example
----------------
-
-.. literalinclude:: ../samples/sample-create-request.json
- :language: javascript
-
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - resource_id: resource_id
- - timestamp: timestamp
- - meter: meter
- - volume: volume
- - source: source
- - recorded_at: recorded_at
- - project_id: project_id
- - type: type
- - id: id
- - unit: unit
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/sample-show-response.json
- :language: javascript
-
-
-
-
diff --git a/api-ref/source/parameters.yaml b/api-ref/source/parameters.yaml
deleted file mode 100644
index c8f9383b..00000000
--- a/api-ref/source/parameters.yaml
+++ /dev/null
@@ -1,768 +0,0 @@
-# variables in header
-{}
-
-# variables in path
-alarm_id_path:
- description: |
- The UUID of the alarm.
- in: path
- required: false
- type: string
-message_id_path:
- description: |
- The UUID of the message.
- in: path
- required: false
- type: string
-meter_name:
- description: |
- The name of the meter.
- in: path
- required: false
- type: string
-resource_id_path:
- description: |
- The UUID of the resource.
- in: path
- required: false
- type: string
-sample_id:
- description: |
- The UUID of the sample.
- in: path
- required: false
- type: string
-
-# variables in query
-aggregate:
- description: |
- A list of selectable aggregation functions to apply.
-
- For example:
-
- ::
-
- GET /v2/meters/METER_NAME/statistics?aggregate.func=cardinality
- &
- aggregate.param=resource_id
- &
- aggregate.func=cardinality
- &
- aggregate.param=project_id
- in: query
- required: false
- type: object
-direct:
- description: |
- Indicates whether the samples are POST ed
- directly to storage. Set ``?direct=True`` to POST the samples
- directly to storage.
- in: query
- required: false
- type: string
-groupby:
- description: |
- Fields for group by aggregation.
- in: query
- required: false
- type: object
-limit:
- description: |
- Limits the maximum number of samples that the response returns.
-
- For example:
-
- ::
-
- GET /v2/events?limit=1000
- in: query
- required: false
- type: integer
-limit_1:
- description: |
- Requests a page size of items. Returns a number
- of items up to a limit value. Use the ``limit`` parameter to make
- an initial limited request and use the ID of the last-seen item
- from the response as the ``marker`` parameter value in a
- subsequent limited request.
- in: query
- required: false
- type: integer
-meter_links:
- description: |
- Set ``?meter_links=1`` to return a self link and
- related meter links.
- in: query
- required: false
- type: integer
-period:
- description: |
- The period, in seconds, for which you want
- statistics.
- in: query
- required: false
- type: integer
-q:
- description: |
- Filters the response by one or more arguments.
- For example: ``?q.field=Foo & q.value=my_text``.
- in: query
- required: false
- type: array
-q_1:
- description: |
- Filters the response by one or more event arguments.
-
- For example:
-
- ::
-
- GET /v2/events?q.field=Foo
- &
- q.value=my_text
- in: query
- required: false
- type: array
-samples:
- description: |
- A list of samples.
- in: query
- required: false
- type: array
-
-# variables in body
-alarm_actions:
- description: |
- The list of actions that the alarm performs.
- in: body
- required: true
- type: array
-alarm_combination_rule:
- description: |
- The rules for the combination alarm type.
- in: body
- required: true
- type: string
-alarm_description:
- description: |
- Describes the alarm.
- in: body
- required: true
- type: string
-alarm_enabled:
- description: |
- If ``true``, evaluation and actioning is enabled
- for the alarm.
- in: body
- required: true
- type: boolean
-alarm_id:
- description: |
- The UUID of the alarm.
- in: body
- required: true
- type: string
-alarm_insufficient_data_actions:
- description: |
- The list of actions that the alarm performs when
- the alarm state is ``insufficient_data``.
- in: body
- required: true
- type: array
-alarm_name:
- description: |
- The name of the alarm.
- in: body
- required: true
- type: string
-alarm_ok_actions:
- description: |
- The list of actions that the alarm performs when
- the alarm state is ``ok``.
- in: body
- required: true
- type: array
-alarm_repeat_actions:
- description: |
- If set to ``true``, the alarm notifications are
- repeated. Otherwise, this value is ``false``.
- in: body
- required: true
- type: boolean
-alarm_request:
- description: |
- An alarm within the request body.
- in: body
- required: false
- type: string
-alarm_state:
- description: |
- The state of the alarm.
- in: body
- required: true
- type: string
-alarm_state_timestamp:
- description: |
- The date and time of the alarm state.
- in: body
- required: true
- type: string
-alarm_storage:
- description: |
- Defines the capabilities for the storage that
- stores persisting alarm definitions. A value of ``true`` indicates
- that the capability is available.
- in: body
- required: true
- type: object
-alarm_threshold_rule:
- description: |
- The rules for the threshold alarm type.
- in: body
- required: true
- type: string
-alarm_timestamp:
- description: |
- The date and time of the alarm.
- in: body
- required: true
- type: string
-alarm_type:
- description: |
- The type of the alarm, which is either
- ``threshold`` or ``combination``.
- in: body
- required: true
- type: string
-alarms:history:query:complex:
- description: |
- If ``true``, the complex query capability for
- alarm history is available for the configured database back end.
- in: body
- required: true
- type: boolean
-alarms:history:query:simple:
- description: |
- If ``true``, the simple query capability for
- alarm history is available for the configured database back end.
- in: body
- required: true
- type: boolean
-alarms:query:complex:
- description: |
- If ``true``, the complex query capability for
- alarm definitions is available for the configured database back
- end.
- in: body
- required: true
- type: boolean
-alarms:query:simple:
- description: |
- If ``true``, the simple query capability for
- alarm definitions is available for the configured database back
- end.
- in: body
- required: true
- type: boolean
-api:
- description: |
- A set of key and value pairs that contain the API
- capabilities for the configured storage driver.
- in: body
- required: true
- type: object
-avg:
- description: |
- The average of all volume values in the data.
- in: body
- required: true
- type: number
-combination_rule:
- description: |
- The rules for the combination alarm type.
- in: body
- required: true
- type: string
-count:
- description: |
- The number of samples seen.
- in: body
- required: true
- type: integer
-description:
- description: |
- Describes the alarm.
- in: body
- required: true
- type: string
-duration:
- description: |
- The number of seconds between the oldest and
- newest date and time stamp.
- in: body
- required: true
- type: number
-duration_end:
- description: |
- The date and time in UTC format of the query end
- time.
- in: body
- required: true
- type: string
-duration_start:
- description: |
- The date and time in UTC format of the query
- start time.
- in: body
- required: true
- type: string
-event_generated:
- description: |
- The date and time when the event occurred.
- in: body
- required: true
- type: string
-event_raw:
- description: |
- A dictionary object that stores event messages
- for future evaluation.
- in: body
- required: true
- type: object
-event_storage:
- description: |
- If ``true``, the capabilities for the storage
- that stores persisting events is available.
- in: body
- required: true
- type: object
-event_type:
- description: |
- The dotted string that represents the event.
- in: body
- required: true
- type: string
-events:
- description: |
- A list of objects. Each object contains key and
- value pairs that describe the event.
- in: body
- required: true
- type: array
-events:query:simple:
- description: |
- If ``true``, the simple query capability for
- events is available for the configured database back end.
- in: body
- required: true
- type: boolean
-id:
- description: |
- The UUID of the sample.
- in: body
- required: true
- type: string
-links:
- description: |
- A list that contains a self link and associated
- meter links.
- in: body
- required: true
- type: array
-max:
- description: |
- The maximum volume seen in the data.
- in: body
- required: true
- type: number
-message_id:
- description: |
- The UUID of the message.
- in: body
- required: true
- type: string
-metadata:
- description: |
- An arbitrary set of one or more metadata key and
- value pairs that are associated with the sample.
- in: body
- required: true
- type: object
-metadata_1:
- description: |
- A set of one or more arbitrary metadata key and
- value pairs that are associated with the resource.
- in: body
- required: true
- type: object
-meter:
- description: |
- The meter name.
- in: body
- required: true
- type: string
-meter_id:
- description: |
- The UUID of the meter.
- in: body
- required: true
- type: string
-meters:query:complex:
- description: |
- If ``true``, the complex query capability for
- meters is available for the configured database back end.
- in: body
- required: true
- type: boolean
-meters:query:metadata:
- description: |
- If ``true``, the simple query capability for the
- metadata of meters is available for the configured database back
- end.
- in: body
- required: true
- type: boolean
-meters:query:simple:
- description: |
- If ``true``, the simple query capability for
- meters is available for the configured database back end.
- in: body
- required: true
- type: boolean
-min:
- description: |
- The minimum volume seen in the data.
- in: body
- required: true
- type: number
-name:
- description: |
- The name of the alarm.
- in: body
- required: true
- type: string
-name_1:
- description: |
- The meter name.
- in: body
- required: true
- type: string
-period_end:
- description: |
- The period end date and time in UTC format.
- in: body
- required: true
- type: string
-period_start:
- description: |
- The period start date and time in UTC format.
- in: body
- required: true
- type: string
-project_id:
- description: |
- The UUID of the project or tenant that owns the
- resource.
- in: body
- required: true
- type: string
-project_id_1:
- description: |
- The UUID of the project.
- in: body
- required: true
- type: string
-project_id_2:
- description: |
- The UUID of the owning project or tenant.
- in: body
- required: true
- type: string
-recorded_at:
- description: |
- The date and time when the sample was recorded.
- in: body
- required: true
- type: string
-measurement_resource_id:
- description: |
- The UUID of the resource for which the
- measurements are taken.
- in: body
- required: true
- type: string
-resource:
- description: |
- in: body
- required: true
- type: object
-resource_id:
- description: |
- The UUID of the resource.
- in: body
- required: true
- type: string
-resouces:
- description: |
- List of the resources.
- in: body
- required: true
- type: array
-resources:query:complex:
- description: |
- If ``true``, the complex query capability for
- resources is available for the configured database back end.
- in: body
- required: true
- type: boolean
-resources:query:metadata:
- description: |
- If ``true``, the simple query capability for the
- metadata of resources is available for the configured database
- back end.
- in: body
- required: true
- type: boolean
-resources:query:simple:
- description: |
- If ``true``, the simple query capability for
- resources is available for the configured database back end.
- in: body
- required: true
- type: boolean
-samples:query:complex:
- description: |
- If ``true``, the complex query capability for
- samples is available for the configured database back end.
- in: body
- required: true
- type: boolean
-samples:query:metadata:
- description: |
- If ``true``, the simple query capability for the
- metadata of samples is available for the configured database back
- end.
- in: body
- required: true
- type: boolean
-samples:query:simple:
- description: |
- If ``true``, the simple query capability for
- samples is available for the configured database back end.
- in: body
- required: true
- type: boolean
-source:
- description: |
- The name of the source that identifies where the
- sample comes from.
- in: body
- required: true
- type: string
-source_1:
- description: |
- The name of the source from which the meter came.
- in: body
- required: true
- type: string
-source_2:
- description: |
- The name of the source from which the resource
- came.
- in: body
- required: true
- type: string
-state:
- description: |
- The state of the alarm.
- in: body
- required: true
- type: string
-statistics:aggregation:selectable:avg:
- description: |
- If ``true``, the ``avg`` capability is available
- for the configured database back end. Use the ``avg`` capability
- to get average values for samples.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:cardinality:
- description: |
- If ``true``, the ``cardinality`` capability is
- available for the configured database back end. Use the
- ``cardinality`` capability to get cardinality for samples.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:count:
- description: |
- If ``true``, the ``count`` capability is
- available for the configured database back end. Use the ``count``
- capability to calculate the number of samples for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:max:
- description: |
- If ``true``, the ``max`` capability is available
- for the configured database back end. . Use the ``max`` capability
- to calculate the maximum value for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:min:
- description: |
- If ``true``, the ``min`` capability is available
- for the configured database back end. Use the ``min`` capability
- to calculate the minimum value for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:quartile:
- description: |
- If ``true``, the ``quartile`` capability is
- available for the configured database back end. Use the
- ``quartile`` capability to calculate the quartile of sample
- volumes for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:stddev:
- description: |
- If ``true``, the ``stddev`` capability is
- available for the configured database back end. Use the ``stddev``
- capability to calculate the standard deviation of sample volumes
- for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:selectable:sum:
- description: |
- If ``true``, the ``sum`` capability is available
- for the configured database back end. Use the ``sum`` capability
- to calculate the sum of sample volumes for a query.
- in: body
- required: true
- type: boolean
-statistics:aggregation:standard:
- description: |
- If ``true``, the ``standard`` set of aggregation
- capability is available for the configured database back end.
- in: body
- required: true
- type: boolean
-statistics:groupby:
- description: |
- If ``true``, the ``groupby`` capability is
- available for calculating statistics for the configured database
- back end.
- in: body
- required: true
- type: boolean
-statistics:query:complex:
- description: |
- If ``true``, the complex query capability for
- statistics is available for the configured database back end.
- in: body
- required: true
- type: boolean
-statistics:query:metadata:
- description: |
- If ``true``, the simple query capability for the
- sample metadata that is used to calculate statistics is available
- for the configured database back end.
- in: body
- required: true
- type: boolean
-statistics:query:simple:
- description: |
- If ``true``, the simple query capability for
- statistics is available for the configured database back end.
- in: body
- required: true
- type: boolean
-storage:
- description: |
- If ``true``, the capabilities for the storage
- that stores persisting samples is available.
- in: body
- required: true
- type: object
-storage:production_ready:
- description: |
- If ``true``, the database back end is ready to
- use in a production environment.
- in: body
- required: true
- type: boolean
-sum:
- description: |
- The total of all of the volume values seen in the
- data.
- in: body
- required: true
- type: number
-timestamp:
- description: |
- The date and time in UTC format when the
- measurement was made.
- in: body
- required: true
- type: string
-timestamp_1:
- description: |
- The date and time of the alarm.
- in: body
- required: true
- type: string
-type:
- description: |
- The meter type.
- in: body
- required: true
- type: string
-type_2:
- description: |
- The meter type. The type value is gauge, delta,
- or cumulative.
- in: body
- required: true
- type: string
-unit:
- description: |
- The unit of measure for the ``volume`` value.
- in: body
- required: true
- type: string
-unit_1:
- description: |
- The unit of measure.
- in: body
- required: true
- type: string
-unit_2:
- description: |
- The unit type of the data set.
- in: body
- required: true
- type: string
-user_id:
- description: |
- The UUID of the user who either created or last
- updated the resource.
- in: body
- required: true
- type: string
-user_id_1:
- description: |
- The UUID of the user.
- in: body
- required: true
- type: string
-volume:
- description: |
- The actual measured value.
- in: body
- required: true
- type: number
-
diff --git a/api-ref/source/resources.inc b/api-ref/source/resources.inc
deleted file mode 100644
index 735f16fb..00000000
--- a/api-ref/source/resources.inc
+++ /dev/null
@@ -1,95 +0,0 @@
-.. -*- rst -*-
-
-=========
-Resources
-=========
-
-Lists all and gets information for resources.
-
-
-List resources
-==============
-
-.. rest_method:: GET /v2/resources
-
-Lists definitions for all resources.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - q: q
- - meter_links: meter_links
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - resources: resources
- - user_id: user_id
- - links: links
- - resource_id: resource_id
- - source: source
- - project_id: project_id
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/resources-list-response.json
- :language: javascript
-
-
-
-
-Show resource details
-=====================
-
-.. rest_method:: GET /v2/resources/{resource_id}
-
-Shows details for a resource, by resource ID.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - resource_id: resource_id_path
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - resource: resource
- - user_id: user_id
- - links: links
- - resource_id: resource_id
- - source: source
- - project_id: project_id
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/resource-show-response.json
- :language: javascript
-
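
The two calls documented above can be exercised with any HTTP client. A
minimal sketch using the third-party ``requests`` library (the endpoint and
token values are placeholders, not part of the removed docs):

    import requests

    BASE = "http://localhost:8777"                  # hypothetical endpoint
    HEADERS = {"X-Auth-Token": "<keystone-token>"}  # placeholder token

    # GET /v2/resources -- list definitions for all resources.
    resources = requests.get(BASE + "/v2/resources", headers=HEADERS).json()

    # GET /v2/resources/{resource_id} -- show one resource by ID.
    rid = resources[0]["resource_id"]
    detail = requests.get(BASE + "/v2/resources/" + rid,
                          headers=HEADERS).json()
    print(detail["metadata"])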
diff --git a/api-ref/source/samples.inc b/api-ref/source/samples.inc
deleted file mode 100644
index 496224ed..00000000
--- a/api-ref/source/samples.inc
+++ /dev/null
@@ -1,111 +0,0 @@
-.. -*- rst -*-
-
-=======
-Samples
-=======
-
-Lists all samples and gets information for a sample.
-
-For list operations, if you do not explicitly set the ``limit``
-query parameter, a default limit is applied. The default limit is
-the ``default_api_return_limit`` configuration option value.
-
-
-Show sample details
-===================
-
-.. rest_method:: GET /v2/samples/{sample_id}
-
-Shows details for a sample, by sample ID.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - sample_id: sample_id
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - resource_id: resource_id
- - timestamp: timestamp
- - meter: meter
- - volume: volume
- - source: source
- - recorded_at: recorded_at
- - project_id: project_id
- - type: type
- - id: id
- - unit: unit
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/sample-show-response.json
- :language: javascript
-
-
-
-
-List samples
-============
-
-.. rest_method:: GET /v2/samples
-
-Lists all known samples, based on the data recorded so far.
-
-
-Normal response codes: 200
-Error response codes:
-
-
-Request
--------
-
-.. rest_parameters:: parameters.yaml
-
- - q: q
- - limit: limit
-
-
-Response Parameters
--------------------
-
-.. rest_parameters:: parameters.yaml
-
- - user_id: user_id
- - resource_id: resource_id
- - timestamp: timestamp
- - meter: meter
- - volume: volume
- - source: source
- - recorded_at: recorded_at
- - project_id: project_id
- - type: type
- - id: id
- - unit: unit
- - metadata: metadata
-
-
-
-Response Example
-----------------
-
-.. literalinclude:: ../samples/samples-list-response.json
- :language: javascript
-
-
-
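Over HTTP, each element of the ``q`` filter list is flattened into
``q.field``, ``q.op`` and ``q.value`` query arguments, as the meter links in
the resource samples below illustrate. A minimal sketch of a filtered list
call, assuming a running endpoint (all values are placeholders):

    import requests

    BASE = "http://localhost:8777"                  # hypothetical endpoint
    HEADERS = {"X-Auth-Token": "<keystone-token>"}  # placeholder token

    # List at most 10 samples for one resource; q.op defaults to 'eq'.
    params = {
        "q.field": "resource_id",
        "q.op": "eq",
        "q.value": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
        "limit": 10,
    }
    samples = requests.get(BASE + "/v2/samples",
                           headers=HEADERS, params=params).json()
    for s in samples:
        print(s["timestamp"], s["meter"], s["volume"], s["unit"])
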
diff --git a/api-ref/source/samples/alarm-show-response.json b/api-ref/source/samples/alarm-show-response.json
deleted file mode 100644
index 802d472e..00000000
--- a/api-ref/source/samples/alarm-show-response.json
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "alarm_actions": [
- "http://site:8000/alarm"
- ],
- "alarm_id": null,
- "combination_rule": null,
- "description": "An alarm",
- "enabled": true,
- "insufficient_data_actions": [
- "http://site:8000/nodata"
- ],
- "name": "SwiftObjectAlarm",
- "ok_actions": [
- "http://site:8000/ok"
- ],
- "project_id": "c96c887c216949acbdfbd8b494863567",
- "repeat_actions": false,
- "state": "ok",
- "state_timestamp": "2013-11-21T12:33:08.486228",
- "threshold_rule": null,
- "timestamp": "2013-11-21T12:33:08.486221",
- "type": "threshold",
- "user_id": "c96c887c216949acbdfbd8b494863567"
-}
diff --git a/api-ref/source/samples/alarm-show-response.xml b/api-ref/source/samples/alarm-show-response.xml
deleted file mode 100644
index 61ae48bb..00000000
--- a/api-ref/source/samples/alarm-show-response.xml
+++ /dev/null
@@ -1,25 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<value>
- <alarm_actions>
- <item>http://site:8000/alarm</item>
- </alarm_actions>
- <alarm_id nil="true" />
- <combination_rule nil="true" />
- <description>An alarm</description>
- <enabled>true</enabled>
- <insufficient_data_actions>
- <item>http://site:8000/nodata</item>
- </insufficient_data_actions>
- <name>SwiftObjectAlarm</name>
- <ok_actions>
- <item>http://site:8000/ok</item>
- </ok_actions>
- <project_id>c96c887c216949acbdfbd8b494863567</project_id>
- <repeat_actions>false</repeat_actions>
- <state>ok</state>
- <state_timestamp>2013-11-21T12:33:08.486228</state_timestamp>
- <threshold_rule nil="true" />
- <timestamp>2013-11-21T12:33:08.486221</timestamp>
- <type>threshold</type>
- <user_id>c96c887c216949acbdfbd8b494863567</user_id>
-</value>
diff --git a/api-ref/source/samples/alarms-list-response.json b/api-ref/source/samples/alarms-list-response.json
deleted file mode 100644
index 760b68b9..00000000
--- a/api-ref/source/samples/alarms-list-response.json
+++ /dev/null
@@ -1,26 +0,0 @@
-[
- {
- "alarm_actions": [
- "http://site:8000/alarm"
- ],
- "alarm_id": null,
- "combination_rule": null,
- "description": "An alarm",
- "enabled": true,
- "insufficient_data_actions": [
- "http://site:8000/nodata"
- ],
- "name": "SwiftObjectAlarm",
- "ok_actions": [
- "http://site:8000/ok"
- ],
- "project_id": "c96c887c216949acbdfbd8b494863567",
- "repeat_actions": false,
- "state": "ok",
- "state_timestamp": "2013-11-21T12:33:08.486228",
- "threshold_rule": null,
- "timestamp": "2013-11-21T12:33:08.486221",
- "type": "threshold",
- "user_id": "c96c887c216949acbdfbd8b494863567"
- }
-]
diff --git a/api-ref/source/samples/alarms-list-response.xml b/api-ref/source/samples/alarms-list-response.xml
deleted file mode 100644
index 99ca555e..00000000
--- a/api-ref/source/samples/alarms-list-response.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<values>
- <value>
- <alarm_actions>
- <item>http://site:8000/alarm</item>
- </alarm_actions>
- <alarm_id nil="true" />
- <combination_rule nil="true" />
- <description>An alarm</description>
- <enabled>true</enabled>
- <insufficient_data_actions>
- <item>http://site:8000/nodata</item>
- </insufficient_data_actions>
- <name>SwiftObjectAlarm</name>
- <ok_actions>
- <item>http://site:8000/ok</item>
- </ok_actions>
- <project_id>c96c887c216949acbdfbd8b494863567</project_id>
- <repeat_actions>false</repeat_actions>
- <state>ok</state>
- <state_timestamp>2013-11-21T12:33:08.486228</state_timestamp>
- <threshold_rule nil="true" />
- <timestamp>2013-11-21T12:33:08.486221</timestamp>
- <type>threshold</type>
- <user_id>c96c887c216949acbdfbd8b494863567</user_id>
- </value>
-</values>
diff --git a/api-ref/source/samples/capabilities-list-response.json b/api-ref/source/samples/capabilities-list-response.json
deleted file mode 100644
index 8ec23e79..00000000
--- a/api-ref/source/samples/capabilities-list-response.json
+++ /dev/null
@@ -1,40 +0,0 @@
-{
- "alarm_storage": {
- "storage:production_ready": true
- },
- "api": {
- "alarms:history:query:complex": true,
- "alarms:history:query:simple": true,
- "alarms:query:complex": true,
- "alarms:query:simple": true,
- "events:query:simple": true,
- "meters:query:complex": false,
- "meters:query:metadata": true,
- "meters:query:simple": true,
- "resources:query:complex": false,
- "resources:query:metadata": true,
- "resources:query:simple": true,
- "samples:query:complex": true,
- "samples:query:metadata": true,
- "samples:query:simple": true,
- "statistics:aggregation:selectable:avg": true,
- "statistics:aggregation:selectable:cardinality": true,
- "statistics:aggregation:selectable:count": true,
- "statistics:aggregation:selectable:max": true,
- "statistics:aggregation:selectable:min": true,
- "statistics:aggregation:selectable:quartile": false,
- "statistics:aggregation:selectable:stddev": true,
- "statistics:aggregation:selectable:sum": true,
- "statistics:aggregation:standard": true,
- "statistics:groupby": true,
- "statistics:query:complex": false,
- "statistics:query:metadata": true,
- "statistics:query:simple": true
- },
- "event_storage": {
- "storage:production_ready": true
- },
- "storage": {
- "storage:production_ready": true
- }
-}
diff --git a/api-ref/source/samples/capabilities-list-response.xml b/api-ref/source/samples/capabilities-list-response.xml
deleted file mode 100644
index d286c8cb..00000000
--- a/api-ref/source/samples/capabilities-list-response.xml
+++ /dev/null
@@ -1,131 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<value>
- <api>
- <item>
- <key>statistics:query:complex</key>
- <value>false</value>
- </item>
- <item>
- <key>alarms:history:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>meters:query:metadata</key>
- <value>true</value>
- </item>
- <item>
- <key>alarms:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>resources:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:quartile</key>
- <value>false</value>
- </item>
- <item>
- <key>statistics:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:count</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:min</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:sum</key>
- <value>true</value>
- </item>
- <item>
- <key>alarms:query:complex</key>
- <value>true</value>
- </item>
- <item>
- <key>meters:query:complex</key>
- <value>false</value>
- </item>
- <item>
- <key>statistics:groupby</key>
- <value>true</value>
- </item>
- <item>
- <key>alarms:history:query:complex</key>
- <value>true</value>
- </item>
- <item>
- <key>meters:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>samples:query:metadata</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:query:metadata</key>
- <value>true</value>
- </item>
- <item>
- <key>samples:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>resources:query:metadata</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:max</key>
- <value>true</value>
- </item>
- <item>
- <key>samples:query:complex</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:standard</key>
- <value>true</value>
- </item>
- <item>
- <key>events:query:simple</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:stddev</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:avg</key>
- <value>true</value>
- </item>
- <item>
- <key>statistics:aggregation:selectable:cardinality</key>
- <value>true</value>
- </item>
- <item>
- <key>resources:query:complex</key>
- <value>false</value>
- </item>
- </api>
- <storage>
- <item>
- <key>storage:production_ready</key>
- <value>true</value>
- </item>
- </storage>
- <alarm_storage>
- <item>
- <key>storage:production_ready</key>
- <value>true</value>
- </item>
- </alarm_storage>
- <event_storage>
- <item>
- <key>storage:production_ready</key>
- <value>true</value>
- </item>
- </event_storage>
-</value>
diff --git a/api-ref/source/samples/event-show-response.json b/api-ref/source/samples/event-show-response.json
deleted file mode 100644
index 5c736a07..00000000
--- a/api-ref/source/samples/event-show-response.json
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "raw": {},
- "traits": [
- {
- "type": "string",
- "name": "action",
- "value": "read"
- },
- {
- "type": "string",
- "name": "eventTime",
- "value": "2015-10-28T20:26:58.545477+0000"
- }
- ],
- "generated": "2015-10-28T20:26:58.546933",
- "message_id": "bae43de6-e9fa-44ad-8c15-40a852584444",
- "event_type": "http.request"
-}
diff --git a/api-ref/source/samples/events-list-response.json b/api-ref/source/samples/events-list-response.json
deleted file mode 100644
index 4bd2dafd..00000000
--- a/api-ref/source/samples/events-list-response.json
+++ /dev/null
@@ -1,20 +0,0 @@
-[
- {
- "raw": {},
- "traits": [
- {
- "type": "string",
- "name": "action",
- "value": "read"
- },
- {
- "type": "string",
- "name": "eventTime",
- "value": "2015-10-28T20:26:58.545477+0000"
- }
- ],
- "generated": "2015-10-28T20:26:58.546933",
- "message_id": "bae43de6-e9fa-44ad-8c15-40a852584444",
- "event_type": "http.request"
- }
-]
diff --git a/api-ref/source/samples/meters-list-response.json b/api-ref/source/samples/meters-list-response.json
deleted file mode 100644
index f40c3c60..00000000
--- a/api-ref/source/samples/meters-list-response.json
+++ /dev/null
@@ -1,12 +0,0 @@
-[
- {
- "meter_id": "YmQ5NDMxYzEtOGQ2OS00YWQzLTgwM2EtOGQ0YTZiODlmZDM2K2luc3RhbmNl",
- "name": "instance",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "type": "gauge",
- "unit": "instance",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff"
- }
-]
diff --git a/api-ref/source/samples/meters-list-response.xml b/api-ref/source/samples/meters-list-response.xml
deleted file mode 100644
index 045f3668..00000000
--- a/api-ref/source/samples/meters-list-response.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<values>
- <value>
- <name>instance</name>
- <type>gauge</type>
- <unit>instance</unit>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <source>openstack</source>
- <meter_id>YmQ5NDMxYzEtOGQ2OS00YWQzLTgwM2EtOGQ0YTZiODlmZDM2K2luc3RhbmNl</meter_id>
- </value>
-</values>
diff --git a/api-ref/source/samples/resource-show-response.json b/api-ref/source/samples/resource-show-response.json
deleted file mode 100644
index 8679b5e7..00000000
--- a/api-ref/source/samples/resource-show-response.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "links": [
- {
- "href": "http://localhost:8777/v2/resources/bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "rel": "self"
- },
- {
- "href": "http://localhost:8777/v2/meters/volume?q.field=resource_id&q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "rel": "volume"
- }
- ],
- "metadata": {
- "name1": "value1",
- "name2": "value2"
- },
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff"
-}
diff --git a/api-ref/source/samples/resource-show-response.xml b/api-ref/source/samples/resource-show-response.xml
deleted file mode 100644
index 0516d540..00000000
--- a/api-ref/source/samples/resource-show-response.xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<value>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <metadata>
- <item>
- <key>name2</key>
- <value>value2</value>
- </item>
- <item>
- <key>name1</key>
- <value>value1</value>
- </item>
- </metadata>
- <links>
- <item>
- <href>http://localhost:8777/v2/resources/bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</href>
- <rel>self</rel>
- </item>
- <item>
- <href>http://localhost:8777/v2/meters/volume?q.field=resource_id&amp;q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</href>
- <rel>volume</rel>
- </item>
- </links>
- <source>openstack</source>
-</value>
diff --git a/api-ref/source/samples/resources-list-response.json b/api-ref/source/samples/resources-list-response.json
deleted file mode 100644
index 3655077e..00000000
--- a/api-ref/source/samples/resources-list-response.json
+++ /dev/null
@@ -1,22 +0,0 @@
-[
- {
- "links": [
- {
- "href": "http://localhost:8777/v2/resources/bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "rel": "self"
- },
- {
- "href": "http://localhost:8777/v2/meters/volume?q.field=resource_id&q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "rel": "volume"
- }
- ],
- "metadata": {
- "name1": "value1",
- "name2": "value2"
- },
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff"
- }
-]
diff --git a/api-ref/source/samples/resources-list-response.xml b/api-ref/source/samples/resources-list-response.xml
deleted file mode 100644
index 9041d6e7..00000000
--- a/api-ref/source/samples/resources-list-response.xml
+++ /dev/null
@@ -1,29 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<values>
- <value>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <metadata>
- <item>
- <key>name2</key>
- <value>value2</value>
- </item>
- <item>
- <key>name1</key>
- <value>value1</value>
- </item>
- </metadata>
- <links>
- <item>
- <href>http://localhost:8777/v2/resources/bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</href>
- <rel>self</rel>
- </item>
- <item>
- <href>http://localhost:8777/v2/meters/volume?q.field=resource_id&amp;q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</href>
- <rel>volume</rel>
- </item>
- </links>
- <source>openstack</source>
- </value>
-</values>
diff --git a/api-ref/source/samples/sample-create-request.json b/api-ref/source/samples/sample-create-request.json
deleted file mode 100644
index 38a94eed..00000000
--- a/api-ref/source/samples/sample-create-request.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "id": "8db08c68-bc70-11e4-a8c4-fa163e1d1a9b",
- "metadata": {
- "name1": "value1",
- "name2": "value2"
- },
- "meter": "instance",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "recorded_at": "2015-02-24T22:00:32.747930",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "timestamp": "2015-02-24T22:00:32.747930",
- "type": "gauge",
- "unit": "instance",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
- "volume": 1.0
-}
diff --git a/api-ref/source/samples/sample-create-request.xml b/api-ref/source/samples/sample-create-request.xml
deleted file mode 100644
index 21af1af5..00000000
--- a/api-ref/source/samples/sample-create-request.xml
+++ /dev/null
@@ -1,23 +0,0 @@
-<value>
- <id>8db08c68-bc70-11e4-a8c4-fa163e1d1a9b</id>
- <meter>instance</meter>
- <type>gauge</type>
- <unit>instance</unit>
- <volume>1.0</volume>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <source>openstack</source>
- <timestamp>2015-02-24T22:00:32.747930</timestamp>
- <recorded_at>2015-02-24T22:00:32.747930</recorded_at>
- <metadata>
- <item>
- <key>name2</key>
- <value>value2</value>
- </item>
- <item>
- <key>name1</key>
- <value>value1</value>
- </item>
- </metadata>
-</value>
diff --git a/api-ref/source/samples/sample-show-response.json b/api-ref/source/samples/sample-show-response.json
deleted file mode 100644
index 9b3df91b..00000000
--- a/api-ref/source/samples/sample-show-response.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "id": "9b23b398-6139-11e5-97e9-bc764e045bf6",
- "metadata": {
- "name1": "value1",
- "name2": "value2"
- },
- "meter": "instance",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "recorded_at": "2015-09-22T14:52:54.850725",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "timestamp": "2015-09-22T14:52:54.850718",
- "type": "gauge",
- "unit": "instance",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
- "volume": 1
-}
diff --git a/api-ref/source/samples/sample-show-response.xml b/api-ref/source/samples/sample-show-response.xml
deleted file mode 100644
index a11b74fb..00000000
--- a/api-ref/source/samples/sample-show-response.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<value>
- <id>9b23b398-6139-11e5-97e9-bc764e045bf6</id>
- <meter>instance</meter>
- <type>gauge</type>
- <unit>instance</unit>
- <volume>1.0</volume>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <source>openstack</source>
- <timestamp>2015-09-22T14:52:54.850718</timestamp>
- <recorded_at>2015-09-22T14:52:54.850725</recorded_at>
- <metadata>
- <item>
- <key>name2</key>
- <value>value2</value>
- </item>
- <item>
- <key>name1</key>
- <value>value1</value>
- </item>
- </metadata>
-</value>
diff --git a/api-ref/source/samples/samples-list-response.json b/api-ref/source/samples/samples-list-response.json
deleted file mode 100644
index 7d8e5bc7..00000000
--- a/api-ref/source/samples/samples-list-response.json
+++ /dev/null
@@ -1,19 +0,0 @@
-[
- {
- "id": "9b23b398-6139-11e5-97e9-bc764e045bf6",
- "metadata": {
- "name1": "value1",
- "name2": "value2"
- },
- "meter": "instance",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "recorded_at": "2015-09-22T14:52:54.850725",
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "source": "openstack",
- "timestamp": "2015-09-22T14:52:54.850718",
- "type": "gauge",
- "unit": "instance",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
- "volume": 1
- }
-]
diff --git a/api-ref/source/samples/samples-list-response.xml b/api-ref/source/samples/samples-list-response.xml
deleted file mode 100644
index 04f44175..00000000
--- a/api-ref/source/samples/samples-list-response.xml
+++ /dev/null
@@ -1,26 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<values>
- <value>
- <id>9b23b398-6139-11e5-97e9-bc764e045bf6</id>
- <meter>instance</meter>
- <type>gauge</type>
- <unit>instance</unit>
- <volume>1.0</volume>
- <user_id>efd87807-12d2-4b38-9c70-5f5c2ac427ff</user_id>
- <project_id>35b17138-b364-4e6a-a131-8f3099c5be68</project_id>
- <resource_id>bd9431c1-8d69-4ad3-803a-8d4a6b89fd36</resource_id>
- <source>openstack</source>
- <timestamp>2015-09-22T14:52:54.850718</timestamp>
- <recorded_at>2015-09-22T14:52:54.850725</recorded_at>
- <metadata>
- <item>
- <key>name2</key>
- <value>value2</value>
- </item>
- <item>
- <key>name1</key>
- <value>value1</value>
- </item>
- </metadata>
- </value>
-</values>
diff --git a/api-ref/source/samples/statistics-list-response.json b/api-ref/source/samples/statistics-list-response.json
deleted file mode 100644
index 1d8e1bc8..00000000
--- a/api-ref/source/samples/statistics-list-response.json
+++ /dev/null
@@ -1,16 +0,0 @@
-[
- {
- "avg": 4.5,
- "count": 10,
- "duration": 300,
- "duration_end": "2013-01-04T16:47:00",
- "duration_start": "2013-01-04T16:42:00",
- "max": 9,
- "min": 1,
- "period": 7200,
- "period_end": "2013-01-04T18:00:00",
- "period_start": "2013-01-04T16:00:00",
- "sum": 45,
- "unit": "GiB"
- }
-]
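
The figures in this example are internally consistent: ``avg`` equals
``sum / count`` (45 / 10 = 4.5), ``duration`` is the 300 seconds between
``duration_start`` (16:42:00) and ``duration_end`` (16:47:00), and ``period``
is the requested two-hour bucket (7200 seconds).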
diff --git a/api-ref/source/samples/statistics-list-response.xml b/api-ref/source/samples/statistics-list-response.xml
deleted file mode 100644
index 503e068d..00000000
--- a/api-ref/source/samples/statistics-list-response.xml
+++ /dev/null
@@ -1,17 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<values>
- <value>
- <avg>4.5</avg>
- <count>10</count>
- <duration>300.0</duration>
- <duration_end>2013-01-04T16:47:00</duration_end>
- <duration_start>2013-01-04T16:42:00</duration_start>
- <max>9.0</max>
- <min>1.0</min>
- <period>7200</period>
- <period_end>2013-01-04T18:00:00</period_end>
- <period_start>2013-01-04T16:00:00</period_start>
- <sum>45.0</sum>
- <unit>GiB</unit>
- </value>
-</values>
diff --git a/ceilometer/__init__.py b/ceilometer/__init__.py
index 676c802f..9bd5528d 100644
--- a/ceilometer/__init__.py
+++ b/ceilometer/__init__.py
@@ -14,7 +14,4 @@
class NotImplementedError(NotImplementedError):
- # FIXME(jd) This is used by WSME to return a correct HTTP code. We should
- # not expose it here but wrap our methods in the API to convert it to a
- # proper HTTP error.
- code = 501
+ pass
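
The deleted ``code = 501`` relied on a WSME convention: when a controller
raises an exception, WSME reads an optional ``code`` attribute on the
exception class to choose the HTTP status of the error response. A
standalone sketch of the idea (illustrative, not Ceilometer or WSME code):

    # WSME-style mapping of an exception's `code` attribute to HTTP status.
    class DriverNotImplementedError(NotImplementedError):
        code = 501  # answer "501 Not Implemented" instead of a generic 500

    def format_error(exc):
        # Simplified stand-in for what the framework does internally.
        status = getattr(exc, 'code', 500)
        return status, str(exc)

    print(format_error(DriverNotImplementedError("no complex queries")))
    # -> (501, 'no complex queries')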
diff --git a/ceilometer/api/__init__.py b/ceilometer/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/api/__init__.py
+++ /dev/null
diff --git a/ceilometer/api/app.py b/ceilometer/api/app.py
deleted file mode 100644
index c8ff1c43..00000000
--- a/ceilometer/api/app.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2015-2016 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import os
-import uuid
-
-from oslo_config import cfg
-from oslo_log import log
-from paste import deploy
-import pecan
-
-from ceilometer.api import hooks
-from ceilometer.api import middleware
-
-LOG = log.getLogger(__name__)
-
-OPTS = [
- cfg.StrOpt('api_paste_config',
- default="api_paste.ini",
- help="Configuration file for WSGI definition of API."
- ),
-]
-
-API_OPTS = [
- cfg.IntOpt('default_api_return_limit',
- min=1,
- default=100,
- help='Default maximum number of items returned by API request.'
- ),
-]
-
-
-def setup_app(pecan_config=None, conf=None):
- if conf is None:
- raise RuntimeError("No configuration passed")
-
- # FIXME: Replace DBHook with a hooks.TransactionHook
- app_hooks = [hooks.ConfigHook(conf),
- hooks.DBHook(conf),
- hooks.NotifierHook(conf),
- hooks.TranslationHook()]
-
- pecan_config = pecan_config or {
- "app": {
- 'root': 'ceilometer.api.controllers.root.RootController',
- 'modules': ['ceilometer.api'],
- }
- }
-
- pecan.configuration.set_config(dict(pecan_config), overwrite=True)
-
- app = pecan.make_app(
- pecan_config['app']['root'],
- hooks=app_hooks,
- wrap_app=middleware.ParsableErrorMiddleware,
- guess_content_type_from_ext=False
- )
-
- return app
-
-
-# NOTE(sileht): pastedeploy uses ConfigParser to handle
-# global_conf. Since the Python 3 ConfigParser only allows
-# strings as config values, objects created before paste loads
-# the app are stored in a global var instead. Each loaded app
-# stores its configuration under a unique key to be
-# concurrency safe.
-global APPCONFIGS
-APPCONFIGS = {}
-
-
-def load_app(conf):
- global APPCONFIGS
-
- # Build the WSGI app
- cfg_file = None
- cfg_path = conf.api_paste_config
- if not os.path.isabs(cfg_path):
- cfg_file = conf.find_file(cfg_path)
- elif os.path.exists(cfg_path):
- cfg_file = cfg_path
-
- if not cfg_file:
- raise cfg.ConfigFilesNotFoundError([conf.api_paste_config])
-
- configkey = str(uuid.uuid4())
- APPCONFIGS[configkey] = conf
-
- LOG.info("Full WSGI config used: %s", cfg_file)
- LOG.warning("Note: Ceilometer API is deprecated; use APIs from Aodh"
- " (alarms), Gnocchi (metrics) and/or Panko (events).")
- return deploy.loadapp("config:" + cfg_file,
- global_conf={'configkey': configkey})
-
-
-def app_factory(global_config, **local_conf):
- global APPCONFIGS
- conf = APPCONFIGS.get(global_config.get('configkey'))
- return setup_app(conf=conf)
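
The ``APPCONFIGS`` indirection deleted above is a pattern worth noting:
PasteDeploy carries ``global_conf`` through ``ConfigParser``, which can only
hold strings, so ``load_app()`` stashes the live ``conf`` object in a
module-level dict and hands Paste only a random string key;
``app_factory()`` then looks the object back up. A stripped-down sketch of
the pattern (names are illustrative):

    import uuid

    _REGISTRY = {}  # module-level store shared with the factory

    def register(obj):
        """Stash a live object; return a string key Paste can carry."""
        key = str(uuid.uuid4())  # unique per loaded app, safe under reloads
        _REGISTRY[key] = obj
        return key

    def factory(global_config):
        """Recover the live object from the string key."""
        return _REGISTRY[global_config['configkey']]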
diff --git a/ceilometer/api/app.wsgi b/ceilometer/api/app.wsgi
deleted file mode 100644
index 130a63aa..00000000
--- a/ceilometer/api/app.wsgi
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- mode: python -*-
-#
-# Copyright 2013 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Use this file for deploying the API under mod_wsgi.
-
-See http://pecan.readthedocs.org/en/latest/deployment.html for details.
-"""
-from ceilometer import service
-from ceilometer.api import app
-
-# Initialize the oslo configuration library and logging
-conf = service.prepare_service([])
-application = app.load_app(conf)
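
This file works under any WSGI container because all a container needs is a
module-level callable named ``application``. A minimal standard-library
illustration of that convention (the handler below is a placeholder, not
the real Ceilometer app):

    from wsgiref.simple_server import make_server

    def application(environ, start_response):
        # Stand-in for the object app.load_app(conf) would return.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'telemetry API placeholder']

    make_server('127.0.0.1', 8777, application).serve_forever()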
diff --git a/ceilometer/api/controllers/__init__.py b/ceilometer/api/controllers/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/api/controllers/__init__.py
+++ /dev/null
diff --git a/ceilometer/api/controllers/root.py b/ceilometer/api/controllers/root.py
deleted file mode 100644
index 90abbca8..00000000
--- a/ceilometer/api/controllers/root.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-
-from ceilometer.api.controllers.v2 import root as v2
-
-MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json'
-MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml'
-
-
-class RootController(object):
-
- def __init__(self):
- self.v2 = v2.V2Controller()
-
- @pecan.expose('json')
- def index(self):
- base_url = pecan.request.application_url
- available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }]
- collected = [version_descriptor(base_url, v['tag'], v['date'])
- for v in available]
- versions = {'versions': {'values': collected}}
- return versions
-
-
-def version_descriptor(base_url, version, released_on):
- url = version_url(base_url, version)
- return {
- 'id': version,
- 'links': [
- {'href': url, 'rel': 'self', },
- {'href': 'http://docs.openstack.org/',
- 'rel': 'describedby', 'type': 'text/html', }],
- 'media-types': [
- {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, },
- {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }],
- 'status': 'stable',
- 'updated': released_on,
- }
-
-
-def version_url(base_url, version_number):
- return '%s/%s' % (base_url, version_number)
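
Given the controller above, a GET on the API root would produce a version
document along these lines (the host is illustrative; the fields follow
``version_descriptor()`` directly):

    {
        "versions": {
            "values": [
                {
                    "id": "v2",
                    "links": [
                        {"href": "http://localhost:8777/v2", "rel": "self"},
                        {"href": "http://docs.openstack.org/",
                         "rel": "describedby", "type": "text/html"}
                    ],
                    "media-types": [
                        {"base": "application/json",
                         "type": "application/vnd.openstack.telemetry-v2+json"},
                        {"base": "application/xml",
                         "type": "application/vnd.openstack.telemetry-v2+xml"}
                    ],
                    "status": "stable",
                    "updated": "2013-02-13T00:00:00Z"
                }
            ]
        }
    }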
diff --git a/ceilometer/api/controllers/v2/__init__.py b/ceilometer/api/controllers/v2/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/api/controllers/v2/__init__.py
+++ /dev/null
diff --git a/ceilometer/api/controllers/v2/base.py b/ceilometer/api/controllers/v2/base.py
deleted file mode 100644
index ce2e0dac..00000000
--- a/ceilometer/api/controllers/v2/base.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import ast
-import datetime
-import functools
-import inspect
-import json
-
-from oslo_utils import strutils
-from oslo_utils import timeutils
-import pecan
-import six
-import wsme
-from wsme import types as wtypes
-
-from ceilometer.i18n import _
-
-
-operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt')
-operation_kind_enum = wtypes.Enum(str, *operation_kind)
-
-
-class ClientSideError(wsme.exc.ClientSideError):
- def __init__(self, error, status_code=400):
- pecan.response.translatable_error = error
- super(ClientSideError, self).__init__(error, status_code)
-
-
-class EntityNotFound(ClientSideError):
- def __init__(self, entity, id):
- super(EntityNotFound, self).__init__(
- _("%(entity)s %(id)s Not Found") % {'entity': entity,
- 'id': id},
- status_code=404)
-
-
-class ProjectNotAuthorized(ClientSideError):
- def __init__(self, id, aspect='project'):
- params = dict(aspect=aspect, id=id)
- super(ProjectNotAuthorized, self).__init__(
- _("Not Authorized to access %(aspect)s %(id)s") % params,
- status_code=401)
-
-
-class Base(wtypes.DynamicBase):
-
- @classmethod
- def from_db_model(cls, m):
- return cls(**(m.as_dict()))
-
- @classmethod
- def from_db_and_links(cls, m, links):
- return cls(links=links, **(m.as_dict()))
-
- def as_dict(self, db_model):
- valid_keys = inspect.getargspec(db_model.__init__)[0]
- if 'self' in valid_keys:
- valid_keys.remove('self')
- return self.as_dict_from_keys(valid_keys)
-
- def as_dict_from_keys(self, keys):
- return dict((k, getattr(self, k))
- for k in keys
- if hasattr(self, k) and
- getattr(self, k) != wsme.Unset)
-
-
-class Link(Base):
- """A link representation."""
-
- href = wtypes.text
- "The url of a link"
-
- rel = wtypes.text
- "The name of a link"
-
- @classmethod
- def sample(cls):
- return cls(href=('http://localhost:8777/v2/meters/volume?'
- 'q.field=resource_id&'
- 'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
- rel='volume'
- )
-
-
-class Query(Base):
- """Query filter."""
-
- # The data types supported by the query.
- _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime']
-
- # Functions to convert the data field to the correct type.
- _type_converters = {'integer': int,
- 'float': float,
- 'boolean': functools.partial(
- strutils.bool_from_string, strict=True),
- 'string': six.text_type,
- 'datetime': timeutils.parse_isotime}
-
- _op = None # provide a default
-
- def get_op(self):
- return self._op or 'eq'
-
- def set_op(self, value):
- self._op = value
-
- field = wsme.wsattr(wtypes.text, mandatory=True)
- "The name of the field to test"
-
- # op = wsme.wsattr(operation_kind, default='eq')
- # this ^ doesn't seem to work.
- op = wsme.wsproperty(operation_kind_enum, get_op, set_op)
- "The comparison operator. Defaults to 'eq'."
-
- value = wsme.wsattr(wtypes.text, mandatory=True)
- "The value to compare against the stored data"
-
- type = wtypes.text
- "The data type of value to compare against the stored data"
-
- def __repr__(self):
- # for logging calls
- return '<Query %r %s %r %s>' % (self.field,
- self.op,
- self.value,
- self.type)
-
- @classmethod
- def sample(cls):
- return cls(field='resource_id',
- op='eq',
- value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- type='string'
- )
-
- def as_dict(self):
- return self.as_dict_from_keys(['field', 'op', 'type', 'value'])
-
- def _get_value_as_type(self, forced_type=None):
- """Convert metadata value to the specified data type.
-
- This method is called during a metadata query to help convert the
- queried metadata to the data type specified by the user. If no data
- type is given, the metadata is parsed by ast.literal_eval to attempt
- a smart conversion.
-
- NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised
- from wsmeext/sphinxext.py. It's OK to call it outside the Query class,
- because the "public" side of that class is actually the outside of the
- API, while the "private" side is the API implementation; the method is
- only used in the API implementation, so it's OK.
-
- :returns: metadata value converted with the specified data type.
- """
- type = forced_type or self.type
- try:
- converted_value = self.value
- if not type:
- try:
- converted_value = ast.literal_eval(self.value)
- except (ValueError, SyntaxError):
- # Unable to convert the metadata value automatically
- # let it default to self.value
- pass
- else:
- if type not in self._supported_types:
- # Types must be explicitly declared so the
- # correct type converter may be used. Subclasses
- # of Query may define _supported_types and
- # _type_converters to define their own types.
- raise TypeError()
- converted_value = self._type_converters[type](self.value)
- if isinstance(converted_value, datetime.datetime):
- converted_value = timeutils.normalize_time(converted_value)
- except ValueError:
- msg = (_('Unable to convert the value %(value)s'
- ' to the expected data type %(type)s.') %
- {'value': self.value, 'type': type})
- raise ClientSideError(msg)
- except TypeError:
- msg = (_('The data type %(type)s is not supported. The supported'
- ' data type list is: %(supported)s') %
- {'type': type, 'supported': self._supported_types})
- raise ClientSideError(msg)
- except Exception:
- msg = (_('Unexpected exception converting %(value)s to'
- ' the expected data type %(type)s.') %
- {'value': self.value, 'type': type})
- raise ClientSideError(msg)
- return converted_value
-
-
-class JsonType(wtypes.UserType):
- """A simple JSON type."""
-
- basetype = wtypes.text
- name = 'json'
-
- @staticmethod
- def validate(value):
- # check that value can be serialised
- json.dumps(value)
- return value
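
The fallback branch of ``_get_value_as_type`` deserves a note: with no
explicit ``type``, the value is handed to ``ast.literal_eval`` for a
best-effort guess, and kept as a plain string when that fails. A
self-contained sketch of just that branch:

    import ast

    def smart_convert(value):
        # Mirrors the untyped branch: literal_eval, else keep the string.
        try:
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            return value

    print(smart_convert("42"))     # -> 42 (int)
    print(smart_convert("4.5"))    # -> 4.5 (float)
    print(smart_convert("True"))   # -> True (bool)
    print(smart_convert("vcpus"))  # -> 'vcpus' (left as a string)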
diff --git a/ceilometer/api/controllers/v2/capabilities.py b/ceilometer/api/controllers/v2/capabilities.py
deleted file mode 100644
index 0e2e4133..00000000
--- a/ceilometer/api/controllers/v2/capabilities.py
+++ /dev/null
@@ -1,90 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import pecan
-from pecan import rest
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer import utils
-
-
-def _flatten_capabilities(capabilities):
- return dict((k, v) for k, v in utils.recursive_keypairs(capabilities))
-
-
-class Capabilities(base.Base):
- """A representation of the API and storage capabilities.
-
- Usually constrained by restrictions imposed by the storage driver.
- """
-
- api = {wtypes.text: bool}
- "A flattened dictionary of API capabilities"
- storage = {wtypes.text: bool}
- "A flattened dictionary of storage capabilities"
-
- @classmethod
- def sample(cls):
- return cls(
- api=_flatten_capabilities({
- 'meters': {'query': {'simple': True,
- 'metadata': True}},
- 'resources': {'query': {'simple': True,
- 'metadata': True}},
- 'samples': {'query': {'simple': True,
- 'metadata': True,
- 'complex': True}},
- 'statistics': {'groupby': True,
- 'query': {'simple': True,
- 'metadata': True},
- 'aggregation': {'standard': True,
- 'selectable': {
- 'max': True,
- 'min': True,
- 'sum': True,
- 'avg': True,
- 'count': True,
- 'stddev': True,
- 'cardinality': True,
- 'quartile': False}}},
- }),
- storage=_flatten_capabilities(
- {'storage': {'production_ready': True}}),
- )
-
-
-class CapabilitiesController(rest.RestController):
- """Manages capabilities queries."""
-
- @wsme_pecan.wsexpose(Capabilities)
- def get(self):
- """Returns a flattened dictionary of API capabilities.
-
- Capabilities supported by the currently configured storage driver.
- """
- # variation in API capabilities is effectively determined by
- # the lack of strict feature parity across storage drivers
- conn = pecan.request.storage_conn
- driver_capabilities = conn.get_capabilities().copy()
- driver_perf = conn.get_storage_capabilities()
- return Capabilities(api=_flatten_capabilities(driver_capabilities),
- storage=_flatten_capabilities(driver_perf))
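
``_flatten_capabilities`` is what produces the colon-separated keys seen in
the capabilities responses earlier: ``utils.recursive_keypairs`` walks the
nested dict and yields joined key paths. The effect can be approximated
without the Ceilometer helper (``flatten`` below is a hypothetical
stand-in, not the real implementation):

    def flatten(d, prefix=''):
        # Rough equivalent of dict(utils.recursive_keypairs(d)).
        out = {}
        for key, value in d.items():
            path = prefix + ':' + key if prefix else key
            if isinstance(value, dict):
                out.update(flatten(value, path))
            else:
                out[path] = value
        return out

    caps = {'statistics': {'aggregation': {'selectable': {'max': True}}}}
    print(flatten(caps))
    # -> {'statistics:aggregation:selectable:max': True}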
diff --git a/ceilometer/api/controllers/v2/meters.py b/ceilometer/api/controllers/v2/meters.py
deleted file mode 100644
index 0a63a305..00000000
--- a/ceilometer/api/controllers/v2/meters.py
+++ /dev/null
@@ -1,505 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import base64
-import datetime
-
-from oslo_log import log
-from oslo_utils import strutils
-from oslo_utils import timeutils
-import pecan
-from pecan import rest
-import six
-import wsme
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer.api.controllers.v2 import utils as v2_utils
-from ceilometer.api import rbac
-from ceilometer.i18n import _
-from ceilometer.publisher import utils as publisher_utils
-from ceilometer import sample
-from ceilometer import storage
-from ceilometer.storage import base as storage_base
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-class OldSample(base.Base):
- """A single measurement for a given meter and resource.
-
- This class is deprecated in favor of Sample.
- """
-
- source = wtypes.text
- "The ID of the source that identifies where the sample comes from"
-
- counter_name = wsme.wsattr(wtypes.text, mandatory=True)
- "The name of the meter"
- # FIXME(dhellmann): Make this meter_name?
-
- counter_type = wsme.wsattr(wtypes.text, mandatory=True)
- "The type of the meter (see :ref:`measurements`)"
- # FIXME(dhellmann): Make this meter_type?
-
- counter_unit = wsme.wsattr(wtypes.text, mandatory=True)
- "The unit of measure for the value in counter_volume"
- # FIXME(dhellmann): Make this meter_unit?
-
- counter_volume = wsme.wsattr(float, mandatory=True)
- "The actual measured value"
-
- user_id = wtypes.text
- "The ID of the user who last triggered an update to the resource"
-
- project_id = wtypes.text
- "The ID of the project or tenant that owns the resource"
-
- resource_id = wsme.wsattr(wtypes.text, mandatory=True)
- "The ID of the :class:`Resource` for which the measurements are taken"
-
- timestamp = datetime.datetime
- "UTC date and time when the measurement was made"
-
- recorded_at = datetime.datetime
- "When the sample has been recorded."
-
- resource_metadata = {wtypes.text: wtypes.text}
- "Arbitrary metadata associated with the resource"
-
- message_id = wtypes.text
- "A unique identifier for the sample"
-
- def __init__(self, counter_volume=None, resource_metadata=None,
- timestamp=None, **kwds):
- resource_metadata = resource_metadata or {}
- if counter_volume is not None:
- counter_volume = float(counter_volume)
- resource_metadata = v2_utils.flatten_metadata(resource_metadata)
- # this is to make it easier for clients to pass a timestamp in
- if timestamp and isinstance(timestamp, six.string_types):
- timestamp = timeutils.parse_isotime(timestamp)
-
- super(OldSample, self).__init__(counter_volume=counter_volume,
- resource_metadata=resource_metadata,
- timestamp=timestamp, **kwds)
-
- if self.resource_metadata in (wtypes.Unset, None):
- self.resource_metadata = {}
-
- @classmethod
- def sample(cls):
- return cls(source='openstack',
- counter_name='instance',
- counter_type='gauge',
- counter_unit='instance',
- counter_volume=1,
- resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
- user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
- timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
- resource_metadata={'name1': 'value1',
- 'name2': 'value2'},
- message_id='5460acce-4fd6-480d-ab18-9735ec7b1996',
- )
-
-
-class Statistics(base.Base):
- """Computed statistics for a query."""
-
- groupby = {wtypes.text: wtypes.text}
- "Dictionary of field names used for grouping, if groupby statistics are requested"
-
- unit = wtypes.text
- "The unit type of the data set"
-
- min = float
- "The minimum volume seen in the data"
-
- max = float
- "The maximum volume seen in the data"
-
- avg = float
- "The average of all of the volume values seen in the data"
-
- sum = float
- "The total of all of the volume values seen in the data"
-
- count = int
- "The number of samples seen"
-
- aggregate = {wtypes.text: float}
- "The selectable aggregate value(s)"
-
- duration = float
- "The difference, in seconds, between the oldest and newest timestamp"
-
- duration_start = datetime.datetime
- "UTC date and time of the earliest timestamp, or the query start time"
-
- duration_end = datetime.datetime
- "UTC date and time of the newest timestamp, or the query end time"
-
- period = int
- "The difference, in seconds, between the period start and end"
-
- period_start = datetime.datetime
- "UTC date and time of the period start"
-
- period_end = datetime.datetime
- "UTC date and time of the period end"
-
- def __init__(self, start_timestamp=None, end_timestamp=None, **kwds):
- super(Statistics, self).__init__(**kwds)
- self._update_duration(start_timestamp, end_timestamp)
-
- def _update_duration(self, start_timestamp, end_timestamp):
- # "Clamp" the timestamps we return to the original time
- # range, excluding the offset.
- if (start_timestamp and
- self.duration_start and
- self.duration_start < start_timestamp):
- self.duration_start = start_timestamp
- LOG.debug('clamping min timestamp to range')
- if (end_timestamp and
- self.duration_end and
- self.duration_end > end_timestamp):
- self.duration_end = end_timestamp
- LOG.debug('clamping max timestamp to range')
-
- # If we got valid timestamps back, compute a duration in seconds.
- #
- # If the min > max after clamping then we know the
- # timestamps on the samples fell outside of the time
- # range we care about for the query, so treat them as
- # "invalid."
- #
- # If the timestamps are invalid, return None as a
- # sentinel indicating that there is something "funny"
- # about the range.
- if (self.duration_start and
- self.duration_end and
- self.duration_start <= self.duration_end):
- self.duration = timeutils.delta_seconds(self.duration_start,
- self.duration_end)
- else:
- self.duration_start = self.duration_end = self.duration = None
-
- @classmethod
- def sample(cls):
- return cls(unit='GiB',
- min=1,
- max=9,
- avg=4.5,
- sum=45,
- count=10,
- duration_start=datetime.datetime(2013, 1, 4, 16, 42),
- duration_end=datetime.datetime(2013, 1, 4, 16, 47),
- period=7200,
- period_start=datetime.datetime(2013, 1, 4, 16, 00),
- period_end=datetime.datetime(2013, 1, 4, 18, 00),
- )
-
-
-class Aggregate(base.Base):
-
- func = wsme.wsattr(wtypes.text, mandatory=True)
- "The aggregation function name"
-
- param = wsme.wsattr(wtypes.text, default=None)
- "The parameter to the aggregation function"
-
- def __init__(self, **kwargs):
- super(Aggregate, self).__init__(**kwargs)
-
- @staticmethod
- def validate(aggregate):
- valid_agg = (storage_base.Connection.CAPABILITIES.get('statistics', {})
- .get('aggregation', {}).get('selectable', {}).keys())
- if aggregate.func not in valid_agg:
- msg = _('Invalid aggregation function: %s') % aggregate.func
- raise base.ClientSideError(msg)
- return aggregate
-
- @classmethod
- def sample(cls):
- return cls(func='cardinality',
- param='resource_id')
-
-
-def _validate_groupby_fields(groupby_fields):
- """Checks that the list of groupby fields from request is valid.
-
- If all fields are valid, returns fields with duplicates removed.
- """
- # NOTE(terriyu): Currently, metadata fields are supported in our
- # group by statistics implementation only for mongodb
- valid_fields = set(['user_id', 'resource_id', 'project_id', 'source',
- 'resource_metadata.instance_type'])
-
- invalid_fields = set(groupby_fields) - valid_fields
- if invalid_fields:
- raise wsme.exc.UnknownArgument(invalid_fields,
- "Invalid groupby fields")
-
- # Remove duplicate fields
- # NOTE(terriyu): This assumes that we don't care about the order of the
- # group by fields.
- return list(set(groupby_fields))
-
-
-class MeterController(rest.RestController):
- """Manages operations on a single meter."""
- _custom_actions = {
- 'statistics': ['GET'],
- }
-
- def __init__(self, meter_name):
- pecan.request.context['meter_name'] = meter_name
- self.meter_name = meter_name
-
- @wsme_pecan.wsexpose([OldSample], [base.Query], int)
- def get_all(self, q=None, limit=None):
- """Return samples for the meter.
-
- :param q: Filter rules for the data to be returned.
- :param limit: Maximum number of samples to return.
- """
-
- rbac.enforce('get_samples', pecan.request)
-
- q = q or []
- limit = v2_utils.enforce_limit(limit)
- kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- kwargs['meter'] = self.meter_name
- f = storage.SampleFilter(**kwargs)
- return [OldSample.from_db_model(e)
- for e in pecan.request.storage_conn.get_samples(f, limit=limit)
- ]
-
- @wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201)
- def post(self, direct='', samples=None):
- """Post a list of new Samples to Telemetry.
-
- :param direct: a flag indicating whether the samples will be
- posted directly to storage.
- :param samples: a list of samples within the request body.
- """
- rbac.enforce('create_samples', pecan.request)
-
- direct = strutils.bool_from_string(direct)
- if not samples:
- msg = _('Samples should be included in request body')
- raise base.ClientSideError(msg)
-
- now = timeutils.utcnow()
- auth_project = rbac.get_limited_to_project(pecan.request.headers)
- def_source = pecan.request.cfg.sample_source
- def_project_id = pecan.request.headers.get('X-Project-Id')
- def_user_id = pecan.request.headers.get('X-User-Id')
-
- published_samples = []
- for s in samples:
- if self.meter_name != s.counter_name:
- raise wsme.exc.InvalidInput('counter_name', s.counter_name,
- 'should be %s' % self.meter_name)
-
- if s.message_id:
- raise wsme.exc.InvalidInput('message_id', s.message_id,
- 'The message_id must not be set')
-
- if s.counter_type not in sample.TYPES:
- raise wsme.exc.InvalidInput('counter_type', s.counter_type,
- 'The counter type must be: ' +
- ', '.join(sample.TYPES))
-
- s.user_id = (s.user_id or def_user_id)
- s.project_id = (s.project_id or def_project_id)
- s.source = '%s:%s' % (s.project_id, (s.source or def_source))
- s.timestamp = (s.timestamp or now)
-
- if auth_project and auth_project != s.project_id:
- # non admin user trying to cross post to another project_id
- auth_msg = 'can not post samples to other projects'
- raise wsme.exc.InvalidInput('project_id', s.project_id,
- auth_msg)
-
- published_sample = sample.Sample(
- name=s.counter_name,
- type=s.counter_type,
- unit=s.counter_unit,
- volume=s.counter_volume,
- user_id=s.user_id,
- project_id=s.project_id,
- resource_id=s.resource_id,
- timestamp=s.timestamp.isoformat(),
- resource_metadata=utils.restore_nesting(s.resource_metadata,
- separator='.'),
- source=s.source)
- s.message_id = published_sample.id
-
- sample_dict = publisher_utils.meter_message_from_counter(
- published_sample,
- pecan.request.cfg.publisher.telemetry_secret)
- if direct:
- ts = timeutils.parse_isotime(sample_dict['timestamp'])
- sample_dict['timestamp'] = timeutils.normalize_time(ts)
- pecan.request.storage_conn.record_metering_data(sample_dict)
- else:
- published_samples.append(sample_dict)
- if not direct:
- pecan.request.notifier.sample(
- {'user': def_user_id,
- 'tenant': def_project_id,
- 'is_admin': True},
- 'telemetry.api',
- {'samples': published_samples})
-
- return samples
-
- @wsme_pecan.wsexpose([Statistics],
- [base.Query], [six.text_type], int, [Aggregate])
- def statistics(self, q=None, groupby=None, period=None, aggregate=None):
- """Computes the statistics of the samples in the time range given.
-
- :param q: Filter rules for the data to be returned.
- :param groupby: Fields for group by aggregation
- :param period: Returned result will be an array of statistics,
- one for each period of that many seconds.
- :param aggregate: The selectable aggregation functions to be applied.
- """
-
- rbac.enforce('compute_statistics', pecan.request)
-
- q = q or []
- groupby = groupby or []
- aggregate = aggregate or []
-
- if period and period < 0:
- raise base.ClientSideError(_("Period must be positive."))
-
- kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- kwargs['meter'] = self.meter_name
- f = storage.SampleFilter(**kwargs)
- g = _validate_groupby_fields(groupby)
-
- aggregate = utils.uniq(aggregate, ['func', 'param'])
- # Find the original timestamp in the query to use for clamping
- # the duration returned in the statistics.
- start = end = None
- for i in q:
- if i.field == 'timestamp' and i.op in ('lt', 'le'):
- end = timeutils.parse_isotime(i.value).replace(
- tzinfo=None)
- elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
- start = timeutils.parse_isotime(i.value).replace(
- tzinfo=None)
-
- try:
- computed = pecan.request.storage_conn.get_meter_statistics(
- f, period, g, aggregate)
- return [Statistics(start_timestamp=start,
- end_timestamp=end,
- **c.as_dict())
- for c in computed]
- except OverflowError as e:
- params = dict(period=period, err=e)
- raise base.ClientSideError(
- _("Invalid period %(period)s: %(err)s") % params)
-
-
-class Meter(base.Base):
- """One category of measurements."""
-
- name = wtypes.text
- "The unique name for the meter"
-
- type = wtypes.Enum(str, *sample.TYPES)
- "The meter type (see :ref:`measurements`)"
-
- unit = wtypes.text
- "The unit of measure"
-
- resource_id = wtypes.text
- "The ID of the :class:`Resource` for which the measurements are taken"
-
- project_id = wtypes.text
- "The ID of the project or tenant that owns the resource"
-
- user_id = wtypes.text
- "The ID of the user who last triggered an update to the resource"
-
- source = wtypes.text
- "The ID of the source that identifies where the meter comes from"
-
- meter_id = wtypes.text
- "The unique identifier for the meter"
-
- def __init__(self, **kwargs):
- meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name'])
- # meter_id is of type Unicode, but base64.b64encode() only accepts
- # bytes, hence the explicit encode. See bug #1333177
- meter_id = base64.b64encode(meter_id.encode('utf-8'))
- kwargs['meter_id'] = meter_id
- super(Meter, self).__init__(**kwargs)
-
- @classmethod
- def sample(cls):
- return cls(name='instance',
- type='gauge',
- unit='instance',
- resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
- user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- source='openstack',
- )
-
-
-class MetersController(rest.RestController):
- """Works on meters."""
-
- @pecan.expose()
- def _lookup(self, meter_name, *remainder):
- return MeterController(meter_name), remainder
-
- @wsme_pecan.wsexpose([Meter], [base.Query], int, str)
- def get_all(self, q=None, limit=None, unique=''):
- """Return all known meters, based on the data recorded so far.
-
- :param q: Filter rules for the meters to be returned.
- :param limit: Maximum number of meters to return.
- :param unique: flag indicating whether to return only unique meters.
- """
-
- rbac.enforce('get_meters', pecan.request)
-
- q = q or []
-
- # Timestamp field is not supported for Meter queries
- limit = v2_utils.enforce_limit(limit)
- kwargs = v2_utils.query_to_kwargs(
- q, pecan.request.storage_conn.get_meters,
- ['limit'], allow_timestamps=False)
- return [Meter.from_db_model(m)
- for m in pecan.request.storage_conn.get_meters(
- limit=limit, unique=strutils.bool_from_string(unique),
- **kwargs)]
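The meter_id above is just base64 over "<resource_id>+<name>"; a quick round-trip using the IDs from Meter.sample():

    import base64

    raw = '%s+%s' % ('bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', 'instance')
    meter_id = base64.b64encode(raw.encode('utf-8'))
    # Decoding recovers the original "<resource_id>+<name>" pair.
    assert base64.b64decode(meter_id).decode('utf-8') == raw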
diff --git a/ceilometer/api/controllers/v2/query.py b/ceilometer/api/controllers/v2/query.py
deleted file mode 100644
index d85ba272..00000000
--- a/ceilometer/api/controllers/v2/query.py
+++ /dev/null
@@ -1,359 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import json
-
-import jsonschema
-from oslo_log import log
-from oslo_utils import timeutils
-import pecan
-from pecan import rest
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer.api.controllers.v2 import samples
-from ceilometer.api.controllers.v2 import utils as v2_utils
-from ceilometer.api import rbac
-from ceilometer.i18n import _
-from ceilometer import storage
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-class ComplexQuery(base.Base):
- """Holds a sample query encoded in json."""
-
- filter = wtypes.text
- "The filter expression encoded in json."
-
- orderby = wtypes.text
- "List of single-element dicts for specifying the ordering of the results."
-
- limit = int
- "The maximum number of results to be returned."
-
- @classmethod
- def sample(cls):
- return cls(filter='{"and": [{"and": [{"=": ' +
- '{"counter_name": "cpu_util"}}, ' +
- '{">": {"counter_volume": 0.23}}, ' +
- '{"<": {"counter_volume": 0.26}}]}, ' +
- '{"or": [{"and": [{">": ' +
- '{"timestamp": "2013-12-01T18:00:00"}}, ' +
- '{"<": ' +
- '{"timestamp": "2013-12-01T18:15:00"}}]}, ' +
- '{"and": [{">": ' +
- '{"timestamp": "2013-12-01T18:30:00"}}, ' +
- '{"<": ' +
- '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}',
- orderby='[{"counter_volume": "ASC"}, ' +
- '{"timestamp": "DESC"}]',
- limit=42
- )
-
-
-def _list_to_regexp(items, regexp_prefix=""):
- regexp = ["^%s$" % item for item in items]
- regexp = regexp_prefix + "|".join(regexp)
- return regexp
-
-
-class ValidatedComplexQuery(object):
- complex_operators = ["and", "or"]
- order_directions = ["asc", "desc"]
- simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"]
- regexp_prefix = "(?i)"
-
- complex_ops = _list_to_regexp(complex_operators, regexp_prefix)
- simple_ops = _list_to_regexp(simple_ops, regexp_prefix)
- order_directions = _list_to_regexp(order_directions, regexp_prefix)
-
- timestamp_fields = ["timestamp", "state_timestamp"]
-
- def __init__(self, query, db_model, additional_name_mapping=None,
- metadata_allowed=False):
- additional_name_mapping = additional_name_mapping or {}
- self.name_mapping = {"user": "user_id",
- "project": "project_id"}
- self.name_mapping.update(additional_name_mapping)
- valid_keys = db_model.get_field_names()
- valid_keys = list(valid_keys) + list(self.name_mapping.keys())
- valid_fields = _list_to_regexp(valid_keys)
-
- if metadata_allowed:
- valid_filter_fields = valid_fields + r"|^metadata\.[\S]+$"
- else:
- valid_filter_fields = valid_fields
-
- schema_value = {
- "oneOf": [{"type": "string"},
- {"type": "number"},
- {"type": "boolean"}],
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_value_in = {
- "type": "array",
- "items": {"oneOf": [{"type": "string"},
- {"type": "number"}]},
- "minItems": 1}
-
- schema_field = {
- "type": "object",
- "patternProperties": {valid_filter_fields: schema_value},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_field_in = {
- "type": "object",
- "patternProperties": {valid_filter_fields: schema_value_in},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_leaf_in = {
- "type": "object",
- "patternProperties": {"(?i)^in$": schema_field_in},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_leaf_simple_ops = {
- "type": "object",
- "patternProperties": {self.simple_ops: schema_field},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_and_or_array = {
- "type": "array",
- "items": {"$ref": "#"},
- "minItems": 2}
-
- schema_and_or = {
- "type": "object",
- "patternProperties": {self.complex_ops: schema_and_or_array},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- schema_not = {
- "type": "object",
- "patternProperties": {"(?i)^not$": {"$ref": "#"}},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}
-
- self.schema = {
- "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"},
- {"$ref": "#/definitions/leaf_in"},
- {"$ref": "#/definitions/and_or"},
- {"$ref": "#/definitions/not"}],
- "minProperties": 1,
- "maxProperties": 1,
- "definitions": {"leaf_simple_ops": schema_leaf_simple_ops,
- "leaf_in": schema_leaf_in,
- "and_or": schema_and_or,
- "not": schema_not}}
-
- self.orderby_schema = {
- "type": "array",
- "items": {
- "type": "object",
- "patternProperties":
- {valid_fields:
- {"type": "string",
- "pattern": self.order_directions}},
- "additionalProperties": False,
- "minProperties": 1,
- "maxProperties": 1}}
-
- self.original_query = query
-
- def validate(self, visibility_field):
- """Validates the query content and does the necessary conversions."""
- if self.original_query.filter is wtypes.Unset:
- self.filter_expr = None
- else:
- try:
- self.filter_expr = json.loads(self.original_query.filter)
- self._validate_filter(self.filter_expr)
- except (ValueError, jsonschema.exceptions.ValidationError) as e:
- raise base.ClientSideError(
- _("Filter expression not valid: %s") % e)
- self._replace_isotime_with_datetime(self.filter_expr)
- self._convert_operator_to_lower_case(self.filter_expr)
- self._normalize_field_names_for_db_model(self.filter_expr)
-
- self._force_visibility(visibility_field)
-
- if self.original_query.orderby is wtypes.Unset:
- self.orderby = None
- else:
- try:
- self.orderby = json.loads(self.original_query.orderby)
- self._validate_orderby(self.orderby)
- except (ValueError, jsonschema.exceptions.ValidationError) as e:
- raise base.ClientSideError(
- _("Order-by expression not valid: %s") % e)
- self._convert_orderby_to_lower_case(self.orderby)
- self._normalize_field_names_in_orderby(self.orderby)
-
- self.limit = (None if self.original_query.limit is wtypes.Unset
- else self.original_query.limit)
-
- self.limit = v2_utils.enforce_limit(self.limit)
-
- @staticmethod
- def _convert_orderby_to_lower_case(orderby):
- for orderby_field in orderby:
- utils.lowercase_values(orderby_field)
-
- def _normalize_field_names_in_orderby(self, orderby):
- for orderby_field in orderby:
- self._replace_field_names(orderby_field)
-
- def _traverse_postorder(self, tree, visitor):
- op = list(tree.keys())[0]
- if op.lower() in self.complex_operators:
- for i, operand in enumerate(tree[op]):
- self._traverse_postorder(operand, visitor)
- if op.lower() == "not":
- self._traverse_postorder(tree[op], visitor)
-
- visitor(tree)
-
- def _check_cross_project_references(self, own_project_id,
- visibility_field):
- """Do not allow other than own_project_id."""
- def check_project_id(subfilter):
- op, value = list(subfilter.items())[0]
- if (op.lower() not in self.complex_operators
- and list(value.keys())[0] == visibility_field
- and value[visibility_field] != own_project_id):
- raise base.ProjectNotAuthorized(value[visibility_field])
-
- self._traverse_postorder(self.filter_expr, check_project_id)
-
- def _force_visibility(self, visibility_field):
- """Force visibility field.
-
- If the tenant is not admin insert an extra
- "and <visibility_field>=<tenant's project_id>" clause to the query.
- """
- authorized_project = rbac.get_limited_to_project(pecan.request.headers)
- is_admin = authorized_project is None
- if not is_admin:
- self._restrict_to_project(authorized_project, visibility_field)
- self._check_cross_project_references(authorized_project,
- visibility_field)
-
- def _restrict_to_project(self, project_id, visibility_field):
- restriction = {"=": {visibility_field: project_id}}
- if self.filter_expr is None:
- self.filter_expr = restriction
- else:
- self.filter_expr = {"and": [restriction, self.filter_expr]}
-
- def _replace_isotime_with_datetime(self, filter_expr):
- def replace_isotime(subfilter):
- op, value = list(subfilter.items())[0]
- if op.lower() not in self.complex_operators:
- field = list(value.keys())[0]
- if field in self.timestamp_fields:
- date_time = self._convert_to_datetime(subfilter[op][field])
- subfilter[op][field] = date_time
-
- self._traverse_postorder(filter_expr, replace_isotime)
-
- def _normalize_field_names_for_db_model(self, filter_expr):
- def _normalize_field_names(subfilter):
- op, value = list(subfilter.items())[0]
- if op.lower() not in self.complex_operators:
- self._replace_field_names(value)
- self._traverse_postorder(filter_expr,
- _normalize_field_names)
-
- def _replace_field_names(self, subfilter):
- field, value = list(subfilter.items())[0]
- if field in self.name_mapping:
- del subfilter[field]
- subfilter[self.name_mapping[field]] = value
- if field.startswith("metadata."):
- del subfilter[field]
- subfilter["resource_" + field] = value
-
- def _convert_operator_to_lower_case(self, filter_expr):
- self._traverse_postorder(filter_expr, utils.lowercase_keys)
-
- @staticmethod
- def _convert_to_datetime(isotime):
- try:
- date_time = timeutils.parse_isotime(isotime)
- date_time = date_time.replace(tzinfo=None)
- return date_time
- except ValueError:
- LOG.exception("String %s is not a valid isotime" % isotime)
- msg = _('Failed to parse the timestamp value %s') % isotime
- raise base.ClientSideError(msg)
-
- def _validate_filter(self, filter_expr):
- jsonschema.validate(filter_expr, self.schema)
-
- def _validate_orderby(self, orderby_expr):
- jsonschema.validate(orderby_expr, self.orderby_schema)
-
-
-class QuerySamplesController(rest.RestController):
- """Provides complex query possibilities for samples."""
-
- @wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery)
- def post(self, body):
- """Define query for retrieving Sample data.
-
- :param body: Query rules for the samples to be returned.
- """
-
- rbac.enforce('query_sample', pecan.request)
-
- sample_name_mapping = {"resource": "resource_id",
- "meter": "counter_name",
- "type": "counter_type",
- "unit": "counter_unit",
- "volume": "counter_volume"}
-
- query = ValidatedComplexQuery(body,
- storage.models.Sample,
- sample_name_mapping,
- metadata_allowed=True)
- query.validate(visibility_field="project_id")
- conn = pecan.request.storage_conn
- return [samples.Sample.from_db_model(s)
- for s in conn.query_samples(query.filter_expr,
- query.orderby,
- query.limit)]
-
-
-class QueryController(rest.RestController):
-
- samples = QuerySamplesController()
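A hedged sketch of posting a complex query like the ComplexQuery.sample() above; the endpoint and token are illustrative. Note that filter and orderby are JSON-encoded strings, matching the wtypes.text fields:

    import json
    import requests

    body = {
        'filter': json.dumps({'and': [
            {'=': {'counter_name': 'cpu_util'}},
            {'>': {'counter_volume': 0.23}}]}),
        'orderby': json.dumps([{'timestamp': 'desc'}]),
        'limit': 42,
    }
    resp = requests.post('http://localhost:8777/v2/query/samples',
                         headers={'X-Auth-Token': 'TOKEN'},
                         json=body)
    print(resp.status_code, len(resp.json()))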
diff --git a/ceilometer/api/controllers/v2/resources.py b/ceilometer/api/controllers/v2/resources.py
deleted file mode 100644
index e60d7d40..00000000
--- a/ceilometer/api/controllers/v2/resources.py
+++ /dev/null
@@ -1,158 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-from six.moves import urllib
-
-import pecan
-from pecan import rest
-import six
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer.api.controllers.v2 import utils
-from ceilometer.api import rbac
-from ceilometer.i18n import _
-
-
-class Resource(base.Base):
- """An externally defined object for which samples have been received."""
-
- resource_id = wtypes.text
- "The unique identifier for the resource"
-
- project_id = wtypes.text
- "The ID of the owning project or tenant"
-
- user_id = wtypes.text
- "The ID of the user who created the resource or updated it last"
-
- first_sample_timestamp = datetime.datetime
- "UTC date & time not later than the first sample known for this resource"
-
- last_sample_timestamp = datetime.datetime
- "UTC date & time not earlier than the last sample known for this resource"
-
- metadata = {wtypes.text: wtypes.text}
- "Arbitrary metadata associated with the resource"
-
- links = [base.Link]
- "A list containing a self link and associated meter links"
-
- source = wtypes.text
- "The source where the resource come from"
-
- def __init__(self, metadata=None, **kwds):
- metadata = metadata or {}
- metadata = utils.flatten_metadata(metadata)
- super(Resource, self).__init__(metadata=metadata, **kwds)
-
- @classmethod
- def sample(cls):
- return cls(
- resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
- user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
- source="openstack",
- metadata={'name1': 'value1',
- 'name2': 'value2'},
- links=[
- base.Link(href=('http://localhost:8777/v2/resources/'
- 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
- rel='self'),
- base.Link(href=('http://localhost:8777/v2/meters/volume?'
- 'q.field=resource_id&q.value='
- 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
- rel='volume')
- ],
- )
-
-
-class ResourcesController(rest.RestController):
- """Works on resources."""
-
- @staticmethod
- def _make_link(rel_name, url, type, type_arg, query=None):
- query_str = ''
- if query:
- query_str = '?q.field=%s&q.value=%s' % (query['field'],
- query['value'])
- return base.Link(href='%s/v2/%s/%s%s' % (url, type,
- type_arg, query_str),
- rel=rel_name)
-
- def _resource_links(self, resource_id, meter_links=1):
- links = [self._make_link('self', pecan.request.application_url,
- 'resources', resource_id)]
- if meter_links:
- for meter in pecan.request.storage_conn.get_meters(
- resource=resource_id):
- query = {'field': 'resource_id', 'value': resource_id}
- links.append(self._make_link(meter.name,
- pecan.request.application_url,
- 'meters', meter.name,
- query=query))
- return links
-
- @wsme_pecan.wsexpose(Resource, six.text_type)
- def get_one(self, resource_id):
- """Retrieve details about one resource.
-
- :param resource_id: The UUID of the resource.
- """
-
- rbac.enforce('get_resource', pecan.request)
- # In case we have special character in resource id, for example, swift
- # can generate samples with resource id like
- # 29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance
- resource_id = urllib.parse.unquote(resource_id)
-
- authorized_project = rbac.get_limited_to_project(pecan.request.headers)
- resources = list(pecan.request.storage_conn.get_resources(
- resource=resource_id, project=authorized_project))
- if not resources:
- raise base.EntityNotFound(_('Resource'), resource_id)
- return Resource.from_db_and_links(resources[0],
- self._resource_links(resource_id))
-
- @wsme_pecan.wsexpose([Resource], [base.Query], int, int)
- def get_all(self, q=None, limit=None, meter_links=1):
- """Retrieve definitions of all of the resources.
-
- :param q: Filter rules for the resources to be returned.
- :param limit: Maximum number of resources to return.
- :param meter_links: flag indicating whether to include related meter links.
- """
-
- rbac.enforce('get_resources', pecan.request)
-
- q = q or []
- limit = utils.enforce_limit(limit)
- kwargs = utils.query_to_kwargs(
- q, pecan.request.storage_conn.get_resources, ['limit'])
- resources = [
- Resource.from_db_and_links(r,
- self._resource_links(r.resource_id,
- meter_links))
- for r in pecan.request.storage_conn.get_resources(limit=limit,
- **kwargs)]
- return resources
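A client-side sketch of listing resources with this controller; passing meter_links=0 skips the per-resource get_meters() lookup in _resource_links(). Host, token, and project id are illustrative:

    import requests

    resp = requests.get(
        'http://localhost:8777/v2/resources',
        headers={'X-Auth-Token': 'TOKEN'},
        params={'q.field': 'project_id', 'q.op': 'eq',
                'q.value': '35b17138-b364-4e6a-a131-8f3099c5be68',
                'meter_links': 0})
    for r in resp.json():
        print(r['resource_id'], r['last_sample_timestamp'])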
diff --git a/ceilometer/api/controllers/v2/root.py b/ceilometer/api/controllers/v2/root.py
deleted file mode 100644
index 973c9d0e..00000000
--- a/ceilometer/api/controllers/v2/root.py
+++ /dev/null
@@ -1,218 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from keystoneauth1 import exceptions
-from oslo_config import cfg
-from oslo_log import log
-from oslo_utils import strutils
-import pecan
-
-from ceilometer.api.controllers.v2 import capabilities
-from ceilometer.api.controllers.v2 import meters
-from ceilometer.api.controllers.v2 import query
-from ceilometer.api.controllers.v2 import resources
-from ceilometer.api.controllers.v2 import samples
-from ceilometer.i18n import _
-from ceilometer import keystone_client
-
-
-API_OPTS = [
- cfg.BoolOpt('gnocchi_is_enabled',
- help=('Set True to disable resource/meter/sample URLs. '
- 'Default autodetection by querying keystone.')),
- cfg.BoolOpt('aodh_is_enabled',
- help=('Set True to redirect alarms URLs to aodh. '
- 'Default autodetection by querying keystone.')),
- cfg.StrOpt('aodh_url',
- help=('The endpoint of Aodh to redirect alarms URLs '
- 'to Aodh API. Default autodetection by querying '
- 'keystone.')),
- cfg.BoolOpt('panko_is_enabled',
- help=('Set True to redirect events URLs to Panko. '
- 'Default autodetection by querying keystone.')),
- cfg.StrOpt('panko_url',
- help=('The endpoint of Panko to redirect events URLs '
- 'to Panko API. Default autodetection by querying '
- 'keystone.')),
-]
-
-LOG = log.getLogger(__name__)
-
-
-def gnocchi_abort():
- pecan.abort(410, ("This telemetry installation is configured to use "
- "Gnocchi. Please use the Gnocchi API available on "
- "the metric endpoint to retrieve data."))
-
-
-def aodh_abort():
- pecan.abort(410, _("alarms URLs is unavailable when Aodh is "
- "disabled or unavailable."))
-
-
-def _redirect(url):
- pecan.redirect(location=url + pecan.request.path_qs,
- code=308)
-
-
-class QueryController(object):
- def __init__(self, gnocchi_is_enabled=False,
- aodh_url=None):
- self.gnocchi_is_enabled = gnocchi_is_enabled
- self.aodh_url = aodh_url
-
- @pecan.expose()
- def _lookup(self, kind, *remainder):
- if kind == 'alarms' and self.aodh_url:
- _redirect(self.aodh_url)
- elif kind == 'alarms':
- aodh_abort()
- elif kind == 'samples' and self.gnocchi_is_enabled:
- gnocchi_abort()
- elif kind == 'samples':
- return query.QuerySamplesController(), remainder
- else:
- pecan.abort(404)
-
-
-class V2Controller(object):
- """Version 2 API controller root."""
-
- capabilities = capabilities.CapabilitiesController()
-
- def __init__(self):
- self._gnocchi_is_enabled = None
- self._aodh_is_enabled = None
- self._aodh_url = None
- self._panko_is_enabled = None
- self._panko_url = None
-
- @property
- def gnocchi_is_enabled(self):
- if self._gnocchi_is_enabled is None:
- if pecan.request.cfg.api.gnocchi_is_enabled is not None:
- self._gnocchi_is_enabled = (
- pecan.request.cfg.api.gnocchi_is_enabled)
- else:
- try:
- catalog = keystone_client.get_service_catalog(
- keystone_client.get_client(pecan.request.cfg))
- catalog.url_for(service_type='metric')
- except exceptions.EndpointNotFound:
- self._gnocchi_is_enabled = False
- except exceptions.ClientException:
- LOG.warning("Can't connect to keystone, assuming "
- "gnocchi is disabled and retry later")
- else:
- self._gnocchi_is_enabled = True
- LOG.warning("ceilometer-api started with gnocchi "
- "enabled. The resources/meters/samples "
- "URLs are disabled.")
- return self._gnocchi_is_enabled
-
- @property
- def aodh_url(self):
- if self._aodh_url is None:
- if pecan.request.cfg.api.aodh_is_enabled is False:
- self._aodh_url = ""
- elif pecan.request.cfg.api.aodh_url is not None:
- self._aodh_url = self._normalize_url(
- pecan.request.cfg.api.aodh_url)
- else:
- try:
- catalog = keystone_client.get_service_catalog(
- keystone_client.get_client(pecan.request.cfg))
- self._aodh_url = self._normalize_url(
- catalog.url_for(service_type='alarming'))
- except exceptions.EndpointNotFound:
- self._aodh_url = ""
- except exceptions.ClientException:
- LOG.warning("Can't connect to keystone, assuming aodh "
- "is disabled and retry later.")
- else:
- LOG.warning("ceilometer-api started with aodh "
- "enabled. Alarms URLs will be redirected "
- "to aodh endpoint.")
- return self._aodh_url
-
- @property
- def panko_url(self):
- if self._panko_url is None:
- if pecan.request.cfg.api.panko_is_enabled is False:
- self._panko_url = ""
- elif pecan.request.cfg.api.panko_url is not None:
- self._panko_url = self._normalize_url(
- pecan.request.cfg.api.panko_url)
- else:
- try:
- catalog = keystone_client.get_service_catalog(
- keystone_client.get_client(pecan.request.cfg))
- self._panko_url = self._normalize_url(
- catalog.url_for(service_type='event'))
- except exceptions.EndpointNotFound:
- self._panko_url = ""
- except exceptions.ClientException:
- LOG.warning(
- "Can't connect to keystone, assuming Panko "
- "is disabled and retry later.")
- else:
- LOG.warning("ceilometer-api started with Panko "
- "enabled. Events URLs will be redirected "
- "to Panko endpoint.")
- return self._panko_url
-
- @pecan.expose()
- def _lookup(self, kind, *remainder):
- if (kind in ['meters', 'resources', 'samples']
- and self.gnocchi_is_enabled):
- if kind == 'meters' and pecan.request.method == 'POST':
- direct = pecan.request.params.get('direct', '')
- if strutils.bool_from_string(direct):
- pecan.abort(400, _('direct option cannot be true when '
- 'Gnocchi is enabled.'))
- return meters.MetersController(), remainder
- gnocchi_abort()
- elif kind == 'meters':
- return meters.MetersController(), remainder
- elif kind == 'resources':
- return resources.ResourcesController(), remainder
- elif kind == 'samples':
- return samples.SamplesController(), remainder
- elif kind == 'query':
- return QueryController(
- gnocchi_is_enabled=self.gnocchi_is_enabled,
- aodh_url=self.aodh_url,
- ), remainder
- elif kind == 'alarms' and (not self.aodh_url):
- aodh_abort()
- elif kind == 'alarms' and self.aodh_url:
- _redirect(self.aodh_url)
- elif kind == 'events' and self.panko_url:
- return _redirect(self.panko_url)
- elif kind == 'event_types' and self.panko_url:
- return _redirect(self.panko_url)
- else:
- pecan.abort(404)
-
- @staticmethod
- def _normalize_url(url):
- if url.endswith("/"):
- return url[:-1]
- return url
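Because _redirect() reuses pecan.request.path_qs, a client that does not auto-follow the 308 can see exactly where the alarms URLs go; a minimal sketch, assuming Aodh was detected (hosts are hypothetical):

    import requests

    resp = requests.get('http://localhost:8777/v2/alarms',
                        headers={'X-Auth-Token': 'TOKEN'},
                        allow_redirects=False)
    assert resp.status_code == 308
    # Location is aodh_url plus the original path and query string,
    # e.g. http://aodh-host:8042/v2/alarms
    print(resp.headers['Location'])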
diff --git a/ceilometer/api/controllers/v2/samples.py b/ceilometer/api/controllers/v2/samples.py
deleted file mode 100644
index 05ded82f..00000000
--- a/ceilometer/api/controllers/v2/samples.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import datetime
-import uuid
-
-import pecan
-from pecan import rest
-from wsme import types as wtypes
-import wsmeext.pecan as wsme_pecan
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer.api.controllers.v2 import utils
-from ceilometer.api import rbac
-from ceilometer.i18n import _
-from ceilometer import sample
-from ceilometer import storage
-
-
-class Sample(base.Base):
- """One measurement."""
-
- id = wtypes.text
- "The unique identifier for the sample."
-
- meter = wtypes.text
- "The meter name this sample is for."
-
- type = wtypes.Enum(str, *sample.TYPES)
- "The meter type (see :ref:`meter_types`)"
-
- unit = wtypes.text
- "The unit of measure."
-
- volume = float
- "The metered value."
-
- user_id = wtypes.text
- "The user this sample was taken for."
-
- project_id = wtypes.text
- "The project this sample was taken for."
-
- resource_id = wtypes.text
- "The :class:`Resource` this sample was taken for."
-
- source = wtypes.text
- "The source that identifies where the sample comes from."
-
- timestamp = datetime.datetime
- "When the sample has been generated."
-
- recorded_at = datetime.datetime
- "When the sample has been recorded."
-
- metadata = {wtypes.text: wtypes.text}
- "Arbitrary metadata associated with the sample."
-
- @classmethod
- def from_db_model(cls, m):
- return cls(id=m.message_id,
- meter=m.counter_name,
- type=m.counter_type,
- unit=m.counter_unit,
- volume=m.counter_volume,
- user_id=m.user_id,
- project_id=m.project_id,
- resource_id=m.resource_id,
- source=m.source,
- timestamp=m.timestamp,
- recorded_at=m.recorded_at,
- metadata=utils.flatten_metadata(m.resource_metadata))
-
- @classmethod
- def sample(cls):
- return cls(id=str(uuid.uuid1()),
- meter='instance',
- type='gauge',
- unit='instance',
- volume=1,
- resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
- user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- timestamp=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
- recorded_at=datetime.datetime(2015, 1, 1, 12, 0, 0, 0),
- source='openstack',
- metadata={'name1': 'value1',
- 'name2': 'value2'},
- )
-
-
-class SamplesController(rest.RestController):
- """Controller managing the samples."""
-
- @wsme_pecan.wsexpose([Sample], [base.Query], int)
- def get_all(self, q=None, limit=None):
- """Return all known samples, based on the data recorded so far.
-
- :param q: Filter rules for the samples to be returned.
- :param limit: Maximum number of samples to be returned.
- """
-
- rbac.enforce('get_samples', pecan.request)
-
- q = q or []
-
- limit = utils.enforce_limit(limit)
- kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- f = storage.SampleFilter(**kwargs)
- return map(Sample.from_db_model,
- pecan.request.storage_conn.get_samples(f, limit=limit))
-
- @wsme_pecan.wsexpose(Sample, wtypes.text)
- def get_one(self, sample_id):
- """Return a sample.
-
- :param sample_id: the id of the sample.
- """
-
- rbac.enforce('get_sample', pecan.request)
-
- f = storage.SampleFilter(message_id=sample_id)
-
- samples = list(pecan.request.storage_conn.get_samples(f))
- if len(samples) < 1:
- raise base.EntityNotFound(_('Sample'), sample_id)
-
- return Sample.from_db_model(samples[0])
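A minimal client-side sketch of get_one() above; the sample id, host, and token are hypothetical:

    import requests

    sample_id = '9b23b398-030a-11e6-a9fa-e45f010c0b5c'  # hypothetical
    resp = requests.get('http://localhost:8777/v2/samples/' + sample_id,
                        headers={'X-Auth-Token': 'TOKEN'})
    if resp.ok:
        s = resp.json()
        print(s['meter'], s['volume'], s['timestamp'])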
diff --git a/ceilometer/api/controllers/v2/utils.py b/ceilometer/api/controllers/v2/utils.py
deleted file mode 100644
index 90a611ea..00000000
--- a/ceilometer/api/controllers/v2/utils.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2013 IBM Corp.
-# Copyright 2013 eNovance <licensing@enovance.com>
-# Copyright Ericsson AB 2013. All rights reserved
-# Copyright 2014 Hewlett-Packard Company
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import datetime
-import inspect
-
-from oslo_log import log
-from oslo_utils import timeutils
-import pecan
-import six
-import wsme
-
-from ceilometer.api.controllers.v2 import base
-from ceilometer.api import rbac
-from ceilometer.i18n import _
-from ceilometer import utils
-
-LOG = log.getLogger(__name__)
-
-
-def enforce_limit(limit):
- """Ensure limit is defined and is valid. if not, set a default."""
- if limit is None:
- limit = pecan.request.cfg.api.default_api_return_limit
- LOG.info('No limit value provided, result set will be'
- ' limited to %(limit)d.', {'limit': limit})
- if not limit or limit <= 0:
- raise base.ClientSideError(_("Limit must be positive"))
- return limit
-
-
-def get_auth_project(on_behalf_of=None):
- auth_project = rbac.get_limited_to_project(pecan.request.headers)
- created_by = pecan.request.headers.get('X-Project-Id')
- is_admin = auth_project is None
-
- if is_admin and on_behalf_of != created_by:
- auth_project = on_behalf_of
- return auth_project
-
-
-def sanitize_query(query, db_func, on_behalf_of=None):
- """Check the query.
-
- See if:
- 1) the request is coming from admin - then allow full visibility
- 2) non-admin - make sure that the query includes the requester's project.
- """
- q = copy.copy(query)
-
- auth_project = get_auth_project(on_behalf_of)
- if auth_project:
- _verify_query_segregation(q, auth_project)
-
- proj_q = [i for i in q if i.field == 'project_id']
- valid_keys = inspect.getargspec(db_func)[0]
- if not proj_q and 'on_behalf_of' not in valid_keys:
- # The user is restricted, but they didn't specify a project
- # so add it for them.
- q.append(base.Query(field='project_id',
- op='eq',
- value=auth_project))
- return q
-
-
-def _verify_query_segregation(query, auth_project=None):
- """Ensure non-admin queries are not constrained to another project."""
- auth_project = (auth_project or
- rbac.get_limited_to_project(pecan.request.headers))
-
- if not auth_project:
- return
-
- for q in query:
- if q.field in ('project', 'project_id') and auth_project != q.value:
- raise base.ProjectNotAuthorized(q.value)
-
-
-def validate_query(query, db_func, internal_keys=None,
- allow_timestamps=True):
- """Validates the syntax of the query and verifies the query.
-
- Verification checks whether the query request is authorized for the
- included project.
- :param query: Query expression that should be validated
- :param db_func: the function on the storage level, of which arguments
- will form the valid_keys list, which defines the valid fields for a
- query expression
- :param internal_keys: internally used field names, that should not be
- used for querying
- :param allow_timestamps: defines whether the timestamp-based constraint is
- applicable for this query or not
-
- :raises InvalidInput: if an operator is not supported for a given field
- :raises InvalidInput: if timestamp constraints are allowed, but
- search_offset was included without timestamp constraint
- :raises UnknownArgument: if a field name is not a timestamp field, nor
- in the list of valid keys
- """
-
- internal_keys = internal_keys or []
- _verify_query_segregation(query)
-
- valid_keys = inspect.getargspec(db_func)[0]
-
- internal_timestamp_keys = ['end_timestamp', 'start_timestamp',
- 'end_timestamp_op', 'start_timestamp_op']
- if 'start_timestamp' in valid_keys:
- internal_keys += internal_timestamp_keys
- valid_keys += ['timestamp', 'search_offset']
- internal_keys.append('self')
- internal_keys.append('metaquery')
- valid_keys = set(valid_keys) - set(internal_keys)
- translation = {'user_id': 'user',
- 'project_id': 'project',
- 'resource_id': 'resource'}
-
- has_timestamp_query = _validate_timestamp_fields(query,
- 'timestamp',
- ('lt', 'le', 'gt', 'ge'),
- allow_timestamps)
- has_search_offset_query = _validate_timestamp_fields(query,
- 'search_offset',
- 'eq',
- allow_timestamps)
-
- if has_search_offset_query and not has_timestamp_query:
- raise wsme.exc.InvalidInput('field', 'search_offset',
- "search_offset cannot be used without " +
- "timestamp")
-
- def _is_field_metadata(field):
- return (field.startswith('metadata.') or
- field.startswith('resource_metadata.'))
-
- for i in query:
- if i.field not in ('timestamp', 'search_offset'):
- key = translation.get(i.field, i.field)
- operator = i.op
- if key in valid_keys or _is_field_metadata(i.field):
- if operator == 'eq':
- if key == 'enabled':
- i._get_value_as_type('boolean')
- elif _is_field_metadata(key):
- i._get_value_as_type()
- else:
- raise wsme.exc.InvalidInput('op', i.op,
- 'unimplemented operator for '
- '%s' % i.field)
- else:
- msg = ("unrecognized field in query: %s, "
- "valid keys: %s") % (query, sorted(valid_keys))
- raise wsme.exc.UnknownArgument(key, msg)
-
-
-def _validate_timestamp_fields(query, field_name, operator_list,
- allow_timestamps):
- """Validates the timestamp related constraints in a query if there are any.
-
- :param query: query expression that may contain the timestamp fields
- :param field_name: timestamp name, which should be checked (timestamp,
- search_offset)
- :param operator_list: list of operators that are supported for that
- timestamp, which was specified in the parameter field_name
- :param allow_timestamps: defines whether the timestamp-based constraint is
- applicable to this query or not
-
- :returns: True, if there was a timestamp constraint, containing
- a timestamp field named as defined in field_name, in the query and it
- was allowed and syntactically correct.
- :returns: False, if there was no timestamp constraint containing a
- timestamp field named as defined in field_name, in the query
-
- :raises InvalidInput: if an operator is unsupported for a given timestamp
- field
- :raises UnknownArgument: if the timestamp constraint is not allowed in
- the query
- """
-
- for item in query:
- if item.field == field_name:
- # If *timestamp* or *search_offset* field was specified in the
- # query, but timestamp is not supported on that resource, on
- # which the query was invoked, then raise an exception.
- if not allow_timestamps:
- raise wsme.exc.UnknownArgument(field_name,
- "not valid for " +
- "this resource")
- if item.op not in operator_list:
- raise wsme.exc.InvalidInput('op', item.op,
- 'unimplemented operator for %s' %
- item.field)
- return True
- return False
-
-
-def query_to_kwargs(query, db_func, internal_keys=None,
- allow_timestamps=True):
- validate_query(query, db_func, internal_keys=internal_keys,
- allow_timestamps=allow_timestamps)
- query = sanitize_query(query, db_func)
- translation = {'user_id': 'user',
- 'project_id': 'project',
- 'resource_id': 'resource'}
- stamp = {}
- metaquery = {}
- kwargs = {}
- for i in query:
- if i.field == 'timestamp':
- if i.op in ('lt', 'le'):
- stamp['end_timestamp'] = i.value
- stamp['end_timestamp_op'] = i.op
- elif i.op in ('gt', 'ge'):
- stamp['start_timestamp'] = i.value
- stamp['start_timestamp_op'] = i.op
- else:
- if i.op == 'eq':
- if i.field == 'search_offset':
- stamp['search_offset'] = i.value
- elif i.field == 'enabled':
- kwargs[i.field] = i._get_value_as_type('boolean')
- elif i.field.startswith('metadata.'):
- metaquery[i.field] = i._get_value_as_type()
- elif i.field.startswith('resource_metadata.'):
- metaquery[i.field[9:]] = i._get_value_as_type()
- else:
- key = translation.get(i.field, i.field)
- kwargs[key] = i.value
-
- if metaquery and 'metaquery' in inspect.getargspec(db_func)[0]:
- kwargs['metaquery'] = metaquery
- if stamp:
- kwargs.update(_get_query_timestamps(stamp))
- return kwargs
-
-
-def _get_query_timestamps(args=None):
- """Return any optional timestamp information in the request.
-
- Determine the desired range, if any, from the GET arguments. Set
- up the query range using the specified offset.
-
- [query_start ... start_timestamp ... end_timestamp ... query_end]
-
- Returns a dictionary containing:
-
- start_timestamp: First timestamp to use for query
- start_timestamp_op: First timestamp operator to use for query
- end_timestamp: Final timestamp to use for query
- end_timestamp_op: Final timestamp operator to use for query
- """
-
- if args is None:
- return {}
- search_offset = int(args.get('search_offset', 0))
-
- def _parse_timestamp(timestamp):
- if not timestamp:
- return None
- try:
- iso_timestamp = timeutils.parse_isotime(timestamp)
- iso_timestamp = iso_timestamp.replace(tzinfo=None)
- except ValueError:
- raise wsme.exc.InvalidInput('timestamp', timestamp,
- 'invalid timestamp format')
- return iso_timestamp
-
- start_timestamp = _parse_timestamp(args.get('start_timestamp'))
- end_timestamp = _parse_timestamp(args.get('end_timestamp'))
- start_timestamp = start_timestamp - datetime.timedelta(
- minutes=search_offset) if start_timestamp else None
- end_timestamp = end_timestamp + datetime.timedelta(
- minutes=search_offset) if end_timestamp else None
- return {'start_timestamp': start_timestamp,
- 'end_timestamp': end_timestamp,
- 'start_timestamp_op': args.get('start_timestamp_op'),
- 'end_timestamp_op': args.get('end_timestamp_op')}
-
-
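The search_offset handling above widens the requested range by that many minutes on both ends; a tiny worked example of the arithmetic:

    import datetime

    # search_offset=10 turns [12:00, 13:00] into [11:50, 13:10]
    start = datetime.datetime(2015, 1, 1, 12, 0)
    end = datetime.datetime(2015, 1, 1, 13, 0)
    offset = datetime.timedelta(minutes=10)
    print(start - offset, end + offset)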
-def flatten_metadata(metadata):
- """Return flattened resource metadata.
-
- Metadata is returned with flattened nested structures (except nested sets)
- and with all values converted to unicode strings.
- """
- if metadata:
- # After changing recursive_keypairs' output we need to keep
- # flattening output unchanged.
- # Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.')
- # output before: a.b:c=d
- # output now: a.b.c=d
- # So to keep the first variant just replace all dots except the first
- return dict((k.replace('.', ':').replace(':', '.', 1),
- six.text_type(v))
- for k, v in utils.recursive_keypairs(metadata,
- separator='.')
- if type(v) is not set)
- return {}
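The key rewriting above keeps only the first separator as a dot; a quick worked example:

    # recursive_keypairs({'a': {'b': {'c': 'd'}}}, '.') yields ('a.b.c', 'd');
    # the replace() pair then maps the key to 'a.b:c'.
    key = 'a.b.c'
    assert key.replace('.', ':').replace(':', '.', 1) == 'a.b:c'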
diff --git a/ceilometer/api/hooks.py b/ceilometer/api/hooks.py
deleted file mode 100644
index bfa85f53..00000000
--- a/ceilometer/api/hooks.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log
-import oslo_messaging
-from oslo_policy import policy
-
-from pecan import hooks
-
-from ceilometer import messaging
-from ceilometer import storage
-
-LOG = log.getLogger(__name__)
-
-
-class ConfigHook(hooks.PecanHook):
- """Attach the configuration object to the request.
-
- That allows controllers to get it.
- """
- def __init__(self, conf):
- super(ConfigHook, self).__init__()
- self.conf = conf
- self.enforcer = policy.Enforcer(conf)
- self.enforcer.load_rules()
-
- def on_route(self, state):
- state.request.cfg = self.conf
- state.request.enforcer = self.enforcer
-
-
-class DBHook(hooks.PecanHook):
-
- def __init__(self, conf):
- self.storage_connection = self.get_connection(conf)
-
- if not self.storage_connection:
- raise Exception(
- "API failed to start. Failed to connect to database")
-
- def before(self, state):
- state.request.storage_conn = self.storage_connection
-
- @staticmethod
- def get_connection(conf):
- try:
- return storage.get_connection_from_config(conf)
- except Exception as err:
- LOG.exception("Failed to connect to db" "retry later: %s",
- err)
-
-
-class NotifierHook(hooks.PecanHook):
- """Create and attach a notifier to the request.
-
- Usually, samples will be pushed to the notification bus by the
- notifier when they are posted via the /v2/meters/ API.
- """
-
- def __init__(self, conf):
- transport = messaging.get_transport(conf)
- self.notifier = oslo_messaging.Notifier(
- transport, driver=conf.publisher_notifier.telemetry_driver,
- publisher_id="ceilometer.api")
-
- def before(self, state):
- state.request.notifier = self.notifier
-
-
-class TranslationHook(hooks.PecanHook):
-
- def after(self, state):
- # After a request has been done, we need to see if
- # ClientSideError has added an error onto the response.
- # If it has, we need to get it into the thread-safe WSGI
- # environ to be used by the ParsableErrorMiddleware.
- if hasattr(state.response, 'translatable_error'):
- state.request.environ['translatable_error'] = (
- state.response.translatable_error)
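All of these hooks follow one pattern: stash an object on state.request so controllers can reach it later. A minimal sketch of that pattern with a hypothetical hook:

    import uuid

    from pecan import hooks


    class RequestIdHook(hooks.PecanHook):
        """Hypothetical hook: tag every request with an id."""

        def before(self, state):
            state.request.request_id = str(uuid.uuid4())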
diff --git a/ceilometer/api/middleware.py b/ceilometer/api/middleware.py
deleted file mode 100644
index 3cc937c2..00000000
--- a/ceilometer/api/middleware.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#
-# Copyright 2013 IBM Corp.
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Middleware to replace the plain text message body of an error
-response with one formatted so the client can parse it.
-
-Based on pecan.middleware.errordocument
-"""
-
-import json
-
-from lxml import etree
-from oslo_log import log
-import six
-import webob
-
-from ceilometer import i18n
-
-LOG = log.getLogger(__name__)
-
-
-class ParsableErrorMiddleware(object):
- """Replace error body with something the client can parse."""
-
- @staticmethod
- def best_match_language(accept_language):
- """Determines best available locale from the Accept-Language header.
-
- :returns: the best language match or None if the 'Accept-Language'
- header was not available in the request.
- """
- if not accept_language:
- return None
- all_languages = i18n.get_available_languages()
- return accept_language.best_match(all_languages)
-
- def __init__(self, app):
- self.app = app
-
- def __call__(self, environ, start_response):
- # Request for this state, modified by replace_start_response()
- # and used when an error is being reported.
- state = {}
-
- def replacement_start_response(status, headers, exc_info=None):
- """Overrides the default response to make errors parsable."""
- try:
- status_code = int(status.split(' ')[0])
- state['status_code'] = status_code
- except (ValueError, TypeError): # pragma: nocover
- raise Exception((
- 'ErrorDocumentMiddleware received an invalid '
- 'status %s' % status
- ))
- else:
- if (state['status_code'] // 100) not in (2, 3):
- # Remove some headers so we can replace them later
- # when we have the full error message and can
- # compute the length.
- headers = [(h, v)
- for (h, v) in headers
- if h not in ('Content-Length', 'Content-Type')
- ]
- # Save the headers in case we need to modify them.
- state['headers'] = headers
- return start_response(status, headers, exc_info)
-
- app_iter = self.app(environ, replacement_start_response)
- if (state['status_code'] // 100) not in (2, 3):
- req = webob.Request(environ)
- error = environ.get('translatable_error')
- user_locale = self.best_match_language(req.accept_language)
- if (req.accept.best_match(['application/json', 'application/xml'])
- == 'application/xml'):
- content_type = 'application/xml'
- try:
- # simple check that the xml is valid
- fault = etree.fromstring(b'\n'.join(app_iter))
- # Add the translated error to the xml data
- if error is not None:
- for fault_string in fault.findall('faultstring'):
- fault_string.text = i18n.translate(error,
- user_locale)
- error_message = etree.tostring(fault)
- body = b''.join((b'<error_message>',
- error_message,
- b'</error_message>'))
- except etree.XMLSyntaxError as err:
- LOG.error('Error parsing HTTP response: %s', err)
- error_message = state['status_code']
- body = '<error_message>%s</error_message>' % error_message
- if six.PY3:
- body = body.encode('utf-8')
- else:
- content_type = 'application/json'
- app_data = b'\n'.join(app_iter)
- if six.PY3:
- app_data = app_data.decode('utf-8')
- try:
- fault = json.loads(app_data)
- if error is not None and 'faultstring' in fault:
- fault['faultstring'] = i18n.translate(error,
- user_locale)
- except ValueError as err:
- fault = app_data
- body = json.dumps({'error_message': fault})
- if six.PY3:
- body = body.encode('utf-8')
-
- state['headers'].append(('Content-Length', str(len(body))))
- state['headers'].append(('Content-Type', content_type))
- body = [body]
- else:
- body = app_iter
- return body
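From the client side, the middleware guarantees a parsable envelope; a sketch of consuming it (the body shown is illustrative, reusing an error message from this change):

    import json

    body = '{"error_message": {"faultstring": "Limit must be positive"}}'
    fault = json.loads(body)['error_message']
    print(fault['faultstring'])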
diff --git a/ceilometer/api/rbac.py b/ceilometer/api/rbac.py
deleted file mode 100644
index ca6f7d1f..00000000
--- a/ceilometer/api/rbac.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2014 Hewlett-Packard Company
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Access Control Lists (ACL's) control access the API server."""
-
-import pecan
-
-
-def _has_rule(name):
- return name in pecan.request.enforcer.rules.keys()
-
-
-def enforce(policy_name, request):
- """Checks authorization of a rule against the request.
-
- :param policy_name: the policy name to validate authz against.
- :param request: HTTP request
-
- """
-
- rule_method = "telemetry:" + policy_name
- headers = request.headers
-
- policy_dict = dict()
- policy_dict['roles'] = headers.get('X-Roles', "").split(",")
- policy_dict['user_id'] = (headers.get('X-User-Id'))
- policy_dict['project_id'] = (headers.get('X-Project-Id'))
-
- # maintain backward compat with Juno and previous by allowing the action if
- # there is no rule defined for it
- if ((_has_rule('default') or _has_rule(rule_method)) and
- not pecan.request.enforcer.enforce(rule_method, {}, policy_dict)):
- pecan.core.abort(status_code=403, detail='RBAC Authorization Failed')
-
-
-# TODO(fabiog): these methods are still used because the scoping part is really
-# convoluted and difficult to separate out.
-
-def get_limited_to(headers):
- """Return the user and project the request should be limited to.
-
- :param headers: HTTP headers dictionary
- :return: A tuple of (user, project), set to None if there's no limit on
- one of these.
-
- """
-
- policy_dict = dict()
- policy_dict['roles'] = headers.get('X-Roles', "").split(",")
- policy_dict['user_id'] = (headers.get('X-User-Id'))
- policy_dict['project_id'] = (headers.get('X-Project-Id'))
-
- # maintain backward compat with Juno and previous by using context_is_admin
- # rule if the segregation rule (added in Kilo) is not defined
- rule_name = 'segregation' if _has_rule(
- 'segregation') else 'context_is_admin'
- if not pecan.request.enforcer.enforce(rule_name,
- {},
- policy_dict):
- return headers.get('X-User-Id'), headers.get('X-Project-Id')
-
- return None, None
-
-
-def get_limited_to_project(headers):
- """Return the project the request should be limited to.
-
- :param headers: HTTP headers dictionary
- :return: A project, or None if there's no limit on it.
-
- """
- return get_limited_to(headers)[1]
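enforce() boils down to an oslo.policy check against credentials scraped from the keystone headers; a minimal sketch of the credential dict it builds (header values are hypothetical):

    headers = {'X-Roles': 'member,reader',
               'X-User-Id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
               'X-Project-Id': '35b17138-b364-4e6a-a131-8f3099c5be68'}
    creds = {'roles': headers.get('X-Roles', '').split(','),
             'user_id': headers.get('X-User-Id'),
             'project_id': headers.get('X-Project-Id')}
    # enforcer.enforce('telemetry:get_samples', {}, creds) would then gate
    # the request, aborting with 403 on failure as enforce() does above.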
diff --git a/ceilometer/cmd/api.py b/ceilometer/cmd/api.py
deleted file mode 100644
index 7591fb47..00000000
--- a/ceilometer/cmd/api.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-# Copyright 2015-2016 Red Hat, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import logging
-
-from cotyledon import oslo_config_glue
-from oslo_log import log
-
-from ceilometer.api import app
-from ceilometer import service
-
-LOG = log.getLogger(__name__)
-
-
-def build_wsgi_app(argv=None):
- conf = service.prepare_service(argv=argv)
- conf.register_opts(oslo_config_glue.service_opts)
- if conf.log_options:
- LOG.debug('Full set of CONF:')
- conf.log_opt_values(LOG, logging.DEBUG)
- return app.load_app(conf)
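build_wsgi_app() returns a plain WSGI callable, so any WSGI server can host it; a hedged sketch using the stdlib server (real deployments used mod_wsgi or uwsgi, and the port is illustrative):

    from wsgiref.simple_server import make_server

    from ceilometer.cmd.api import build_wsgi_app

    application = build_wsgi_app(argv=[])
    make_server('0.0.0.0', 8777, application).serve_forever()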
diff --git a/ceilometer/conf/__init__.py b/ceilometer/conf/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/conf/__init__.py
+++ /dev/null
diff --git a/ceilometer/conf/defaults.py b/ceilometer/conf/defaults.py
deleted file mode 100644
index e87b1530..00000000
--- a/ceilometer/conf/defaults.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_middleware import cors
-
-
-def set_cors_middleware_defaults():
- """Update default configuration options for oslo.middleware."""
- cors.set_defaults(
- allow_headers=['X-Auth-Token',
- 'X-Identity-Status',
- 'X-Roles',
- 'X-Service-Catalog',
- 'X-User-Id',
- 'X-Tenant-Id',
- 'X-Openstack-Request-Id'],
- expose_headers=['X-Auth-Token',
- 'X-Subject-Token',
- 'X-Service-Token',
- 'X-Openstack-Request-Id'],
- allow_methods=['GET',
- 'PUT',
- 'POST',
- 'DELETE',
- 'PATCH']
- )
diff --git a/ceilometer/opts.py b/ceilometer/opts.py
index e655e2e4..34fe4d14 100644
--- a/ceilometer/opts.py
+++ b/ceilometer/opts.py
@@ -18,8 +18,6 @@ from keystoneauth1 import loading
from oslo_config import cfg
import ceilometer.agent.manager
-import ceilometer.api.app
-import ceilometer.api.controllers.v2.root
import ceilometer.compute.discovery
import ceilometer.compute.virt.inspector
import ceilometer.compute.virt.libvirt.utils
@@ -75,7 +73,6 @@ def list_opts():
return [
('DEFAULT',
itertools.chain(ceilometer.agent.manager.OPTS,
- ceilometer.api.app.OPTS,
ceilometer.compute.virt.inspector.OPTS,
ceilometer.compute.virt.libvirt.utils.OPTS,
ceilometer.dispatcher.OPTS,
@@ -84,8 +81,6 @@ def list_opts():
ceilometer.sample.OPTS,
ceilometer.utils.OPTS,
OPTS)),
- ('api', itertools.chain(ceilometer.api.app.API_OPTS,
- ceilometer.api.controllers.v2.root.API_OPTS)),
('compute', ceilometer.compute.discovery.OPTS),
('coordination', [
cfg.StrOpt(
diff --git a/ceilometer/service.py b/ceilometer/service.py
index d4188f9a..dca96d4e 100644
--- a/ceilometer/service.py
+++ b/ceilometer/service.py
@@ -18,10 +18,8 @@ from oslo_config import cfg
from oslo_db import options as db_options
import oslo_i18n
from oslo_log import log
-from oslo_policy import opts as policy_opts
from oslo_reports import guru_meditation_report as gmr
-from ceilometer.conf import defaults
from ceilometer import keystone_client
from ceilometer import messaging
from ceilometer import opts
@@ -47,8 +45,6 @@ def prepare_service(argv=None, config_files=None, conf=None):
['futurist=INFO', 'neutronclient=INFO',
'keystoneclient=INFO'])
log.set_defaults(default_log_levels=log_levels)
- defaults.set_cors_middleware_defaults()
- policy_opts.set_defaults(conf)
db_options.set_defaults(conf)
conf(argv[1:], project='ceilometer', validate_default_values=True,
@@ -61,10 +57,6 @@ def prepare_service(argv=None, config_files=None, conf=None):
utils.setup_root_helper(conf)
sample.setup(conf)
- # NOTE(liusheng): guru cannot run with service under apache daemon, so when
- # ceilometer-api running with mod_wsgi, the argv is [], we don't start
- # guru.
- if argv:
- gmr.TextGuruMeditation.setup_autorun(version)
+ gmr.TextGuruMeditation.setup_autorun(version)
messaging.setup()
return conf
diff --git a/ceilometer/telemetry/notifications.py b/ceilometer/telemetry/notifications.py
index fd9db0da..77983f3b 100644
--- a/ceilometer/telemetry/notifications.py
+++ b/ceilometer/telemetry/notifications.py
@@ -37,10 +37,10 @@ class TelemetryBase(plugin_base.NotificationBase):
class TelemetryIpc(TelemetryBase):
"""Handle sample from notification bus
- Telemetry samples can be posted via API or polled by Polling agent.
+ Telemetry samples are polled by the polling agent.
"""
- event_types = ['telemetry.api', 'telemetry.polling']
+ event_types = ['telemetry.polling']
def process_notification(self, message):
samples = message['payload']['samples']
diff --git a/ceilometer/tests/base.py b/ceilometer/tests/base.py
index 7cf29bff..1f6ed459 100644
--- a/ceilometer/tests/base.py
+++ b/ceilometer/tests/base.py
@@ -23,7 +23,6 @@ from oslo_utils import timeutils
from oslotest import base
import six
from testtools import testcase
-import webtest
import yaml
import ceilometer
@@ -98,10 +97,6 @@ def _skip_decorator(func):
return func(*args, **kwargs)
except ceilometer.NotImplementedError as e:
raise testcase.TestSkipped(six.text_type(e))
- except webtest.app.AppError as e:
- if 'not implemented' in six.text_type(e):
- raise testcase.TestSkipped(six.text_type(e))
- raise
return skip_if_not_implemented
diff --git a/ceilometer/tests/functional/api/__init__.py b/ceilometer/tests/functional/api/__init__.py
deleted file mode 100644
index b20c5250..00000000
--- a/ceilometer/tests/functional/api/__init__.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Base classes for API tests.
-"""
-
-import pecan
-import pecan.testing
-
-from ceilometer import service
-from ceilometer.tests import db as db_test_base
-
-
-class FunctionalTest(db_test_base.TestBase):
- """Used for functional tests of Pecan controllers.
-
- Used in case when you need to test your literal application and its
- integration with the framework.
- """
-
- PATH_PREFIX = ''
-
- def setUp(self):
- super(FunctionalTest, self).setUp()
- self.CONF = service.prepare_service([], [])
- self.setup_messaging(self.CONF)
-
- self.CONF.set_override("policy_file",
- self.path_get('etc/ceilometer/policy.json'),
- group='oslo_policy')
-
- self.CONF.set_override('gnocchi_is_enabled', False, group='api')
- self.CONF.set_override('aodh_is_enabled', False, group='api')
- self.CONF.set_override('panko_is_enabled', False, group='api')
-
- self.app = self._make_app()
-
- def _make_app(self, enable_acl=False):
- self.config = {
- 'app': {
- 'root': 'ceilometer.api.controllers.root.RootController',
- 'modules': ['ceilometer.api'],
- 'enable_acl': enable_acl,
- },
- 'wsme': {
- 'debug': True,
- },
- }
-
- return pecan.testing.load_test_app(self.config, conf=self.CONF)
-
- def tearDown(self):
- super(FunctionalTest, self).tearDown()
- pecan.set_config({}, overwrite=True)
-
- def put_json(self, path, params, expect_errors=False, headers=None,
- extra_environ=None, status=None):
- """Sends simulated HTTP PUT request to Pecan test app.
-
- :param path: url path of target service
- :param params: content for wsgi.input of request
- :param expect_errors: boolean indicating whether an error response is
- expected for this request
- :param headers: A dictionary of headers to send along with the request
- :param extra_environ: A dictionary of environ variables to send along
- with the request
- :param status: Expected status code of response
- """
- return self.post_json(path=path, params=params,
- expect_errors=expect_errors,
- headers=headers, extra_environ=extra_environ,
- status=status, method="put")
-
- def post_json(self, path, params, expect_errors=False, headers=None,
- method="post", extra_environ=None, status=None):
- """Sends simulated HTTP POST request to Pecan test app.
-
- :param path: url path of target service
- :param params: content for wsgi.input of request
- :param expect_errors: boolean indicating whether an error response is
- expected for this request
- :param headers: A dictionary of headers to send along with the request
- :param method: HTTP request method. Prefer calling the dedicated
- helper (e.g. put_json) rather than passing this in.
- :param extra_environ: A dictionary of environ variables to send along
- with the request
- :param status: Expected status code of response
- """
- full_path = self.PATH_PREFIX + path
- response = getattr(self.app, "%s_json" % method)(
- str(full_path),
- params=params,
- headers=headers,
- status=status,
- extra_environ=extra_environ,
- expect_errors=expect_errors
- )
- return response
-
- def delete(self, path, expect_errors=False, headers=None,
- extra_environ=None, status=None):
- """Sends simulated HTTP DELETE request to Pecan test app.
-
- :param path: url path of target service
- :param expect_errors: boolean indicating whether an error response is
- expected for this request
- :param headers: A dictionary of headers to send along with the request
- :param extra_environ: A dictionary of environ variables to send along
- with the request
- :param status: Expected status code of response
- """
- full_path = self.PATH_PREFIX + path
- response = self.app.delete(str(full_path),
- headers=headers,
- status=status,
- extra_environ=extra_environ,
- expect_errors=expect_errors)
- return response
-
- def get_json(self, path, expect_errors=False, headers=None,
- extra_environ=None, q=None, groupby=None, status=None,
- override_params=None, **params):
- """Sends simulated HTTP GET request to Pecan test app.
-
- :param path: url path of target service
- :param expect_errors: boolean indicating whether an error response is
- expected for this request
- :param headers: A dictionary of headers to send along with the request
- :param extra_environ: A dictionary of environ variables to send along
- with the request
- :param q: list of queries consisting of: field, value, op, and type
- keys
- :param groupby: list of fields to group by
- :param status: Expected status code of response
- :param override_params: literal, pre-encoded query parameter string
- :param params: content for wsgi.input of request
- """
- q = q or []
- groupby = groupby or []
- full_path = self.PATH_PREFIX + path
- if override_params:
- all_params = override_params
- else:
- query_params = {'q.field': [],
- 'q.value': [],
- 'q.op': [],
- 'q.type': [],
- }
- for query in q:
- for name in ['field', 'op', 'value', 'type']:
- query_params['q.%s' % name].append(query.get(name, ''))
- all_params = {}
- all_params.update(params)
- if q:
- all_params.update(query_params)
- if groupby:
- all_params.update({'groupby': groupby})
- response = self.app.get(full_path,
- params=all_params,
- headers=headers,
- extra_environ=extra_environ,
- expect_errors=expect_errors,
- status=status)
- if not expect_errors:
- response = response.json
- return response
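For context on what is being deleted: get_json flattened the q list of query dicts into the parallel q.field/q.op/q.value/q.type parameters the v2 API expected. The transformation in isolation:

    q = [{'field': 'resource_id', 'op': 'eq', 'value': 'resource-id'}]
    query_params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': []}
    for query in q:
        for name in ['field', 'op', 'value', 'type']:
            query_params['q.%s' % name].append(query.get(name, ''))
    # query_params == {'q.field': ['resource_id'], 'q.op': ['eq'],
    #                  'q.value': ['resource-id'], 'q.type': ['']}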
diff --git a/ceilometer/tests/functional/api/v2/__init__.py b/ceilometer/tests/functional/api/v2/__init__.py
deleted file mode 100644
index fc70f5e2..00000000
--- a/ceilometer/tests/functional/api/v2/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from ceilometer.tests.functional import api
-
-
-class FunctionalTest(api.FunctionalTest):
- PATH_PREFIX = '/v2'
diff --git a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py b/ceilometer/tests/functional/api/v2/test_acl_scenarios.py
deleted file mode 100644
index d8001f6c..00000000
--- a/ceilometer/tests/functional/api/v2/test_acl_scenarios.py
+++ /dev/null
@@ -1,180 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test ACL."""
-
-import datetime
-import uuid
-
-from keystonemiddleware import fixture as ksm_fixture
-import webtest
-
-from ceilometer.api import app
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests.functional.api import v2
-
-VALID_TOKEN = uuid.uuid4().hex
-VALID_TOKEN2 = uuid.uuid4().hex
-
-
-class TestAPIACL(v2.FunctionalTest):
-
- def setUp(self):
- super(TestAPIACL, self).setUp()
- self.auth_token_fixture = self.useFixture(
- ksm_fixture.AuthTokenFixture())
- self.auth_token_fixture.add_token_data(
- token_id=VALID_TOKEN,
- # FIXME(morganfainberg): The project-id should be a proper uuid
- project_id='123i2910',
- role_list=['admin'],
- user_name='user_id2',
- user_id='user_id2',
- is_v2=True
- )
- self.auth_token_fixture.add_token_data(
- token_id=VALID_TOKEN2,
- # FIXME(morganfainberg): The project-id should be a proper uuid
- project_id='project-good',
- role_list=['Member'],
- user_name='user_id1',
- user_id='user_id1',
- is_v2=True)
-
- for cnt in [
- sample.Sample(
- 'meter.test',
- 'cumulative',
- '',
- 1,
- 'user-good',
- 'project-good',
- 'resource-good',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample'},
- source='test_source'),
- sample.Sample(
- 'meter.mine',
- 'gauge',
- '',
- 1,
- 'user-fred',
- 'project-good',
- 'resource-56',
- timestamp=datetime.datetime(2012, 7, 2, 10, 43),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample4'},
- source='test_source')]:
- msg = utils.meter_message_from_counter(
- cnt, self.CONF.publisher.telemetry_secret)
- self.conn.record_metering_data(msg)
-
- def get_json(self, path, expect_errors=False, headers=None,
- q=None, **params):
- return super(TestAPIACL, self).get_json(path,
- expect_errors=expect_errors,
- headers=headers,
- q=q or [],
- **params)
-
- def _make_app(self):
- file_name = self.path_get('etc/ceilometer/api_paste.ini')
- self.CONF.set_override("api_paste_config", file_name)
- return webtest.TestApp(app.load_app(self.CONF))
-
- def test_non_authenticated(self):
- response = self.get_json('/meters', expect_errors=True)
- self.assertEqual(401, response.status_int)
-
- def test_authenticated_wrong_role(self):
- response = self.get_json('/meters',
- expect_errors=True,
- headers={
- "X-Roles": "Member",
- "X-Tenant-Name": "admin",
- "X-Project-Id":
- "bc23a9d531064583ace8f67dad60f6bb",
- })
- self.assertEqual(401, response.status_int)
-
- # FIXME(dhellmann): This test is not properly looking at the tenant
- # info. We do not correctly detect the improper tenant. That's
- # really something the keystone middleware would have to do using
- # the incoming token, which we aren't providing.
- #
- # def test_authenticated_wrong_tenant(self):
- # response = self.get_json('/meters',
- # expect_errors=True,
- # headers={
- # "X-Roles": "admin",
- # "X-Tenant-Name": "achoo",
- # "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb",
- # })
- # self.assertEqual(401, response.status_int)
-
- def test_authenticated(self):
- data = self.get_json('/meters',
- headers={"X-Auth-Token": VALID_TOKEN,
- "X-Roles": "admin",
- "X-Project-Id":
- "bc23a9d531064583ace8f67dad60f6bb",
- })
- ids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-good', 'resource-56']), ids)
-
- def test_with_non_admin_missing_project_query(self):
- data = self.get_json('/meters',
- headers={"X-Roles": "Member",
- "X-Auth-Token": VALID_TOKEN2,
- "X-Project-Id": "project-good"})
- ids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-good', 'resource-56']), ids)
-
- def test_with_non_admin(self):
- data = self.get_json('/meters',
- headers={"X-Roles": "Member",
- "X-Auth-Token": VALID_TOKEN2,
- "X-Project-Id": "project-good"},
- q=[{'field': 'project_id',
- 'value': 'project-good',
- }])
- ids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-good', 'resource-56']), ids)
-
- def test_non_admin_wrong_project(self):
- data = self.get_json('/meters',
- expect_errors=True,
- headers={"X-Roles": "Member",
- "X-Auth-Token": VALID_TOKEN2,
- "X-Project-Id": "project-good"},
- q=[{'field': 'project_id',
- 'value': 'project-wrong',
- }])
- self.assertEqual(401, data.status_int)
-
- def test_non_admin_two_projects(self):
- data = self.get_json('/meters',
- expect_errors=True,
- headers={"X-Roles": "Member",
- "X-Auth-Token": VALID_TOKEN2,
- "X-Project-Id": "project-good"},
- q=[{'field': 'project_id',
- 'value': 'project-good',
- },
- {'field': 'project_id',
- 'value': 'project-naughty',
- }])
- self.assertEqual(401, data.status_int)
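These scenarios drove authorization entirely through the trusted headers keystonemiddleware sets after validating a token; a representative header set (values are illustrative):

    headers = {
        'X-Auth-Token': VALID_TOKEN,   # validated by the AuthTokenFixture
        'X-Roles': 'admin',
        'X-Project-Id': 'bc23a9d531064583ace8f67dad60f6bb',
    }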
diff --git a/ceilometer/tests/functional/api/v2/test_api_upgrade.py b/ceilometer/tests/functional/api/v2/test_api_upgrade.py
deleted file mode 100644
index 6958eab9..00000000
--- a/ceilometer/tests/functional/api/v2/test_api_upgrade.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import fixtures
-import mock
-from oslo_utils import fileutils
-import six
-
-from ceilometer.tests.functional.api import v2
-
-
-class TestAPIUpgradePath(v2.FunctionalTest):
- def _make_app(self):
- content = ('{"default": ""}')
- if six.PY3:
- content = content.encode('utf-8')
- self.tempfile = fileutils.write_to_tempfile(content=content,
- prefix='policy',
- suffix='.json')
- self.CONF.set_override("policy_file", self.tempfile,
- group='oslo_policy')
- return super(TestAPIUpgradePath, self)._make_app()
-
- def _setup_osloconfig_options(self):
- self.CONF.set_override('gnocchi_is_enabled', True, group='api')
- self.CONF.set_override('aodh_is_enabled', True, group='api')
- self.CONF.set_override('aodh_url', 'http://alarm-endpoint:8008/',
- group='api')
- self.CONF.set_override('panko_is_enabled', True, group='api')
- self.CONF.set_override('panko_url', 'http://event-endpoint:8009/',
- group='api')
-
- def _setup_keystone_mock(self):
- self.CONF.set_override('gnocchi_is_enabled', None, group='api')
- self.CONF.set_override('aodh_is_enabled', None, group='api')
- self.CONF.set_override('aodh_url', None, group='api')
- self.CONF.set_override('panko_is_enabled', None, group='api')
- self.CONF.set_override('panko_url', None, group='api')
- self.CONF.set_override('meter_dispatchers', ['database'])
- self.ks = mock.Mock()
- self.catalog = (self.ks.session.auth.get_access.
- return_value.service_catalog)
- self.catalog.url_for.side_effect = self._url_for
- self.useFixture(fixtures.MockPatch(
- 'ceilometer.keystone_client.get_client', return_value=self.ks))
-
- @staticmethod
- def _url_for(service_type=None):
- if service_type == 'metric':
- return 'http://gnocchi/'
- elif service_type == 'alarming':
- return 'http://alarm-endpoint:8008/'
- elif service_type == 'event':
- return 'http://event-endpoint:8009/'
-
- def _do_test_gnocchi_enabled_without_database_backend(self):
- for endpoint in ['meters', 'samples', 'resources']:
- response = self.app.get(self.PATH_PREFIX + '/' + endpoint,
- status=410)
- self.assertIn(b'Gnocchi API', response.body)
-
- response = self.post_json('/query/samples',
- params={
- "filter": '{"=": {"type": "creation"}}',
- "orderby": '[{"timestamp": "DESC"}]',
- "limit": 3
- }, status=410)
- self.assertIn(b'Gnocchi API', response.body)
- sample_params = {
- "counter_type": "gauge",
- "counter_name": "fake_counter",
- "resource_id": "fake_resource_id",
- "counter_unit": "fake_unit",
- "counter_volume": "1"
- }
- self.post_json('/meters/fake_counter',
- params=[sample_params],
- status=201)
- response = self.post_json('/meters/fake_counter?direct=1',
- params=[sample_params],
- status=400)
- self.assertIn(b'direct option cannot be true when Gnocchi is enabled',
- response.body)
-
- def _do_test_alarm_redirect(self):
- response = self.app.get(self.PATH_PREFIX + '/alarms',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://alarm-endpoint:8008/v2/alarms",
- response.headers['Location'])
-
- response = self.app.get(self.PATH_PREFIX + '/alarms/uuid',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid",
- response.headers['Location'])
-
- response = self.app.delete(self.PATH_PREFIX + '/alarms/uuid',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://alarm-endpoint:8008/v2/alarms/uuid",
- response.headers['Location'])
-
- response = self.post_json('/query/alarms',
- params={
- "filter": '{"=": {"type": "creation"}}',
- "orderby": '[{"timestamp": "DESC"}]',
- "limit": 3
- }, status=308)
- self.assertEqual("http://alarm-endpoint:8008/v2/query/alarms",
- response.headers['Location'])
-
- def _do_test_event_redirect(self):
- response = self.app.get(self.PATH_PREFIX + '/events',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://event-endpoint:8009/v2/events",
- response.headers['Location'])
-
- response = self.app.get(self.PATH_PREFIX + '/events/uuid',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://event-endpoint:8009/v2/events/uuid",
- response.headers['Location'])
-
- response = self.app.delete(self.PATH_PREFIX + '/events/uuid',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://event-endpoint:8009/v2/events/uuid",
- response.headers['Location'])
-
- response = self.app.get(self.PATH_PREFIX + '/event_types',
- expect_errors=True)
-
- self.assertEqual(308, response.status_code)
- self.assertEqual("http://event-endpoint:8009/v2/event_types",
- response.headers['Location'])
-
- def test_gnocchi_enabled_without_database_backend_keystone(self):
- self._setup_keystone_mock()
- self._do_test_gnocchi_enabled_without_database_backend()
- self.catalog.url_for.assert_has_calls(
- [mock.call(service_type="metric")])
-
- def test_gnocchi_enabled_without_database_backend_configoptions(self):
- self._setup_osloconfig_options()
- self._do_test_gnocchi_enabled_without_database_backend()
-
- def test_alarm_redirect_keystone(self):
- self._setup_keystone_mock()
- self._do_test_alarm_redirect()
- self.catalog.url_for.assert_has_calls(
- [mock.call(service_type="alarming")])
-
- def test_event_redirect_keystone(self):
- self._setup_keystone_mock()
- self._do_test_event_redirect()
- self.catalog.url_for.assert_has_calls(
- [mock.call(service_type="event")])
-
- def test_alarm_redirect_configoptions(self):
- self._setup_osloconfig_options()
- self._do_test_alarm_redirect()
-
- def test_event_redirect_configoptions(self):
- self._setup_osloconfig_options()
- self._do_test_event_redirect()
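The contract these tests pinned down: deprecated alarm and event endpoints answer with HTTP 308 and a Location header pointing at the Aodh or Panko equivalent. A compact sketch of the same check with webtest (assumes app is a webtest.TestApp wrapping the old API, as in the deleted setup):

    response = app.get('/v2/alarms', expect_errors=True)
    assert response.status_code == 308
    assert response.headers['Location'] == 'http://alarm-endpoint:8008/v2/alarms'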
diff --git a/ceilometer/tests/functional/api/v2/test_app.py b/ceilometer/tests/functional/api/v2/test_app.py
deleted file mode 100644
index 9aef1612..00000000
--- a/ceilometer/tests/functional/api/v2/test_app.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Copyright 2013 IBM Corp.
-# Copyright 2013 Julien Danjou
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test basic ceilometer-api app
-"""
-from ceilometer.tests.functional.api import v2
-
-
-class TestPecanApp(v2.FunctionalTest):
-
- def test_pecan_extension_guessing_unset(self):
- # check Pecan does not assume .jpg is an extension
- response = self.app.get(self.PATH_PREFIX + '/meters/meter.jpg')
- self.assertEqual('application/json', response.content_type)
-
-
-class TestApiMiddleware(v2.FunctionalTest):
-
- no_lang_translated_error = 'No lang translated error'
- en_US_translated_error = 'en-US translated error'
-
- def _fake_translate(self, message, user_locale):
- if user_locale is None:
- return self.no_lang_translated_error
- else:
- return self.en_US_translated_error
-
- def test_json_parsable_error_middleware_404(self):
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "application/json"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/json", response.content_type)
- self.assertTrue(response.json['error_message'])
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "application/json,application/xml"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/json", response.content_type)
- self.assertTrue(response.json['error_message'])
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "application/xml;q=0.8, \
- application/json"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/json", response.content_type)
- self.assertTrue(response.json['error_message'])
- response = self.get_json('/invalid_path',
- expect_errors=True
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/json", response.content_type)
- self.assertTrue(response.json['error_message'])
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "text/html,*/*"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/json", response.content_type)
- self.assertTrue(response.json['error_message'])
-
- def test_xml_parsable_error_middleware_404(self):
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "application/xml,*/*"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/xml", response.content_type)
- self.assertEqual('error_message', response.xml.tag)
- response = self.get_json('/invalid_path',
- expect_errors=True,
- headers={"Accept":
- "application/json;q=0.8 \
- ,application/xml"}
- )
- self.assertEqual(404, response.status_int)
- self.assertEqual("application/xml", response.content_type)
- self.assertEqual('error_message', response.xml.tag)
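Summing up the negotiation these middleware tests encoded (illustrative, derived from the assertions above):

    # Accept header                      -> error body content type
    # 'application/json'                 -> application/json
    # 'application/json,application/xml' -> application/json
    # none / 'text/html,*/*'             -> application/json
    # 'application/xml,*/*'              -> application/xml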
diff --git a/ceilometer/tests/functional/api/v2/test_capabilities.py b/ceilometer/tests/functional/api/v2/test_capabilities.py
deleted file mode 100644
index 67b76f48..00000000
--- a/ceilometer/tests/functional/api/v2/test_capabilities.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright Ericsson AB 2014. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-from ceilometer.tests.functional.api import v2 as tests_api
-
-
-class TestCapabilitiesController(tests_api.FunctionalTest):
-
- def setUp(self):
- super(TestCapabilitiesController, self).setUp()
- self.url = '/capabilities'
-
- def test_capabilities(self):
- data = self.get_json(self.url)
- # check that capabilities data contains both 'api' and 'storage' fields
- self.assertIsNotNone(data)
- self.assertNotEqual({}, data)
- self.assertIn('api', data)
- self.assertIn('storage', data)
diff --git a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py b/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py
deleted file mode 100644
index fe07f27c..00000000
--- a/ceilometer/tests/functional/api/v2/test_complex_query_scenarios.py
+++ /dev/null
@@ -1,312 +0,0 @@
-#
-# Copyright Ericsson AB 2013. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Tests complex queries for samples
-"""
-
-import datetime
-
-from oslo_utils import timeutils
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests.functional.api import v2 as tests_api
-
-
-admin_header = {"X-Roles": "admin",
- "X-Project-Id":
- "project-id1"}
-non_admin_header = {"X-Roles": "Member",
- "X-Project-Id":
- "project-id1"}
-
-
-class TestQueryMetersController(tests_api.FunctionalTest):
- def setUp(self):
- super(TestQueryMetersController, self).setUp()
- self.url = '/query/samples'
-
- for cnt in [
- sample.Sample('meter.test',
- 'cumulative',
- '',
- 1,
- 'user-id1',
- 'project-id1',
- 'resource-id1',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server1',
- 'tag': 'self.sample',
- 'size': 456,
- 'util': 0.25,
- 'is_public': True},
- source='test_source'),
- sample.Sample('meter.test',
- 'cumulative',
- '',
- 2,
- 'user-id2',
- 'project-id2',
- 'resource-id2',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server2',
- 'tag': 'self.sample',
- 'size': 123,
- 'util': 0.75,
- 'is_public': True},
- source='test_source'),
- sample.Sample('meter.test',
- 'cumulative',
- '',
- 3,
- 'user-id3',
- 'project-id3',
- 'resource-id3',
- timestamp=datetime.datetime(2012, 7, 2, 10, 42),
- resource_metadata={'display_name': 'test-server3',
- 'tag': 'self.sample',
- 'size': 789,
- 'util': 0.95,
- 'is_public': True},
- source='test_source')]:
-
- msg = utils.meter_message_from_counter(
- cnt, self.CONF.publisher.telemetry_secret)
- self.conn.record_metering_data(msg)
-
- def test_query_fields_are_optional(self):
- data = self.post_json(self.url, params={})
- self.assertEqual(3, len(data.json))
-
- def test_query_with_isotime(self):
- date_time = datetime.datetime(2012, 7, 2, 10, 41)
- isotime = date_time.isoformat()
-
- data = self.post_json(self.url,
- params={"filter":
- '{">=": {"timestamp": "'
- + isotime + '"}}'})
-
- self.assertEqual(2, len(data.json))
- for sample_item in data.json:
- result_time = timeutils.parse_isotime(sample_item['timestamp'])
- result_time = result_time.replace(tzinfo=None)
- self.assertGreaterEqual(result_time, date_time)
-
- def test_non_admin_tenant_sees_only_its_own_project(self):
- data = self.post_json(self.url,
- params={},
- headers=non_admin_header)
- for sample_item in data.json:
- self.assertEqual("project-id1", sample_item['project_id'])
-
- def test_non_admin_tenant_cannot_query_others_project(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"project_id": "project-id2"}}'},
- expect_errors=True,
- headers=non_admin_header)
-
- self.assertEqual(401, data.status_int)
- self.assertIn(b"Not Authorized to access project project-id2",
- data.body)
-
- def test_non_admin_tenant_can_explicitly_filter_for_own_project(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"project_id": "project-id1"}}'},
- headers=non_admin_header)
-
- for sample_item in data.json:
- self.assertEqual("project-id1", sample_item['project_id'])
-
- def test_admin_tenant_sees_every_project(self):
- data = self.post_json(self.url,
- params={},
- headers=admin_header)
-
- self.assertEqual(3, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['project_id'],
- (["project-id1", "project-id2", "project-id3"]))
-
- def test_admin_tenant_sees_every_project_with_complex_filter(self):
- filter = ('{"OR": ' +
- '[{"=": {"project_id": "project-id1"}}, ' +
- '{"=": {"project_id": "project-id2"}}]}')
- data = self.post_json(self.url,
- params={"filter": filter},
- headers=admin_header)
-
- self.assertEqual(2, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['project_id'],
- (["project-id1", "project-id2"]))
-
- def test_admin_tenant_sees_every_project_with_in_filter(self):
- filter = ('{"In": ' +
- '{"project_id": ["project-id1", "project-id2"]}}')
- data = self.post_json(self.url,
- params={"filter": filter},
- headers=admin_header)
-
- self.assertEqual(2, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['project_id'],
- (["project-id1", "project-id2"]))
-
- def test_admin_tenant_can_query_any_project(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"project_id": "project-id2"}}'},
- headers=admin_header)
-
- self.assertEqual(1, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['project_id'], set(["project-id2"]))
-
- def test_query_with_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '[{"project_id": "DESC"}]'})
-
- self.assertEqual(3, len(data.json))
- self.assertEqual(["project-id3", "project-id2", "project-id1"],
- [s["project_id"] for s in data.json])
-
- def test_query_with_field_name_project(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"project": "project-id2"}}'})
-
- self.assertEqual(1, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['project_id'], set(["project-id2"]))
-
- def test_query_with_field_name_resource(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"resource": "resource-id2"}}'})
-
- self.assertEqual(1, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['resource_id'], set(["resource-id2"]))
-
- def test_query_with_wrong_field_name(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"unknown": "resource-id2"}}'},
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertIn(b"is not valid under any of the given schemas",
- data.body)
-
- def test_query_with_wrong_json(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": "resource": "resource-id2"}}'},
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertIn(b"Filter expression not valid", data.body)
-
- def test_query_with_field_name_user(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"user": "user-id2"}}'})
-
- self.assertEqual(1, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['user_id'], set(["user-id2"]))
-
- def test_query_with_field_name_meter(self):
- data = self.post_json(self.url,
- params={"filter":
- '{"=": {"meter": "meter.test"}}'})
-
- self.assertEqual(3, len(data.json))
- for sample_item in data.json:
- self.assertIn(sample_item['meter'], set(["meter.test"]))
-
- def test_query_with_lower_and_upper_case_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '[{"project_id": "DeSc"}]'})
-
- self.assertEqual(3, len(data.json))
- self.assertEqual(["project-id3", "project-id2", "project-id1"],
- [s["project_id"] for s in data.json])
-
- def test_query_with_user_field_name_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '[{"user": "aSc"}]'})
-
- self.assertEqual(3, len(data.json))
- self.assertEqual(["user-id1", "user-id2", "user-id3"],
- [s["user_id"] for s in data.json])
-
- def test_query_with_volume_field_name_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '[{"volume": "deSc"}]'})
-
- self.assertEqual(3, len(data.json))
- self.assertEqual([3, 2, 1],
- [s["volume"] for s in data.json])
-
- def test_query_with_missing_order_in_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '[{"project_id": ""}]'},
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertIn(b"does not match '(?i)^asc$|^desc$'", data.body)
-
- def test_query_with_wrong_json_in_orderby(self):
- data = self.post_json(self.url,
- params={"orderby": '{"project_id": "desc"}]'},
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertIn(b"Order-by expression not valid: Extra data", data.body)
-
- def test_filter_with_metadata(self):
- data = self.post_json(self.url,
- params={"filter":
- '{">=": {"metadata.util": 0.5}}'})
-
- self.assertEqual(2, len(data.json))
- for sample_item in data.json:
- self.assertGreaterEqual(float(sample_item["metadata"]["util"]),
- 0.5)
-
- def test_filter_with_negation(self):
- filter_expr = '{"not": {">=": {"metadata.util": 0.5}}}'
- data = self.post_json(self.url,
- params={"filter": filter_expr})
-
- self.assertEqual(1, len(data.json))
- for sample_item in data.json:
- self.assertLess(float(sample_item["metadata"]["util"]), 0.5)
-
- def test_limit_must_be_positive(self):
- data = self.post_json(self.url,
- params={"limit": 0},
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertIn(b"Limit must be positive", data.body)
-
- def test_default_limit(self):
- self.CONF.set_override('default_api_return_limit', 1, group='api')
- data = self.post_json(self.url, params={})
- self.assertEqual(1, len(data.json))
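As the deleted tests show, /v2/query/samples takes filter and orderby as JSON-encoded strings inside a JSON body. An illustrative request body (values are made up):

    body = {
        "filter": '{"and": [{">=": {"timestamp": "2012-07-02T10:41:00"}},'
                  ' {"=": {"project": "project-id1"}}]}',
        "orderby": '[{"timestamp": "DESC"}]',
        "limit": 3,
    }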
diff --git a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py b/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
deleted file mode 100644
index fb633035..00000000
--- a/ceilometer/tests/functional/api/v2/test_compute_duration_by_resource_scenarios.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test listing raw events.
-"""
-
-import datetime
-
-import mock
-from oslo_utils import timeutils
-
-from ceilometer.storage import models
-from ceilometer.tests.functional.api import v2
-
-
-class TestComputeDurationByResource(v2.FunctionalTest):
-
- def setUp(self):
- super(TestComputeDurationByResource, self).setUp()
- # Create events relative to the range and pretend
- # that the intervening events exist.
-
- self.early1 = datetime.datetime(2012, 8, 27, 7, 0)
- self.early2 = datetime.datetime(2012, 8, 27, 17, 0)
-
- self.start = datetime.datetime(2012, 8, 28, 0, 0)
-
- self.middle1 = datetime.datetime(2012, 8, 28, 8, 0)
- self.middle2 = datetime.datetime(2012, 8, 28, 18, 0)
-
- self.end = datetime.datetime(2012, 8, 28, 23, 59)
-
- self.late1 = datetime.datetime(2012, 8, 29, 9, 0)
- self.late2 = datetime.datetime(2012, 8, 29, 19, 0)
-
- def _patch_get_interval(self, start, end):
- def get_interval(sample_filter, period, groupby, aggregate):
- self.assertIsNotNone(sample_filter.start_timestamp)
- self.assertIsNotNone(sample_filter.end_timestamp)
- if (sample_filter.start_timestamp > end or
- sample_filter.end_timestamp < start):
- return []
- duration_start = max(sample_filter.start_timestamp, start)
- duration_end = min(sample_filter.end_timestamp, end)
- duration = timeutils.delta_seconds(duration_start, duration_end)
- return [
- models.Statistics(
- unit='',
- min=0,
- max=0,
- avg=0,
- sum=0,
- count=0,
- period=None,
- period_start=None,
- period_end=None,
- duration=duration,
- duration_start=duration_start,
- duration_end=duration_end,
- groupby=None,
- )
- ]
- return mock.patch.object(type(self.conn), 'get_meter_statistics',
- side_effect=get_interval)
-
- def _invoke_api(self):
- return self.get_json('/meters/instance/statistics',
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': self.start.isoformat()},
- {'field': 'timestamp',
- 'op': 'le',
- 'value': self.end.isoformat()},
- {'field': 'search_offset',
- 'value': 10}])
-
- def test_before_range(self):
- with self._patch_get_interval(self.early1, self.early2):
- data = self._invoke_api()
- self.assertEqual([], data)
-
- def _assert_times_match(self, actual, expected):
- if actual:
- actual = timeutils.parse_isotime(actual)
- actual = actual.replace(tzinfo=None)
- self.assertEqual(expected, actual)
-
- def test_overlap_range_start(self):
- with self._patch_get_interval(self.early1, self.middle1):
- data = self._invoke_api()
- self._assert_times_match(data[0]['duration_start'], self.start)
- self._assert_times_match(data[0]['duration_end'], self.middle1)
- self.assertEqual(8 * 60 * 60, data[0]['duration'])
-
- def test_within_range(self):
- with self._patch_get_interval(self.middle1, self.middle2):
- data = self._invoke_api()
- self._assert_times_match(data[0]['duration_start'], self.middle1)
- self._assert_times_match(data[0]['duration_end'], self.middle2)
- self.assertEqual(10 * 60 * 60, data[0]['duration'])
-
- def test_within_range_zero_duration(self):
- with self._patch_get_interval(self.middle1, self.middle1):
- data = self._invoke_api()
- self._assert_times_match(data[0]['duration_start'], self.middle1)
- self._assert_times_match(data[0]['duration_end'], self.middle1)
- self.assertEqual(0, data[0]['duration'])
-
- def test_overlap_range_end(self):
- with self._patch_get_interval(self.middle2, self.late1):
- data = self._invoke_api()
- self._assert_times_match(data[0]['duration_start'], self.middle2)
- self._assert_times_match(data[0]['duration_end'], self.end)
- self.assertEqual(((6 * 60) - 1) * 60, data[0]['duration'])
-
- def test_after_range(self):
- with self._patch_get_interval(self.late1, self.late2):
- data = self._invoke_api()
- self.assertEqual([], data)
-
- def test_without_end_timestamp(self):
- statistics = [
- models.Statistics(
- unit=None,
- count=0,
- min=None,
- max=None,
- avg=None,
- duration=None,
- duration_start=self.late1,
- duration_end=self.late2,
- sum=0,
- period=None,
- period_start=None,
- period_end=None,
- groupby=None,
- )
- ]
- with mock.patch.object(type(self.conn), 'get_meter_statistics',
- return_value=statistics):
- data = self.get_json('/meters/instance/statistics',
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': self.late1.isoformat()},
- {'field': 'resource_id',
- 'value': 'resource-id'},
- {'field': 'search_offset',
- 'value': 10}])
- self._assert_times_match(data[0]['duration_start'], self.late1)
- self._assert_times_match(data[0]['duration_end'], self.late2)
-
- def test_without_start_timestamp(self):
- statistics = [
- models.Statistics(
- unit=None,
- count=0,
- min=None,
- max=None,
- avg=None,
- duration=None,
- duration_start=self.early1,
- duration_end=self.early2,
- sum=0,
- period=None,
- period_start=None,
- period_end=None,
- groupby=None,
- )
- ]
-
- with mock.patch.object(type(self.conn), 'get_meter_statistics',
- return_value=statistics):
- data = self.get_json('/meters/instance/statistics',
- q=[{'field': 'timestamp',
- 'op': 'le',
- 'value': self.early2.isoformat()},
- {'field': 'resource_id',
- 'value': 'resource-id'},
- {'field': 'search_offset',
- 'value': 10}])
- self._assert_times_match(data[0]['duration_start'], self.early1)
- self._assert_times_match(data[0]['duration_end'], self.early2)
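The stubbed get_meter_statistics above reduces to clipping the query window against the event range; the rule in isolation (datetimes taken from the fixtures above):

    import datetime
    event_start = datetime.datetime(2012, 8, 27, 7, 0)
    event_end = datetime.datetime(2012, 8, 27, 17, 0)
    query_start = datetime.datetime(2012, 8, 28, 0, 0)
    query_end = datetime.datetime(2012, 8, 28, 23, 59)
    duration_start = max(query_start, event_start)
    duration_end = min(query_end, event_end)
    overlap = (duration_end - duration_start).total_seconds()
    # A negative overlap means the ranges do not intersect -> no statistics.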
diff --git a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py b/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py
deleted file mode 100644
index 6ed3bdd9..00000000
--- a/ceilometer/tests/functional/api/v2/test_list_meters_scenarios.py
+++ /dev/null
@@ -1,797 +0,0 @@
-#
-# Copyright 2012 Red Hat, Inc.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test listing meters.
-"""
-
-import base64
-import datetime
-
-from oslo_serialization import jsonutils
-import six
-import webtest.app
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests.functional.api import v2
-
-
-class TestListEmptyMeters(v2.FunctionalTest):
-
- def test_empty(self):
- data = self.get_json('/meters')
- self.assertEqual([], data)
-
-
-class TestValidateUserInput(v2.FunctionalTest):
-
- def test_list_meters_query_float_metadata(self):
- self.assertRaises(webtest.app.AppError, self.get_json,
- '/meters/meter.test',
- q=[{'field': 'metadata.util',
- 'op': 'eq',
- 'value': '0.7.5',
- 'type': 'float'}])
- self.assertRaises(webtest.app.AppError, self.get_json,
- '/meters/meter.test',
- q=[{'field': 'metadata.util',
- 'op': 'eq',
- 'value': 'abacaba',
- 'type': 'boolean'}])
- self.assertRaises(webtest.app.AppError, self.get_json,
- '/meters/meter.test',
- q=[{'field': 'metadata.util',
- 'op': 'eq',
- 'value': '45.765',
- 'type': 'integer'}])
-
-
-class TestListMetersRestriction(v2.FunctionalTest):
-
- def setUp(self):
- super(TestListMetersRestriction, self).setUp()
- self.CONF.set_override('default_api_return_limit', 3, group='api')
- for x in range(5):
- for i in range(5):
- s = sample.Sample(
- 'volume.size%s' % x,
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=(datetime.datetime(2012, 9, 25, 10, 30) +
- datetime.timedelta(seconds=i)),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_meter_limit(self):
- data = self.get_json('/meters?limit=1')
- self.assertEqual(1, len(data))
-
- def test_meter_limit_negative(self):
- self.assertRaises(webtest.app.AppError,
- self.get_json,
- '/meters?limit=-2')
-
- def test_meter_limit_bigger(self):
- data = self.get_json('/meters?limit=42')
- self.assertEqual(5, len(data))
-
- def test_meter_default_limit(self):
- data = self.get_json('/meters')
- self.assertEqual(3, len(data))
-
- def test_old_sample_limit(self):
- data = self.get_json('/meters/volume.size0?limit=1')
- self.assertEqual(1, len(data))
-
- def test_old_sample_limit_negative(self):
- self.assertRaises(webtest.app.AppError,
- self.get_json,
- '/meters/volume.size0?limit=-2')
-
- def test_old_sample_limit_bigger(self):
- data = self.get_json('/meters/volume.size0?limit=42')
- self.assertEqual(5, len(data))
-
- def test_old_sample_default_limit(self):
- data = self.get_json('/meters/volume.size0')
- self.assertEqual(3, len(data))
-
- def test_sample_limit(self):
- data = self.get_json('/samples?limit=1')
- self.assertEqual(1, len(data))
-
- def test_sample_limit_negative(self):
- self.assertRaises(webtest.app.AppError,
- self.get_json,
- '/samples?limit=-2')
-
- def test_sample_limit_bigger(self):
- data = self.get_json('/samples?limit=42')
- self.assertEqual(25, len(data))
-
- def test_sample_default_limit(self):
- data = self.get_json('/samples')
- self.assertEqual(3, len(data))
-
-
-class TestListMeters(v2.FunctionalTest):
-
- def setUp(self):
- super(TestListMeters, self).setUp()
- self.messages = []
- for cnt in [
- sample.Sample(
- 'meter.test',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- 'size': 123,
- 'util': 0.75,
- 'is_public': True},
- source='test_source'),
- sample.Sample(
- 'meter.test',
- 'cumulative',
- '',
- 3,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 11, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample1',
- 'size': 0,
- 'util': 0.47,
- 'is_public': False},
- source='test_source'),
- sample.Sample(
- 'meter.mine',
- 'gauge',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id2',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- 'size': 456,
- 'util': 0.64,
- 'is_public': False},
- source='test_source'),
- sample.Sample(
- 'meter.test',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id2',
- 'resource-id3',
- timestamp=datetime.datetime(2012, 7, 2, 10, 42),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample3',
- 'size': 0,
- 'util': 0.75,
- 'is_public': False},
- source='test_source'),
- sample.Sample(
- 'meter.test.new',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample3',
- 'size': 0,
- 'util': 0.75,
- 'is_public': False},
- source='test_source'),
-
- sample.Sample(
- 'meter.mine',
- 'gauge',
- '',
- 1,
- 'user-id4',
- 'project-id2',
- 'resource-id4',
- timestamp=datetime.datetime(2012, 7, 2, 10, 43),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample4',
- 'properties': {
- 'prop_1': 'prop_value',
- 'prop_2': {'sub_prop_1':
- 'sub_prop_value'},
- 'prop.3': {'$sub_prop.2':
- 'sub_prop_value2'}
- },
- 'size': 0,
- 'util': 0.58,
- 'is_public': True},
- source='test_source1'),
- sample.Sample(
- u'meter.accent\xe9\u0437',
- 'gauge',
- '',
- 1,
- 'user-id4',
- 'project-id2',
- 'resource-id4',
- timestamp=datetime.datetime(2014, 7, 2, 10, 43),
- resource_metadata={},
- source='test_source1')]:
- msg = utils.meter_message_from_counter(
- cnt, self.CONF.publisher.telemetry_secret)
- self.messages.append(msg)
- self.conn.record_metering_data(msg)
-
- def test_list_meters(self):
- data = self.get_json('/meters')
- self.assertEqual(6, len(data))
- self.assertEqual(set(['resource-id',
- 'resource-id2',
- 'resource-id3',
- 'resource-id4']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new',
- u'meter.accent\xe9\u0437']),
- set(r['name'] for r in data))
- self.assertEqual(set(['test_source', 'test_source1']),
- set(r['source'] for r in data))
-
- def test_list_unique_meters(self):
- data = self.get_json('/meters?unique=True')
- self.assertEqual(4, len(data))
- self.assertEqual(set(['meter.test', 'meter.mine', 'meter.test.new',
- u'meter.accent\xe9\u0437']),
- set(r['name'] for r in data))
-
- def test_meters_query_with_timestamp(self):
- date_time = datetime.datetime(2012, 7, 2, 10, 41)
- isotime = date_time.isoformat()
- resp = self.get_json('/meters',
- q=[{'field': 'timestamp',
- 'op': 'gt',
- 'value': isotime}],
- expect_errors=True)
- self.assertEqual(400, resp.status_code)
- self.assertEqual('Unknown argument: "timestamp": '
- 'not valid for this resource',
- jsonutils.loads(resp.body)['error_message']
- ['faultstring'])
-
- def test_list_samples(self):
- data = self.get_json('/samples')
- self.assertEqual(7, len(data))
-
- def test_query_samples_with_invalid_field_name_and_non_eq_operator(self):
- resp = self.get_json('/samples',
- q=[{'field': 'non_valid_field_name',
- 'op': 'gt',
- 'value': 3}],
- expect_errors=True)
- resp_string = jsonutils.loads(resp.body)
- fault_string = resp_string['error_message']['faultstring']
- msg = ('Unknown argument: "non_valid_field_name"'
- ': unrecognized field in query: '
- '[<Query {key!r} '
- 'gt {value!r} ')
- msg = msg.format(key=u'non_valid_field_name', value=u'3')
- self.assertEqual(400, resp.status_code)
- self.assertTrue(fault_string.startswith(msg))
-
- def test_query_samples_with_invalid_field_name_and_eq_operator(self):
- resp = self.get_json('/samples',
- q=[{'field': 'non_valid_field_name',
- 'op': 'eq',
- 'value': 3}],
- expect_errors=True)
- resp_string = jsonutils.loads(resp.body)
- fault_string = resp_string['error_message']['faultstring']
- msg = ('Unknown argument: "non_valid_field_name"'
- ': unrecognized field in query: '
- '[<Query {key!r} eq {value!r} ')
- msg = msg.format(key=u'non_valid_field_name', value=u'3')
- self.assertEqual(400, resp.status_code)
- self.assertTrue(fault_string.startswith(msg))
-
- def test_query_samples_with_invalid_operator_and_valid_field_name(self):
- resp = self.get_json('/samples',
- q=[{'field': 'project_id',
- 'op': 'lt',
- 'value': '3'}],
- expect_errors=True)
- resp_string = jsonutils.loads(resp.body)
- fault_string = resp_string['error_message']['faultstring']
- expected_error_message = ("Invalid input for field/attribute op. " +
- "Value: 'lt'. unimplemented operator for" +
- " project_id")
- self.assertEqual(400, resp.status_code)
- self.assertEqual(fault_string, expected_error_message)
-
- def test_list_meters_query_wrong_type_metadata(self):
- resp = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.size',
- 'op': 'eq',
- 'value': '0',
- 'type': 'blob'}],
- expect_errors=True
- )
- expected_error_message = 'The data type blob is not supported.'
- resp_string = jsonutils.loads(resp.body)
- fault_string = resp_string['error_message']['faultstring']
- self.assertTrue(fault_string.startswith(expected_error_message))
-
- def test_query_samples_with_search_offset(self):
- resp = self.get_json('/samples',
- q=[{'field': 'search_offset',
- 'op': 'eq',
- 'value': 42}],
- expect_errors=True)
- self.assertEqual(400, resp.status_code)
- self.assertEqual("Invalid input for field/attribute field. "
- "Value: 'search_offset'. "
- "search_offset cannot be used without timestamp",
- jsonutils.loads(resp.body)['error_message']
- ['faultstring'])
-
- def test_list_meters_with_dict_metadata(self):
- data = self.get_json('/meters/meter.mine',
- q=[{'field':
- 'metadata.properties.prop_2.sub_prop_1',
- 'op': 'eq',
- 'value': 'sub_prop_value',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual('resource-id4', data[0]['resource_id'])
- metadata = data[0]['resource_metadata']
- self.assertIsNotNone(metadata)
- self.assertEqual('self.sample4', metadata['tag'])
- self.assertEqual('prop_value', metadata['properties.prop_1'])
-
- def test_list_meters_with_dict_metadata_with_dot_dollar_in_key(self):
- data = self.get_json('/meters/meter.mine',
- q=[{'field':
- 'metadata.properties.prop.3.$sub_prop.2',
- 'op': 'eq',
- 'value': 'sub_prop_value2',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual('resource-id4', data[0]['resource_id'])
- metadata = data[0]['resource_metadata']
- self.assertIsNotNone(metadata)
- self.assertEqual('self.sample4', metadata['tag'])
- self.assertEqual('prop_value', metadata['properties.prop_1'])
- self.assertEqual('sub_prop_value',
- metadata['properties.prop_2:sub_prop_1'])
-
- def test_get_one_sample(self):
- sample_id = self.messages[1]['message_id']
- data = self.get_json('/samples/%s' % sample_id)
- self.assertIn('id', data)
- del data['recorded_at']
- self.assertEqual({
- u'id': sample_id,
- u'metadata': {u'display_name': u'test-server',
- u'is_public': u'False',
- u'size': u'0',
- u'tag': u'self.sample1',
- u'util': u'0.47'},
- u'meter': u'meter.test',
- u'project_id': u'project-id',
- u'resource_id': u'resource-id',
- u'timestamp': u'2012-07-02T11:40:00',
- u'type': u'cumulative',
- u'unit': u'',
- u'source': 'test_source',
- u'user_id': u'user-id',
- u'volume': 3.0}, data)
-
- def test_get_not_existing_sample(self):
- resp = self.get_json('/samples/not_exists', expect_errors=True,
- status=404)
- self.assertEqual("Sample not_exists Not Found",
- jsonutils.loads(resp.body)['error_message']
- ['faultstring'])
-
- def test_list_samples_with_dict_metadata(self):
- data = self.get_json('/samples',
- q=[{'field':
- 'metadata.properties.prop_2.sub_prop_1',
- 'op': 'eq',
- 'value': 'sub_prop_value',
- }])
- self.assertIn('id', data[0])
- del data[0]['id'] # Randomly generated
- del data[0]['recorded_at']
- self.assertEqual([{
- u'user_id': u'user-id4',
- u'resource_id': u'resource-id4',
- u'timestamp': u'2012-07-02T10:43:00',
- u'meter': u'meter.mine',
- u'volume': 1.0,
- u'project_id': u'project-id2',
- u'type': u'gauge',
- u'unit': u'',
- u'source': u'test_source1',
- u'metadata': {
- u'display_name': u'test-server',
- u'properties.prop_2:sub_prop_1': u'sub_prop_value',
- u'util': u'0.58',
- u'tag': u'self.sample4',
- u'properties.prop_1': u'prop_value',
- u'is_public': u'True',
- u'size': u'0',
- u'properties.prop:3:$sub_prop:2': u'sub_prop_value2',
- }
- }], data)
-
- def test_list_with_field_metaquery(self):
- def _helper(url):
- resp = self.get_json(url,
- q=[{'field':
- 'metaquery',
- 'op': 'eq',
- 'value': 'cow',
- }],
- expect_errors=True)
- self.assertEqual(400, resp.status_code)
- expected = ('Unknown argument: "metaquery": '
- 'unrecognized field in query')
- self.assertIn(expected, resp.json['error_message']['faultstring'])
-
- _helper('/samples')
- _helper('/meters/meter.test')
-
- def test_list_meters_metadata_query(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample1',
- }],)
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
-
- def test_list_meters_resource_metadata_query(self):
- # NOTE(jd) Same test as above, but with the alias resource_metadata
- # as query field
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'resource_metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample1',
- }],)
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
-
- def test_list_meters_multi_metadata_query(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample1',
- },
- {'field': 'metadata.display_name',
- 'op': 'eq',
- 'value': 'test-server',
- }],)
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
-
- def test_list_meters_query_integer_metadata(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.size',
- 'op': 'eq',
- 'value': '0',
- 'type': 'integer'}]
- )
- self.assertEqual(2, len(data))
- self.assertEqual(set(['resource-id',
- 'resource-id3']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
- self.assertEqual(set(['0']),
- set(r['resource_metadata']['size'] for r in data))
-
- def test_list_meters_query_float_metadata(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.util',
- 'op': 'eq',
- 'value': '0.75',
- 'type': 'float'}]
- )
- self.assertEqual(2, len(data))
- self.assertEqual(set(['resource-id',
- 'resource-id3']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
- self.assertEqual(set(['0.75']),
- set(r['resource_metadata']['util'] for r in data))
-
- def test_list_meters_query_boolean_metadata(self):
- data = self.get_json('/meters/meter.mine',
- q=[{'field': 'metadata.is_public',
- 'op': 'eq',
- 'value': 'False',
- 'type': 'boolean'}]
- )
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id2']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.mine']),
- set(r['counter_name'] for r in data))
- self.assertEqual(set(['False']),
- set(r['resource_metadata']['is_public']
- for r in data))
-
- def test_list_meters_query_string_metadata(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample'}]
- )
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
- self.assertEqual(set(['self.sample']),
- set(r['resource_metadata']['tag'] for r in data))
-
- def test_list_meters_query_integer_float_metadata_without_type(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'metadata.size',
- 'op': 'eq',
- 'value': '0'},
- {'field': 'metadata.util',
- 'op': 'eq',
- 'value': '0.75'}]
- )
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id3']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
- self.assertEqual(set(['0']),
- set(r['resource_metadata']['size'] for r in data))
- self.assertEqual(set(['0.75']),
- set(r['resource_metadata']['util'] for r in data))
-
- def test_with_resource(self):
- data = self.get_json('/meters', q=[{'field': 'resource_id',
- 'value': 'resource-id',
- }])
- nids = set(r['name'] for r in data)
- self.assertEqual(set(['meter.test', 'meter.test.new']), nids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source']), sids)
-
- def test_with_resource_and_source(self):
- data = self.get_json('/meters', q=[{'field': 'resource_id',
- 'value': 'resource-id4',
- },
- {'field': 'source',
- 'value': 'test_source1',
- }])
- nids = set(r['name'] for r in data)
- self.assertEqual(set(['meter.mine', u'meter.accent\xe9\u0437']), nids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source1']), sids)
-
- def test_with_resource_and_metadata_query(self):
- data = self.get_json('/meters/meter.mine',
- q=[{'field': 'resource_id',
- 'op': 'eq',
- 'value': 'resource-id2',
- },
- {'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample2',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual(set(['resource-id2']),
- set(r['resource_id'] for r in data))
- self.assertEqual(set(['meter.mine']),
- set(r['counter_name'] for r in data))
-
- def test_with_source(self):
- data = self.get_json('/meters', q=[{'field': 'source',
- 'value': 'test_source',
- }])
- rids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-id',
- 'resource-id2',
- 'resource-id3']), rids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source']), sids)
-
- def test_with_source_and_metadata_query(self):
- data = self.get_json('/meters/meter.mine',
- q=[{'field': 'source',
- 'op': 'eq',
- 'value': 'test_source',
- },
- {'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample2',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual(set(['test_source']),
- set(r['source'] for r in data))
- self.assertEqual(set(['meter.mine']),
- set(r['counter_name'] for r in data))
-
- def test_with_source_non_existent(self):
- data = self.get_json('/meters',
- q=[{'field': 'source',
- 'value': 'test_source_doesnt_exist',
- }],
- )
- self.assertIsEmpty(data)
-
- def test_with_user(self):
- data = self.get_json('/meters',
- q=[{'field': 'user_id',
- 'value': 'user-id',
- }],
- )
-
- uids = set(r['user_id'] for r in data)
- self.assertEqual(set(['user-id']), uids)
-
- nids = set(r['name'] for r in data)
- self.assertEqual(set(['meter.mine', 'meter.test', 'meter.test.new']),
- nids)
-
- rids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-id', 'resource-id2']), rids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source']), sids)
-
- def test_with_user_and_source(self):
- data = self.get_json('/meters',
- q=[{'field': 'user_id',
- 'value': 'user-id4',
- },
- {'field': 'source',
- 'value': 'test_source1',
- }],
- )
-
- uids = set(r['user_id'] for r in data)
- self.assertEqual(set(['user-id4']), uids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source1']), sids)
-
- def test_with_user_and_metadata_query(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'user_id',
- 'op': 'eq',
- 'value': 'user-id',
- },
- {'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample1',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual(set(['user-id']), set(r['user_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
-
- def test_with_user_non_existent(self):
- data = self.get_json('/meters',
- q=[{'field': 'user_id',
- 'value': 'user-id-foobar123',
- }],
- )
- self.assertEqual([], data)
-
- def test_with_project(self):
- data = self.get_json('/meters',
- q=[{'field': 'project_id',
- 'value': 'project-id2',
- }],
- )
- rids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-id3', 'resource-id4']), rids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source', 'test_source1']), sids)
-
- def test_with_project_and_source(self):
- data = self.get_json('/meters',
- q=[{'field': 'project_id',
- 'value': 'project-id2',
- },
- {'field': 'source',
- 'value': 'test_source1',
- }],
- )
- rids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-id4']), rids)
-
- sids = set(r['source'] for r in data)
- self.assertEqual(set(['test_source1']), sids)
-
- def test_with_project_and_metadata_query(self):
- data = self.get_json('/meters/meter.test',
- q=[{'field': 'project_id',
- 'op': 'eq',
- 'value': 'project-id',
- },
- {'field': 'metadata.tag',
- 'op': 'eq',
- 'value': 'self.sample1',
- }])
- self.assertEqual(1, len(data))
- self.assertEqual(set(['project-id']),
- set(r['project_id'] for r in data))
- self.assertEqual(set(['meter.test']),
- set(r['counter_name'] for r in data))
-
- def test_with_project_non_existent(self):
- data = self.get_json('/meters',
- q=[{'field': 'project_id',
- 'value': 'jd-was-here',
- }],
- )
- self.assertEqual([], data)
-
- def test_list_meters_meter_id(self):
- data = self.get_json('/meters')
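-        # Each meter_id is expected to be base64("<resource_id>+<name>"),
-        # decodable back to the resource id and meter name.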
- for i in data:
- meter_id = '%s+%s' % (i['resource_id'], i['name'])
- expected = base64.b64encode(meter_id.encode('utf-8'))
- if six.PY3:
- expected = expected.decode('ascii')
- self.assertEqual(expected, i['meter_id'])
- self.assertFalse(i['meter_id'].endswith('\n'))
- self.assertEqual(
- [i['resource_id'], i['name']],
- base64.b64decode(i['meter_id']).decode('utf-8').split('+'))
diff --git a/ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py b/ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py
deleted file mode 100644
index e919e36c..00000000
--- a/ceilometer/tests/functional/api/v2/test_list_resources_scenarios.py
+++ /dev/null
@@ -1,586 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test listing resources.
-"""
-
-import datetime
-import json
-
-import six
-import webtest.app
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests.functional.api import v2
-
-
-class TestListResources(v2.FunctionalTest):
-
- def test_empty(self):
- data = self.get_json('/resources')
- self.assertEqual([], data)
-
- def _verify_resource_timestamps(self, res, first, last):
- # Bounds need not be tight (see ceilometer bug #1288372)
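-        # i.e. the reported window may start before the first sample and
-        # end after the last one, but never the other way around.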
- self.assertIn('first_sample_timestamp', res)
- self.assertGreaterEqual(first.isoformat(),
- res['first_sample_timestamp'])
- self.assertIn('last_sample_timestamp', res)
- self.assertLessEqual(last.isoformat(), res['last_sample_timestamp'])
-
- def test_instance_no_metadata(self):
- timestamp = datetime.datetime(2012, 7, 2, 10, 40)
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=timestamp,
- resource_metadata=None,
- source='test',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- data = self.get_json('/resources')
- self.assertEqual(1, len(data))
- self._verify_resource_timestamps(data[0], timestamp, timestamp)
-
- def test_instances(self):
- timestamps = {
- 'resource-id': datetime.datetime(2012, 7, 2, 10, 40),
- 'resource-id-alternate': datetime.datetime(2012, 7, 2, 10, 41),
- }
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=timestamps['resource-id'],
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id-alternate',
- timestamp=timestamps['resource-id-alternate'],
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='test',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources')
- self.assertEqual(2, len(data))
- for res in data:
- timestamp = timestamps.get(res['resource_id'])
- self._verify_resource_timestamps(res, timestamp, timestamp)
-
- def test_instance_multiple_samples(self):
- timestamps = [
- datetime.datetime(2012, 7, 2, 10, 41),
- datetime.datetime(2012, 7, 2, 10, 42),
- datetime.datetime(2012, 7, 2, 10, 40),
- ]
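-        # The list is deliberately unordered: timestamps[-1] is the
-        # earliest sample and timestamps[1] the latest, which the final
-        # assertion relies on.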
- for timestamp in timestamps:
- datapoint = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=timestamp,
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample-%s' % timestamp,
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- datapoint,
- self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- data = self.get_json('/resources')
- self.assertEqual(1, len(data))
- self._verify_resource_timestamps(data[0],
- timestamps[-1], timestamps[1])
-
- def test_instances_one(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='test',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources/resource-id')
- self.assertEqual('resource-id', data['resource_id'])
-
- def test_with_source(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='not-test',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources', q=[{'field': 'source',
- 'value': 'test_list_resources',
- }])
- ids = [r['resource_id'] for r in data]
- self.assertEqual(['resource-id'], ids)
- sources = [r['source'] for r in data]
- self.assertEqual(['test_list_resources'], sources)
-
- def test_resource_id_with_slash(self):
- s = sample.Sample(
- 'storage.containers.objects',
- 'gauge',
- '',
- 1,
- '19fbed01c21f4912901057021b9e7111',
- '45acc90399134206b3b41f3d3a0a06d6',
- '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40).isoformat(),
- resource_metadata={},
- source='test_show_special_resource',
- )
-
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- msg['timestamp'] = datetime.datetime(2012, 7, 2, 10, 40)
- self.conn.record_metering_data(msg)
-
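-        # '%252F' is the percent-encoded form of '%2F', itself the encoding
-        # of '/': the id is double-encoded so that the embedded slash is not
-        # taken for a path separator.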
- rid_encoded = '29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb%252Fglance'
- resp = self.get_json('/resources/%s' % rid_encoded)
- self.assertEqual("19fbed01c21f4912901057021b9e7111", resp["user_id"])
- self.assertEqual('29f809d9-88bb-4c40-b1ba-a77a1fcf8ceb/glance',
- resp["resource_id"])
-
- def test_with_invalid_resource_id(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id-1',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id',
- 'resource-id-2',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='test_list_resources',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- resp1 = self.get_json('/resources/resource-id-1')
- self.assertEqual("resource-id-1", resp1["resource_id"])
-
- resp2 = self.get_json('/resources/resource-id-2')
- self.assertEqual("resource-id-2", resp2["resource_id"])
-
- resp3 = self.get_json('/resources/resource-id-3', expect_errors=True)
- self.assertEqual(404, resp3.status_code)
- json_data = resp3.body
- if six.PY3:
- json_data = json_data.decode('utf-8')
- self.assertEqual("Resource resource-id-3 Not Found",
- json.loads(json_data)['error_message']
- ['faultstring'])
-
- def test_with_user(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='not-test',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources', q=[{'field': 'user_id',
- 'value': 'user-id',
- }])
- ids = [r['resource_id'] for r in data]
- self.assertEqual(['resource-id'], ids)
-
- def test_with_project(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id2',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='not-test',
- )
- msg2 = utils.meter_message_from_counter(
- sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources', q=[{'field': 'project_id',
- 'value': 'project-id',
- }])
- ids = [r['resource_id'] for r in data]
- self.assertEqual(['resource-id'], ids)
-
- def test_with_user_non_admin(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id2',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample1',
- },
- source='not-test',
- )
- msg2 = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources',
- headers={"X-Roles": "Member",
- "X-Project-Id": "project-id2"})
- ids = set(r['resource_id'] for r in data)
- self.assertEqual(set(['resource-id-alternate']), ids)
-
- def test_with_user_wrong_tenant(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project-id2',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample1',
- },
- source='not-test',
- )
- msg2 = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- data = self.get_json('/resources',
- headers={"X-Roles": "Member",
- "X-Project-Id": "project-wrong"})
- ids = set(r['resource_id'] for r in data)
- self.assertEqual(set(), ids)
-
- def test_metadata(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- 'dict_properties': {'key.$1': {'$key': 'val'}},
- 'not_ignored_list': ['returned'],
- },
- source='test',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- data = self.get_json('/resources')
- metadata = data[0]['metadata']
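-        # Nested metadata comes back flattened: the first level joins with
-        # '.', while '.' inside keys and deeper nesting are rendered with
-        # ':', as the expected keys below show.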
- self.assertEqual([(u'dict_properties.key:$1:$key', u'val'),
- (u'display_name', u'test-server'),
- (u'not_ignored_list', u"['returned']"),
- (u'tag', u'self.sample')],
- list(sorted(six.iteritems(metadata))))
-
- def test_resource_meter_links(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- data = self.get_json('/resources')
- links = data[0]['links']
- self.assertEqual(2, len(links))
- self.assertEqual('self', links[0]['rel'])
- self.assertIn((self.PATH_PREFIX + '/resources/resource-id'),
- links[0]['href'])
- self.assertEqual('instance', links[1]['rel'])
- self.assertIn((self.PATH_PREFIX + '/meters/instance?'
- 'q.field=resource_id&q.value=resource-id'),
- links[1]['href'])
-
- def test_resource_skip_meter_links(self):
- sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project-id',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- },
- source='test_list_resources',
- )
- msg = utils.meter_message_from_counter(
- sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- data = self.get_json('/resources?meter_links=0')
- links = data[0]['links']
-        self.assertEqual(1, len(links))
-        self.assertEqual('self', links[0]['rel'])
- self.assertIn((self.PATH_PREFIX + '/resources/resource-id'),
- links[0]['href'])
-
-
-class TestListResourcesRestriction(v2.FunctionalTest):
- def setUp(self):
- super(TestListResourcesRestriction, self).setUp()
- self.CONF.set_override('default_api_return_limit', 10, group='api')
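-        # 20 resources are recorded below, so the default cap of 10 is the
-        # binding limit unless a query overrides it.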
- for i in range(20):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id%s' % i,
- timestamp=(datetime.datetime(2012, 9, 25, 10, 30) +
- datetime.timedelta(seconds=i)),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_resource_limit(self):
- data = self.get_json('/resources?limit=1')
- self.assertEqual(1, len(data))
-
- def test_resource_limit_negative(self):
- self.assertRaises(webtest.app.AppError, self.get_json,
- '/resources?limit=-2')
-
- def test_resource_limit_bigger(self):
- data = self.get_json('/resources?limit=42')
- self.assertEqual(20, len(data))
-
- def test_resource_default_limit(self):
- data = self.get_json('/resources')
- self.assertEqual(10, len(data))
diff --git a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py b/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
deleted file mode 100644
index 7134a8ca..00000000
--- a/ceilometer/tests/functional/api/v2/test_list_samples_scenarios.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test listing raw samples.
-"""
-
-import datetime
-
-import mock
-from oslo_utils import timeutils
-import six
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests.functional.api import v2
-
-
-class TestListSamples(v2.FunctionalTest):
-
- def setUp(self):
- super(TestListSamples, self).setUp()
- patcher = mock.patch.object(timeutils, 'utcnow')
- self.addCleanup(patcher.stop)
- self.mock_utcnow = patcher.start()
- self.mock_utcnow.return_value = datetime.datetime(2014, 2, 11, 16, 42)
- self.sample1 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 7, 2, 10, 40),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample',
- 'dict_properties': {'key': 'value'},
- 'not_ignored_list': ['returned'],
- },
- source='test_source',
- )
- msg = utils.meter_message_from_counter(
- self.sample1, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- self.sample2 = sample.Sample(
- 'instance',
- 'cumulative',
- '',
- 1,
- 'user-id2',
- 'project2',
- 'resource-id-alternate',
- timestamp=datetime.datetime(2012, 7, 2, 10, 41),
- resource_metadata={'display_name': 'test-server',
- 'tag': 'self.sample2',
- },
- source='source2',
- )
- msg2 = utils.meter_message_from_counter(
- self.sample2, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg2)
-
- def test_all(self):
- data = self.get_json('/meters/instance')
- self.assertEqual(2, len(data))
- for s in data:
- self.assertEqual(timeutils.utcnow().isoformat(), s['recorded_at'])
-
- def test_all_trailing_slash(self):
- data = self.get_json('/meters/instance/')
- self.assertEqual(2, len(data))
-
- def test_empty_project(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'project_id',
- 'value': 'no-such-project',
- }])
- self.assertEqual([], data)
-
- def test_by_project(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'project_id',
- 'value': 'project1',
- }])
- self.assertEqual(1, len(data))
-
- def test_empty_resource(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'resource_id',
- 'value': 'no-such-resource',
- }])
- self.assertEqual([], data)
-
- def test_by_resource(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'resource_id',
- 'value': 'resource-id',
- }])
- self.assertEqual(1, len(data))
-
- def test_empty_source(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'source',
- 'value': 'no-such-source',
- }])
- self.assertEqual(0, len(data))
-
- def test_by_source(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'source',
- 'value': 'test_source',
- }])
- self.assertEqual(1, len(data))
-
- def test_empty_user(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'user_id',
- 'value': 'no-such-user',
- }])
- self.assertEqual([], data)
-
- def test_by_user(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'user_id',
- 'value': 'user-id',
- }])
- self.assertEqual(1, len(data))
-
- def test_metadata(self):
- data = self.get_json('/meters/instance',
- q=[{'field': 'resource_id',
- 'value': 'resource-id',
- }])
-        # local name chosen so as not to shadow the imported sample module
-        result = data[0]
-        self.assertIn('resource_metadata', result)
-        self.assertEqual(
-            [('dict_properties.key', 'value'),
-             ('display_name', 'test-server'),
-             ('not_ignored_list', "['returned']"),
-             ('tag', 'self.sample'),
-             ],
-            list(sorted(six.iteritems(result['resource_metadata']))))
diff --git a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py b/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
deleted file mode 100644
index fe6ba01d..00000000
--- a/ceilometer/tests/functional/api/v2/test_post_samples_scenarios.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#
-# Copyright 2013 Red Hat, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test listing raw events.
-"""
-
-import copy
-import datetime
-import os
-
-import fixtures
-import mock
-from oslo_utils import fileutils
-from oslo_utils import timeutils
-import six
-
-from ceilometer.tests.functional.api import v2
-
-
-class TestPostSamples(v2.FunctionalTest):
- def fake_notifier_sample(self, ctxt, event_type, payload):
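-        # Capture published samples, dropping the signature so the payloads
-        # can be compared directly with the posted data.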
- samples = payload['samples']
- for m in samples:
- del m['message_signature']
- self.published.append(samples)
-
- def _make_app(self, enable_acl=False):
- content = ('{"context_is_project": "project_id:%(project_id)s",'
- '"default" : "!",'
- '"telemetry:create_samples": ""}')
- if six.PY3:
- content = content.encode('utf-8')
- self.tempfile = fileutils.write_to_tempfile(content=content,
- prefix='policy',
- suffix='.json')
- self.CONF.set_override("policy_file", self.tempfile,
- group='oslo_policy')
- return super(TestPostSamples, self)._make_app()
-
- def tearDown(self):
- os.remove(self.tempfile)
- super(TestPostSamples, self).tearDown()
-
- def setUp(self):
- self.published = []
- notifier = mock.Mock()
- notifier.sample.side_effect = self.fake_notifier_sample
- self.useFixture(fixtures.MockPatch('oslo_messaging.Notifier',
- return_value=notifier))
- super(TestPostSamples, self).setUp()
-
- def test_one(self):
- s1 = [{'counter_name': 'apples',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
- data = self.post_json('/meters/apples/', s1)
-
- # timestamp not given so it is generated.
- s1[0]['timestamp'] = data.json[0]['timestamp']
- # Ignore message id that is randomly generated
- s1[0]['message_id'] = data.json[0]['message_id']
- # source is generated if not provided.
- s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
- self.assertEqual(s1, data.json)
- s1[0]["monotonic_time"] = None
- self.assertEqual(s1[0], self.published[0][0])
-
- def test_nested_metadata(self):
- s1 = [{'counter_name': 'apples',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'nest.name1': 'value1',
- 'name2': 'value2',
- 'nest.name2': 'value3'}}]
-
- data = self.post_json('/meters/apples/', s1)
-
- # timestamp not given so it is generated.
- s1[0]['timestamp'] = data.json[0]['timestamp']
- # Ignore message id that is randomly generated
- s1[0]['message_id'] = data.json[0]['message_id']
- # source is generated if not provided.
- s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
-
- unwound = copy.copy(s1[0])
- unwound['resource_metadata'] = {'nest': {'name1': 'value1',
- 'name2': 'value3'},
- 'name2': 'value2'}
- unwound["monotonic_time"] = None
- # only the published sample should be unwound, not the representation
- # in the API response
- self.assertEqual(s1[0], data.json[0])
- self.assertEqual(unwound, self.published[0][0])
-
- def test_invalid_counter_type(self):
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'INVALID_TYPE',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'closedstack',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
-
- data = self.post_json('/meters/my_counter_name/', s1,
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertEqual(0, len(self.published))
-
-    def test_message_id_provided(self):
-        """Do not accept a sample that already carries a message_id."""
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'message_id': 'evil',
- 'source': 'closedstack',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
-
- data = self.post_json('/meters/my_counter_name/', s1,
- expect_errors=True)
-
- self.assertEqual(400, data.status_int)
- self.assertEqual(0, len(self.published))
-
- def test_wrong_project_id(self):
- """Do not accept cross posting samples to different projects."""
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'closedstack',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
-
- data = self.post_json('/meters/my_counter_name/', s1,
- expect_errors=True,
- headers={
- "X-Roles": "Member",
- "X-Tenant-Name": "lu-tenant",
- "X-Project-Id":
- "bc23a9d531064583ace8f67dad60f6bb",
- })
-
- self.assertEqual(400, data.status_int)
- self.assertEqual(0, len(self.published))
-
- def test_multiple_samples(self):
- """Send multiple samples.
-
- The usecase here is to reduce the chatter and send the counters
- at a slower cadence.
- """
- samples = []
- for x in range(6):
- dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
- s = {'counter_name': 'apples',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': float(x * 3),
- 'source': 'evil',
- 'timestamp': dt.isoformat(),
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': str(x),
- 'name2': str(x + 4)}}
- samples.append(s)
-
- data = self.post_json('/meters/apples/', samples)
-
- for x, s in enumerate(samples):
- # source is modified to include the project_id.
- s['source'] = '%s:%s' % (s['project_id'],
- s['source'])
- # Ignore message id that is randomly generated
- s['message_id'] = data.json[x]['message_id']
-
- # remove tzinfo to compare generated timestamp
- # with the provided one
- c = data.json[x]
- timestamp = timeutils.parse_isotime(c['timestamp'])
- c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
- # do the same on the pipeline
- msg = self.published[0][x]
- timestamp = timeutils.parse_isotime(msg['timestamp'])
- msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
-
- self.assertEqual(s, c)
- s["monotonic_time"] = None
- self.assertEqual(s, self.published[0][x])
-
- def test_missing_mandatory_fields(self):
- """Do not accept posting samples with missing mandatory fields."""
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'closedstack',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
-
- # one by one try posting without a mandatory field.
- for m in ['counter_volume', 'counter_unit', 'counter_type',
- 'resource_id', 'counter_name']:
- s_broke = copy.copy(s1)
- del s_broke[0][m]
- print('posting without %s' % m)
- data = self.post_json('/meters/my_counter_name', s_broke,
- expect_errors=True)
- self.assertEqual(400, data.status_int)
-
- def test_multiple_project_id_and_admin(self):
- """Allow admin is allowed to set multiple project_id."""
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'closedstack',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- },
- {'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 2,
- 'source': 'closedstack',
- 'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
- data = self.post_json('/meters/my_counter_name/', s1,
- headers={"X-Roles": "admin"})
-
- self.assertEqual(201, data.status_int)
- for x, s in enumerate(s1):
- # source is modified to include the project_id.
- s['source'] = '%s:%s' % (s['project_id'],
- 'closedstack')
- # Ignore message id that is randomly generated
- s['message_id'] = data.json[x]['message_id']
- # timestamp not given so it is generated.
- s['timestamp'] = data.json[x]['timestamp']
- s.setdefault('resource_metadata', dict())
- self.assertEqual(s, data.json[x])
- s['monotonic_time'] = None
- self.assertEqual(s, self.published[0][x])
-
- def test_multiple_samples_multiple_sources(self):
- """Test posting with special conditions.
-
- Do accept a single post with some multiples sources with some of them
- null.
- """
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'paperstack',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- },
- {'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 5,
- 'source': 'waterstack',
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- },
- {'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 2,
- 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
- 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
- data = self.post_json('/meters/my_counter_name/', s1,
- expect_errors=True)
- self.assertEqual(201, data.status_int)
- for x, s in enumerate(s1):
- # source is modified to include the project_id.
- s['source'] = '%s:%s' % (
- s['project_id'],
- s.get('source', self.CONF.sample_source)
- )
- # Ignore message id that is randomly generated
- s['message_id'] = data.json[x]['message_id']
- # timestamp not given so it is generated.
- s['timestamp'] = data.json[x]['timestamp']
- s.setdefault('resource_metadata', dict())
- self.assertEqual(s, data.json[x])
- s['monotonic_time'] = None
- self.assertEqual(s, self.published[0][x])
-
- def test_missing_project_user_id(self):
- """Ensure missing project & user IDs are defaulted appropriately."""
- s1 = [{'counter_name': 'my_counter_name',
- 'counter_type': 'gauge',
- 'counter_unit': 'instance',
- 'counter_volume': 1,
- 'source': 'closedstack',
- 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
- 'resource_metadata': {'name1': 'value1',
- 'name2': 'value2'}}]
-
- project_id = 'bc23a9d531064583ace8f67dad60f6bb'
- user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff'
- data = self.post_json('/meters/my_counter_name/', s1,
- expect_errors=True,
- headers={
- 'X-Roles': 'chief-bottle-washer',
- 'X-Project-Id': project_id,
- 'X-User-Id': user_id,
- })
-
- self.assertEqual(201, data.status_int)
- for x, s in enumerate(s1):
- # source is modified to include the project_id.
- s['source'] = '%s:%s' % (project_id,
- s['source'])
- # Ignore message id that is randomly generated
- s['message_id'] = data.json[x]['message_id']
- # timestamp not given so it is generated.
- s['timestamp'] = data.json[x]['timestamp']
- s['user_id'] = user_id
- s['project_id'] = project_id
-
- self.assertEqual(s, data.json[x])
-
- s['monotonic_time'] = None
- self.assertEqual(s, self.published[0][x])
diff --git a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py b/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
deleted file mode 100644
index cafa1c80..00000000
--- a/ceilometer/tests/functional/api/v2/test_statistics_scenarios.py
+++ /dev/null
@@ -1,1693 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test events statistics retrieval."""
-
-import datetime
-
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer.tests import db as tests_db
-from ceilometer.tests.functional.api import v2
-
-
-class TestMaxProjectVolume(v2.FunctionalTest):
- PATH = '/meters/volume.size/statistics'
-
- def setUp(self):
- super(TestMaxProjectVolume, self).setUp()
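-        # Record three samples with volumes 5, 6 and 7, timestamped 10:30,
-        # 11:31 and 12:32 respectively.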
- for i in range(3):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id-%s' % i,
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_no_time_bounds(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- }])
- self.assertEqual(7, data[0]['max'])
- self.assertEqual(3, data[0]['count'])
-
- def test_start_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- self.assertEqual(7, data[0]['max'])
- self.assertEqual(2, data[0]['count'])
-
- def test_start_timestamp_after(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T12:34:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- self.assertEqual(5, data[0]['max'])
- self.assertEqual(1, data[0]['count'])
-
- def test_end_timestamp_before(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T09:54:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_start_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:32:00',
- },
- ])
- self.assertEqual(6, data[0]['max'])
- self.assertEqual(1, data[0]['count'])
-
-
-class TestMaxResourceVolume(v2.FunctionalTest):
- PATH = '/meters/volume.size/statistics'
-
- def setUp(self):
- super(TestMaxResourceVolume, self).setUp()
- for i in range(3):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_no_time_bounds(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- }])
- self.assertEqual(7, data[0]['max'])
- self.assertEqual(3, data[0]['count'])
-
- def test_no_time_bounds_with_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'resource_id',
- 'value': 'resource-id'}],
- period=3600)
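-        # With no time bounds the buckets are anchored at the first sample
-        # (10:30), so period_start advances in whole hours from there.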
- self.assertEqual(3, len(data))
- self.assertEqual(set([u'2012-09-25T10:30:00',
- u'2012-09-25T12:32:00',
- u'2012-09-25T11:31:00']),
- set(x['duration_start'] for x in data))
- self.assertEqual(3600, data[0]['period'])
- self.assertEqual(set([u'2012-09-25T10:30:00',
- u'2012-09-25T11:30:00',
- u'2012-09-25T12:30:00']),
- set(x['period_start'] for x in data))
-
- def test_period_with_negative_value(self):
- resp = self.get_json(self.PATH, expect_errors=True,
- q=[{'field': 'resource_id',
- 'value': 'resource-id'}],
- period=-1)
- self.assertEqual(400, resp.status_code)
-
- @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase')
- def test_period_with_large_value(self):
- resp = self.get_json(self.PATH, expect_errors=True,
- q=[{'field': 'user_id',
- 'value': 'user-id'}],
- period=10000000000000)
- self.assertEqual(400, resp.status_code)
- self.assertIn(b"Invalid period", resp.body)
-
- def test_start_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- self.assertEqual(7, data[0]['max'])
- self.assertEqual(2, data[0]['count'])
-
- def test_start_timestamp_after(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T12:34:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- self.assertEqual(5, data[0]['max'])
- self.assertEqual(1, data[0]['count'])
-
- def test_end_timestamp_before(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T09:54:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_start_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:32:00',
- },
- ])
- self.assertEqual(6, data[0]['max'])
- self.assertEqual(1, data[0]['count'])
-
-
-class TestSumProjectVolume(v2.FunctionalTest):
-
- PATH = '/meters/volume.size/statistics'
-
- def setUp(self):
- super(TestSumProjectVolume, self).setUp()
- for i in range(3):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id-%s' % i,
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_no_time_bounds(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- }])
- expected = 5 + 6 + 7
- self.assertEqual(expected, data[0]['sum'])
- self.assertEqual(3, data[0]['count'])
-
- def test_start_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- expected = 6 + 7
- self.assertEqual(expected, data[0]['sum'])
- self.assertEqual(2, data[0]['count'])
-
- def test_start_timestamp_after(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T12:34:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:30:00',
- },
- ])
- self.assertEqual(5, data[0]['sum'])
- self.assertEqual(1, data[0]['count'])
-
- def test_end_timestamp_before(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T09:54:00',
- },
- ])
- self.assertEqual([], data)
-
- def test_start_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'project_id',
- 'value': 'project1',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:32:00',
- },
- ])
- self.assertEqual(6, data[0]['sum'])
- self.assertEqual(1, data[0]['count'])
-
-
-class TestSumResourceVolume(v2.FunctionalTest):
-
- PATH = '/meters/volume.size/statistics'
-
- def setUp(self):
- super(TestSumResourceVolume, self).setUp()
- for i in range(3):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- 5 + i,
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_no_time_bounds(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- }])
- self.assertEqual(5 + 6 + 7, data[0]['sum'])
- self.assertEqual(3, data[0]['count'])
-
- def test_no_time_bounds_with_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'resource_id',
- 'value': 'resource-id'}],
- period=1800)
- self.assertEqual(3, len(data))
- self.assertEqual(set([u'2012-09-25T10:30:00',
- u'2012-09-25T12:32:00',
- u'2012-09-25T11:31:00']),
- set(x['duration_start'] for x in data))
- self.assertEqual(1800, data[0]['period'])
- self.assertEqual(set([u'2012-09-25T10:30:00',
- u'2012-09-25T11:30:00',
- u'2012-09-25T12:30:00']),
- set(x['period_start'] for x in data))
-
- def test_start_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- }])
- self.assertEqual(6 + 7, data[0]['sum'])
- self.assertEqual(2, data[0]['count'])
-
- def test_start_timestamp_with_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'resource_id',
- 'value': 'resource-id'},
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T10:15:00'}],
- period=7200)
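-        # With an explicit start timestamp the buckets are anchored there
-        # (10:15) rather than at the first sample.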
- self.assertEqual(2, len(data))
- self.assertEqual(set([u'2012-09-25T10:30:00',
- u'2012-09-25T12:32:00']),
- set(x['duration_start'] for x in data))
- self.assertEqual(7200, data[0]['period'])
- self.assertEqual(set([u'2012-09-25T10:15:00',
- u'2012-09-25T12:15:00']),
- set(x['period_start'] for x in data))
-
- def test_start_timestamp_after(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T12:34:00',
- }])
- self.assertEqual([], data)
-
- def test_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T11:30:00',
- }])
- self.assertEqual(5, data[0]['sum'])
- self.assertEqual(1, data[0]['count'])
-
- def test_end_timestamp_before(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2012-09-25T09:54:00',
- }])
- self.assertEqual([], data)
-
- def test_start_end_timestamp(self):
- data = self.get_json(self.PATH, q=[{'field': 'resource_id',
- 'value': 'resource-id',
- },
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2012-09-25T11:30:00',
- },
- {'field': 'timestamp',
- 'op': 'lt',
- 'value': '2012-09-25T11:32:00',
- }])
- self.assertEqual(6, data[0]['sum'])
- self.assertEqual(1, data[0]['count'])
-
-
-class TestGroupByInstance(v2.FunctionalTest):
-
- PATH = '/meters/instance/statistics'
-
- def setUp(self):
- super(TestGroupByInstance, self).setUp()
-
- test_sample_data = (
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
- 'source': 'source-2'},
- {'volume': 2, 'user': 'user-1', 'project': 'project-2',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
- 'source': 'source-2'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 4, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 4, 'user': 'user-3', 'project': 'project-1',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-3'},
- )
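-        # Seven samples spanning three users, two projects, three resources
-        # and three sources, so every group-by combination below is covered.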
-
- for test_sample in test_sample_data:
- c = sample.Sample(
- 'instance',
- sample.TYPE_CUMULATIVE,
- unit='s',
- volume=test_sample['volume'],
- user_id=test_sample['user'],
- project_id=test_sample['project'],
- resource_id=test_sample['resource'],
- timestamp=datetime.datetime(*test_sample['timestamp']),
- resource_metadata={'flavor': test_sample['metadata_flavor'],
- 'event': test_sample['metadata_event'], },
- source=test_sample['source'],
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_group_by_user(self):
- data = self.get_json(self.PATH, groupby=['user_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['user_id']), groupby_keys_set)
- self.assertEqual(set(['user-1', 'user-2', 'user-3']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'user_id': 'user-1'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'user_id': 'user-2'}:
- self.assertEqual(4, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(8, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'user_id': 'user-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
-
- def test_group_by_resource(self):
- data = self.get_json(self.PATH, groupby=['resource_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
- groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'resource_id': 'resource-1'}:
- self.assertEqual(3, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'resource_id': 'resource-2'}:
- self.assertEqual(3, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
-
- def test_group_by_project(self):
- data = self.get_json(self.PATH, groupby=['project_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'project_id': 'project-1'}:
- self.assertEqual(5, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(10, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'project_id': 'project-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(3, r['avg'])
-
- def test_group_by_unknown_field(self):
- response = self.get_json(self.PATH,
- expect_errors=True,
- groupby=['wtf'])
- self.assertEqual(400, response.status_code)
-
- def test_group_by_multiple_regular(self):
- data = self.get_json(self.PATH, groupby=['user_id', 'resource_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['user_id', 'resource_id']), groupby_keys_set)
- self.assertEqual(set(['user-1', 'user-2', 'user-3', 'resource-1',
- 'resource-2', 'resource-3']),
- groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'user_id': 'user-1',
- 'resource_id': 'resource-1'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'user_id': 'user-2',
- 'resource_id': 'resource-1'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'user_id': 'user-2',
- 'resource_id': 'resource-2'}:
- self.assertEqual(3, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'user_id': 'user-3',
- 'resource_id': 'resource-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
- else:
- self.assertNotEqual(grp, {'user_id': 'user-1',
- 'resource_id': 'resource-2'})
- self.assertNotEqual(grp, {'user_id': 'user-1',
- 'resource_id': 'resource-3'})
- self.assertNotEqual(grp, {'user_id': 'user-2',
- 'resource_id': 'resource-3'})
- self.assertNotEqual(grp, {'user_id': 'user-3',
- 'resource_id': 'resource-1'})
- self.assertNotEqual(grp, {'user_id': 'user-3',
- 'resource_id': 'resource-2'})
-
- def test_group_by_with_query_filter(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'project_id',
- 'op': 'eq',
- 'value': 'project-1'}],
- groupby=['resource_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-2', 'resource-3']),
- groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'resource_id': 'resource-1'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'resource_id': 'resource-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(1, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(1, r['avg'])
- elif grp == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
-
- def test_group_by_with_query_filter_multiple(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'user_id',
- 'op': 'eq',
- 'value': 'user-2'},
- {'field': 'source',
- 'op': 'eq',
- 'value': 'source-1'}],
- groupby=['project_id', 'resource_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id', 'resource_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2',
- 'resource-1', 'resource-2']),
- groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'project_id': 'project-1',
- 'resource_id': 'resource-1'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'project_id': 'project-1',
- 'resource_id': 'resource-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(1, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(1, r['avg'])
- elif grp == {'project_id': 'project-2',
- 'resource_id': 'resource-2'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
- else:
- self.assertNotEqual(grp, {'project_id': 'project-2',
- 'resource_id': 'resource-1'})
-
- def test_group_by_with_period(self):
- data = self.get_json(self.PATH,
- groupby=['project_id'],
- period=7200)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set(sub_dict['period_start'] for sub_dict in data)
- period_start_valid = set([u'2013-08-01T10:11:00',
- u'2013-08-01T14:11:00',
- u'2013-08-01T16:11:00'])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in data:
- grp = r['groupby']
- period_start = r['period_start']
- if (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T10:11:00'):
- self.assertEqual(3, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(4260, r['duration'])
- self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T11:22:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T12:11:00', r['period_end'])
- elif (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T14:11:00'):
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(4260, r['duration'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T16:10:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
- elif (grp == {'project_id': 'project-2'} and
- period_start == u'2013-08-01T14:11:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T15:37:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T15:37:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
- elif (grp == {'project_id': 'project-2'} and
- period_start == u'2013-08-01T16:11:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T18:11:00', r['period_end'])
- else:
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-1'},
- u'2013-08-01T16:11:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T10:11:00'])
-
- def test_group_by_with_query_filter_and_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'source',
- 'op': 'eq',
- 'value': 'source-1'}],
- groupby=['project_id'],
- period=7200)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set(sub_dict['period_start'] for sub_dict in data)
- period_start_valid = set([u'2013-08-01T10:11:00',
- u'2013-08-01T14:11:00',
- u'2013-08-01T16:11:00'])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in data:
- grp = r['groupby']
- period_start = r['period_start']
- if (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T10:11:00'):
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(1, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(1, r['avg'])
- self.assertEqual(1740, r['duration'])
- self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T10:40:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T12:11:00', r['period_end'])
- elif (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T14:11:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T16:11:00', r['period_end'])
- elif (grp == {'project_id': 'project-2'} and
- period_start == u'2013-08-01T16:11:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T18:11:00', r['period_end'])
- else:
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-1'},
- u'2013-08-01T16:11:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T10:11:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T14:11:00'])
-
- def test_group_by_start_timestamp_after(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T17:28:01'}],
- groupby=['project_id'])
- self.assertEqual([], data)
-
- def test_group_by_end_timestamp_before(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T10:10:59'}],
- groupby=['project_id'])
- self.assertEqual([], data)
-
- def test_group_by_start_timestamp(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T14:58:00'}],
- groupby=['project_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'project_id': 'project-1'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'project_id': 'project-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(3, r['avg'])
-
- def test_group_by_end_timestamp(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T11:45:00'}],
- groupby=['project_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'project_id': 'project-1'}:
- self.assertEqual(3, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(2, r['avg'])
-
- def test_group_by_start_end_timestamp(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T08:17:03'},
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T23:59:59'}],
- groupby=['project_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'project_id': 'project-1'}:
- self.assertEqual(5, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(10, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'project_id': 'project-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(6, r['sum'])
- self.assertEqual(3, r['avg'])
-
- def test_group_by_start_end_timestamp_with_query_filter(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'project_id',
- 'op': 'eq',
- 'value': 'project-1'},
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T11:01:00'},
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T20:00:00'}],
- groupby=['resource_id'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['resource_id']), groupby_keys_set)
- self.assertEqual(set(['resource-1', 'resource-3']), groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'resource_id': 'resource-1'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'resource_id': 'resource-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
-
- def test_group_by_start_end_timestamp_with_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T14:00:00'},
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T17:00:00'}],
- groupby=['project_id'],
- period=3600)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set(sub_dict['period_start'] for sub_dict in data)
- period_start_valid = set([u'2013-08-01T14:00:00',
- u'2013-08-01T15:00:00',
- u'2013-08-01T16:00:00'])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in data:
- grp = r['groupby']
- period_start = r['period_start']
- if (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T14:00:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
- self.assertEqual(3600, r['period'])
- self.assertEqual(u'2013-08-01T15:00:00', r['period_end'])
- elif (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T16:00:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T16:10:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T16:10:00', r['duration_end'])
- self.assertEqual(3600, r['period'])
- self.assertEqual(u'2013-08-01T17:00:00', r['period_end'])
- elif (grp == {'project_id': 'project-2'} and
- period_start == u'2013-08-01T15:00:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T15:37:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T15:37:00', r['duration_end'])
- self.assertEqual(3600, r['period'])
- self.assertEqual(u'2013-08-01T16:00:00', r['period_end'])
- else:
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-1'},
- u'2013-08-01T15:00:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T14:00:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T16:00:00'])
-
- def test_group_by_start_end_timestamp_with_query_filter_and_period(self):
- data = self.get_json(self.PATH,
- q=[{'field': 'source',
- 'op': 'eq',
- 'value': 'source-1'},
- {'field': 'timestamp',
- 'op': 'ge',
- 'value': '2013-08-01T10:00:00'},
- {'field': 'timestamp',
- 'op': 'le',
- 'value': '2013-08-01T18:00:00'}],
- groupby=['project_id'],
- period=7200)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- self.assertEqual(set(['project-1', 'project-2']), groupby_vals_set)
- period_start_set = set(sub_dict['period_start'] for sub_dict in data)
- period_start_valid = set([u'2013-08-01T10:00:00',
- u'2013-08-01T14:00:00',
- u'2013-08-01T16:00:00'])
- self.assertEqual(period_start_valid, period_start_set)
-
- for r in data:
- grp = r['groupby']
- period_start = r['period_start']
- if (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T10:00:00'):
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(1, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(1, r['avg'])
- self.assertEqual(1740, r['duration'])
- self.assertEqual(u'2013-08-01T10:11:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T10:40:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T12:00:00', r['period_end'])
- elif (grp == {'project_id': 'project-1'} and
- period_start == u'2013-08-01T14:00:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(2, r['sum'])
- self.assertEqual(2, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T14:59:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T16:00:00', r['period_end'])
- elif (grp == {'project_id': 'project-2'} and
- period_start == u'2013-08-01T16:00:00'):
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
- self.assertEqual(0, r['duration'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_start'])
- self.assertEqual(u'2013-08-01T17:28:00', r['duration_end'])
- self.assertEqual(7200, r['period'])
- self.assertEqual(u'2013-08-01T18:00:00', r['period_end'])
- else:
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-1'},
- u'2013-08-01T16:00:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T10:00:00'])
- self.assertNotEqual([grp, period_start],
- [{'project_id': 'project-2'},
- u'2013-08-01T14:00:00'])
-
-
-@tests_db.run_with('mongodb', 'hbase')
-class TestGroupBySource(v2.FunctionalTest):
-
- # FIXME(terriyu): We have to put test_group_by_source in its own class
- # because SQLAlchemy currently doesn't support group by source statistics.
- # When group by source is supported in SQLAlchemy, this test should be
- # moved to TestGroupByInstance with all the other group by statistics
- # tests.
-
- PATH = '/meters/instance/statistics'
-
- def setUp(self):
- super(TestGroupBySource, self).setUp()
-
- test_sample_data = (
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
- 'source': 'source-2'},
- {'volume': 2, 'user': 'user-1', 'project': 'project-2',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
- 'source': 'source-2'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 4, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source-1'},
- {'volume': 4, 'user': 'user-3', 'project': 'project-1',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2',
- 'source': 'source-3'},
- )
-
- for test_sample in test_sample_data:
- c = sample.Sample(
- 'instance',
- sample.TYPE_CUMULATIVE,
- unit='s',
- volume=test_sample['volume'],
- user_id=test_sample['user'],
- project_id=test_sample['project'],
- resource_id=test_sample['resource'],
- timestamp=datetime.datetime(*test_sample['timestamp']),
- resource_metadata={'flavor': test_sample['metadata_flavor'],
- 'event': test_sample['metadata_event'], },
- source=test_sample['source'],
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def tearDown(self):
- self.conn.clear()
- super(TestGroupBySource, self).tearDown()
-
- def test_group_by_source(self):
- data = self.get_json(self.PATH, groupby=['source'])
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['source']), groupby_keys_set)
- self.assertEqual(set(['source-1', 'source-2', 'source-3']),
- groupby_vals_set)
-
- for r in data:
- grp = r['groupby']
- if grp == {'source': 'source-1'}:
- self.assertEqual(4, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(1, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(8, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'source': 'source-2'}:
- self.assertEqual(2, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(2, r['min'])
- self.assertEqual(2, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(2, r['avg'])
- elif grp == {'source': 'source-3'}:
- self.assertEqual(1, r['count'])
- self.assertEqual('s', r['unit'])
- self.assertEqual(4, r['min'])
- self.assertEqual(4, r['max'])
- self.assertEqual(4, r['sum'])
- self.assertEqual(4, r['avg'])
-
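As an aside, the group-by-source semantics asserted above can be reproduced independently of any storage driver. A minimal sketch in plain Python, with the per-source volume lists transcribed from the setUp fixture (illustrative only, not part of the removed suite):

    import statistics

    # Per-source sample volumes, transcribed from the setUp data above.
    volumes_by_source = {
        'source-1': [1, 1, 2, 4],
        'source-2': [2, 2],
        'source-3': [4],
    }

    for source, volumes in sorted(volumes_by_source.items()):
        # Mirrors the count/min/max/sum/avg assertions in
        # test_group_by_source.
        print(source, len(volumes), min(volumes), max(volumes),
              sum(volumes), statistics.mean(volumes))

Running it yields count 4, min 1, max 4, sum 8, avg 2 for source-1, matching the expected values in the test.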
-
-class TestSelectableAggregates(v2.FunctionalTest):
-
- PATH = '/meters/instance/statistics'
-
- def setUp(self):
- super(TestSelectableAggregates, self).setUp()
-
- test_sample_data = (
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
- 'source': 'source'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11),
- 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 5, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 4, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 9, 'user': 'user-3', 'project': 'project-3',
- 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3',
- 'source': 'source'},
- )
-
- for test_sample in test_sample_data:
- c = sample.Sample(
- 'instance',
- sample.TYPE_GAUGE,
- unit='instance',
- volume=test_sample['volume'],
- user_id=test_sample['user'],
- project_id=test_sample['project'],
- resource_id=test_sample['resource'],
- timestamp=datetime.datetime(*test_sample['timestamp']),
- resource_metadata={'flavor': test_sample['metadata_flavor'],
- 'event': test_sample['metadata_event'], },
- source=test_sample['source'],
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def _do_test_per_tenant_selectable_standard_aggregate(self,
- aggregate,
- expected_values):
- agg_args = {'aggregate.func': aggregate}
- data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- projects = ['project-1', 'project-2', 'project-3']
- self.assertEqual(set(projects), groupby_vals_set)
-
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- for r in data:
- grp = r['groupby']
- for project in projects:
- if grp == {'project_id': project}:
- expected = expected_values[projects.index(project)]
- self.assertEqual('instance', r['unit'])
- self.assertAlmostEqual(r[aggregate], expected)
- self.assertIn('aggregate', r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertAlmostEqual(r['aggregate'][aggregate], expected)
- for a in standard_aggregates - set([aggregate]):
- self.assertNotIn(a, r)
-
- def test_per_tenant_selectable_max(self):
- self._do_test_per_tenant_selectable_standard_aggregate('max',
- [5, 4, 9])
-
- def test_per_tenant_selectable_min(self):
- self._do_test_per_tenant_selectable_standard_aggregate('min',
- [2, 1, 9])
-
- def test_per_tenant_selectable_sum(self):
- self._do_test_per_tenant_selectable_standard_aggregate('sum',
- [9, 9, 9])
-
- def test_per_tenant_selectable_avg(self):
- self._do_test_per_tenant_selectable_standard_aggregate('avg',
- [3, 2.25, 9])
-
- def test_per_tenant_selectable_count(self):
- self._do_test_per_tenant_selectable_standard_aggregate('count',
- [3, 4, 1])
-
- def test_per_tenant_selectable_parameterized_aggregate(self):
- agg_args = {'aggregate.func': 'cardinality',
- 'aggregate.param': 'resource_id'}
- data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- projects = ['project-1', 'project-2', 'project-3']
- self.assertEqual(set(projects), groupby_vals_set)
-
- aggregate = 'cardinality/resource_id'
- expected_values = [2.0, 3.0, 1.0]
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- for r in data:
- grp = r['groupby']
- for project in projects:
- if grp == {'project_id': project}:
- expected = expected_values[projects.index(project)]
- self.assertEqual('instance', r['unit'])
- self.assertNotIn(aggregate, r)
- self.assertIn('aggregate', r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertEqual(expected, r['aggregate'][aggregate])
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
- def test_large_quantum_selectable_parameterized_aggregate(self):
-        # Add a large number of datapoints that should not affect the
-        # cardinality, provided the computation logic is tolerant of
-        # different DB behavior with larger numbers of samples per period.
- for i in range(200):
- s = sample.Sample(
- 'instance',
- sample.TYPE_GAUGE,
- unit='instance',
- volume=i * 1.0,
- user_id='user-1',
- project_id='project-1',
- resource_id='resource-1',
- timestamp=datetime.datetime(2013, 8, 1, 11, i % 60),
- resource_metadata={'flavor': 'm1.tiny',
- 'event': 'event-1', },
- source='source',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- agg_args = {'aggregate.func': 'cardinality',
- 'aggregate.param': 'resource_id'}
- data = self.get_json(self.PATH, **agg_args)
-
- aggregate = 'cardinality/resource_id'
- expected_value = 5.0
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- r = data[0]
- self.assertNotIn(aggregate, r)
- self.assertIn('aggregate', r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertEqual(expected_value, r['aggregate'][aggregate])
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
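The large-quantum test above relies on cardinality being a distinct count rather than a sample count: the 200 extra samples all land on an already-seen resource, so the expected value stays at 5.0. A quick sketch of that invariant (assuming the simple distinct-count definition):

    # resource_id of every sample in this class's fixture, plus the 200
    # extra samples, which all reuse 'resource-1'.
    resource_ids = ['resource-1', 'resource-3', 'resource-5', 'resource-2',
                    'resource-4', 'resource-2', 'resource-3', 'resource-4']
    resource_ids += ['resource-1'] * 200

    # cardinality/resource_id counts distinct values, so repeated samples
    # on a known resource do not change it.
    assert float(len(set(resource_ids))) == 5.0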
- def test_repeated_unparameterized_aggregate(self):
- agg_params = 'aggregate.func=count&aggregate.func=count'
- data = self.get_json(self.PATH, override_params=agg_params)
-
- aggregate = 'count'
- expected_value = 8.0
- standard_aggregates = set(['min', 'max', 'sum', 'avg'])
- r = data[0]
- self.assertIn(aggregate, r)
- self.assertEqual(expected_value, r[aggregate])
- self.assertIn('aggregate', r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertEqual(expected_value, r['aggregate'][aggregate])
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
- def test_fully_repeated_parameterized_aggregate(self):
- agg_params = ('aggregate.func=cardinality&'
- 'aggregate.param=resource_id&'
- 'aggregate.func=cardinality&'
- 'aggregate.param=resource_id&')
- data = self.get_json(self.PATH, override_params=agg_params)
-
- aggregate = 'cardinality/resource_id'
- expected_value = 5.0
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- r = data[0]
- self.assertIn('aggregate', r)
- self.assertNotIn(aggregate, r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertEqual(expected_value, r['aggregate'][aggregate])
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
- def test_partially_repeated_parameterized_aggregate(self):
- agg_params = ('aggregate.func=cardinality&'
- 'aggregate.param=resource_id&'
- 'aggregate.func=cardinality&'
- 'aggregate.param=project_id&')
- data = self.get_json(self.PATH, override_params=agg_params)
-
- expected_values = {'cardinality/resource_id': 5.0,
- 'cardinality/project_id': 3.0}
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- r = data[0]
- self.assertIn('aggregate', r)
- for aggregate in expected_values.keys():
- self.assertNotIn(aggregate, r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertEqual(expected_values[aggregate],
- r['aggregate'][aggregate])
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
- def test_bad_selectable_parameterized_aggregate(self):
- agg_args = {'aggregate.func': 'cardinality',
- 'aggregate.param': 'injection_attack'}
- resp = self.get_json(self.PATH, status=[400],
- groupby=['project_id'], **agg_args)
- self.assertIn('error_message', resp)
- self.assertEqual(resp['error_message'].get('faultcode'),
- 'Client')
- self.assertEqual(resp['error_message'].get('faultstring'),
- 'Bad aggregate: cardinality.injection_attack')
-
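The injection_attack case above exercises server-side validation of aggregate.param; the actual check lived in the deleted v2 controller code. A representative sketch of that style of whitelist validation (the function name and allowed set here are illustrative assumptions, not the controller's actual code):

    ALLOWED_CARDINALITY_PARAMS = {'resource_id', 'project_id',
                                  'user_id', 'source'}  # assumed whitelist

    def validate_aggregate(func, param):
        # Reject anything that could smuggle arbitrary expressions into the
        # backend query, mirroring the 400 'Bad aggregate: ...' faultstring
        # asserted in test_bad_selectable_parameterized_aggregate.
        if func == 'cardinality' and param not in ALLOWED_CARDINALITY_PARAMS:
            raise ValueError('Bad aggregate: %s.%s' % (func, param))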
-
-@tests_db.run_with('mongodb', 'hbase')
-class TestUnparameterizedAggregates(v2.FunctionalTest):
-
- # We put the stddev test case in a separate class so that we
- # can easily exclude the sqlalchemy scenario, as sqlite doesn't
- # support the stddev_pop function and fails ungracefully with
-    # OperationalError when it is used. However, we still want to
- # test the corresponding functionality in the mongo driver.
- # For hbase, the skip on NotImplementedError logic works
- # in the usual way.
-
- PATH = '/meters/instance/statistics'
-
- def setUp(self):
- super(TestUnparameterizedAggregates, self).setUp()
-
- test_sample_data = (
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10),
- 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 15, 37),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1',
- 'source': 'source'},
- {'volume': 1, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-5', 'timestamp': (2013, 8, 1, 10, 11),
- 'metadata_flavor': 'm1.medium', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 2, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 14, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 5, 'user': 'user-1', 'project': 'project-1',
- 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 4, 'user': 'user-2', 'project': 'project-2',
- 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2',
- 'source': 'source'},
- {'volume': 9, 'user': 'user-3', 'project': 'project-3',
- 'resource': 'resource-4', 'timestamp': (2013, 8, 1, 11, 59),
- 'metadata_flavor': 'm1.large', 'metadata_event': 'event-3',
- 'source': 'source'},
- )
-
- for test_sample in test_sample_data:
- c = sample.Sample(
- 'instance',
- sample.TYPE_GAUGE,
- unit='instance',
- volume=test_sample['volume'],
- user_id=test_sample['user'],
- project_id=test_sample['project'],
- resource_id=test_sample['resource'],
- timestamp=datetime.datetime(*test_sample['timestamp']),
- resource_metadata={'flavor': test_sample['metadata_flavor'],
- 'event': test_sample['metadata_event'], },
- source=test_sample['source'],
- )
- msg = utils.meter_message_from_counter(
- c, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_per_tenant_selectable_unparameterized_aggregate(self):
- agg_args = {'aggregate.func': 'stddev'}
- data = self.get_json(self.PATH, groupby=['project_id'], **agg_args)
- groupby_keys_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].keys())
- groupby_vals_set = set(x for sub_dict in data
- for x in sub_dict['groupby'].values())
- self.assertEqual(set(['project_id']), groupby_keys_set)
- projects = ['project-1', 'project-2', 'project-3']
- self.assertEqual(set(projects), groupby_vals_set)
-
- aggregate = 'stddev'
- expected_values = [1.4142, 1.0897, 0.0]
- standard_aggregates = set(['count', 'min', 'max', 'sum', 'avg'])
- for r in data:
- grp = r['groupby']
- for project in projects:
- if grp == {'project_id': project}:
- expected = expected_values[projects.index(project)]
- self.assertEqual('instance', r['unit'])
- self.assertNotIn(aggregate, r)
- self.assertIn('aggregate', r)
- self.assertIn(aggregate, r['aggregate'])
- self.assertAlmostEqual(r['aggregate'][aggregate],
- expected,
- places=4)
- for a in standard_aggregates:
- self.assertNotIn(a, r)
-
-
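The expected values in the stddev test are population standard deviations (the stddev_pop semantics named in the class comment) of the per-project volumes, and can be verified in pure Python from the fixture data:

    import math

    # Per-project sample volumes from the setUp fixture above.
    volumes_by_project = {
        'project-1': [2, 2, 5],
        'project-2': [2, 1, 2, 4],
        'project-3': [9],
    }

    def stddev_pop(values):
        # Population standard deviation, matching SQL's stddev_pop.
        mean = sum(values) / float(len(values))
        return math.sqrt(sum((v - mean) ** 2 for v in values) / len(values))

    for project, volumes in sorted(volumes_by_project.items()):
        print(project, round(stddev_pop(volumes), 4))
    # -> project-1 1.4142, project-2 1.0897, project-3 0.0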
-@tests_db.run_with('mongodb')
-class TestBigValueStatistics(v2.FunctionalTest):
-
- PATH = '/meters/volume.size/statistics'
-
- def setUp(self):
- super(TestBigValueStatistics, self).setUp()
- for i in range(0, 3):
- s = sample.Sample(
- 'volume.size',
- 'gauge',
- 'GiB',
- (i + 1) * (10 ** 12),
- 'user-id',
- 'project1',
- 'resource-id',
- timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i),
- resource_metadata={'display_name': 'test-volume',
- 'tag': 'self.sample',
- },
- source='source1',
- )
- msg = utils.meter_message_from_counter(
- s, self.CONF.publisher.telemetry_secret,
- )
- self.conn.record_metering_data(msg)
-
- def test_big_value_statistics(self):
- data = self.get_json(self.PATH)
-
- expected_values = {'count': 3,
- 'min': 10 ** 12,
- 'max': 3 * 10 ** 12,
- 'sum': 6 * 10 ** 12,
- 'avg': 2 * 10 ** 12}
- self.assertEqual(1, len(data))
- for d in data:
- for name, expected_value in expected_values.items():
- self.assertIn(name, d)
- self.assertEqual(expected_value, d[name])
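As a sanity check, the expected aggregates follow directly from the three stored volumes, and the 10**12-scale values are still far below 2**53, so they remain exactly representable even if a driver converts them to IEEE-754 doubles (a plausible motivation for this test, though the diff does not state it):

    values = [(i + 1) * 10 ** 12 for i in range(3)]  # matches the setUp loop
    assert min(values) == 10 ** 12
    assert max(values) == 3 * 10 ** 12
    assert sum(values) == 6 * 10 ** 12
    assert sum(values) // len(values) == 2 * 10 ** 12
    assert max(values) < 2 ** 53  # still exact as a double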
diff --git a/ceilometer/tests/functional/api/v2/test_versions.py b/ceilometer/tests/functional/api/v2/test_versions.py
deleted file mode 100644
index 44bb97ce..00000000
--- a/ceilometer/tests/functional/api/v2/test_versions.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ceilometer.tests.functional import api
-
-V2_MEDIA_TYPES = [
- {
- 'base': 'application/json',
- 'type': 'application/vnd.openstack.telemetry-v2+json'
- }, {
- 'base': 'application/xml',
- 'type': 'application/vnd.openstack.telemetry-v2+xml'
- }
-]
-
-V2_HTML_DESCRIPTION = {
- 'href': 'http://docs.openstack.org/',
- 'rel': 'describedby',
- 'type': 'text/html',
-}
-
-V2_EXPECTED_RESPONSE = {
- 'id': 'v2',
- 'links': [
- {
- 'rel': 'self',
- 'href': 'http://localhost/v2',
- },
- V2_HTML_DESCRIPTION
- ],
- 'media-types': V2_MEDIA_TYPES,
- 'status': 'stable',
- 'updated': '2013-02-13T00:00:00Z',
-}
-
-V2_VERSION_RESPONSE = {
- "version": V2_EXPECTED_RESPONSE
-}
-
-VERSIONS_RESPONSE = {
- "versions": {
- "values": [
- V2_EXPECTED_RESPONSE
- ]
- }
-}
-
-
-class TestVersions(api.FunctionalTest):
-
- def test_versions(self):
- data = self.get_json('/')
- self.assertEqual(VERSIONS_RESPONSE, data)
diff --git a/ceilometer/tests/functional/gabbi/__init__.py b/ceilometer/tests/functional/gabbi/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/functional/gabbi/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/functional/gabbi/fixtures.py b/ceilometer/tests/functional/gabbi/fixtures.py
deleted file mode 100644
index f52c0ba6..00000000
--- a/ceilometer/tests/functional/gabbi/fixtures.py
+++ /dev/null
@@ -1,170 +0,0 @@
-#
-# Copyright 2015 Red Hat. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Fixtures used during Gabbi-based test runs."""
-
-import datetime
-import os
-import random
-from unittest import case
-import uuid
-
-from gabbi import fixture
-from oslo_config import cfg
-from oslo_utils import fileutils
-import six
-from six.moves.urllib import parse as urlparse
-
-from ceilometer.api import app
-from ceilometer.publisher import utils
-from ceilometer import sample
-from ceilometer import service
-from ceilometer import storage
-
-# TODO(chdent): For now only MongoDB is supported, because of easy
-# database name handling and an intentional focus on the API, not the
-# data store.
-ENGINES = ['mongodb']
-
-# NOTE(chdent): Hack to restore a semblance of global configuration to
-# pass to the WSGI app used per test suite. LOAD_APP_KWARGS holds the oslo
-# configuration and the pecan application configuration, the critical
-# part of which is a reference to the current indexer.
-LOAD_APP_KWARGS = None
-
-
-def setup_app():
- global LOAD_APP_KWARGS
- return app.load_app(**LOAD_APP_KWARGS)
-
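setup_app exists so that gabbi can construct the WSGI application lazily, after ConfigFixture has populated LOAD_APP_KWARGS. In a typical gabbi test loader this callable is passed as the intercept target; a sketch of that wiring under the usual gabbi conventions (the load_tests module shown here is assumed, not part of this diff):

    import os

    from gabbi import driver

    from ceilometer.tests.functional.gabbi import fixtures as fixture_module

    TESTS_DIR = os.path.join(os.path.dirname(__file__), 'gabbits')

    def load_tests(loader, tests, pattern):
        # gabbi builds one unittest case per YAML test entry; intercept=
        # points at the setup_app callable above so that HTTP requests are
        # served in-process rather than over a real socket.
        return driver.build_tests(TESTS_DIR, loader,
                                  intercept=fixture_module.setup_app,
                                  fixture_module=fixture_module)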
-
-class ConfigFixture(fixture.GabbiFixture):
- """Establish the relevant configuration for a test run."""
-
- def start_fixture(self):
- """Set up config."""
-
- global LOAD_APP_KWARGS
-
- self.conf = None
-
- # Determine the database connection.
- db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace(
- "mysql://", "mysql+pymysql://")
- if not db_url:
- raise case.SkipTest('No database connection configured')
-
- engine = urlparse.urlparse(db_url).scheme
- if engine not in ENGINES:
- raise case.SkipTest('Database engine not supported')
-
- self.conf = service.prepare_service([], [])
-
- content = ('{"default": ""}')
- if six.PY3:
- content = content.encode('utf-8')
- self.tempfile = fileutils.write_to_tempfile(content=content,
- prefix='policy',
- suffix='.json')
-
- self.conf.set_override("policy_file", self.tempfile,
- group='oslo_policy')
- self.conf.set_override(
- 'api_paste_config',
- os.path.abspath(
- 'ceilometer/tests/functional/gabbi/gabbi_paste.ini')
- )
-
- # A special pipeline is required to use the direct publisher.
- self.conf.set_override(
- 'pipeline_cfg_file',
- 'ceilometer/tests/functional/gabbi_pipeline.yaml')
-
- database_name = '%s-%s' % (db_url, str(uuid.uuid4()))
- self.conf.set_override('connection', database_name, group='database')
- self.conf.set_override('metering_connection', '', group='database')
-
- self.conf.set_override('gnocchi_is_enabled', False, group='api')
- self.conf.set_override('aodh_is_enabled', False, group='api')
- self.conf.set_override('panko_is_enabled', False, group='api')
-
- LOAD_APP_KWARGS = {
- 'conf': self.conf,
- }
-
- def stop_fixture(self):
- """Reset the config and remove data."""
- if self.conf:
- storage.get_connection_from_config(self.conf).clear()
- self.conf.reset()
-
-
-class SampleDataFixture(fixture.GabbiFixture):
- """Instantiate some sample data for use in testing."""
-
- def start_fixture(self):
- """Create some samples."""
- global LOAD_APP_KWARGS
- conf = LOAD_APP_KWARGS['conf']
- self.conn = storage.get_connection_from_config(conf)
- timestamp = datetime.datetime.utcnow()
- project_id = str(uuid.uuid4())
- self.source = str(uuid.uuid4())
- resource_metadata = {'farmed_by': 'nancy'}
-
- for name in ['cow', 'pig', 'sheep']:
-            resource_metadata.update({'breed': name})
- c = sample.Sample(name='livestock',
- type='gauge',
- unit='head',
- volume=int(10 * random.random()),
- user_id='farmerjon',
- project_id=project_id,
- resource_id=project_id,
- timestamp=timestamp,
- resource_metadata=resource_metadata,
- source=self.source)
- data = utils.meter_message_from_counter(
- c, conf.publisher.telemetry_secret)
- self.conn.record_metering_data(data)
-
- def stop_fixture(self):
- """Destroy the samples."""
-        # NOTE(chdent): print here for the sake of info during testing.
- # This will go away eventually.
- print('resource',
- self.conn.db.resource.remove({'source': self.source}))
- print('meter', self.conn.db.meter.remove({'source': self.source}))
-
-
-class CORSConfigFixture(fixture.GabbiFixture):
- """Inject mock configuration for the CORS middleware."""
-
- def start_fixture(self):
- # Here we monkeypatch GroupAttr.__getattr__, necessary because the
- # paste.ini method of initializing this middleware creates its own
- # ConfigOpts instance, bypassing the regular config fixture.
-
- def _mock_getattr(instance, key):
- if key != 'allowed_origin':
- return self._original_call_method(instance, key)
- return "http://valid.example.com"
-
- self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__
- cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr
-
- def stop_fixture(self):
- """Remove the monkeypatch."""
- cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method
diff --git a/ceilometer/tests/functional/gabbi/gabbi_paste.ini b/ceilometer/tests/functional/gabbi/gabbi_paste.ini
deleted file mode 100644
index b4761528..00000000
--- a/ceilometer/tests/functional/gabbi/gabbi_paste.ini
+++ /dev/null
@@ -1,24 +0,0 @@
-# Ceilometer API WSGI Pipeline
-# Define the filters that make up the pipeline for processing WSGI requests
-# Note: "pipeline" here is PasteDeploy's term, not Ceilometer's pipeline
-# used for processing samples
-#
-# This version is specific for gabbi. It removes support for keystone while
-# keeping support for CORS.
-
-# Remove authtoken from the pipeline if you don't want to use keystone authentication
-[pipeline:main]
-pipeline = cors api-server
-
-[app:api-server]
-paste.app_factory = ceilometer.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = ceilometer
diff --git a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml b/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml
deleted file mode 100644
index 35250176..00000000
--- a/ceilometer/tests/functional/gabbi/gabbi_pipeline.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# A limited pipeline for use with the Gabbi spike. It writes
-# directly to the metering database without using an
-# intermediary dispatcher.
-#
-# This is one of several things that will need some extensive
-# tidying to be more correct.
----
-sources:
- - name: meter_source
- interval: 1
- meters:
- - "*"
- sinks:
- - meter_sink
-sinks:
- - name: meter_sink
- transformers:
- publishers:
- - direct://
diff --git a/ceilometer/tests/functional/gabbi/gabbits/basic.yaml b/ceilometer/tests/functional/gabbi/gabbits/basic.yaml
deleted file mode 100644
index d56a0de6..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/basic.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Some simple tests just to confirm that the system works.
-#
-fixtures:
- - ConfigFixture
-
-tests:
-
-# Root gives us some information on where to go from here.
-- name: quick root check
- GET: /
- response_headers:
- content-type: application/json
- response_strings:
- - '"base": "application/json"'
- response_json_paths:
- versions.values.[0].status: stable
- versions.values.[0].media-types.[0].base: application/json
-
-# NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404!
-- name: v2 visit
- desc: this demonstrates a bug in the info in /
- GET: $RESPONSE['versions.values.[0].links.[0].href']
- status: 404
diff --git a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml b/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml
deleted file mode 100644
index 44299bc8..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/capabilities.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Explore the capabilities API
-#
-fixtures:
- - ConfigFixture
-
-tests:
-
-- name: get capabilities
- desc: retrieve capabilities for the mongo store
- GET: /v2/capabilities
- response_json_paths:
- $.storage.['storage:production_ready']: true
diff --git a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml
deleted file mode 100644
index a33f9dcc..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/clean-samples.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-# Post a simple sample, sir, and then retrieve it in various ways.
-fixtures:
- - ConfigFixture
-
-tests:
-
-# POST one sample and verify its existence.
-
- - name: post sample for meter
- desc: post a single sample
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data: |
- [
- {
- "counter_name": "apples",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
- "counter_unit": "instance",
- "counter_volume": 1,
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "resource_metadata": {
- "name2": "value2",
- "name1": "value1"
- },
- "counter_type": "gauge"
- }
- ]
-
- response_json_paths:
- $.[0].counter_name: apples
- status: 201
- response_headers:
- content-type: application/json
-
-# When POSTing a sample perhaps we should get back a location header
-# with the URI of the posted sample
-
- - name: post a sample expect location
- desc: https://bugs.launchpad.net/ceilometer/+bug/1426426
- xfail: true
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- project_id: 35b17138-b364-4e6a-a131-8f3099c5be68
- user_id: efd87807-12d2-4b38-9c70-5f5c2ac427ff
- counter_unit: instance
- counter_volume: 1
- resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- resource_metadata:
- name2: value2
- name1: value1
- counter_type: gauge
- response_headers:
- location: /$SCHEME://$NETLOC/
-
-# GET all the samples created for the apples meter
-
- - name: get samples for meter
- desc: get all the samples at that meter
- GET: /v2/meters/apples
- response_json_paths:
- $.[0].counter_name: apples
- $.[0].counter_volume: 1
- $.[0].resource_metadata.name2: value2
-
-# POSTing a sample to a meter will implicitly create a resource
-
- - name: get resources
- desc: get the resources that exist because of the sample
- GET: /v2/resources
- response_json_paths:
- $.[0].metadata.name2: value2
-
-# NOTE(chdent): We assume that the first item in links is self.
-# Need to determine how to express the more correct JSONPath here
-# (if possible).
-
- - name: get resource
- desc: get just one of those resources via self
- GET: $RESPONSE['$[0].links[0].href']
- response_json_paths:
- $.metadata.name2: value2
-
-# GET the created samples
-
- - name: get samples
- desc: get all the created samples
- GET: /v2/samples
- response_json_paths:
- $.[0].metadata.name2: value2
- $.[0].meter: apples
-
- - name: get one sample
- desc: get the one sample that exists
- GET: /v2/samples/$RESPONSE['$[0].id']
- response_json_paths:
- $.metadata.name2: value2
- $.meter: apples
diff --git a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml
deleted file mode 100644
index 10efc7cd..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/fixture-samples.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
-#
-# Demonstrate a simple sample fixture.
-#
-fixtures:
- - ConfigFixture
- - SampleDataFixture
-
-tests:
-- name: get fixture samples
- desc: get all the samples at livestock
- GET: /v2/meters/livestock
- response_json_paths:
- $.[0].counter_name: livestock
- $.[1].counter_name: livestock
- $.[2].counter_name: livestock
- $.[2].user_id: farmerjon
- $.[0].resource_metadata.breed: cow
- $.[1].resource_metadata.farmed_by: nancy
diff --git a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml b/ceilometer/tests/functional/gabbi/gabbits/meters.yaml
deleted file mode 100644
index 1df4980a..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/meters.yaml
+++ /dev/null
@@ -1,384 +0,0 @@
-#
-# Tests to explore and cover the /v2/meters section of the
-# Ceilometer API.
-#
-
-fixtures:
- - ConfigFixture
-
-tests:
-
-# Generic HTTP health explorations of all meters.
-
- - name: empty meters list
- GET: /v2/meters
- response_headers:
- content-type: /application/json/
- response_strings:
- - "[]"
-
- - name: meters list bad accept
- GET: /v2/meters
- request_headers:
- accept: text/plain
- status: 406
-
- - name: meters list bad method
- POST: /v2/meters
- status: 405
- response_headers:
- allow: GET
-
- - name: try to delete meters
- DELETE: /v2/meters
- status: 405
- response_headers:
- allow: GET
-
-# Generic HTTP health explorations of single meter.
-
- - name: get non exist meter
- GET: /v2/meters/noexist
- response_strings:
- - "[]"
-
- - name: meter bad accept
- GET: /v2/meters/noexist?direct=True
- request_headers:
- accept: text/plain
- status: 406
-
- - name: meter delete noexist
- DELETE: /v2/meters/noexist
- status: "404 || 405"
-
- - name: post meter no data
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data: ""
- status: 400
-
- - name: post meter error is JSON
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data: ""
- status: 400
- response_headers:
- content-type: /application/json/
- response_json_paths:
- $.error_message.faultstring: "Samples should be included in request body"
-
- - name: post meter bad content-type
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: text/plain
- data: hello
- status: 415
-
- - name: post bad samples to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- samples:
- - red
- - blue
- - yellow
- status: 400
-
-# POST variations on a malformed sample
-
- - name: post limited counter to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_unit: instance
- counter_volume: 1
- resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- status: 400
- response_strings:
- - "Invalid input for field/attribute counter_name"
-
- - name: post mismatched counter name to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: cars
- counter_type: gauge
- counter_unit: instance
- counter_volume: 1
- resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- status: 400
- response_strings:
- - "Invalid input for field/attribute counter_name"
- - "should be apples"
-
- - name: post counter no resource to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 1
- status: 400
- response_strings:
- - "Invalid input for field/attribute resource_id"
- - "Mandatory field missing."
-
- - name: post counter bad type to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: elevation
- counter_unit: instance
- counter_volume: 1
- resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- status: 400
- response_strings:
- - "Invalid input for field/attribute counter_type."
- - "The counter type must be: gauge, delta, cumulative"
-
-# Manipulate samples
-
- - name: post counter to meter
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 1
- resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- status: 201
-
- - name: list apple samples
- GET: /v2/meters/apples
- response_json_paths:
- $[0].counter_volume: 1.0
- $[0].counter_name: apples
- $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
-
- - name: list meters
- GET: /v2/meters
- response_json_paths:
- $[0].name: apples
- $[0].resource_id: bd9431c1-8d69-4ad3-803a-8d4a6b89fd36
- $[0].type: gauge
- $[-1].name: apples
-
- - name: negative limit on meter list
- GET: /v2/meters/apples?limit=-5
- status: 400
- response_strings:
- - Limit must be positive
-
- - name: nan limit on meter list
- GET: /v2/meters/apples?limit=NaN
- status: 400
- response_strings:
- - unable to convert to int
-
- - name: post counter to meter different resource
- POST: /v2/meters/apples?direct=True
- status: 201
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 2
- resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
-
- - name: query for resource
- GET: /v2/meters/apples?q.field=resource_id&q.value=aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa&q.op=eq
- response_json_paths:
- $[0].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
- $[-1].resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
-
-# Explore posting samples with less than perfect data.
-
- - name: post counter with bad timestamp
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 3
- resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
- timestamp: "2013-01-bad 23:23:20"
- status: 400
- response_strings:
- - 'Invalid input for field/attribute samples'
-
- - name: post counter with good timestamp
- POST: /v2/meters/apples?direct=True
- status: 201
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 3
- resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
- timestamp: "2013-01-01 23:23:20"
-
- - name: post counter with wrong metadata
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 3
- resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
- timestamp: "2013-01-01 23:23:20"
- resource_metadata: "a string"
- status: 400
- response_strings:
- - "Invalid input for field/attribute samples"
-
- - name: post counter with empty metadata
- POST: /v2/meters/apples?direct=True
- status: 201
- request_headers:
- content-type: application/json
- data:
- - counter_name: apples
- counter_type: gauge
- counter_unit: instance
- counter_volume: 3
- resource_id: aa9431c1-8d69-4ad3-803a-8d4a6b89fdaa
- timestamp: "2013-01-01 23:23:20"
- resource_metadata: {}
-
-# Statistics
-
- - name: get sample statistics
- GET: /v2/meters/apples/statistics
- response_json_paths:
- $[0].groupby: null
- $[0].unit: instance
- $[0].sum: 9.0
- $[0].min: 1.0
- $[0].max: 3.0
- $[0].count: 4
-
- - name: get incorrectly grouped sample statistics
- GET: /v2/meters/apples/statistics?groupby=house_id
- status: 400
- response_strings:
- - Invalid groupby fields
-
- - name: get grouped sample statistics
- GET: /v2/meters/apples/statistics?groupby=resource_id
- response_json_paths:
- $[1].max: 3.0
- $[0].max: 1.0
-
- - name: get sample statistics bad period
- GET: /v2/meters/apples/statistics?period=seven
- status: 400
- response_strings:
- - unable to convert to int
-
- - name: get sample statistics negative period
- GET: /v2/meters/apples/statistics?period=-7
- status: 400
- response_strings:
- - Period must be positive.
-
- - name: get sample statistics 600 period
- GET: /v2/meters/apples/statistics?period=600
- response_json_paths:
- $[0].period: 600
-
- - name: get sample statistics time limit not time
- GET: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=Remember%20Remember
- status: 400
- response_strings:
- - invalid timestamp format
-
- - name: get sample statistics time limit gt
- GET: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2014-01-01
- response_json_paths:
- $[0].count: 2
-
- - name: get sample statistics time limit lt
- GET: /v2/meters/apples/statistics?q.field=timestamp&q.op=lt&q.value=2014-01-01
- response_json_paths:
- $[0].count: 2
-
- - name: get sample statistics time limit bounded
- GET: /v2/meters/apples/statistics?q.field=timestamp&q.op=gt&q.value=2013-06-01&q.field=timestamp&q.op=lt&q.value=2014-01-01
- response_strings:
- - "[]"
-
- - name: get sample statistics select aggregate bad format
- GET: /v2/meters/apples/statistics?aggregate=max
- status: 400
-
- - name: get sample statistics select aggregate
- GET: /v2/meters/apples/statistics?aggregate.func=max
- response_json_paths:
- $[0].aggregate.max: 3.0
-
- - name: get sample statistics select aggregate multiple
- GET: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=count
- response_json_paths:
- $[0].aggregate.max: 3.0
- $[0].aggregate.count: 4
-
- - name: get sample statistics select aggregate bad function
- GET: /v2/meters/apples/statistics?aggregate.func=mmm
- status: 400
- response_strings:
- - 'Invalid aggregation function: mmm'
-
- - name: get sample statistics select aggregate good function and bad function
- GET: /v2/meters/apples/statistics?aggregate.func=max&aggregate.func=mmm
- status: 400
- response_strings:
- - 'Invalid aggregation function: mmm'
-
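-# For reference, the statistics objects asserted on above have roughly
-# this shape (a sketch assembled from the fields these tests check, not
-# a full schema; "aggregate" appears only when aggregate.func is given):
-#
-#   {
-#     "groupby": null,
-#     "unit": "instance",
-#     "sum": 9.0,
-#     "min": 1.0,
-#     "max": 3.0,
-#     "count": 4,
-#     "period": 600,
-#     "aggregate": {"max": 3.0, "count": 4}
-#   }
-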
-# limit meters results
-
- - name: get meters unlimited
- GET: /v2/meters
- response_json_paths:
- $.`len`: 2
-
- - name: get meters limited
- GET: /v2/meters?limit=1
- response_json_paths:
- $.`len`: 1
-
- - name: get meters double limit
- GET: /v2/meters?limit=1&limit=1
- status: 400
-
- - name: get meters filter limit
- desc: expressing limit this way is now disallowed
- GET: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1
- status: 400
- response_strings:
- - 'Unknown argument: \"limit\": unrecognized field in query'
-
- - name: get meters filter limit and limit
- GET: /v2/meters?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1
- status: 400
- response_strings:
- - 'Unknown argument: \"limit\": unrecognized field in query'
diff --git a/ceilometer/tests/functional/gabbi/gabbits/middleware.yaml b/ceilometer/tests/functional/gabbi/gabbits/middleware.yaml
deleted file mode 100644
index 3d220483..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/middleware.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Test the middlewares. Just CORS for now.
-#
-
-fixtures:
- - ConfigFixture
- - CORSConfigFixture
-
-tests:
-
- - name: valid cors options
- OPTIONS: /
- status: 200
- request_headers:
- origin: http://valid.example.com
- access-control-request-method: GET
- response_headers:
- access-control-allow-origin: http://valid.example.com
-
- - name: invalid cors options
- OPTIONS: /
- status: 200
- request_headers:
- origin: http://invalid.example.com
- access-control-request-method: GET
- response_forbidden_headers:
- - access-control-allow-origin
-
- - name: valid cors get
- GET: /
- status: 200
- request_headers:
- origin: http://valid.example.com
- access-control-request-method: GET
- response_headers:
- access-control-allow-origin: http://valid.example.com
-
- - name: invalid cors get
- GET: /
- status: 200
- request_headers:
- origin: http://invalid.example.com
- response_forbidden_headers:
- - access-control-allow-origin
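-
-# For reference, a passing preflight from the first test above looks
-# roughly like this on the wire (a sketch, not captured traffic):
-#
-#   OPTIONS / HTTP/1.1
-#   Origin: http://valid.example.com
-#   Access-Control-Request-Method: GET
-#
-#   HTTP/1.1 200 OK
-#   Access-Control-Allow-Origin: http://valid.example.com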
diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml
deleted file mode 100644
index 291c0861..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/resources-empty.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# Explore and cover resources API with gabbi tests when there are no
-# resources.
-#
-
-fixtures:
- - ConfigFixture
-
-tests:
-
-# Check for a list of resources, modifying the request in various
-# ways.
-
- - name: list resources no extra
- desc: Provide no additional header guidelines
- GET: /v2/resources
- response_headers:
- content-type: /application/json/
- response_strings:
- - "[]"
-
- - name: list resources but get url wrong
- GET: /v2/resrces
- status: 404
-
- - name: list resources explicit accept
- GET: /v2/resources
- request_headers:
- accept: application/json
- response_strings:
- - "[]"
-
- - name: list resources bad accept
- GET: /v2/resources
- request_headers:
- accept: text/plain
- status: 406
-
- - name: list resources with bad query field
- GET: /v2/resources?q.field=id&q.value=cars
- status: 400
- response_strings:
- - unrecognized field in query
-
- - name: list resources with query
- GET: /v2/resources?q.field=resource&q.value=cars
- response_strings:
- - "[]"
-
- - name: list resource bad type meter links
- GET: /v2/resources?meter_links=yes%20please
- status: 400
- response_strings:
- - unable to convert to int
-
- - name: list resource meter links int
- GET: /v2/resources?meter_links=0
- response_strings:
- - "[]"
diff --git a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml
deleted file mode 100644
index 4ef014df..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/resources-fixtured.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Explore and cover resources API with gabbi tests when there are a
-# small number of pre-existing resources
-#
-
-fixtures:
- - ConfigFixture
- - SampleDataFixture
-
-tests:
-
- - name: list all resources
- GET: /v2/resources
- response_json_paths:
- $[0].user_id: farmerjon
- $[0].links[1].rel: livestock
-
- - name: get one resource
- desc: get a resource via the links in the first resource listed above
- GET: $RESPONSE['$[0].links[0].href']
- response_json_paths:
- $.resource_id: $RESPONSE['$[0].resource_id']
-
- - name: list resources limit user_id
- GET: /v2/resources?q.field=user_id&q.value=farmerjon
- response_json_paths:
- $[0].user_id: farmerjon
- $[0].links[1].rel: livestock
-
- - name: list resources limit metadata
- GET: /v2/resources?q.field=metadata.breed&q.value=sheep
- response_json_paths:
- $[0].user_id: farmerjon
- $[0].links[1].rel: livestock
-
- - name: list resources limit metadata no match
- GET: /v2/resources?q.field=metadata.breed&q.value=llamma
- response_strings:
- - "[]"
-
- - name: fail to get one resource
- GET: /v2/resources/nosirnothere
- status: 404
-
- - name: list resource meter links present
- GET: /v2/resources?meter_links=1
- response_json_paths:
- $[0].links[0].rel: self
- $[0].links[1].rel: livestock
- $[0].links[-1].rel: livestock
-
- - name: list resource meter links not present
- GET: /v2/resources?meter_links=0
- desc: there is only one links entry when meter_links is 0
- response_json_paths:
- $[0].links[0].rel: self
- $[0].links[-1].rel: self
-
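-# For reference, the links list asserted on above looks roughly like
-# this (a sketch; hrefs elided):
-#
-#   links:
-#     - rel: self       # the resource itself, fetched in "get one resource"
-#     - rel: livestock  # one entry per meter on the resource
-#
-# With meter_links=0 only the self entry remains.
-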
-# limit resource results
-
- - name: get resources unlimited
- GET: /v2/resources
- response_json_paths:
- $.`len`: 1
-
- - name: get resources limited
- GET: /v2/resources?limit=1
- response_json_paths:
- $.`len`: 1
-
- - name: get resources double limit
- GET: /v2/resources?limit=1&limit=1
- status: 400
-
- - name: get resources filter limit
- desc: expressing limit this way is now disallowed
- GET: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1
- status: 400
- response_strings:
- - 'Unknown argument: \"limit\": unrecognized field in query'
-
- - name: get resources filter limit and limit
- GET: /v2/resources?q.field=limit&q.op=eq&q.type=&q.value=1&limit=1
- status: 400
- response_strings:
- - 'Unknown argument: \"limit\": unrecognized field in query'
diff --git a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml b/ceilometer/tests/functional/gabbi/gabbits/samples.yaml
deleted file mode 100644
index cb9fee5d..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits/samples.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-#
-# Explore and test the samples controller, using samples supplied by
-# the SampleDataFixture.
-#
-
-fixtures:
- - ConfigFixture
- - SampleDataFixture
-
-tests:
-
-# Confirm all the samples are there and expected requests behave.
-# TODO(chdent): There's a danger here that the ordering of multiple
-# samples will not be consistent.
-
- - name: lists samples
- GET: /v2/samples
- response_headers:
- content-type: /application/json/
- response_json_paths:
- $[0].meter: livestock
- $[0].metadata.breed: cow
- $[1].metadata.breed: pig
- $[2].metadata.breed: sheep
-
- - name: get just one
- GET: /v2/samples/$RESPONSE['$[0].id']
- response_json_paths:
- $.meter: livestock
- $.metadata.breed: cow
-
- - name: list samples with limit
- GET: /v2/samples?limit=1
- response_json_paths:
- $[0].meter: livestock
- $[0].metadata.breed: cow
- $[-1].metadata.breed: cow
-
- - name: list zero samples with zero limit
- GET: /v2/samples?limit=0
- status: 400
-
- - name: list samples with query
- GET: /v2/samples?q.field=resource_metadata.breed&q.value=cow&q.op=eq
- response_json_paths:
- $[0].meter: livestock
- $[0].metadata.breed: cow
- $[-1].metadata.breed: cow
-
- - name: query by user
- GET: /v2/samples?q.field=user&q.value=$RESPONSE['$[0].user_id']&q.op=eq
- response_json_paths:
- $[0].user_id: $RESPONSE['$[0].user_id']
-
- - name: query by user_id
- GET: /v2/samples?q.field=user_id&q.value=$RESPONSE['$[0].user_id']&q.op=eq
- response_json_paths:
- $[0].user_id: $RESPONSE['$[0].user_id']
-
- - name: query by project
- GET: /v2/samples?q.field=project&q.value=$RESPONSE['$[0].project_id']&q.op=eq
- response_json_paths:
- $[0].project_id: $RESPONSE['$[0].project_id']
-
- - name: query by project_id
- GET: /v2/samples?q.field=project_id&q.value=$RESPONSE['$[0].project_id']&q.op=eq
- response_json_paths:
- $[0].project_id: $RESPONSE['$[0].project_id']
-
-# Explore failure modes for listing samples
-
- - name: list samples with bad field
- GET: /v2/samples?q.field=harpoon&q.value=cow&q.op=eq
- status: 400
- response_strings:
- - timestamp
- - project
- - unrecognized field in query
-
- - name: list samples with bad metaquery field
- GET: /v2/samples?q.field=metaquery&q.value=cow&q.op=eq
- status: 400
- response_strings:
- - unrecognized field in query
-
- - name: bad limit value
- GET: /v2/samples?limit=happiness
- status: 400
- response_strings:
- - Invalid input for field/attribute limit
-
- - name: negative limit value 400
- GET: /v2/samples?limit=-99
- status: 400
-
- - name: negative limit value error message
- GET: /v2/samples?limit=-99
- status: 400
- response_headers:
- content-type: /application/json/
- response_json_paths:
- $.error_message.faultstring: Limit must be positive
-
- - name: bad accept
- desc: try an unexpected content type
- GET: /v2/samples
- request_headers:
- accept: text/plain
- status: 406
-
- - name: complex good accept
- desc: client sends a complex accept header; check that we adapt
- GET: /v2/samples
- request_headers:
- accept: text/plain, application/json; q=0.8
-
- - name: complex bad accept
- desc: client sends a complex accept header; check that we adapt
- GET: /v2/samples
- request_headers:
- accept: text/plain, application/binary; q=0.8
- status: 406
-
- - name: bad method
- POST: /v2/samples
- status: 405
- response_headers:
- allow: GET
-
-# Work with just one sample.
-
- - name: list one of the samples
- GET: /v2/samples?limit=1
-
- - name: retrieve one sample
- GET: /v2/samples/$RESPONSE['$[0].id']
- response_headers:
- content-type: /application/json/
- response_json_paths:
- $.meter: livestock
-
- - name: retrieve sample with useless query
- GET: /v2/samples/$RESPONSE['$.id']?limit=5
- status: 400
- response_strings:
- - "Unknown argument:"
-
- - name: attempt missing sample
- GET: /v2/samples/davesnothere
- status: 404
- response_headers:
- content-type: /application/json/
- response_json_paths:
- $.error_message.faultstring: Sample davesnothere Not Found
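-
-# For reference, a single sample document, per the assertions above,
-# carries at least these fields (a sketch, not a schema):
-#
-#   {
-#     "id": "<sample id>",
-#     "meter": "livestock",
-#     "user_id": "...",
-#     "project_id": "...",
-#     "metadata": {"breed": "cow"}
-#   }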
diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml
deleted file mode 100644
index 8e715439..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits_prefix/basic.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# Confirm root reports the right data including a prefixed URL
-#
-fixtures:
- - ConfigFixture
-
-tests:
-
-# Root gives us some information on where to go from here.
-- name: quick root check
- GET: /
- response_headers:
- content-type: application/json
- response_strings:
- - '"base": "application/json"'
- - /telemetry/
- response_json_paths:
- versions.values.[0].status: stable
- versions.values.[0].media-types.[0].base: application/json
diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml
deleted file mode 100644
index 55da69db..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits_prefix/clean-samples.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Post a simple sample and confirm the created resource has
-# reasonable URLs
-fixtures:
- - ConfigFixture
-
-tests:
-
-# POST one sample and verify its existence.
-
- - name: post sample for meter
- desc: post a single sample
- POST: /v2/meters/apples?direct=True
- request_headers:
- content-type: application/json
- data: |
- [
- {
- "counter_name": "apples",
- "project_id": "35b17138-b364-4e6a-a131-8f3099c5be68",
- "user_id": "efd87807-12d2-4b38-9c70-5f5c2ac427ff",
- "counter_unit": "instance",
- "counter_volume": 1,
- "resource_id": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
- "resource_metadata": {
- "name2": "value2",
- "name1": "value1"
- },
- "counter_type": "gauge"
- }
- ]
-
- response_json_paths:
- $.[0].counter_name: apples
- status: 201
- response_headers:
- content-type: application/json
-
- - name: get resources
- desc: get the resources that exist because of the sample
- GET: /v2/resources
- response_json_paths:
- $.[0].metadata.name2: value2
-
- - name: get resource
- desc: get just one of those resources via self
- GET: $RESPONSE['$[0].links[0].href']
- response_json_paths:
- $.metadata.name2: value2
- response_strings:
- - /telemetry/
diff --git a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml b/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml
deleted file mode 100644
index 69d91c7f..00000000
--- a/ceilometer/tests/functional/gabbi/gabbits_prefix/resources-fixtured.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Explore and cover resources API with gabbi tests when there are a
-# small number of pre-existing resources
-#
-
-fixtures:
- - ConfigFixture
- - SampleDataFixture
-
-tests:
-
- - name: list all resources
- GET: /v2/resources
- response_json_paths:
- $[0].user_id: farmerjon
- $[0].links[1].rel: livestock
- response_strings:
- - /telemetry/
-
- - name: get one resource
- desc: get a resource via the links in the first resource listed above
- GET: $RESPONSE['$[0].links[0].href']
- response_json_paths:
- $.resource_id: $RESPONSE['$[0].resource_id']
diff --git a/ceilometer/tests/functional/gabbi/test_gabbi.py b/ceilometer/tests/functional/gabbi/test_gabbi.py
deleted file mode 100644
index 162e426f..00000000
--- a/ceilometer/tests/functional/gabbi/test_gabbi.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Copyright 2015 Red Hat. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A test module to exercise the Ceilometer API with gabbi
-
-For the sake of exploratory development.
-"""
-
-import os
-
-from gabbi import driver
-
-from ceilometer.tests.functional.gabbi import fixtures as fixture_module
-
-TESTS_DIR = 'gabbits'
-
-
-def load_tests(loader, tests, pattern):
- """Provide a TestSuite to the discovery process."""
- test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
- return driver.build_tests(test_dir, loader, host=None,
- intercept=fixture_module.setup_app,
- fixture_module=fixture_module)
diff --git a/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py b/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py
deleted file mode 100644
index 04f337c9..00000000
--- a/ceilometer/tests/functional/gabbi/test_gabbi_prefix.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Copyright 2015 Red Hat. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""A module to exercise the Ceilometer API with gabbi with a URL prefix"""
-
-import os
-
-from gabbi import driver
-
-from ceilometer.tests.functional.gabbi import fixtures as fixture_module
-
-TESTS_DIR = 'gabbits_prefix'
-
-
-def load_tests(loader, tests, pattern):
- """Provide a TestSuite to the discovery process."""
- test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
- return driver.build_tests(test_dir, loader, host=None,
- prefix='/telemetry',
- intercept=fixture_module.setup_app,
- fixture_module=fixture_module)
diff --git a/ceilometer/tests/tempest/api/__init__.py b/ceilometer/tests/tempest/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/tempest/api/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/tempest/api/base.py b/ceilometer/tests/tempest/api/base.py
deleted file mode 100644
index ac65c3cf..00000000
--- a/ceilometer/tests/tempest/api/base.py
+++ /dev/null
@@ -1,145 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-from oslo_utils import timeutils
-from tempest.common import compute
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import exceptions as lib_exc
-import tempest.test
-
-from ceilometer.tests.tempest.service import client
-
-
-CONF = config.CONF
-
-
-class ClientManager(client.Manager):
-
- load_clients = [
- 'servers_client',
- 'compute_networks_client',
- 'compute_floating_ips_client',
- 'flavors_client',
- 'image_client_v2',
- 'telemetry_client',
- ]
-
-
-class BaseTelemetryTest(tempest.test.BaseTestCase):
-
- """Base test case class for all Telemetry API tests."""
-
- credentials = ['primary']
- client_manager = ClientManager
-
- @classmethod
- def skip_checks(cls):
- super(BaseTelemetryTest, cls).skip_checks()
- if (not CONF.service_available.ceilometer or
- not CONF.telemetry.deprecated_api_enabled):
- raise cls.skipException("Ceilometer API support is required")
-
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources()
- super(BaseTelemetryTest, cls).setup_credentials()
-
- @classmethod
- def setup_clients(cls):
- super(BaseTelemetryTest, cls).setup_clients()
- cls.telemetry_client = cls.os_primary.telemetry_client
- cls.servers_client = cls.os_primary.servers_client
- cls.flavors_client = cls.os_primary.flavors_client
- cls.image_client_v2 = cls.os_primary.image_client_v2
-
- @classmethod
- def resource_setup(cls):
- super(BaseTelemetryTest, cls).resource_setup()
- cls.nova_notifications = ['memory', 'vcpus', 'disk.root.size',
- 'disk.ephemeral.size']
-
- cls.glance_v2_notifications = ['image.download', 'image.serve']
-
- cls.server_ids = []
- cls.image_ids = []
-
- @classmethod
- def create_server(cls):
- tenant_network = cls.get_tenant_network()
- body, server = compute.create_test_server(
- cls.os_primary,
- tenant_network=tenant_network,
- name=data_utils.rand_name('ceilometer-instance'),
- wait_until='ACTIVE')
- cls.server_ids.append(body['id'])
- return body
-
- @classmethod
- def create_image(cls, client, **kwargs):
- body = client.create_image(name=data_utils.rand_name('image'),
- container_format='bare',
- disk_format='raw',
- **kwargs)
- # TODO(jswarren) Move ['image'] up to initial body value assignment
- # once both v1 and v2 glance clients include the full response
- # object.
- if 'image' in body:
- body = body['image']
- cls.image_ids.append(body['id'])
- return body
-
- @staticmethod
- def cleanup_resources(method, list_of_ids):
- for resource_id in list_of_ids:
- try:
- method(resource_id)
- except lib_exc.NotFound:
- pass
-
- @classmethod
- def resource_cleanup(cls):
- cls.cleanup_resources(cls.servers_client.delete_server, cls.server_ids)
- cls.cleanup_resources(cls.image_client_v2.delete_image, cls.image_ids)
- super(BaseTelemetryTest, cls).resource_cleanup()
-
- def await_samples(self, metric, query):
- """This method is to wait for sample to add it to database.
-
- There are long time delays when using Postgresql (or Mysql)
- database as ceilometer backend
- """
- timeout = CONF.compute.build_timeout
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start, timeutils.utcnow()) < timeout:
- body = self.telemetry_client.list_samples(metric, query)
- if body:
- return body
- time.sleep(CONF.compute.build_interval)
-
- raise lib_exc.TimeoutException(
- 'Sample for metric:%s with query:%s has not been added to the '
- 'database within %d seconds' % (metric, query,
- CONF.compute.build_timeout))
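-
- # A typical call, as the notification API tests in this plugin use
- # it (a sketch): wait for 'memory' samples for a created server:
- #
- # query = ('resource', 'eq', server['id'])
- # self.await_samples('memory', query)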
-
-
-class BaseTelemetryAdminTest(BaseTelemetryTest):
- """Base test case class for admin Telemetry API tests."""
-
- credentials = ['primary', 'admin']
-
- @classmethod
- def setup_clients(cls):
- super(BaseTelemetryAdminTest, cls).setup_clients()
- cls.telemetry_admin_client = cls.os_admin.telemetry_client
diff --git a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py b/ceilometer/tests/tempest/api/test_telemetry_notification_api.py
deleted file mode 100644
index ec754c83..00000000
--- a/ceilometer/tests/tempest/api/test_telemetry_notification_api.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Change-Id: I14e16a1a7d9813b324ee40545c07f0e88fb637b7
-
-import six
-import testtools
-
-from ceilometer.tests.tempest.api import base
-from tempest.common import utils
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-
-CONF = config.CONF
-
-
-class TelemetryNotificationAPITest(base.BaseTelemetryTest):
- @classmethod
- def skip_checks(cls):
- super(TelemetryNotificationAPITest, cls).skip_checks()
-
- if ("gnocchi" in CONF.service_available and
- CONF.service_available.gnocchi):
- skip_msg = ("%s skipped as gnocchi is enabled" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- @decorators.idempotent_id('d7f8c1c8-d470-4731-8604-315d3956caae')
- @utils.services('compute')
- def test_check_nova_notification(self):
-
- body = self.create_server()
-
- query = ('resource', 'eq', body['id'])
-
- for metric in self.nova_notifications:
- self.await_samples(metric, query)
-
- @decorators.idempotent_id('c240457d-d943-439b-8aea-85e26d64fe8f')
- @utils.services("image")
- @testtools.skipIf(not CONF.image_feature_enabled.api_v2,
- "Glance api v2 is disabled")
- def test_check_glance_v2_notifications(self):
- body = self.create_image(self.image_client_v2, visibility='private')
-
- file_content = data_utils.random_bytes()
- image_file = six.BytesIO(file_content)
- self.image_client_v2.store_image_file(body['id'], image_file)
- self.image_client_v2.show_image_file(body['id'])
-
- query = 'resource', 'eq', body['id']
-
- for metric in self.glance_v2_notifications:
- self.await_samples(metric, query)
-
-
-class TelemetryNotificationAdminAPITest(base.BaseTelemetryAdminTest):
- @classmethod
- def skip_checks(cls):
- super(TelemetryNotificationAdminAPITest, cls).skip_checks()
-
- if ("gnocchi" in CONF.service_available and
- CONF.service_available.gnocchi):
- skip_msg = ("%s skipped as gnocchi is enabled" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- @decorators.idempotent_id('29604198-8b45-4fc0-8af8-1cae4f94ebea')
- @utils.services('compute')
- def test_check_nova_notification_event_and_meter(self):
-
- body = self.create_server()
-
- query = ('resource', 'eq', body['id'])
- for metric in self.nova_notifications:
- self.await_samples(metric, query)
diff --git a/ceilometer/tests/tempest/config.py b/ceilometer/tests/tempest/config.py
index 0df195b1..d39874c1 100644
--- a/ceilometer/tests/tempest/config.py
+++ b/ceilometer/tests/tempest/config.py
@@ -25,20 +25,6 @@ telemetry_group = cfg.OptGroup(name='telemetry',
title='Telemetry Service Options')
TelemetryGroup = [
- cfg.StrOpt('catalog_type',
- default='metering',
- help="Catalog type of the Telemetry service."),
- cfg.StrOpt('endpoint_type',
- default='publicURL',
- choices=['public', 'admin', 'internal',
- 'publicURL', 'adminURL', 'internalURL'],
- help="The endpoint type to use for the telemetry service."),
- cfg.BoolOpt('event_enabled',
- default=True,
- help="Runs Ceilometer event-related tests"),
- cfg.BoolOpt('deprecated_api_enabled',
- default=True,
- help="Runs Ceilometer deprecated API tests"),
cfg.IntOpt('notification_wait',
default=120,
help="The seconds to wait for notifications which "
diff --git a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py b/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py
deleted file mode 100644
index e02e4541..00000000
--- a/ceilometer/tests/tempest/scenario/test_object_storage_telemetry_middleware.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Copyright 2014 Red Hat
-#
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from tempest.common import utils
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
-from tempest.lib import decorators
-from tempest import test
-
-from ceilometer.tests.tempest.service import client
-
-
-CONF = config.CONF
-
-LOG = logging.getLogger(__name__)
-
-
-class ClientManager(client.Manager):
-
- load_clients = [
- 'telemetry_client',
- 'container_client',
- 'object_client',
- ]
-
-
-class TestObjectStorageTelemetry(test.BaseTestCase):
- """Test that swift uses the ceilometer middleware.
-
- * create container.
- * upload a file to the created container.
- * retrieve the file from the created container.
- * wait for notifications from ceilometer.
- """
-
- credentials = ['primary']
- client_manager = ClientManager
-
- @classmethod
- def skip_checks(cls):
- super(TestObjectStorageTelemetry, cls).skip_checks()
- if ("gnocchi" in CONF.service_available and
- CONF.service_available.gnocchi):
- skip_msg = ("%s skipped as gnocchi is enabled" %
- cls.__name__)
- raise cls.skipException(skip_msg)
- if not CONF.service_available.swift:
- skip_msg = ("%s skipped as swift is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
- if not CONF.service_available.ceilometer:
- skip_msg = ("%s skipped as ceilometer is not available" %
- cls.__name__)
- raise cls.skipException(skip_msg)
-
- @classmethod
- def setup_credentials(cls):
- cls.set_network_resources()
- super(TestObjectStorageTelemetry, cls).setup_credentials()
-
- @classmethod
- def setup_clients(cls):
- super(TestObjectStorageTelemetry, cls).setup_clients()
- cls.telemetry_client = cls.os_primary.telemetry_client
- cls.container_client = cls.os_primary.container_client
- cls.object_client = cls.os_primary.object_client
-
- def _confirm_notifications(self, container_name, obj_name):
- # NOTE: Poll for the expected notifications about the containers
- # and objects sent to swift.
-
- def _check_samples():
- # NOTE: Return True only if we have notifications about some
- # containers and some objects and the notifications are about
- # the expected containers and objects.
- # Otherwise returning False will cause _check_samples to be
- # called again.
- results = self.telemetry_client.list_samples(
- 'storage.objects.incoming.bytes')
- LOG.debug('got samples %s', results)
-
- # Extract container info from samples.
- containers, objects = [], []
- for sample in results:
- meta = sample['resource_metadata']
- if meta.get('container') and meta['container'] != 'None':
- containers.append(meta['container'])
- elif (meta.get('target.metadata:container') and
- meta['target.metadata:container'] != 'None'):
- containers.append(meta['target.metadata:container'])
-
- if meta.get('object') and meta['object'] != 'None':
- objects.append(meta['object'])
- elif (meta.get('target.metadata:object') and
- meta['target.metadata:object'] != 'None'):
- objects.append(meta['target.metadata:object'])
-
- return (container_name in containers and obj_name in objects)
-
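- # The samples may carry the container/object either directly
- # (metadata['container'] / metadata['object']) or namespaced as
- # 'target.metadata:container' / 'target.metadata:object'; the
- # helper above checks both places before declaring success.
-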
- self.assertTrue(
- test_utils.call_until_true(_check_samples,
- CONF.telemetry.notification_wait,
- CONF.telemetry.notification_sleep),
- 'Correct notifications were not received after '
- '%s seconds.' % CONF.telemetry.notification_wait)
-
- def create_container(self):
- name = data_utils.rand_name('swift-scenario-container')
- self.container_client.create_container(name)
- # look for the container to ensure it is created
- self.container_client.list_container_objects(name)
- LOG.debug('Container %s created', name)
- self.addCleanup(self.container_client.delete_container,
- name)
- return name
-
- def upload_object_to_container(self, container_name):
- obj_name = data_utils.rand_name('swift-scenario-object')
- obj_data = data_utils.arbitrary_string()
- self.object_client.create_object(container_name, obj_name, obj_data)
- self.addCleanup(self.object_client.delete_object,
- container_name,
- obj_name)
- return obj_name
-
- @decorators.idempotent_id('6d6b88e5-3e38-41bc-b34a-79f713a6cb85')
- @utils.services('object_storage')
- def test_swift_middleware_notifies(self):
- container_name = self.create_container()
- obj_name = self.upload_object_to_container(container_name)
- self._confirm_notifications(container_name, obj_name)
diff --git a/ceilometer/tests/tempest/service/__init__.py b/ceilometer/tests/tempest/service/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/tempest/service/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/tempest/service/client.py b/ceilometer/tests/tempest/service/client.py
deleted file mode 100644
index 59c585e7..00000000
--- a/ceilometer/tests/tempest/service/client.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_serialization import jsonutils as json
-from six.moves.urllib import parse as urllib
-
-from tempest import clients
-from tempest import config
-from tempest.lib.common import rest_client
-
-
-CONF = config.CONF
-
-
-class TelemetryClient(rest_client.RestClient):
-
- version = '2'
- uri_prefix = "v2"
-
- def deserialize(self, body):
- return json.loads(body.replace("\n", ""))
-
- def serialize(self, body):
- return json.dumps(body)
-
- def create_sample(self, meter_name, sample_list):
- uri = "%s/meters/%s" % (self.uri_prefix, meter_name)
- body = self.serialize(sample_list)
- resp, body = self.post(uri, body)
- self.expected_success(200, resp.status)
- body = self.deserialize(body)
- return rest_client.ResponseBody(resp, body)
-
- def _helper_list(self, uri, query=None, period=None):
- uri_dict = {}
- if query:
- uri_dict = {'q.field': query[0],
- 'q.op': query[1],
- 'q.value': query[2]}
- if period:
- uri_dict['period'] = period
- if uri_dict:
- uri += "?%s" % urllib.urlencode(uri_dict)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = self.deserialize(body)
- return rest_client.ResponseBodyList(resp, body)
-
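- # For a call like list_statistics('apples', period=600,
- # query=('resource_id', 'eq', 'bd94...')), the URI built above comes
- # out roughly as (a sketch; urlencode ordering may vary):
- #
- # v2/meters/apples/statistics?q.field=resource_id&q.op=eq&q.value=bd94...&period=600
-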
- def list_resources(self, query=None):
- uri = '%s/resources' % self.uri_prefix
- return self._helper_list(uri, query)
-
- def list_meters(self, query=None):
- uri = '%s/meters' % self.uri_prefix
- return self._helper_list(uri, query)
-
- def list_statistics(self, meter, period=None, query=None):
- uri = "%s/meters/%s/statistics" % (self.uri_prefix, meter)
- return self._helper_list(uri, query, period)
-
- def list_samples(self, meter_id, query=None):
- uri = '%s/meters/%s' % (self.uri_prefix, meter_id)
- return self._helper_list(uri, query)
-
- def show_resource(self, resource_id):
- uri = '%s/resources/%s' % (self.uri_prefix, resource_id)
- resp, body = self.get(uri)
- self.expected_success(200, resp.status)
- body = self.deserialize(body)
- return rest_client.ResponseBody(resp, body)
-
-
-class Manager(clients.Manager):
-
- default_params = config.service_client_config()
-
- telemetry_params = {
- 'service': CONF.telemetry.catalog_type,
- 'region': CONF.identity.region,
- 'endpoint_type': CONF.telemetry.endpoint_type,
- }
- telemetry_params.update(default_params)
-
- def __init__(self, credentials):
- # TODO(andreaf) Overriding Manager is a workaround. The "proper" way
- # would be to expose the ceilometer service client via the plugin
- # interface, use tempest.lib.clients and tempest master.
- # Then ceilometer service client would be loaded and configured
- # automatically into ServiceClients.
- # In any case we're about to declare clients.Manager a stable
- # interface for plugins and we won't change it, so this code won't
- # break.
- super(Manager, self).__init__(credentials=credentials)
- self.set_telemetry_client()
-
- def set_telemetry_client(self):
- self.telemetry_client = TelemetryClient(self.auth_provider,
- **self.telemetry_params)
diff --git a/ceilometer/tests/unit/api/__init__.py b/ceilometer/tests/unit/api/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/api/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/api/test_app.py b/ceilometer/tests/unit/api/test_app.py
deleted file mode 100644
index e32b9bb1..00000000
--- a/ceilometer/tests/unit/api/test_app.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright 2014 IBM Corp.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-from oslo_config import cfg
-
-from ceilometer.api import app
-from ceilometer import service
-from ceilometer.tests import base
-
-
-class TestApp(base.BaseTestCase):
-
- def setUp(self):
- super(TestApp, self).setUp()
- self.CONF = service.prepare_service([], [])
-
- def test_api_paste_file_not_exist(self):
- self.CONF.set_override('api_paste_config', 'non-existent-file')
- with mock.patch.object(self.CONF, 'find_file') as ff:
- ff.return_value = None
- self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app,
- self.CONF)
diff --git a/ceilometer/tests/unit/api/test_hooks.py b/ceilometer/tests/unit/api/test_hooks.py
deleted file mode 100644
index f987b472..00000000
--- a/ceilometer/tests/unit/api/test_hooks.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import oslo_messaging
-
-from ceilometer.api import hooks
-from ceilometer import service
-from ceilometer.tests import base
-
-
-class TestTestNotifierHook(base.BaseTestCase):
-
- def setUp(self):
- super(TestTestNotifierHook, self).setUp()
- self.CONF = service.prepare_service([], [])
-
- def test_init_notifier_with_drivers(self):
- hook = hooks.NotifierHook(self.CONF)
- notifier = hook.notifier
- self.assertIsInstance(notifier, oslo_messaging.Notifier)
- self.assertEqual(['messagingv2'], notifier._driver_names)
diff --git a/ceilometer/tests/unit/api/v2/__init__.py b/ceilometer/tests/unit/api/v2/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/api/v2/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/api/v2/test_complex_query.py b/ceilometer/tests/unit/api/v2/test_complex_query.py
deleted file mode 100644
index 5c9f9c76..00000000
--- a/ceilometer/tests/unit/api/v2/test_complex_query.py
+++ /dev/null
@@ -1,339 +0,0 @@
-#
-# Copyright Ericsson AB 2013. All rights reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test the methods related to complex query."""
-import datetime
-
-import fixtures
-import jsonschema
-import mock
-from oslotest import base
-import wsme
-
-from ceilometer.api.controllers.v2 import query
-from ceilometer.storage import models
-
-
-class FakeComplexQuery(query.ValidatedComplexQuery):
- def __init__(self, db_model, additional_name_mapping=None, metadata=False):
- super(FakeComplexQuery, self).__init__(query=None,
- db_model=db_model,
- additional_name_mapping=(
- additional_name_mapping or
- {}),
- metadata_allowed=metadata)
-
-
-sample_name_mapping = {"resource": "resource_id",
- "meter": "counter_name",
- "type": "counter_type",
- "unit": "counter_unit",
- "volume": "counter_volume"}
-
-
-class TestComplexQuery(base.BaseTestCase):
- def setUp(self):
- super(TestComplexQuery, self).setUp()
- self.useFixture(fixtures.MonkeyPatch(
- 'pecan.response', mock.MagicMock()))
- self.query = FakeComplexQuery(models.Sample,
- sample_name_mapping,
- True)
-
- def test_replace_isotime_utc(self):
- filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}}
- self.query._replace_isotime_with_datetime(filter_expr)
- self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29),
- filter_expr["="]["timestamp"])
-
- def test_replace_isotime_timezone_removed(self):
- filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}}
- self.query._replace_isotime_with_datetime(filter_expr)
- self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29),
- filter_expr["="]["timestamp"])
-
- def test_replace_isotime_wrong_syntax(self):
- filter_expr = {"=": {"timestamp": "not a valid isotime string"}}
- self.assertRaises(wsme.exc.ClientSideError,
- self.query._replace_isotime_with_datetime,
- filter_expr)
-
- def test_replace_isotime_in_complex_filter(self):
- filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}},
- {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]}
- self.query._replace_isotime_with_datetime(filter_expr)
- self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29),
- filter_expr["and"][0]["="]["timestamp"])
- self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29),
- filter_expr["and"][1]["="]["timestamp"])
-
- def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self):
- subfilter = {"and": [{"=": {"project_id": 42}},
- {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]}
-
- filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}},
- subfilter]}
-
- self.query._replace_isotime_with_datetime(filter_expr)
- self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29),
- filter_expr["or"][0]["="]["timestamp"])
- self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29),
- filter_expr["or"][1]["and"][1]["="]["timestamp"])
-
- def test_convert_operator_to_lower_case(self):
- filter_expr = {"AND": [{"=": {"project_id": 42}},
- {"=": {"project_id": 44}}]}
- self.query._convert_operator_to_lower_case(filter_expr)
- self.assertEqual("and", list(filter_expr.keys())[0])
-
- filter_expr = {"Or": [{"=": {"project_id": 43}},
- {"anD": [{"=": {"project_id": 44}},
- {"=": {"project_id": 42}}]}]}
- self.query._convert_operator_to_lower_case(filter_expr)
- self.assertEqual("or", list(filter_expr.keys())[0])
- self.assertEqual("and", list(filter_expr["or"][1].keys())[0])
-
- def test_invalid_filter_misstyped_field_name_samples(self):
- filter = {"=": {"project_id11": 42}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_invalid_complex_filter_wrong_field_names(self):
- filter = {"and":
- [{"=": {"non_existing_field": 42}},
- {"=": {"project_id": 42}}]}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- filter = {"or":
- [{"=": {"non_existing_field": 42}},
- {"and":
- [{"=": {"project_id": 44}},
- {"=": {"project_id": 42}}]}]}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_convert_orderby(self):
- orderby = []
- self.query._convert_orderby_to_lower_case(orderby)
- self.assertEqual([], orderby)
-
- orderby = [{"project_id": "DESC"}]
- self.query._convert_orderby_to_lower_case(orderby)
- self.assertEqual([{"project_id": "desc"}], orderby)
-
- orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}]
- self.query._convert_orderby_to_lower_case(orderby)
- self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}],
- orderby)
-
- def test_validate_orderby_empty_direction(self):
- orderby = [{"project_id": ""}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
- orderby = [{"project_id": "asc"}, {"resource_id": ""}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_wrong_order_string(self):
- orderby = [{"project_id": "not a valid order"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_wrong_multiple_item_order_string(self):
- orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_empty_field_name(self):
- orderby = [{"": "ASC"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
- orderby = [{"project_id": "asc"}, {"": "desc"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_wrong_field_name(self):
- orderby = [{"project_id11": "ASC"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_wrong_field_name_multiple_item_orderby(self):
- orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
- def test_validate_orderby_metadata_is_not_allowed(self):
- orderby = [{"metadata.display_name": "asc"}]
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_orderby,
- orderby)
-
-
-class TestFilterSyntaxValidation(base.BaseTestCase):
- def setUp(self):
- super(TestFilterSyntaxValidation, self).setUp()
- self.query = FakeComplexQuery(models.Sample,
- sample_name_mapping,
- True)
-
- def test_simple_operator(self):
- filter = {"=": {"project_id": "string_value"}}
- self.query._validate_filter(filter)
-
- filter = {"=>": {"project_id": "string_value"}}
- self.query._validate_filter(filter)
-
- def test_valid_value_types(self):
- valid_values = ["string_value", 42, 3.14, True, False]
- for valid_value in valid_values:
- filter = {"=": {"project_id": valid_value}}
- self.query._validate_filter(filter)
-
- def test_invalid_simple_operator(self):
- invalid_operators = ["==", ""]
- for invalid_operator in invalid_operators:
- filter = {invalid_operator: {"project_id": "string_value"}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_more_than_one_operator_is_invalid(self):
- filter = {"=": {"project_id": "string_value"},
- "<": {"": ""}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_empty_expression_is_invalid(self):
- filter = {}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_invalid_field_name(self):
- invalid_names = ["", " ", "\t"]
- for invalid_name in invalid_names:
- filter = {"=": {invalid_name: "value"}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_more_than_one_field_is_invalid(self):
- filter = {"=": {"project_id": "value", "resource_id": "value"}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_missing_field_after_simple_op_is_invalid(self):
- filter = {"=": {}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_and_or(self):
- filter = {"and": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}}]}
- self.query._validate_filter(filter)
-
- filter = {"or": [{"and": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}}]},
- {"=": {"counter_name": "value"}}]}
- self.query._validate_filter(filter)
-
- filter = {"or": [{"and": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}},
- {"<": {"counter_name": 42}}]},
- {"=": {"counter_name": "value"}}]}
- self.query._validate_filter(filter)
-
- def test_complex_operator_with_in(self):
- filter = {"and": [{"<": {"counter_volume": 42}},
- {">=": {"counter_volume": 36}},
- {"in": {"project_id": ["project_id1",
- "project_id2",
- "project_id3"]}}]}
- self.query._validate_filter(filter)
-
- def test_invalid_complex_operator(self):
- filter = {"xor": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}}]}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_and_or_with_one_child_is_invalid(self):
- filter = {"or": [{"=": {"project_id": "string_value"}}]}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_complex_operator_with_zero_child_is_invalid(self):
- filter = {"or": []}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_more_than_one_complex_operator_is_invalid(self):
- filter = {"and": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}}],
- "or": [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}}]}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_not(self):
- filter = {"not": {"=": {"project_id": "value"}}}
- self.query._validate_filter(filter)
-
- filter = {
- "not":
- {"or":
- [{"and":
- [{"=": {"project_id": "string_value"}},
- {"=": {"resource_id": "value"}},
- {"<": {"counter_name": 42}}]},
- {"=": {"counter_name": "value"}}]}}
- self.query._validate_filter(filter)
-
- def test_not_with_zero_child_is_invalid(self):
- filter = {"not": {}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_not_with_more_than_one_child_is_invalid(self):
- filter = {"not": {"=": {"project_id": "value"},
- "!=": {"resource_id": "value"}}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
-
- def test_empty_in_query_not_passing(self):
- filter = {"in": {"resource_id": []}}
- self.assertRaises(jsonschema.ValidationError,
- self.query._validate_filter,
- filter)
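-
-
-# Taken together, these cases pin down the accepted grammar (a summary
-# sketch, not the schema itself): a filter is a dict with exactly one
-# key, either a simple operator (e.g. =, <, >=, in) applied to exactly
-# one field, or "and"/"or" over a list of at least two subfilters, or
-# "not" over exactly one subfilter. An orderby is a list of single-key
-# dicts mapping a field name to "asc" or "desc" (case-insensitive on
-# input, normalized to lower case).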
diff --git a/ceilometer/tests/unit/api/v2/test_query.py b/ceilometer/tests/unit/api/v2/test_query.py
deleted file mode 100644
index efe99b77..00000000
--- a/ceilometer/tests/unit/api/v2/test_query.py
+++ /dev/null
@@ -1,387 +0,0 @@
-# Copyright 2013 OpenStack Foundation.
-# All Rights Reserved.
-# Copyright 2013 IBM Corp.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test the methods related to query."""
-import datetime
-
-import fixtures
-import mock
-from oslo_utils import timeutils
-from oslotest import base
-import wsme
-
-from ceilometer.api.controllers.v2 import base as v2_base
-from ceilometer.api.controllers.v2 import meters
-from ceilometer.api.controllers.v2 import utils
-from ceilometer import storage
-from ceilometer.storage import base as storage_base
-from ceilometer.tests import base as tests_base
-
-
-class TestQuery(base.BaseTestCase):
- def setUp(self):
- super(TestQuery, self).setUp()
- self.useFixture(fixtures.MonkeyPatch(
- 'pecan.response', mock.MagicMock()))
-
- def test_get_value_as_type_with_integer(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='123',
- type='integer')
- expected = 123
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_float(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='123.456',
- type='float')
- expected = 123.456
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_boolean(self):
- query = v2_base.Query(field='metadata.is_public',
- op='eq',
- value='True',
- type='boolean')
- expected = True
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_string(self):
- query = v2_base.Query(field='metadata.name',
- op='eq',
- value='linux',
- type='string')
- expected = 'linux'
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_datetime(self):
- query = v2_base.Query(field='metadata.date',
- op='eq',
- value='2014-01-01T05:00:00',
- type='datetime')
- self.assertIsInstance(query._get_value_as_type(), datetime.datetime)
- self.assertIsNone(query._get_value_as_type().tzinfo)
-
- def test_get_value_as_type_with_integer_without_type(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='123')
- expected = 123
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_float_without_type(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='123.456')
- expected = 123.456
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_boolean_without_type(self):
- query = v2_base.Query(field='metadata.is_public',
- op='eq',
- value='True')
- expected = True
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_string_without_type(self):
- query = v2_base.Query(field='metadata.name',
- op='eq',
- value='linux')
- expected = 'linux'
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_bad_type(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='123.456',
- type='blob')
- self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type)
-
- def test_get_value_as_type_with_bad_value(self):
- query = v2_base.Query(field='metadata.size',
- op='eq',
- value='fake',
- type='integer')
- self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type)
-
- def test_get_value_as_type_integer_expression_without_type(self):
- # bug 1221736
- query = v2_base.Query(field='should_be_a_string',
- op='eq',
- value='WWW-Layer-4a80714f')
- expected = 'WWW-Layer-4a80714f'
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_boolean_expression_without_type(self):
- # bug 1221736
- query = v2_base.Query(field='should_be_a_string',
- op='eq',
- value='True or False')
- expected = 'True or False'
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_syntax_error(self):
- # bug 1221736
- value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm'
- query = v2_base.Query(field='group_id',
- op='eq',
- value=value)
- expected = value
- self.assertEqual(expected, query._get_value_as_type())
-
- def test_get_value_as_type_with_syntax_error_colons(self):
- # bug 1221736
- value = 'Ref::StackId'
- query = v2_base.Query(field='field_name',
- op='eq',
- value=value)
- expected = value
- self.assertEqual(expected, query._get_value_as_type())
-
-
-class TestValidateGroupByFields(base.BaseTestCase):
-
- def test_valid_field(self):
- result = meters._validate_groupby_fields(['user_id'])
- self.assertEqual(['user_id'], result)
-
- def test_valid_fields_multiple(self):
- result = set(meters._validate_groupby_fields(
- ['user_id', 'project_id', 'source']))
- self.assertEqual(set(['user_id', 'project_id', 'source']), result)
-
- def test_invalid_field(self):
- self.assertRaises(wsme.exc.UnknownArgument,
- meters._validate_groupby_fields,
- ['wtf'])
-
- def test_invalid_field_multiple(self):
- self.assertRaises(wsme.exc.UnknownArgument,
- meters._validate_groupby_fields,
- ['user_id', 'wtf', 'project_id', 'source'])
-
- def test_duplicate_fields(self):
- result = set(
- meters._validate_groupby_fields(['user_id', 'source', 'user_id'])
- )
- self.assertEqual(set(['user_id', 'source']), result)
-
-
-class TestQueryToKwArgs(tests_base.BaseTestCase):
- def setUp(self):
- super(TestQueryToKwArgs, self).setUp()
- self.useFixture(fixtures.MockPatchObject(
- utils, 'sanitize_query', side_effect=lambda x, y, **z: x))
- self.useFixture(fixtures.MockPatchObject(
- utils, '_verify_query_segregation', side_effect=lambda x, **z: x))
-
- def test_sample_filter_single(self):
- q = [v2_base.Query(field='user_id',
- op='eq',
- value='uid')]
- kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- self.assertIn('user', kwargs)
- self.assertEqual(1, len(kwargs))
- self.assertEqual('uid', kwargs['user'])
-
- def test_sample_filter_multi(self):
- q = [v2_base.Query(field='user_id',
- op='eq',
- value='uid'),
- v2_base.Query(field='project_id',
- op='eq',
- value='pid'),
- v2_base.Query(field='resource_id',
- op='eq',
- value='rid'),
- v2_base.Query(field='source',
- op='eq',
- value='source_name'),
- v2_base.Query(field='meter',
- op='eq',
- value='meter_name')]
- kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- self.assertEqual(5, len(kwargs))
- self.assertEqual('uid', kwargs['user'])
- self.assertEqual('pid', kwargs['project'])
- self.assertEqual('rid', kwargs['resource'])
- self.assertEqual('source_name', kwargs['source'])
- self.assertEqual('meter_name', kwargs['meter'])
-
- def test_sample_filter_timestamp(self):
- ts_start = timeutils.utcnow()
- ts_end = ts_start + datetime.timedelta(minutes=5)
- q = [v2_base.Query(field='timestamp',
- op='lt',
- value=str(ts_end)),
- v2_base.Query(field='timestamp',
- op='gt',
- value=str(ts_start))]
- kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- self.assertEqual(4, len(kwargs))
- self.assertTimestampEqual(kwargs['start_timestamp'], ts_start)
- self.assertTimestampEqual(kwargs['end_timestamp'], ts_end)
- self.assertEqual('gt', kwargs['start_timestamp_op'])
- self.assertEqual('lt', kwargs['end_timestamp_op'])
-
- def test_sample_filter_meta(self):
- q = [v2_base.Query(field='metadata.size',
- op='eq',
- value='20'),
- v2_base.Query(field='resource_metadata.id',
- op='eq',
- value='meta_id')]
- kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
- self.assertEqual(1, len(kwargs))
- self.assertEqual(2, len(kwargs['metaquery']))
- self.assertEqual(20, kwargs['metaquery']['metadata.size'])
- self.assertEqual('meta_id', kwargs['metaquery']['metadata.id'])
-
- def test_sample_filter_non_equality_on_metadata(self):
- queries = [v2_base.Query(field='resource_metadata.image_id',
- op='gt',
- value='image',
- type='string'),
- v2_base.Query(field='metadata.ramdisk_id',
- op='le',
- value='ramdisk',
- type='string')]
- with mock.patch('pecan.request') as request:
- request.headers.return_value = {'X-ProjectId': 'foobar'}
- self.assertRaises(
- wsme.exc.InvalidInput,
- utils.query_to_kwargs,
- queries,
- storage.SampleFilter.__init__)
-
- def test_sample_filter_invalid_field(self):
- q = [v2_base.Query(field='invalid',
- op='eq',
- value='20')]
- self.assertRaises(
- wsme.exc.UnknownArgument,
- utils.query_to_kwargs, q, storage.SampleFilter.__init__)
-
- def test_sample_filter_invalid_op(self):
- q = [v2_base.Query(field='user_id',
- op='lt',
- value='20')]
- self.assertRaises(
- wsme.exc.InvalidInput,
- utils.query_to_kwargs, q, storage.SampleFilter.__init__)
-
- def test_sample_filter_timestamp_invalid_op(self):
- ts_start = timeutils.utcnow()
- q = [v2_base.Query(field='timestamp',
- op='eq',
- value=str(ts_start))]
- self.assertRaises(
- wsme.exc.InvalidInput,
- utils.query_to_kwargs, q, storage.SampleFilter.__init__)
-
- def test_sample_filter_exclude_internal(self):
- queries = [v2_base.Query(field=f,
- op='eq',
- value='fake',
- type='string')
- for f in ['y', 'on_behalf_of', 'x']]
- with mock.patch('pecan.request') as request:
- request.headers.return_value = {'X-ProjectId': 'foobar'}
- self.assertRaises(wsme.exc.ClientSideError,
- utils.query_to_kwargs,
- queries,
- storage.SampleFilter.__init__,
- internal_keys=['on_behalf_of'])
-
- def test_sample_filter_self_always_excluded(self):
- queries = [v2_base.Query(field='user_id',
- op='eq',
- value='20')]
- with mock.patch('pecan.request') as request:
- request.headers.return_value = {'X-ProjectId': 'foobar'}
- kwargs = utils.query_to_kwargs(queries,
- storage.SampleFilter.__init__)
- self.assertNotIn('self', kwargs)
-
- def test_sample_filter_translation(self):
- queries = [v2_base.Query(field=f,
- op='eq',
- value='fake_%s' % f,
- type='string') for f in ['user_id',
- 'project_id',
- 'resource_id']]
- with mock.patch('pecan.request') as request:
- request.headers.return_value = {'X-ProjectId': 'foobar'}
- kwargs = utils.query_to_kwargs(queries,
- storage.SampleFilter.__init__)
- for o in ['user', 'project', 'resource']:
- self.assertEqual('fake_%s_id' % o, kwargs.get(o))
-
- def test_timestamp_validation(self):
- q = [v2_base.Query(field='timestamp',
- op='le',
- value='123')]
-
- exc = self.assertRaises(
- wsme.exc.InvalidInput,
- utils.query_to_kwargs, q, storage.SampleFilter.__init__)
- expected_exc = wsme.exc.InvalidInput('timestamp', '123',
- 'invalid timestamp format')
- self.assertEqual(str(expected_exc), str(exc))
-
- def test_sample_filter_valid_fields(self):
- q = [v2_base.Query(field='abc',
- op='eq',
- value='abc')]
- exc = self.assertRaises(
- wsme.exc.UnknownArgument,
- utils.query_to_kwargs, q, storage.SampleFilter.__init__)
- valid_keys = ['message_id', 'meter', 'project', 'resource',
- 'search_offset', 'source', 'timestamp', 'user']
- msg = ("unrecognized field in query: %s, "
- "valid keys: %s") % (q, valid_keys)
- expected_exc = wsme.exc.UnknownArgument('abc', msg)
- self.assertEqual(str(expected_exc), str(exc))
-
- def test_get_meters_filter_valid_fields(self):
- q = [v2_base.Query(field='abc',
- op='eq',
- value='abc')]
- exc = self.assertRaises(
- wsme.exc.UnknownArgument,
- utils.query_to_kwargs,
- q, storage_base.Connection.get_meters, ['limit', 'unique'])
- valid_keys = ['project', 'resource', 'source', 'user']
- msg = ("unrecognized field in query: %s, "
- "valid keys: %s") % (q, valid_keys)
- expected_exc = wsme.exc.UnknownArgument('abc', msg)
- self.assertEqual(str(expected_exc), str(exc))
-
- def test_get_resources_filter_valid_fields(self):
- q = [v2_base.Query(field='abc',
- op='eq',
- value='abc')]
- exc = self.assertRaises(
- wsme.exc.UnknownArgument,
- utils.query_to_kwargs,
- q, storage_base.Connection.get_resources, ['limit'])
- valid_keys = ['project', 'resource',
- 'search_offset', 'source', 'timestamp', 'user']
- msg = ("unrecognized field in query: %s, "
- "valid keys: %s") % (q, valid_keys)
- expected_exc = wsme.exc.UnknownArgument('abc', msg)
- self.assertEqual(str(expected_exc), str(exc))
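For reference, the deleted ``TestQuery`` cases above exercised how
``Query._get_value_as_type`` coerced the string value of a simple-query
filter: it honoured an explicit ``type`` or guessed one via
``ast.literal_eval``, falling back to a plain string (bug 1221736). A
minimal sketch of that behaviour, using hypothetical names and omitting
the ``wsme.exc.ClientSideError`` wrapping the real code applied:

.. code-block:: python

    import ast
    import datetime


    def get_value_as_type(value, forced_type=None):
        """Coerce a query value string, guessing the type if none given."""
        try:
            if forced_type == 'integer':
                return int(value)
            elif forced_type == 'float':
                return float(value)
            elif forced_type == 'boolean':
                return value.lower() == 'true'
            elif forced_type == 'string':
                return value
            elif forced_type == 'datetime':
                # The real code parsed ISO 8601 via oslo.utils timeutils.
                return datetime.datetime.strptime(value,
                                                  '%Y-%m-%dT%H:%M:%S')
            elif forced_type:
                raise ValueError('unknown type: %s' % forced_type)
            converted = ast.literal_eval(value)
            if not isinstance(converted, (int, float, bool)):
                # Non-scalar literals are kept as the raw string.
                return value
            return converted
        except (ValueError, SyntaxError):
            if forced_type:
                # The real code re-raised this as wsme.exc.ClientSideError.
                raise
            # bug 1221736: values such as 'Ref::StackId' or 'True or False'
            # fail to parse as literals and stay plain strings.
            return value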
diff --git a/ceilometer/tests/unit/api/v2/test_statistics.py b/ceilometer/tests/unit/api/v2/test_statistics.py
deleted file mode 100644
index d5198540..00000000
--- a/ceilometer/tests/unit/api/v2/test_statistics.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#
-# Copyright 2012 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Test statistics objects."""
-
-import datetime
-
-from oslotest import base
-
-from ceilometer.api.controllers.v2 import meters
-
-
-class TestStatisticsDuration(base.BaseTestCase):
-
- def setUp(self):
- super(TestStatisticsDuration, self).setUp()
-
- # Create events relative to the range and pretend
- # that the intervening events exist.
-
- self.early1 = datetime.datetime(2012, 8, 27, 7, 0)
- self.early2 = datetime.datetime(2012, 8, 27, 17, 0)
-
- self.start = datetime.datetime(2012, 8, 28, 0, 0)
-
- self.middle1 = datetime.datetime(2012, 8, 28, 8, 0)
- self.middle2 = datetime.datetime(2012, 8, 28, 18, 0)
-
- self.end = datetime.datetime(2012, 8, 28, 23, 59)
-
- self.late1 = datetime.datetime(2012, 8, 29, 9, 0)
- self.late2 = datetime.datetime(2012, 8, 29, 19, 0)
-
- def test_nulls(self):
- s = meters.Statistics(duration_start=None,
- duration_end=None,
- start_timestamp=None,
- end_timestamp=None)
- self.assertIsNone(s.duration_start)
- self.assertIsNone(s.duration_end)
- self.assertIsNone(s.duration)
-
- def test_overlap_range_start(self):
- s = meters.Statistics(duration_start=self.early1,
- duration_end=self.middle1,
- start_timestamp=self.start,
- end_timestamp=self.end)
- self.assertEqual(self.start, s.duration_start)
- self.assertEqual(self.middle1, s.duration_end)
- self.assertEqual(8 * 60 * 60, s.duration)
-
- def test_within_range(self):
- s = meters.Statistics(duration_start=self.middle1,
- duration_end=self.middle2,
- start_timestamp=self.start,
- end_timestamp=self.end)
- self.assertEqual(self.middle1, s.duration_start)
- self.assertEqual(self.middle2, s.duration_end)
- self.assertEqual(10 * 60 * 60, s.duration)
-
- def test_within_range_zero_duration(self):
- s = meters.Statistics(duration_start=self.middle1,
- duration_end=self.middle1,
- start_timestamp=self.start,
- end_timestamp=self.end)
- self.assertEqual(self.middle1, s.duration_start)
- self.assertEqual(self.middle1, s.duration_end)
- self.assertEqual(0, s.duration)
-
- def test_overlap_range_end(self):
- s = meters.Statistics(duration_start=self.middle2,
- duration_end=self.late1,
- start_timestamp=self.start,
- end_timestamp=self.end)
- self.assertEqual(self.middle2, s.duration_start)
- self.assertEqual(self.end, s.duration_end)
- self.assertEqual(((6 * 60) - 1) * 60, s.duration)
-
- def test_after_range(self):
- s = meters.Statistics(duration_start=self.late1,
- duration_end=self.late2,
- start_timestamp=self.start,
- end_timestamp=self.end)
- self.assertIsNone(s.duration_start)
- self.assertIsNone(s.duration_end)
- self.assertIsNone(s.duration)
-
- def test_without_timestamp(self):
- s = meters.Statistics(duration_start=self.late1,
- duration_end=self.late2,
- start_timestamp=None,
- end_timestamp=None)
- self.assertEqual(self.late1, s.duration_start)
- self.assertEqual(self.late2, s.duration_end)
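The duration assertions deleted above boil down to clamping the measured
duration against the queried time window. A rough sketch of that logic
with illustrative names (the real implementation lived in
``meters.Statistics``):

.. code-block:: python

    import datetime


    def clamp_duration(duration_start, duration_end,
                       start_timestamp, end_timestamp):
        """Clamp a measured duration to the queried [start, end] window."""
        if start_timestamp and duration_start:
            duration_start = max(duration_start, start_timestamp)
        if end_timestamp and duration_end:
            duration_end = min(duration_end, end_timestamp)
        # A duration lying entirely outside the window reports no duration.
        if (duration_start is None or duration_end is None
                or (end_timestamp and duration_start > end_timestamp)):
            return None, None, None
        duration = (duration_end - duration_start).total_seconds()
        return duration_start, duration_end, duration


    # Mirrors test_overlap_range_end above: 18:00 clamped against a 23:59
    # window end gives ((6 * 60) - 1) * 60 seconds.
    _, _, duration = clamp_duration(
        datetime.datetime(2012, 8, 28, 18, 0),
        datetime.datetime(2012, 8, 29, 9, 0),
        datetime.datetime(2012, 8, 28, 0, 0),
        datetime.datetime(2012, 8, 28, 23, 59))
    assert duration == ((6 * 60) - 1) * 60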
diff --git a/ceilometer/tests/unit/telemetry/__init__.py b/ceilometer/tests/unit/telemetry/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/ceilometer/tests/unit/telemetry/__init__.py
+++ /dev/null
diff --git a/ceilometer/tests/unit/telemetry/test_notifications.py b/ceilometer/tests/unit/telemetry/test_notifications.py
deleted file mode 100644
index 98e65192..00000000
--- a/ceilometer/tests/unit/telemetry/test_notifications.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslotest import base
-
-from ceilometer.telemetry import notifications
-
-NOTIFICATION = {
- u'_context_domain': None,
- u'_context_request_id': u'req-da91b4bf-d2b5-43ae-8b66-c7752e72726d',
- 'event_type': u'telemetry.api',
- 'timestamp': u'2015-06-19T09:19:35.786893',
- u'_context_auth_token': None,
- u'_context_read_only': False,
- 'payload': {'samples':
- [{'counter_name': u'instance100',
- u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
- u'resource_id': u'instance',
- u'timestamp': u'2015-06-19T09: 19: 35.785330',
- u'message_signature': u'fake_signature1',
- u'resource_metadata': {u'foo': u'bar'},
- u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
- u'counter_unit': u'instance',
- u'counter_volume': 1.0,
- u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
- u'message_id': u'4d865c6e-1664-11e5-9d41-0819a6cff905',
- u'counter_type': u'gauge'},
- {u'counter_name': u'instance100',
- u'user_id': u'e1d870e51c7340cb9d555b15cbfcaec2',
- u'resource_id': u'instance',
- u'timestamp': u'2015-06-19T09: 19: 35.785330',
- u'message_signature': u'fake_signature12',
- u'resource_metadata': {u'foo': u'bar'},
- u'source': u'30be1fc9a03c4e94ab05c403a8a377f2: openstack',
- u'counter_unit': u'instance',
- u'counter_volume': 1.0,
- u'project_id': u'30be1fc9a03c4e94ab05c403a8a377f2',
- u'message_id': u'4d866da8-1664-11e5-9d41-0819a6cff905',
- u'counter_type': u'gauge'}]},
- u'_context_resource_uuid': None,
- u'_context_user_identity': u'fake_user_identity---',
- u'_context_show_deleted': False,
- u'_context_tenant': u'30be1fc9a03c4e94ab05c403a8a377f2',
- 'priority': 'info',
- u'_context_is_admin': True,
- u'_context_project_domain': None,
- u'_context_user': u'e1d870e51c7340cb9d555b15cbfcaec2',
- u'_context_user_domain': None,
- 'publisher_id': u'ceilometer.api',
- 'message_id': u'939823de-c242-45a2-a399-083f4d6a8c3e'
-}
-
-
-class TelemetryIpcTestCase(base.BaseTestCase):
-
- def test_process_notification(self):
- sample_creation = notifications.TelemetryIpc(None)
- samples = list(sample_creation.process_notification(NOTIFICATION))
- self.assertEqual(2, len(samples))
- payload = NOTIFICATION["payload"]['samples']
- for index, sample in enumerate(samples):
- self.assertEqual(payload[index]["user_id"], sample.user_id)
- self.assertEqual(payload[index]["counter_name"], sample.name)
- self.assertEqual(payload[index]["resource_id"], sample.resource_id)
- self.assertEqual(payload[index]["timestamp"], sample.timestamp)
- self.assertEqual(payload[index]["resource_metadata"],
- sample.resource_metadata)
- self.assertEqual(payload[index]["counter_volume"], sample.volume)
- self.assertEqual(payload[index]["source"], sample.source)
- self.assertEqual(payload[index]["counter_type"], sample.type)
- self.assertEqual(payload[index]["message_id"], sample.id)
- self.assertEqual(payload[index]["counter_unit"], sample.unit)
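The assertions in the deleted test above fully describe the mapping that
``TelemetryIpc.process_notification`` performed: each ``counter_*`` entry
in the payload becomes one sample object. A self-contained sketch, using a
stand-in namedtuple rather than the real ``ceilometer.sample.Sample``:

.. code-block:: python

    import collections

    # Stand-in for ceilometer.sample.Sample; the field mapping follows the
    # payload keys asserted in the removed test.
    Sample = collections.namedtuple(
        'Sample', ['name', 'type', 'unit', 'volume', 'user_id',
                   'project_id', 'resource_id', 'timestamp',
                   'resource_metadata', 'source', 'id'])


    def process_notification(message):
        """Yield one Sample per entry in a telemetry.api notification."""
        for s in message['payload']['samples']:
            yield Sample(name=s['counter_name'],
                         type=s['counter_type'],
                         unit=s['counter_unit'],
                         volume=s['counter_volume'],
                         user_id=s['user_id'],
                         project_id=s['project_id'],
                         resource_id=s['resource_id'],
                         timestamp=s['timestamp'],
                         resource_metadata=s['resource_metadata'],
                         source=s['source'],
                         id=s['message_id'])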
diff --git a/ceilometer/tests/unit/test_utils.py b/ceilometer/tests/unit/test_utils.py
index f64aa55f..8caa6eb1 100644
--- a/ceilometer/tests/unit/test_utils.py
+++ b/ceilometer/tests/unit/test_utils.py
@@ -84,41 +84,6 @@ class TestUtils(base.BaseTestCase):
else:
self.assertIn((k, v), expected)
- def test_restore_nesting_unested(self):
- metadata = {'a': 'A', 'b': 'B'}
- unwound = utils.restore_nesting(metadata)
- self.assertIs(metadata, unwound)
-
- def test_restore_nesting(self):
- metadata = {'a': 'A', 'b': 'B',
- 'nested:a': 'A',
- 'nested:b': 'B',
- 'nested:twice:c': 'C',
- 'nested:twice:d': 'D',
- 'embedded:e': 'E'}
- unwound = utils.restore_nesting(metadata)
- expected = {'a': 'A', 'b': 'B',
- 'nested': {'a': 'A', 'b': 'B',
- 'twice': {'c': 'C', 'd': 'D'}},
- 'embedded': {'e': 'E'}}
- self.assertEqual(expected, unwound)
- self.assertIsNot(metadata, unwound)
-
- def test_restore_nesting_with_separator(self):
- metadata = {'a': 'A', 'b': 'B',
- 'nested.a': 'A',
- 'nested.b': 'B',
- 'nested.twice.c': 'C',
- 'nested.twice.d': 'D',
- 'embedded.e': 'E'}
- unwound = utils.restore_nesting(metadata, separator='.')
- expected = {'a': 'A', 'b': 'B',
- 'nested': {'a': 'A', 'b': 'B',
- 'twice': {'c': 'C', 'd': 'D'}},
- 'embedded': {'e': 'E'}}
- self.assertEqual(expected, unwound)
- self.assertIsNot(metadata, unwound)
-
def test_decimal_to_dt_with_none_parameter(self):
self.assertIsNone(utils.decimal_to_dt(None))
@@ -173,30 +138,3 @@ class TestUtils(base.BaseTestCase):
}
dict_to_update = utils.update_nested(original_dict, updates)
self.assertEqual(dict_to_update, expected_dict)
-
- def test_uniq(self):
- class DriverA(object):
- source = 'class_A'
- func = 'func_A'
- param = 'param_A'
-
- class DriverB(object):
- source = 'class_A'
- func = 'func_A'
- param = 'param_B'
-
- class DriverC(object):
- source = 'class_A'
- func = 'func_C'
- param = 'param_C'
-
- driver_list = [DriverA(), DriverB(), DriverC()]
-
- uniq_driver_a = utils.uniq(driver_list, ['source'])
- self.assertEqual(len(uniq_driver_a), 1)
-
- uniq_driver_b = utils.uniq(driver_list, ['source', 'func'])
- self.assertEqual(len(uniq_driver_b), 2)
-
- uniq_driver_c = utils.uniq(driver_list, ['source', 'func', 'param'])
- self.assertEqual(len(uniq_driver_c), 3)
diff --git a/ceilometer/utils.py b/ceilometer/utils.py
index 26bbad8d..ae734b69 100644
--- a/ceilometer/utils.py
+++ b/ceilometer/utils.py
@@ -98,19 +98,6 @@ def recursive_keypairs(d, separator=':'):
yield name, value
-def restore_nesting(d, separator=':'):
- """Unwinds a flattened dict to restore nesting."""
- d = copy.copy(d) if any([separator in k for k in d.keys()]) else d
- for k, v in d.copy().items():
- if separator in k:
- top, rem = k.split(separator, 1)
- nest = d[top] if isinstance(d.get(top), dict) else {}
- nest[rem] = v
- d[top] = restore_nesting(nest, separator)
- del d[k]
- return d
-
-
def dt_to_decimal(utc):
"""Datetime to Decimal.
@@ -173,21 +160,6 @@ def dict_to_keyval(value, key_base=None):
yield key_gen, v
-def lowercase_keys(mapping):
- """Converts the values of the keys in mapping to lowercase."""
- items = mapping.items()
- for key, value in items:
- del mapping[key]
- mapping[key.lower()] = value
-
-
-def lowercase_values(mapping):
- """Converts the values in the mapping dict to lowercase."""
- items = mapping.items()
- for key, value in items:
- mapping[key] = value.lower()
-
-
def update_nested(original_dict, updates):
"""Updates the leaf nodes in a nest dict.
@@ -203,18 +175,6 @@ def update_nested(original_dict, updates):
return dict_to_update
-def uniq(dupes, attrs):
- """Exclude elements of dupes with a duplicated set of attribute values."""
- key = lambda d: '/'.join([getattr(d, a) or '' for a in attrs])
- keys = []
- deduped = []
- for d in dupes:
- if key(d) not in keys:
- deduped.append(d)
- keys.append(key(d))
- return deduped
-
-
def hash_of_set(s):
return str(hash(frozenset(s)))
diff --git a/devstack/apache-ceilometer.template b/devstack/apache-ceilometer.template
deleted file mode 100644
index 79f14c38..00000000
--- a/devstack/apache-ceilometer.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %PORT%
-
-<VirtualHost *:%PORT%>
- WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV%
- WSGIProcessGroup ceilometer-api
- WSGIScriptAlias / %WSGIAPP%
- WSGIApplicationGroup %{GLOBAL}
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/%APACHE_NAME%/ceilometer.log
- CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 70923fad..c3e9acc3 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -57,10 +57,6 @@ function is_ceilometer_enabled {
return 1
}
-function ceilometer_service_url {
- echo "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT"
-}
-
function gnocchi_service_url {
echo "$GNOCCHI_SERVICE_PROTOCOL://$GNOCCHI_SERVICE_HOST/metric"
@@ -110,9 +106,6 @@ function _ceilometer_config_apache_wsgi {
local apache_version=$(get_apache_version)
local venv_path=""
- # Copy proxy vhost and wsgi file
- sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app
-
if [[ ${USE_VENV} = True ]]; then
venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages"
fi
@@ -169,15 +162,6 @@ function ceilometer_create_accounts {
create_service_user "ceilometer" "admin"
- if is_service_enabled ceilometer-api; then
- get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service"
- get_or_create_endpoint "metering" \
- "$REGION_NAME" \
- "$(ceilometer_service_url)" \
- "$(ceilometer_service_url)" \
- "$(ceilometer_service_url)"
- fi
-
if is_service_enabled swift; then
# Ceilometer needs ResellerAdmin role to access Swift account stats.
get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_PROJECT_NAME
@@ -236,28 +220,9 @@ function preinstall_ceilometer {
echo_summary "Preinstall not in virtualenv context. Skipping."
}
-# Remove WSGI files, disable and remove Apache vhost file
-function _ceilometer_cleanup_apache_wsgi {
- if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- sudo rm -f "$CEILOMETER_WSGI_DIR"/*
- sudo rmdir "$CEILOMETER_WSGI_DIR"
- sudo rm -f $(apache_site_config_for ceilometer)
- fi
-}
-
-function _ceilometer_drop_database {
- if is_service_enabled ceilometer-api ; then
- if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then
- mongo ceilometer --eval "db.dropDatabase();"
- fi
- fi
-}
-
# cleanup_ceilometer() - Remove residual data files, anything left over
# from previous runs that a clean run would need to clean up
function cleanup_ceilometer {
- _ceilometer_cleanup_apache_wsgi
- _ceilometer_drop_database
sudo rm -f "$CEILOMETER_CONF_DIR"/*
sudo rmdir "$CEILOMETER_CONF_DIR"
}
@@ -311,8 +276,6 @@ function _ceilometer_configure_storage_backend {
echo ' - panko://' >> $CEILOMETER_CONF_DIR/event_pipeline.yaml
fi
fi
-
- _ceilometer_drop_database
}
# Configure Ceilometer
@@ -342,9 +305,6 @@ function configure_ceilometer {
# with rootwrap installation done elsewhere and also clobber
# ceilometer.conf settings that have already been made.
# Anyway, explicit is better than implicit.
- for conffile in policy.json api_paste.ini; do
- cp $CEILOMETER_DIR/etc/ceilometer/$conffile $CEILOMETER_CONF_DIR
- done
cp $CEILOMETER_DIR/etc/ceilometer/polling_all.yaml $CEILOMETER_CONF_DIR/polling.yaml
cp $CEILOMETER_DIR/ceilometer/pipeline/data/*.yaml $CEILOMETER_CONF_DIR
@@ -371,11 +331,6 @@ function configure_ceilometer {
configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR
- # Configure storage
- if is_service_enabled ceilometer-api; then
- _ceilometer_configure_storage_backend
- fi
-
if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere
iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP"
@@ -383,9 +338,7 @@ function configure_ceilometer {
iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
fi
- if is_service_enabled ceilometer-api && [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- _ceilometer_config_apache_wsgi
- fi
+ _ceilometer_configure_storage_backend
if is_service_enabled ceilometer-aipmi; then
# Configure rootwrap for the ipmi agent
@@ -398,15 +351,6 @@ function init_ceilometer {
# Create cache dir
sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
rm -f $CEILOMETER_AUTH_CACHE_DIR/*
-
- if is_service_enabled ceilometer-api; then
- if is_service_enabled mysql postgresql ; then
- if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then
- recreate_database ceilometer
- $CEILOMETER_BIN_DIR/ceilometer-upgrade --skip-gnocchi-resource-types
- fi
- fi
- fi
}
# Install Ceilometer.
@@ -421,10 +365,6 @@ function install_ceilometer {
! [[ $DEVSTACK_PLUGINS =~ 'gnocchi' ]] && [ "$CEILOMETER_BACKEND" = 'gnocchi' ] && install_gnocchi
- if is_service_enabled ceilometer-api; then
- _ceilometer_prepare_storage_backend
- fi
-
if is_service_enabled ceilometer-acompute ; then
_ceilometer_prepare_virt_drivers
fi
@@ -465,15 +405,6 @@ function start_ceilometer {
run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF"
run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF"
- if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then
- run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api --port $CEILOMETER_SERVICE_PORT"
- elif is_service_enabled ceilometer-api; then
- enable_apache_site ceilometer
- restart_apache_server
- tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log
- tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log
- fi
-
# run the notification agent after restarting apache as it needs
# operational keystone if using gnocchi
run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF"
@@ -490,14 +421,6 @@ function start_ceilometer {
# stop_ceilometer() - Stop running processes
function stop_ceilometer {
- if is_service_enabled ceilometer-api ; then
- if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then
- disable_apache_site ceilometer
- restart_apache_server
- else
- stop_process ceilometer-api
- fi
- fi
# Kill the ceilometer screen windows
for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification; do
diff --git a/devstack/settings b/devstack/settings
index 7c2bfb3c..b2768eed 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -11,11 +11,8 @@ CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf
CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer}
-# Set up no backend
CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-gnocchi}
-if [ "$CEILOMETER_BACKEND" = "es" ] || [ "$CEILOMETER_BACKEND" = "mysql" ] || [ "$CEILOMETER_BACKEND" = "postgresql" ] || [ "$CEILOMETER_BACKEND" = "mongodb" ]; then
- enable_service ceilometer-api
-elif [ "$CEILOMETER_BACKEND" = "gnocchi" ]; then
+if [ "$CEILOMETER_BACKEND" = "gnocchi" ]; then
enable_service gnocchi-api gnocchi-metricd
fi
@@ -38,12 +35,6 @@ GNOCCHI_WSGI_DIR=${GNOCCHI_WSGI_DIR:-/var/www/gnocchi}
# when the gate job has overridden this.
CEILOMETER_ALARM_GRANULARITY=${CEILOMETER_ALARM_GRANULARITY:-60}
-# Ceilometer connection info.
-CEILOMETER_SERVICE_PROTOCOL=http
-CEILOMETER_SERVICE_HOST=${CEILOMETER_SERVICE_HOST:-${SERVICE_HOST}}
-CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
-CEILOMETER_USE_MOD_WSGI=${CEILOMETER_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
-
# To enable OSprofiler change value of this variable to "notifications,profiler"
CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications}
diff --git a/devstack/upgrade/settings b/devstack/upgrade/settings
index bd7a20b8..0324704f 100644
--- a/devstack/upgrade/settings
+++ b/devstack/upgrade/settings
@@ -1,7 +1,7 @@
register_project_for_upgrade ceilometer
devstack_localrc base enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
-devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-api tempest
+devstack_localrc base enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest
devstack_localrc target enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer
-devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-api tempest
+devstack_localrc target enable_service ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification tempest
diff --git a/devstack/upgrade/shutdown.sh b/devstack/upgrade/shutdown.sh
index 615a3737..ccd42bac 100755
--- a/devstack/upgrade/shutdown.sh
+++ b/devstack/upgrade/shutdown.sh
@@ -22,6 +22,6 @@ stop_ceilometer
# ensure everything is stopped
-SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-api"
+SERVICES_DOWN="ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification"
ensure_services_stopped $SERVICES_DOWN
diff --git a/devstack/upgrade/upgrade.sh b/devstack/upgrade/upgrade.sh
index 12fef542..e6ff2b59 100755
--- a/devstack/upgrade/upgrade.sh
+++ b/devstack/upgrade/upgrade.sh
@@ -79,8 +79,7 @@ start_ceilometer
# the ipmi is not ready. The ceilometer-polling should fail.
ensure_services_started "ceilometer-polling --polling-namespaces compute" \
"ceilometer-polling --polling-namespaces central" \
- ceilometer-agent-notification \
- ceilometer-api
+ ceilometer-agent-notification
# Save mongodb state (replace with snapshot)
if grep -q 'connection *= *mongo' /etc/ceilometer/ceilometer.conf; then
diff --git a/doc/source/admin/index.rst b/doc/source/admin/index.rst
index da6c1cb1..fe192d72 100644
--- a/doc/source/admin/index.rst
+++ b/doc/source/admin/index.rst
@@ -12,6 +12,5 @@
telemetry-troubleshooting-guide
telemetry-data-pipelines
telemetry-data-collection
- telemetry-data-retrieval
telemetry-best-practices
telemetry-events
diff --git a/doc/source/admin/telemetry-best-practices.rst b/doc/source/admin/telemetry-best-practices.rst
index 16e9be97..ec7b1924 100644
--- a/doc/source/admin/telemetry-best-practices.rst
+++ b/doc/source/admin/telemetry-best-practices.rst
@@ -44,85 +44,3 @@ Data collection
central and compute agents as necessary. The agents are designed to scale
horizontally. For more information refer to the `high availability guide
<https://docs.openstack.org/ha-guide/controller-ha-telemetry.html>`_.
-
-Data storage
-------------
-
-.. note::
-
- As of Newton, data storage is not recommended in ceilometer. Alarm,
- metric, and event data should be stored in aodh, gnocchi, and panko
- respectively. The following details only relate to ceilometer's legacy
- API.
-
-#. We recommend that you avoid open-ended queries. To get better
-   performance, use reasonable time ranges or other query constraints
-   when retrieving measurements.
-
- For example, this open-ended query might return an unpredictable amount
- of data:
-
- .. code-block:: console
-
- $ ceilometer sample-list --meter cpu -q 'resource_id=INSTANCE_ID_1'
-
-   In contrast, this well-formed query returns a more reasonable amount
-   of data and therefore performs better:
-
- .. code-block:: console
-
- $ ceilometer sample-list --meter cpu -q 'resource_id=INSTANCE_ID_1;timestamp > 2015-05-01T00:00:00;timestamp < 2015-06-01T00:00:00'
-
- .. note::
-
- The number of items returned will be
- restricted to the value defined by ``default_api_return_limit`` in the
- ``ceilometer.conf`` configuration file. Alternatively, the value can
-      be set per query by passing the ``limit`` option in the request.
-
-#. We recommend that you install the API behind ``mod_wsgi``, as it provides
-   more settings to tweak, such as ``threads`` and ``processes`` in the case
-   of ``WSGIDaemon``.
-
- .. note::
-
- For more information on how to configure ``mod_wsgi``, see the
- `Telemetry Install Documentation
- <https://docs.openstack.org/developer/ceilometer/install/mod_wsgi.html>`__.
-
-#. The collection service provided by the Telemetry project is not intended
- to be an archival service. Set a Time to Live (TTL) value to expire data
- and minimize the database size. If you would like to keep your data for
-   a longer time period, you may consider storing it in a data warehouse
- outside of Telemetry.
-
- .. note::
-
- For more information on how to set the TTL, see
- :ref:`telemetry-expiry`.
-
-#. We recommend that you do not run MongoDB on the same node as the
- controller. Keep it on a separate node optimized for fast storage for
-   better performance. It is also advisable for the MongoDB node to have
-   plenty of memory.
-
- .. note::
-
- For more information on how much memory you need, see `MongoDB
- FAQ <http://docs.mongodb.org/manual/faq/diagnostics/#how-do-i-calculate-how-much-ram-i-need-for-my-application>`__.
-
-#. Use replica sets in MongoDB. Replica sets provide high availability
- through automatic failover. If your primary node fails, MongoDB will
- elect a secondary node to replace the primary node, and your cluster
- will remain functional.
-
- For more information on replica sets, see the `MongoDB replica sets
- docs <http://docs.mongodb.org/manual/tutorial/deploy-replica-set/>`__.
-
-#. Use sharding in MongoDB. Sharding stores data records across multiple
-   machines and is MongoDB's approach to meeting the demands of data
-   growth.
-
- For more information on sharding, see the `MongoDB sharding
- docs <http://docs.mongodb.org/manual/sharding/>`__.
-
diff --git a/doc/source/admin/telemetry-data-collection.rst b/doc/source/admin/telemetry-data-collection.rst
index 4640e77d..a7fdef7e 100644
--- a/doc/source/admin/telemetry-data-collection.rst
+++ b/doc/source/admin/telemetry-data-collection.rst
@@ -26,15 +26,6 @@ Polling
machine using SNMP, or by using the APIs of other OpenStack
services.
-RESTful API (deprecated in Ocata)
- Pushing samples via the RESTful API of Telemetry.
-
-.. note::
-
- Rather than pushing data through Ceilometer's API, it is advised to push
- directly into gnocchi. Ceilometer's API is officially deprecated as of
- Ocata.
-
Notifications
~~~~~~~~~~~~~
@@ -435,71 +426,3 @@ The list of collected meters can be found in
Do not deploy both the IPMI agent and the Bare metal service on one
compute node. If ``conductor.send_sensor_data`` is set, this
misconfiguration causes duplicated IPMI sensor samples.
-
-Send samples to Telemetry
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-.. note::
-
- Sample pushing via the API is deprecated in Ocata. Measurement data should
- be pushed directly into `gnocchi's API <http://gnocchi.xyz/rest.html>`__.
-
-While most parts of the data collection in the Telemetry service are
-automated, Telemetry also allows users to submit custom samples via
-the REST API.
-
-This option makes it possible to send any kind of sample without the
-need to write extra code or make configuration changes.
-
-The samples that can be sent to Telemetry are not limited to the
-existing meters. Data can be provided for any new, customer-defined
-counter by filling out all the required fields of the POST request.
-
-If the sample corresponds to an existing meter, then fields such as
-``meter-type`` and the meter name should match it accordingly.
-
-The required fields for sending a sample using the command-line client
-are:
-
-- ID of the corresponding resource. (``--resource-id``)
-
-- Name of meter. (``--meter-name``)
-
-- Type of meter. (``--meter-type``)
-
- Predefined meter types:
-
- - Gauge
-
- - Delta
-
- - Cumulative
-
-- Unit of meter. (``--meter-unit``)
-
-- Volume of sample. (``--sample-volume``)
-
-To send samples to Telemetry using the command-line client, the
-following command should be invoked:
-
-.. code-block:: console
-
- $ ceilometer sample-create -r 37128ad6-daaa-4d22-9509-b7e1c6b08697 \
- -m memory.usage --meter-type gauge --meter-unit MB --sample-volume 48
- +-------------------+--------------------------------------------+
- | Property | Value |
- +-------------------+--------------------------------------------+
- | message_id | 6118820c-2137-11e4-a429-08002715c7fb |
- | name | memory.usage |
- | project_id | e34eaa91d52a4402b4cb8bc9bbd308c1 |
- | resource_id | 37128ad6-daaa-4d22-9509-b7e1c6b08697 |
- | resource_metadata | {} |
- | source | e34eaa91d52a4402b4cb8bc9bbd308c1:openstack |
- | timestamp | 2014-08-11T09:10:46.358926 |
- | type | gauge |
- | unit | MB |
- | user_id | 679b0499e7a34ccb9d90b64208401f8e |
- | volume | 48.0 |
- +-------------------+--------------------------------------------+
-
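For comparison, the equivalent raw request against the v2 API removed by
this patch looked roughly like the sketch below; the sample keys mirror
the CLI options above, while the host, port and token are placeholders:

.. code-block:: python

    import json

    import requests

    # A one-element list of samples; keys correspond to the CLI options.
    samples = [{
        'counter_name': 'memory.usage',
        'counter_type': 'gauge',      # --meter-type
        'counter_unit': 'MB',         # --meter-unit
        'counter_volume': 48,         # --sample-volume
        'resource_id': '37128ad6-daaa-4d22-9509-b7e1c6b08697',
    }]
    resp = requests.post(
        'http://controller:8777/v2/meters/memory.usage',
        headers={'X-Auth-Token': 'ADMIN_TOKEN',
                 'Content-Type': 'application/json'},
        data=json.dumps(samples))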
diff --git a/doc/source/admin/telemetry-data-retrieval.rst b/doc/source/admin/telemetry-data-retrieval.rst
deleted file mode 100644
index d6806f9e..00000000
--- a/doc/source/admin/telemetry-data-retrieval.rst
+++ /dev/null
@@ -1,493 +0,0 @@
-==============
-Data retrieval
-==============
-
-.. warning::
-
- Accessing meters through the v2 API of Ceilometer is deprecated in Ocata and
- has been unmaintained for a few cycles prior. We recommend storing metric
- data in a time-series optimized database such as Gnocchi_ and event data in
- Panko_.
-
-.. _Gnocchi: http://gnocchi.xyz/
-.. _Panko: https://docs.openstack.org/panko/latest/
-
-The Telemetry service offers several mechanisms through which the persisted
-data can be accessed. As described in :ref:`telemetry-system-architecture` and
-in :ref:`telemetry-data-collection`, the collected information can be stored in
-one or more database back ends, which are hidden by the Telemetry RESTful API.
-
-.. note::
-
- It is highly recommended not to access the database directly and
- read or modify any data in it. The API layer hides all the changes
- in the actual database schema and provides a standard interface to
- expose the samples, alarms and so forth.
-
-Telemetry v2 API
-~~~~~~~~~~~~~~~~
-
-The Telemetry service provides a RESTful API, from which the collected
-samples and all the related information can be retrieved, such as the
-list of meters, alarm definitions and so forth.
-
-The Telemetry API URL can be retrieved from the service catalog provided
-by OpenStack Identity, which is populated during the installation
-process. The API access needs a valid token and proper permission to
-retrieve data, as described in :ref:`telemetry-users-roles-projects`.
-
-Further information about the available API endpoints can be found in
-the `Telemetry API Reference
-<https://developer.openstack.org/api-ref-telemetry-v2.html>`__.
-
-Query
------
-
-The API provides some additional functionality, such as querying the
-collected data set. For the samples and alarms API endpoints, both
-simple and complex query styles are available, whereas for the other
-endpoints only simple queries are supported.
-
-After the query parameters have been validated, the processing is done
-on the database side for most database back ends in order to achieve
-better performance.
-
-**Simple query**
-
-Many of the API endpoints accept a query filter argument, which should
-be a list of data structures that consist of the following items:
-
-- ``field``
-
-- ``op``
-
-- ``value``
-
-- ``type``
-
-Regardless of the endpoint the filter is applied on, it will
-always target the fields of the `Sample type
-<https://docs.openstack.org/ceilometer/latest/webapi/v2.html#Sample>`__.
-
-Several fields of the API endpoints accept shorter names than the ones
-defined in the reference. The API will do the transformation internally
-and return the output with the fields that are listed in the `API reference
-<https://docs.openstack.org/ceilometer/latest/webapi/v2.html>`__.
-The fields are the following:
-
-- ``project_id``: project
-
-- ``resource_id``: resource
-
-- ``user_id``: user
-
-When a filter argument contains multiple constraints of the above form,
-a logical ``AND`` relation between them is implied.
-
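To make the filter structure concrete, here is a rough sketch of such a
simple query issued directly against the (now removed) v2 API with the
``requests`` library; the host, token and parameter encoding are
assumptions for illustration only:

.. code-block:: python

    import requests

    # field/op/value triples; multiple constraints imply a logical AND,
    # as described above.
    params = {
        'q.field': ['resource_id', 'timestamp'],
        'q.op': ['eq', 'gt'],
        'q.value': ['INSTANCE_ID', '2015-05-01T00:00:00'],
    }
    resp = requests.get('http://controller:8777/v2/meters/cpu',
                        headers={'X-Auth-Token': 'ADMIN_TOKEN'},
                        params=params)
    samples = resp.json()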
-.. _complex-query:
-
-**Complex query**
-
-The filter expressions of the complex query feature operate on the
-fields of ``Sample``, ``Alarm`` and ``AlarmChange`` types. The following
-comparison operators are supported:
-
-- ``=``
-
-- ``!=``
-
-- ``<``
-
-- ``<=``
-
-- ``>``
-
-- ``>=``
-
-The following logical operators can be used:
-
-- ``and``
-
-- ``or``
-
-- ``not``
-
-.. note::
-
-   The ``not`` operator has different behavior in MongoDB and in the
-   SQLAlchemy-based database engines. If the ``not`` operator is
-   applied to a non-existent metadata field, then the result depends on
-   the database engine. In the case of MongoDB, it will return every
-   sample, as the ``not`` operator evaluates to true for every sample
-   where the given field does not exist. On the other hand, the
-   SQL-based database engine will return an empty result because of
-   the underlying ``join`` operation.
-
-Complex query supports specifying a list of ``orderby`` expressions.
-This means that the result of the query can be ordered based on the
-field names provided in this list. When multiple keys are defined for
-the ordering, these will be applied sequentially in the order of the
-specification. The second expression will be applied to the groups for
-which the values of the first expression are the same. The ordering can
-be ascending or descending.
-
-The number of returned items can be bounded using the ``limit`` option.
-
-The ``filter``, ``orderby`` and ``limit`` fields are optional.
-
-.. note::
-
- As opposed to the simple query, complex query is available via a
- separate API endpoint. For more information see the `Telemetry v2 Web API
- Reference <https://docs.openstack.org/ceilometer/latest/webapi/v2.html#v2-web-api>`__.
-
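A sketch of the request body such a complex query used, matching the
``filter``, ``orderby`` and ``limit`` fields described above; the endpoint
path and the JSON-string encoding of the fields are assumptions for
illustration, and the endpoint itself is removed by this patch:

.. code-block:: python

    import json

    import requests

    body = {
        # filter and orderby were passed as JSON-encoded strings.
        'filter': json.dumps({'and': [
            {'=': {'resource': 'INSTANCE_ID'}},
            {'>': {'timestamp': '2015-05-01T00:00:00'}},
        ]}),
        'orderby': json.dumps([{'timestamp': 'desc'}]),
        'limit': 100,
    }
    resp = requests.post('http://controller:8777/v2/query/samples',
                         headers={'X-Auth-Token': 'ADMIN_TOKEN',
                                  'Content-Type': 'application/json'},
                         data=json.dumps(body))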
-Statistics
-----------
-
-The sample data can be used in various ways for several purposes, such
-as billing or profiling. In external systems the data is often used in the
-form of aggregated statistics. The Telemetry API provides several
-built-in functions to make some basic calculations available without any
-additional coding.
-
-Telemetry supports the following statistics and aggregation functions:
-
-``avg``
- Average of the sample volumes over each period.
-
-``cardinality``
- Count of distinct values in each period identified by a key
- specified as the parameter of this aggregate function. The supported
- parameter values are:
-
- - ``project_id``
-
- - ``resource_id``
-
- - ``user_id``
-
-.. note::
-
- The ``aggregate.param`` option is required.
-
-``count``
- Number of samples in each period.
-
-``max``
- Maximum of the sample volumes in each period.
-
-``min``
- Minimum of the sample volumes in each period.
-
-``stddev``
- Standard deviation of the sample volumes in each period.
-
-``sum``
- Sum of the sample volumes over each period.
-
-The simple query and the statistics functionality can be used together
-in a single API request.
-
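As an illustration of combining both, the sketch below requests hourly
``cardinality`` statistics over a simple-query time filter; the parameter
names follow the aggregate description above, but the exact encoding and
endpoint are assumptions, and the endpoint is removed by this patch:

.. code-block:: python

    import requests

    params = {
        # Simple-query constraint on the time range.
        'q.field': ['timestamp'],
        'q.op': ['gt'],
        'q.value': ['2015-05-01T00:00:00'],
        'period': 3600,                      # one bucket per hour
        'aggregate.func': ['cardinality'],
        'aggregate.param': ['resource_id'],  # required for cardinality
    }
    resp = requests.get(
        'http://controller:8777/v2/meters/cpu_util/statistics',
        headers={'X-Auth-Token': 'ADMIN_TOKEN'}, params=params)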
-Telemetry command-line client and SDK
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Telemetry service provides a command-line client, through which the
-collected data is available, as are the alarm definition and retrieval
-options. The client uses the Telemetry RESTful API to execute the
-requested operations.
-
-To be able to use the :command:`ceilometer` command, the
-python-ceilometerclient package needs to be installed and configured
-properly. For details about the installation process, see the `Telemetry
-chapter <https://docs.openstack.org/project-install-guide/telemetry/ocata/>`__
-in the Installation Tutorials and Guides.
-
-.. note::
-
- The Telemetry service captures the user-visible resource usage data.
-   Therefore the database will not contain any data unless these
-   resources exist, such as VM images in the OpenStack Image service.
-
-Like other OpenStack command-line clients, the ``ceilometer`` client
-uses OpenStack Identity for authentication. The proper credentials and
-the ``--auth_url`` parameter have to be defined via command-line
-parameters or environment variables.
-
-This section provides some examples without aiming for completeness.
-These commands can be used, for instance, to validate an installation
-of Telemetry.
-
-To retrieve the list of collected meters, the following command should
-be used:
-
-.. code-block:: console
-
- $ ceilometer meter-list
- +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+
- | Name | Type | Unit | Resource ID | User ID | Project ID |
- +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+
- | cpu | cumulative | ns | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | cpu | cumulative | ns | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | cpu_util | gauge | % | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | cpu_util | gauge | % | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.device.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07-hdd | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.device.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07-vda | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.device.read.bytes | cumulative | B | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b-hdd | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.device.read.bytes | cumulative | B | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b-vda | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | ... |
- +------------------------+------------+------+------------------------------------------+----------------------------------+----------------------------------+
-
-The :command:`ceilometer` command was run with ``admin`` rights, which means
-that all the data is accessible in the database. For more information
-about access rights, see :ref:`telemetry-users-roles-projects`. As can be
-seen in the above example, two VM instances exist in the system, as there
-are VM-instance-related meters at the top of the result list. The
-existence of these meters does not indicate that these instances were
-running at the time of the request. The result contains the currently
-collected meters per resource, in ascending order based on the name of the
-meter.
-
-Samples are collected for each meter that is present in the list of
-meters, except in the case of instances that are not running or have been
-deleted from the OpenStack Compute database. If an instance no longer
-exists and there is a ``time_to_live`` value set in the
-``ceilometer.conf`` configuration file, then a group of samples is
-deleted in each expiration cycle. When the last sample is deleted for a
-meter, the database can be cleaned up by running ceilometer-expirer and
-the meter will no longer be present in the list above. For more
-information about the expiration procedure, see :ref:`telemetry-expiry`.
-
-The Telemetry API supports simple queries on the meter endpoint. The
-query functionality has the following syntax:
-
-.. code-block:: console
-
- --query <field1><operator1><value1>;...;<field_n><operator_n><value_n>
-
-The following command needs to be invoked to request the meters of one
-VM instance:
-
-.. code-block:: console
-
- $ ceilometer meter-list --query resource=bb52e52b-1e42-4751-b3ac-45c52d83ba07
- +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+
- | Name | Type | Unit | Resource ID | User ID | Project ID |
- +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+
- | cpu | cumulative | ns | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | cpu_util | gauge | % | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | cpu_l3_cache | gauge | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.ephemeral.size | gauge | GB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.read.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.read.bytes.rate | gauge | B/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.read.requests | cumulative | request | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.read.requests.rate | gauge | request/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.root.size | gauge | GB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.write.bytes | cumulative | B | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.write.bytes.rate | gauge | B/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.write.requests | cumulative | request | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | disk.write.requests.rate| gauge | request/s | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | instance | gauge | instance | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | instance:m1.tiny | gauge | instance | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | memory | gauge | MB | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- | vcpus | gauge | vcpu | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | b6e62aad26174382bc3781c12fe413c8 | cbfa8e3dfab64a27a87c8e24ecd5c60f |
- +-------------------------+------------+-----------+--------------------------------------+----------------------------------+----------------------------------+
-
-As described above, either the whole set of samples stored for a meter
-can be retrieved, or the result set can be filtered by using one of
-the available query types. The request for all the samples of the
-``cpu`` meter without any additional filtering looks like the following:
-
-.. code-block:: console
-
- $ ceilometer sample-list --meter cpu
- +--------------------------------------+-------+------------+------------+------+---------------------+
- | Resource ID | Meter | Type | Volume | Unit | Timestamp |
- +--------------------------------------+-------+------------+------------+------+---------------------+
- | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.4863e+11 | ns | 2014-08-31T11:17:03 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7848e+11 | ns | 2014-08-31T11:17:03 |
- | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.4811e+11 | ns | 2014-08-31T11:07:05 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7797e+11 | ns | 2014-08-31T11:07:05 |
- | c8d2e153-a48f-4cec-9e93-86e7ac6d4b0b | cpu | cumulative | 5.3589e+11 | ns | 2014-08-31T10:27:19 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6397e+11 | ns | 2014-08-31T10:27:19 |
- | ... |
- +--------------------------------------+-------+------------+------------+------+---------------------+
-
-The result set of the request contains the samples for both instances
-ordered by the timestamp field in the default descending order.
-
-The simple query makes it possible to retrieve only a subset of the
-collected samples. The following command can be executed to request the
-``cpu`` samples of only one of the VM instances:
-
-.. code-block:: console
-
-   $ ceilometer sample-list --meter cpu --query resource=bb52e52b-1e42-4751-b3ac-45c52d83ba07
- +--------------------------------------+------+------------+------------+------+---------------------+
- | Resource ID | Name | Type | Volume | Unit | Timestamp |
- +--------------------------------------+------+------------+------------+------+---------------------+
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7906e+11 | ns | 2014-08-31T11:27:08 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7848e+11 | ns | 2014-08-31T11:17:03 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.7797e+11 | ns | 2014-08-31T11:07:05 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6397e+11 | ns | 2014-08-31T10:27:19 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.6207e+11 | ns | 2014-08-31T10:17:03 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 5.3831e+11 | ns | 2014-08-31T08:41:57 |
- | ... |
- +--------------------------------------+------+------------+------------+------+---------------------+
-
-As can be seen in the output above, the result set contains samples
-for only one of the two instances.
-
-The :command:`ceilometer query-samples` command is used to execute rich
-queries. This command accepts the following parameters:
-
-``--filter``
- Contains the filter expression for the query in the form of:
- ``{complex_op: [{simple_op: {field_name: value}}]}``.
-
-``--orderby``
- Contains the list of ``orderby`` expressions in the form of:
- ``[{field_name: direction}, {field_name: direction}]``.
-
-``--limit``
- Specifies the maximum number of samples to return.
-
-For more information about complex queries see
-:ref:`Complex query <complex-query>`.
-
-As the complex query functionality provides the possibility of using
-complex operators, it is possible to retrieve a subset of samples for a
-given VM instance. To request the first six samples for the ``cpu``
-and ``disk.read.bytes`` meters, the following command should be invoked:
-
-.. code-block:: none
-
- $ ceilometer query-samples --filter '{"and": \
- [{"=":{"resource":"bb52e52b-1e42-4751-b3ac-45c52d83ba07"}},{"or":[{"=":{"counter_name":"cpu"}}, \
- {"=":{"counter_name":"disk.read.bytes"}}]}]}' --orderby '[{"timestamp":"asc"}]' --limit 6
- +--------------------------------------+-----------------+------------+------------+------+---------------------+
- | Resource ID | Meter | Type | Volume | Unit | Timestamp |
- +--------------------------------------+-----------------+------------+------------+------+---------------------+
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 385334.0 | B | 2014-08-30T13:00:46 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.2132e+11 | ns | 2014-08-30T13:00:47 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.4295e+11 | ns | 2014-08-30T13:10:51 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 601438.0 | B | 2014-08-30T13:10:51 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | disk.read.bytes | cumulative | 601438.0 | B | 2014-08-30T13:20:33 |
- | bb52e52b-1e42-4751-b3ac-45c52d83ba07 | cpu | cumulative | 1.4795e+11 | ns | 2014-08-30T13:20:34 |
- +--------------------------------------+-----------------+------------+------------+------+---------------------+
-
-Ceilometer also captures data as events, which represent the state of a
-resource. Refer to ``/telemetry-events`` for more information regarding
-events.
-
-To retrieve a list of recent events that occurred in the system, the
-following command can be executed:
-
-.. code-block:: console
-
- $ ceilometer event-list
- +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+
- | Message ID | Event Type | Generated | Traits |
- +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+
- | dfdb87b6-92c6-4d40-b9b5-ba308f304c13 | image.create | 2015-09-24T22:17:39.498888 | +---------+--------+-----------------+ |
- | | | | | name | type | value | |
- | | | | +---------+--------+-----------------+ |
- | | | | | service | string | image.localhost | |
- | | | | +---------+--------+-----------------+ |
- | 84054bc6-2ae6-4b93-b5e7-06964f151cef | image.prepare | 2015-09-24T22:17:39.594192 | +---------+--------+-----------------+ |
- | | | | | name | type | value | |
- | | | | +---------+--------+-----------------+ |
- | | | | | service | string | image.localhost | |
- | | | | +---------+--------+-----------------+ |
- | 2ec99c2c-08ee-4079-bf80-27d4a073ded6 | image.update | 2015-09-24T22:17:39.578336 | +-------------+--------+--------------------------------------+ |
- | | | | | name | type | value | |
- | | | | +-------------+--------+--------------------------------------+ |
- | | | | | created_at | string | 2015-09-24T22:17:39Z | |
- | | | | | name | string | cirros-0.3.5-x86_64-uec-kernel | |
- | | | | | project_id | string | 56ffddea5b4f423496444ea36c31be23 | |
- | | | | | resource_id | string | 86eb8273-edd7-4483-a07c-002ff1c5657d | |
- | | | | | service | string | image.localhost | |
- | | | | | status | string | saving | |
- | | | | | user_id | string | 56ffddea5b4f423496444ea36c31be23 | |
- | | | | +-------------+--------+--------------------------------------+ |
- +--------------------------------------+---------------+----------------------------+-----------------------------------------------------------------+
-
-.. note::
-
-   In Liberty, the data returned corresponds to the role and user. Non-admin
-   users will only see events that are scoped to them. Admin users will see
-   all events related to the project they administer, as well as all
-   unscoped events.
-
-Similar to querying meters, additional filter parameters can be given to
-retrieve specific events:
-
-.. code-block:: console
-
- $ ceilometer event-list -q 'event_type=compute.instance.exists;instance_type=m1.tiny'
- +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+
- | Message ID | Event Type | Generated | Traits |
- +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+
- | 134a2ab3-6051-496c-b82f-10a3c367439a | compute.instance.exists | 2015-09-25T03:00:02.152041 | +------------------------+----------+------------------------------------------+ |
- | | | | | name | type | value | |
- | | | | +------------------------+----------+------------------------------------------+ |
- | | | | | audit_period_beginning | datetime | 2015-09-25T02:00:00 | |
- | | | | | audit_period_ending | datetime | 2015-09-25T03:00:00 | |
- | | | | | disk_gb | integer | 1 | |
- | | | | | ephemeral_gb | integer | 0 | |
- | | | | | host | string | localhost.localdomain | |
- | | | | | instance_id | string | 2115f189-c7f1-4228-97bc-d742600839f2 | |
- | | | | | instance_type | string | m1.tiny | |
- | | | | | instance_type_id | integer | 2 | |
- | | | | | launched_at | datetime | 2015-09-24T22:24:56 | |
- | | | | | memory_mb | integer | 512 | |
- | | | | | project_id | string | 56ffddea5b4f423496444ea36c31be23 | |
- | | | | | request_id | string | req-c6292b21-bf98-4a1d-b40c-cebba4d09a67 | |
- | | | | | root_gb | integer | 1 | |
- | | | | | service | string | compute | |
- | | | | | state | string | active | |
- | | | | | tenant_id | string | 56ffddea5b4f423496444ea36c31be23 | |
- | | | | | user_id | string | 0b3d725756f94923b9d0c4db864d06a9 | |
- | | | | | vcpus | integer | 1 | |
- | | | | +------------------------+----------+------------------------------------------+ |
- +--------------------------------------+-------------------------+----------------------------+----------------------------------------------------------------------------------+
-
-.. note::
-
- As of the Liberty release, the number of items returned will be
- restricted to the value defined by ``default_api_return_limit`` in the
- ``ceilometer.conf`` configuration file. Alternatively, the value can
- be set per query by passing the ``limit`` option in the request.
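-
-   For example, to cap a single request at 100 events via the API (an
-   illustrative sketch; the ``limit`` query parameter is assumed here):
-
-   .. code-block:: none
-
-      GET /v2/events?limit=100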
-
-
-Telemetry Python bindings
--------------------------
-
-The command-line client library provides Python bindings so that the
-Telemetry API can be used directly from Python programs.
-
-The first step in setting up the client is to create a client instance
-with the proper credentials:
-
-.. code-block:: python
-
- >>> import ceilometerclient.client
-   >>> cclient = ceilometerclient.client.get_client(
-   ...     VERSION, username=USERNAME, password=PASSWORD,
-   ...     tenant_name=PROJECT_NAME, auth_url=AUTH_URL)
-
-The ``VERSION`` parameter can be ``1`` or ``2``, specifying the API
-version to be used.
-
-The method calls look like the following:
-
-.. code-block:: python
-
- >>> cclient.meters.list()
- [<Meter ...>, ...]
-
- >>> cclient.samples.list()
- [<Sample ...>, ...]
-
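-Other managers work in the same way. For example, statistics for a meter can
-be requested as follows (a sketch, assuming a ``statistics`` manager that
-accepts a ``meter_name`` argument):
-
-.. code-block:: python
-
-   >>> cclient.statistics.list(meter_name='cpu_util')
-   [<Statistics ...>, ...]
-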
-For further details about the python-ceilometerclient package, see the
-`Python bindings to the OpenStack Ceilometer
-API <https://docs.openstack.org/python-ceilometerclient/latest/>`__
-reference.
diff --git a/doc/source/admin/telemetry-system-architecture.rst b/doc/source/admin/telemetry-system-architecture.rst
index 25ca4be8..55ca2380 100644
--- a/doc/source/admin/telemetry-system-architecture.rst
+++ b/doc/source/admin/telemetry-system-architecture.rst
@@ -10,11 +10,6 @@ database, or provide an API service for handling incoming requests.
The Telemetry service is built from the following agents and services:
-ceilometer-api (deprecated in Ocata)
- Presents aggregated metering data to consumers (such as billing
- engines and analytics tools). Alarm, Meter and Event APIs are now handled
- by aodh, gnocchi, and panko services respectively.
-
ceilometer-polling
Polls for different kinds of meter data by using the polling
plug-ins (pollsters) registered in different namespaces. It provides a
@@ -40,9 +35,8 @@ ceilometer-collector (deprecated in Ocata)
agents: ``ceilometer-agent-central``, ``ceilometer-agent-compute``,
and ``ceilometer-agent-ipmi``.
- 2. The ``ceilometer-api`` and ``ceilometer-collector`` are no longer
- supported since the Ocata release. Storage and API are provided by
- gnocchi, aodh, and panko services.
+ 2. The ``ceilometer-collector`` is no longer supported since the Ocata
+ release. Storage is provided by gnocchi, aodh, and panko services.
Except for the ``ceilometer-polling`` agents polling the ``compute`` or
``ipmi`` namespaces, all the other services are placed on one or more
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 10336f5b..8fad59b2 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -41,11 +41,8 @@ os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings'
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
- 'wsmeext.sphinxext',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
- 'sphinxcontrib.pecanwsme.rest',
- 'sphinxcontrib.httpdomain',
'oslo_config.sphinxconfiggen',
]
@@ -53,8 +50,6 @@ config_generator_config_file = os.path.join(ROOT,
'etc/ceilometer/ceilometer-config-generator.conf')
sample_config_basename = '_static/ceilometer'
-wsme_protocols = ['restjson', 'restxml']
-
todo_include_todos = True
# The suffix of source filenames.
diff --git a/doc/source/contributor/install/manual.rst b/doc/source/contributor/install/manual.rst
index e76bc9aa..f631e23d 100644
--- a/doc/source/contributor/install/manual.rst
+++ b/doc/source/contributor/install/manual.rst
@@ -234,18 +234,6 @@ Installing the Polling Agent
$ ceilometer-polling --polling-namespaces central,ipmi
-Installing the API Server
-=========================
-
-.. index::
- double: installing; API
-
-.. note::
-
- The Ceilometer's API service is no longer supported. Data storage should be
- handled by a separate service such as Gnocchi.
-
-
Enabling Service Notifications
==============================
diff --git a/doc/source/contributor/install/upgrade.rst b/doc/source/contributor/install/upgrade.rst
index d196eae7..1895285e 100644
--- a/doc/source/contributor/install/upgrade.rst
+++ b/doc/source/contributor/install/upgrade.rst
@@ -56,11 +56,6 @@ one pass.
After starting the first agent, you should verify that data is again being
polled. Additional agents can be added to support coordination if enabled.
-.. note::
-
- The API service can be taken offline and upgraded at any point in the
- process (if applicable).
-
Partial upgrades
================
@@ -94,17 +89,12 @@ version in time.
are upgraded, the polling agents can be changed to poll both new pollsters
AND the old ones.
-5. Upgrade the API service(s)
-
- API management is handled by WSGI so there is only ever one version of API
- service running
-
.. note::
Upgrade ordering does not matter in partial upgrade path. The only
requirement is that the database be upgraded first. It is advisable to
upgrade following the same ordering as currently described: database,
- collector, notification agent, polling agent, api.
+ collector, notification agent, polling agent.
Developer notes
diff --git a/doc/source/contributor/plugins.rst b/doc/source/contributor/plugins.rst
index 489af467..eb2dcc84 100644
--- a/doc/source/contributor/plugins.rst
+++ b/doc/source/contributor/plugins.rst
@@ -58,12 +58,10 @@ on where is polling agent running. This will load, among others, the
:class:`ceilometer.compute.pollsters.cpu.CPUPollster`, which is defined in
the folder ``ceilometer/compute/pollsters``.
-Notifications mechanism uses plugins as well, for instance
-:class:`ceilometer.telemetry.notifications.TelemetryApiPost` plugin
-which is defined in the ``ceilometer/telemetry/notifications`` folder, Though
-in most cases, this is not needed. A meter definition can be directly added
-to :file:`ceilometer/data/meters.d/meters.yaml` to match the event type. For
-more information, see the :ref:`add_new_meters` page.
+The notification mechanism uses plugins as well, though in most cases this is
+not needed. A meter definition can be directly added to
+:file:`ceilometer/data/meters.d/meters.yaml` to match the event type. For more
+information, see the :ref:`add_new_meters` page.
We are using these two existing plugins as examples as the first one provides
an example of how to interact when you need to retrieve information from an
diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst
index a87cd38c..af793c16 100644
--- a/doc/source/contributor/testing.rst
+++ b/doc/source/contributor/testing.rst
@@ -49,9 +49,9 @@ run through tox_.
.. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html
- Use a double hyphen to pass options to testr. For example, to run only tests under tests/api/v2::
+ Use a double hyphen to pass options to testr. For example, to run only tests under tests/unit/image::
- $ tox -e py27 -- api.v2
+ $ tox -e py27 -- image
To debug tests (ie. break into pdb debugger), you can use ''debug'' tox
environment. Here's an example, passing the name of a test since you'll
@@ -62,19 +62,6 @@ run through tox_.
For reference, the ``debug`` tox environment implements the instructions
here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests
-5. There is a growing suite of tests which use a tool called `gabbi`_ to
- test and validate the behavior of the Ceilometer API. These tests are run
- when using the usual ``py27`` tox target but if desired they can be run by
- themselves::
-
- $ tox -e gabbi
-
- The YAML files used to drive the gabbi tests can be found in
- ``ceilometer/tests/gabbi/gabbits``. If you are adding to or adjusting the
- API you should consider adding tests here.
-
-.. _gabbi: https://gabbi.readthedocs.org/
-
.. seealso::
* tox_
diff --git a/doc/source/glossary.rst b/doc/source/glossary.rst
index 01c458e7..fc55f9ee 100644
--- a/doc/source/glossary.rst
+++ b/doc/source/glossary.rst
@@ -25,9 +25,6 @@
measuring usage and sending the results to any number of
target using the :term:`publisher`.
- API server
- HTTP REST API service for ceilometer.
-
billing
Billing is the process to assemble bill line items into a single
per customer bill, emitting the bill to start the payment collection.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 0b4ed2e5..7f466b0a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -38,7 +38,6 @@ Overview
contributor/index
admin/index
configuration/index
- webapi/index
Appendix
========
diff --git a/doc/source/webapi/index.rst b/doc/source/webapi/index.rst
deleted file mode 100644
index 721a0809..00000000
--- a/doc/source/webapi/index.rst
+++ /dev/null
@@ -1,55 +0,0 @@
-=========
- Web API
-=========
-
-.. note::
-
- Gnocchi provides a more responsive API when statistical capabilities rather
- than full-resolution datapoints are required. The REST API for Gnocchi is
- captured here_.
-
-.. _here: http://gnocchi.xyz/rest.html
-
-.. toctree::
- :maxdepth: 2
-
- v2
-
-You can get the API version list via a request to the endpoint root path.
-For example::
-
- $ curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8777
-
-Sample response::
-
- {
- "versions": {
- "values": [
- {
- "id": "v2",
- "links": [
- {
- "href": "http://127.0.0.1:8777/v2",
- "rel": "self"
- },
- {
- "href": "https://docs.openstack.org/",
- "rel": "describedby",
- "type": "text/html"
- }
- ],
- "media-types": [
- {
- "base": "application/json",
- "type": "application/vnd.openstack.telemetry-v2+json"
- },
- {
- "base": "application/xml",
- "type": "application/vnd.openstack.telemetry-v2+xml"
- }
- ],
- "status": "stable",
- "updated": "2013-02-13T00:00:00Z"
- }
- ]
- }
- }
diff --git a/doc/source/webapi/v2.rst b/doc/source/webapi/v2.rst
deleted file mode 100644
index 801489c4..00000000
--- a/doc/source/webapi/v2.rst
+++ /dev/null
@@ -1,655 +0,0 @@
-============
- V2 Web API
-============
-
-.. note::
-
- The Ceilometer API is deprecated. Use the APIs from Aodh_ (alarms),
- Gnocchi_ (metrics), and/or Panko_ (events).
-
-.. _Aodh: https://docs.openstack.org/aodh/latest/
-.. _Panko: https://docs.openstack.org/panko/latest/
-
-
-Resources
-=========
-
-.. rest-controller:: ceilometer.api.controllers.v2.resources:ResourcesController
- :webprefix: /v2/resources
-
-.. autotype:: ceilometer.api.controllers.v2.resources.Resource
- :members:
-
-Meters
-======
-
-.. rest-controller:: ceilometer.api.controllers.v2.meters:MetersController
- :webprefix: /v2/meters
-
-.. rest-controller:: ceilometer.api.controllers.v2.meters:MeterController
- :webprefix: /v2/meters
-
-.. autotype:: ceilometer.api.controllers.v2.meters.Meter
- :members:
-
-.. autotype:: ceilometer.api.controllers.v2.meters.OldSample
- :members:
-
-Samples and Statistics
-======================
-
-.. rest-controller:: ceilometer.api.controllers.v2.samples:SamplesController
- :webprefix: /v2/samples
-
-.. autotype:: ceilometer.api.controllers.v2.samples.Sample
- :members:
-
-.. autotype:: ceilometer.api.controllers.v2.meters.Statistics
- :members:
-
-When a simple statistics request is invoked (using GET
-/v2/meters/<meter_name>/statistics), it will return the standard set of
-*Statistics*: *avg*, *sum*, *min*, *max*, and *count*.
-
-.. note::
-
-   If using Ceilometer data for statistics, it is recommended to use a
-   backend such as Gnocchi_ rather than Ceilometer's interface. Gnocchi is
-   designed specifically for this use case, providing a lightweight,
-   aggregated model. As they manage data differently, the API models
-   returned by Ceilometer and Gnocchi differ. The Gnocchi API can be found
-   here_.
-
-.. _Gnocchi: http://gnocchi.xyz/
-.. _here: http://gnocchi.xyz/rest.html
-
-Selectable Aggregates
-+++++++++++++++++++++
-
-The Statistics API has been extended to include the aggregate functions
-*stddev* and *cardinality*. You can explicitly select these functions or any
-from the standard set by specifying an aggregate function in the statistics
-query::
-
- GET /v2/meters/<meter_name>/statistics?aggregate.func=<name>&aggregate.param=<value>
-
-(where aggregate.param is optional).
-
-Duplicate aggregate function and parameter pairs are silently discarded from
-the statistics query. Partial duplicates, in the sense of the same function
-but differing parameters, for example::
-
- GET /v2/meters/<meter_name>/statistics?aggregate.func=cardinality&aggregate.param=resource_id&aggregate.func=cardinality&aggregate.param=project_id
-
-are, on the other hand, both allowed by the API and supported by the storage
-drivers. See the :ref:`functional-examples` section for more detail.
-
-.. note::
-
- Currently only *cardinality* needs aggregate.param to be specified.
-
-.. autotype:: ceilometer.api.controllers.v2.meters.Aggregate
- :members:
-
-Capabilities
-============
-
-The Capabilities API allows you to discover which features of the V2 API,
-including the selectable aggregate functions, are supported by the currently
-configured storage driver. A capabilities query returns a flattened
-dictionary of properties with associated boolean values - a 'False' or
-absent value means that the corresponding feature is not available in the
-backend.
-
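-For example, a capabilities query can be issued as follows (the keys in the
-abridged response are illustrative; the exact set depends on the configured
-backend)::
-
-    $ curl -H 'X-Auth-Token: <inserttokenhere>' \
-      "http://localhost:8777/v2/capabilities"
-
-    {"api": {"meters:query:simple": true,
-             "statistics:aggregation:standard": true},
-     "storage": {"production_ready": true}}
-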
-.. rest-controller:: ceilometer.api.controllers.v2.capabilities:CapabilitiesController
- :webprefix: /v2/capabilities
-
-.. autotype:: ceilometer.api.controllers.v2.capabilities.Capabilities
- :members:
-
-
-Filtering Queries
-=================
-
-Ceilometer's REST API currently supports two types of queries. The Simple
-Query functionality provides simple filtering on several fields of the
-*Sample* type. Complex Query lets you specify queries with logical and
-comparison operators on the fields of *Sample*.
-
-You may also apply filters based on the values of one or more fields of
-*resource_metadata*, which you can identify by using the *metadata.<field>*
-syntax in either type of query. Note, however, that given the free-form
-nature of the *resource_metadata* field, there is no practical or consistent
-way to validate the query fields under the *metadata* domain as is done for
-all other fields.
-
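-For example, a simple query filtering on a metadata field (the same filter
-appears in the curl examples below) looks like::
-
-    GET /v2/meters/instance
-    q: [{"field": "metadata.event_type",
-         "op": "eq",
-         "value": "compute.instance.exists"}]
-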
-.. note::
-
- The API call will return HTTP 200 OK status for both of the
- following cases: when a query with *metadata.<field>* does not match its
- value, and when *<field>* itself does not exist in any of the records being
- queried.
-
-Simple Query
-++++++++++++
-
-Many of the endpoints above accept a query filter argument, which
-should be a list of Query data structures. Whichever endpoint you
-want to apply a filter on, you always filter on the fields of the *Sample*
-type (for example, if you apply a filter on a query for statistics,
-you do not target the *duration_start* field of *Statistics*, but the
-*timestamp* field of *Sample*). See :ref:`api-queries` for how to query the
-API.
-
-.. autotype:: ceilometer.api.controllers.v2.base.Query
- :members:
-
-
-Complex Query
-+++++++++++++
-
-The filter expressions of the Complex Query feature operate on the fields
-of *Sample*. The following comparison operators are
-supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following
-logical operators can be used: *and*, *or* and *not*. The field names are
-validated against the database models. See :ref:`api-queries` for how to
-query the API.
-
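-For example, a filter using the *in* operator could look like the following
-(a sketch; the list-valued form shown is an assumption mirroring the
-comparison operators above)::
-
-    {"in": {"project_id": ["project-1", "project-2"]}}
-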
-.. note::
-
-   The *not* operator has a different meaning in MongoDB than in an SQL
-   database engine. If the *not* operator is applied to a non-existent
-   metadata field, the result depends on the database engine. For example,
-   if the {"not": {"metadata.nonexistent_field" : "some value"}} filter is
-   used in a query, MongoDB will return every Sample object, as the *not*
-   operator evaluates to true for every Sample where the given field does
-   not exist (see the MongoDB documentation for details). An SQL-based
-   database engine, on the other hand, will return an empty result, because
-   the join on the metadata table returns zero rows: the ON clause of the
-   join, which tries to match on the metadata field name, is never
-   fulfilled.
-
-Complex Query supports defining the list of orderby expressions in the form
-of [{"field_name": "asc"}, {"field_name2": "desc"}, ...].
-
-The number of returned items can be bounded using the *limit* option.
-
-The *filter*, *orderby* and *limit* are all optional fields in a query.
-
-.. rest-controller:: ceilometer.api.controllers.v2.query:QuerySamplesController
- :webprefix: /v2/query/samples
-
-.. autotype:: ceilometer.api.controllers.v2.query.ComplexQuery
- :members:
-
-Links
-=====
-
-.. autotype:: ceilometer.api.controllers.v2.base.Link
- :members:
-
-API and CLI query examples
-==========================
-
-CLI Queries
-+++++++++++
-
-Ceilometer CLI Commands::
-
- $ ceilometer --debug --os-username <username_here> --os-password <password_here> --os-auth-url http://localhost:5000/v2.0/ --os-tenant-name admin meter-list
-
-.. note::
-
-   The *username*, *password*, and *tenant-name* options must be supplied
-   either as in-line arguments or via environment variables. Note that
-   in-line arguments override the environment variables.
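-
-   For example, the same credentials can be supplied via the standard
-   environment variables::
-
-      $ export OS_USERNAME=<username_here>
-      $ export OS_PASSWORD=<password_here>
-      $ export OS_TENANT_NAME=admin
-      $ export OS_AUTH_URL=http://localhost:5000/v2.0/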
-
-.. _api-queries:
-
-API Queries
-+++++++++++
-
-Ceilometer API calls:
-
-.. note::
-
- To successfully query Ceilometer you must first get a project-specific
- token from the Keystone service and add it to any API calls that you
- execute against that project. See the
- `OpenStack credentials documentation <http://docs.openstack.org/api/quick-start/content/index.html#getting-credentials-a00665>`_
- for additional details.
-
-A simple query to return a list of available meters::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters"
-
-A query to return the list of resources::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/resources"
-
-A query to return the list of samples, limited to a specific meter type::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/disk.root.size"
-
-A query using filters (see: `query filter section <https://docs.openstack.org/ceilometer/latest/webapi/v2.html#filtering-queries>`_)::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.delete.start"
-
-Additional examples::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/disk.root.size?q.field=resource_id&q.op=eq&q.value=<resource_id_here>"
-
-or::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/instance?q.field=metadata.event_type&q.value=compute.instance.exists"
-
-You can specify multiple filters by using an array of queries (order matters)::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/instance"\
- "?q.field=metadata.event_type&q.value=compute.instance.exists"\
- "&q.field=timestamp&q.op=gt&q.value=2013-07-03T13:34:17"
-
-A query to find the maximum value and standard deviation (*max*, *stddev*) of
-the CPU utilization for a given instance (identified by *resource_id*)::
-
- $ curl -H 'X-Auth-Token: <inserttokenhere>' \
- "http://localhost:8777/v2/meters/cpu_util/statistics?aggregate.func=max&aggregate.func=stddev"\
- "&q.field=resource_id&q.op=eq&q.value=64da755c-9120-4236-bee1-54acafe24980"
-
-.. note::
-
-   If any of the requested aggregates are not supported by the storage
-   driver, an HTTP 400 error code will be returned along with an appropriate
-   error message.
-
-A JSON-based example::
-
-    $ curl -X GET -H "X-Auth-Token: <inserttokenhere>" \
-      -H "Content-Type: application/json" \
-      -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"}]}' \
-      http://localhost:8777/v2/meters/instance
-
-A JSON-based example with multiple filters::
-
-    $ curl -X GET -H "X-Auth-Token: <inserttokenhere>" \
-      -H "Content-Type: application/json" \
-      -d '{"q": [{"field": "timestamp", "op": "ge", "value": "2014-04-01T13:34:17"},
-             {"field": "resource_id", "op": "eq", "value": "4da2b992-0dc3-4a7c-a19a-d54bf918de41"}]}' \
-      http://localhost:8777/v2/meters/instance
-
-.. _functional-examples:
-
-Functional examples
-+++++++++++++++++++
-
-The examples below are meant to help you understand how to query the
-Ceilometer API to build custom meter reports. The query parameters should
-be encoded using one of the methods above, for example as URL parameters or
-as JSON-encoded data passed to the GET request.
-
-Get the list of samples about instances running for June 2013::
-
- GET /v2/meters/instance
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"}]
-
-
-Get the list of samples about instances running for June 2013 for a particular
-project::
-
- GET /v2/meters/instance
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "project_id",
- "op": "eq",
- "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]
-
-Now you may want to have statistics on the meters you are targeting.
-Consider the following example where you are getting the list of samples
-about CPU utilization of a given instance (identified by its *resource_id*)
-running for June 2013::
-
- GET /v2/meters/cpu_util
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "resource_id",
- "op": "eq",
- "value": "64da755c-9120-4236-bee1-54acafe24980"}]
-
-You can have statistics on the list of samples requested (*avg*, *sum*, *max*,
-*min*, *count*) computed on the full duration::
-
- GET /v2/meters/cpu_util/statistics
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "resource_id",
- "op": "eq",
- "value": "64da755c-9120-4236-bee1-54acafe24980"}]
-
-You may want to aggregate samples over a given period (10 minutes for
-example) in order to get an array of the statistics computed on smaller
-durations::
-
- GET /v2/meters/cpu_util/statistics
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "resource_id",
- "op": "eq",
- "value": "64da755c-9120-4236-bee1-54acafe24980"}]
- period: 600
-
-The *period* parameter aggregates by time range. You can also aggregate by
-field using the *groupby* parameter. Currently, the *user_id*, *resource_id*,
-*project_id*, and *source* fields are supported. Below is an example that uses
-a query filter and group by aggregation on *project_id* and *resource_id*::
-
- GET /v2/meters/instance/statistics
- q: [{"field": "user_id",
- "op": "eq",
- "value": "user-2"},
- {"field": "source",
- "op": "eq",
- "value": "source-1"}]
- groupby: ["project_id", "resource_id"]
-
-The statistics will be returned in a list, and each entry of the list will be
-labeled with the group name. For the previous example, the first entry might
-have a *project_id* of "project-1" and a *resource_id* of "resource-1", the
-second entry a *project_id* of "project-1" and a *resource_id* of
-"resource-2", and so on.
-
-You can request both period and group by aggregation in the same query::
-
- GET /v2/meters/instance/statistics
- q: [{"field": "source",
- "op": "eq",
- "value": "source-1"}]
- groupby: ["project_id"]
- period: 7200
-
-Note that period aggregation is applied first, followed by group by
-aggregation. Order matters because the period aggregation determines the time
-ranges for the statistics.
-
-Below is a real-life query::
-
- GET /v2/meters/image/statistics
- groupby: ["project_id", "resource_id"]
-
-With the return values::
-
- [{"count": 4, "duration_start": "2013-09-18T19:08:33", "min": 1.0,
- "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
- "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1137.0,
- "period_start": "2013-09-18T19:08:33", "avg": 1.0,
- "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
- "resource_id": "551f495f-7f49-4624-a34c-c422f2c5f90b"},
- "unit": "image"},
- {"count": 4, "duration_start": "2013-09-18T19:08:36", "min": 1.0,
- "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
- "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1134.0,
- "period_start": "2013-09-18T19:08:36", "avg": 1.0,
- "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
- "resource_id": "7c1157ed-cf30-48af-a868-6c7c3ad7b531"},
- "unit": "image"},
- {"count": 4, "duration_start": "2013-09-18T19:08:34", "min": 1.0,
- "max": 1.0, "duration_end": "2013-09-18T19:27:30", "period": 0,
- "sum": 4.0, "period_end": "2013-09-18T19:27:30", "duration": 1136.0,
- "period_start": "2013-09-18T19:08:34", "avg": 1.0,
- "groupby": {"project_id": "c2334f175d8b4cb8b1db49d83cecde78",
- "resource_id": "eaed9cf4-fc99-4115-93ae-4a5c37a1a7d7"},
- "unit": "image"}]
-
-You can request specific aggregate functions as well. For example, if you only
-want the average CPU utilization, the GET request would look like this::
-
- GET /v2/meters/cpu_util/statistics?aggregate.func=avg
-
-Use the same syntax to access the aggregate functions not in the standard
-set, e.g. *stddev* and *cardinality*. A request for the standard deviation
-of CPU utilization would take the form::
-
- GET /v2/meters/cpu_util/statistics?aggregate.func=stddev
-
-This would give a response such as the following::
-
- [{"aggregate": {"stddev":0.6858829535841072},
- "duration_start": "2014-01-30T11:13:23",
- "duration_end": "2014-01-31T16:07:13",
- "duration": 104030.0,
- "period": 0,
- "period_start": "2014-01-30T11:13:23",
- "period_end": "2014-01-31T16:07:13",
- "groupby": null,
- "unit" : "%"}]
-
-The request syntax is similar for *cardinality* but with the aggregate.param
-option provided. So, for example, if you want to know the number of distinct
-tenants with images, you would do::
-
- GET /v2/meters/image/statistics?aggregate.func=cardinality
- &aggregate.param=project_id
-
-For a more involved example, consider a requirement for determining, for some
-tenant, the number of distinct instances (*cardinality*) as well as the total
-number of instance samples (*count*). You might also want to see this
-information in 15-minute intervals. Then, using the *period* and
-*groupby* options, a query would look like the following::
-
- GET /v2/meters/instance/statistics?aggregate.func=cardinality
- &aggregate.param=resource_id
- &aggregate.func=count
- &groupby=project_id&period=900
-
-This would give an example response of the form::
-
- [{"count": 19,
- "aggregate": {"count": 19.0, "cardinality/resource_id": 3.0},
- "duration": 328.478029,
- "duration_start": "2014-01-31T10:00:41.823919",
- "duration_end": "2014-01-31T10:06:10.301948",
- "period": 900,
- "period_start": "2014-01-31T10:00:00",
- "period_end": "2014-01-31T10:15:00",
- "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
- "unit": "instance"},
- {"count": 22,
- "aggregate": {"count": 22.0, "cardinality/resource_id": 4.0},
- "duration": 808.00384,
- "duration_start": "2014-01-31T10:15:15",
- "duration_end": "2014-01-31T10:28:43.003840",
- "period": 900,
- "period_start": "2014-01-31T10:15:00",
- "period_end": "2014-01-31T10:30:00",
- "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
- "unit": "instance"},
- {"count": 2,
- "aggregate": {"count": 2.0, "cardinality/resource_id": 2.0},
- "duration": 0.0,
- "duration_start": "2014-01-31T10:35:15",
- "duration_end": "2014-01-31T10:35:15",
- "period": 900,
- "period_start": "2014-01-31T10:30:00",
- "period_end": "2014-01-31T10:45:00",
- "groupby": {"project_id": "061a5c91811e4044b7dc86c6136c4f99"},
- "unit": "instance"}]
-
-If you want to retrieve all the instances (not the list of samples, but the
-resources themselves) that have been running during this month for a given
-project, you should ask the resource endpoint for the list of resources (all
-types: including storage, images, networking, ...)::
-
- GET /v2/resources
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "project_id",
- "op": "eq",
- "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]
-
-Then look for resources that have an *instance* meter linked to them. That
-will indicate resources that have been measured as instances. You can
-then request their samples to have more detailed information, like their
-state or their flavor::
-
- GET /v2/meter/instance
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "resource_id",
- "op": "eq",
- "value": "64da755c-9120-4236-bee1-54acafe24980"},
- {"field": "project_id",
- "op": "eq",
- "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]
-
-This will return a list of samples that have been recorded on this
-particular resource. You can inspect them to retrieve information, such as
-the instance state (check the *metadata.vm_state* field) or the instance
-flavor (check the *metadata.flavor* field).
-You can request nested metadata fields by using a dot to delimit the fields
-(e.g. *metadata.weighted_host.host* for the *instance.scheduled* meter), as
-sketched below.
-
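-For instance, a hypothetical filter on a nested metadata field of the
-*instance.scheduled* meter could look like this (the host value is a
-placeholder)::
-
-    GET /v2/meters/instance.scheduled
-    q: [{"field": "metadata.weighted_host.host",
-         "op": "eq",
-         "value": "compute-1"}]
-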
-To retrieve only the last 3 samples of a meter, you can pass the *limit*
-parameter to the query::
-
- GET /v2/meter/instance
- q: [{"field": "timestamp",
- "op": "ge",
- "value": "2013-06-01T00:00:00"},
- {"field": "timestamp",
- "op": "lt",
- "value": "2013-07-01T00:00:00"},
- {"field": "resource_id",
- "op": "eq",
- "value": "64da755c-9120-4236-bee1-54acafe24980"},
- {"field": "project_id",
- "op": "eq",
- "value": "8d6057bc-5b90-4296-afe0-84acaa2ef909"}]
- limit: 3
-
-This query would only return the last 3 samples.
-
-Functional example for Complex Query
-++++++++++++++++++++++++++++++++++++
-
-This example demonstrates how complex query filter expressions can be
-generated and sent to the /v2/query/samples endpoint of the Ceilometer API
-using a POST request.
-
-To check for *cpu_util* samples reported between 18:00-18:15 or between
-18:30-18:45 on a particular date (2013-12-01), where the utilization is
-between 23 and 26 percent, but not exactly 25.12 percent, the following
-filter expression can be created::
-
- {"and":
- [{"and":
- [{"=": {"counter_name": "cpu_util"}},
- {">": {"counter_volume": 0.23}},
- {"<": {"counter_volume": 0.26}},
- {"not": {"=": {"counter_volume": 0.2512}}}]},
- {"or":
- [{"and":
- [{">": {"timestamp": "2013-12-01T18:00:00"}},
- {"<": {"timestamp": "2013-12-01T18:15:00"}}]},
- {"and":
- [{">": {"timestamp": "2013-12-01T18:30:00"}},
- {"<": {"timestamp": "2013-12-01T18:45:00"}}]}]}]}
-
-Different sorting criteria can be defined for the query filter; for example,
-the results can be ordered in ascending order by *counter_volume* and in
-descending order by *timestamp*. The following order by expression has to be
-created to specify these criteria::
-
- [{"counter_volume": "ASC"}, {"timestamp": "DESC"}]
-
-As the current implementation accepts only string values as query filter and
-order by definitions, the expressions defined above have to be converted to
-string values. By adding a limit criterion to the request, which caps the
-number of returned samples at four, the query looks like the following::
-
- {
- "filter" : "{\"and\":[{\"and\": [{\"=\": {\"counter_name\": \"cpu_util\"}}, {\">\": {\"counter_volume\": 0.23}}, {\"<\": {\"counter_volume\": 0.26}}, {\"not\": {\"=\": {\"counter_volume\": 0.2512}}}]}, {\"or\": [{\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:00:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:15:00\"}}]}, {\"and\": [{\">\": {\"timestamp\": \"2013-12-01T18:30:00\"}}, {\"<\": {\"timestamp\": \"2013-12-01T18:45:00\"}}]}]}]}",
- "orderby" : "[{\"counter_volume\": \"ASC\"}, {\"timestamp\": \"DESC\"}]",
- "limit" : 4
- }
-
-A query request looks like the following with curl::
-
- $ curl -X POST -H 'X-Auth-Token: <inserttokenhere>' -H 'Content-Type: application/json' \
- -d '<insertyourqueryexpressionhere>' \
- http://localhost:8777/v2/query/samples
-
-.. _user-defined-data:
-
-User-defined data
-+++++++++++++++++
-
-It is possible to add your own samples (created from data retrieved in any
-way, such as monitoring agents on your instances) to Ceilometer, to store
-them and query on them. You can even get *Statistics* on your own inserted
-data. By adding a *Sample* to a *Resource*, you automatically create the
-corresponding *Meter* if it does not already exist. To achieve this, you
-have to POST a list of one or more samples in JSON format::
-
- $ curl -X POST -H 'X-Auth-Token: <inserttokenhere>' -H 'Content-Type: application/json' \
- -d '<insertyoursampleslisthere>' \
- http://localhost:8777/v2/meters/<insertyourmeternamehere>
-
-The *source*, *timestamp*, *project_id* and *user_id* fields are
-automatically added if not present in the samples. The *message_id* field is
-ignored if present; an internal value is always set.
-
-By default, samples posted via the API will be placed on the notification
-bus and processed by the notification agent.
-
-To avoid re-queuing the data, samples posted via the API can be stored
-verbatim in the storage backend by specifying the boolean flag 'direct' in
-the request URL, like this::
-
- POST /v2/meters/ram_util?direct=True
-
-Samples posted this way will bypass pipeline processing.
-
-Here is an example showing how to add a sample for a *ram_util* meter
-(whether or not it already exists)::
-
- POST /v2/meters/ram_util
- body: [
- {
- "counter_name": "ram_util",
- "user_id": "4790fbafad2e44dab37b1d7bfc36299b",
- "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b",
- "resource_metadata": {
- "display_name": "my_instance",
- "my_custom_metadata_1": "value1",
- "my_custom_metadata_2": "value2"
- },
- "counter_unit": "%",
- "counter_volume": 8.57762938230384,
- "project_id": "97f9a6aaa9d842fcab73797d3abb2f53",
- "counter_type": "gauge"
- }
- ]
-
-You get back the same list containing your samples, completed with the
-missing fields (*source* and *timestamp* in this case), as in the sketch
-below.
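-
-For the example above, the response would have roughly the following shape
-(the *source* and *timestamp* values shown are illustrative)::
-
-    [
-      {
-        "counter_name": "ram_util",
-        "user_id": "4790fbafad2e44dab37b1d7bfc36299b",
-        "resource_id": "87acaca4-ae45-43ae-ac91-846d8d96a89b",
-        "resource_metadata": {
-          "display_name": "my_instance",
-          "my_custom_metadata_1": "value1",
-          "my_custom_metadata_2": "value2"
-        },
-        "counter_unit": "%",
-        "counter_volume": 8.57762938230384,
-        "project_id": "97f9a6aaa9d842fcab73797d3abb2f53",
-        "counter_type": "gauge",
-        "source": "openstack",
-        "timestamp": "2013-12-01T18:00:00"
-      }
-    ]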
diff --git a/etc/apache2/ceilometer b/etc/apache2/ceilometer
deleted file mode 100644
index 261acc3e..00000000
--- a/etc/apache2/ceilometer
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# This is an example Apache2 configuration file for using the
-# ceilometer API through mod_wsgi.
-
-# Note: If you are using a Debian-based system then the paths
-# "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead
-# of "httpd".
-#
-# The number of processes and threads is an example only and should
-# be adjusted according to local requirements.
-
-Listen 8777
-
-<VirtualHost *:8777>
- WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP}
- WSGIProcessGroup ceilometer-api
- WSGIScriptAlias / /var/www/ceilometer/app
- WSGIApplicationGroup %{GLOBAL}
- <IfVersion >= 2.4>
- ErrorLogFormat "%{cu}t %M"
- </IfVersion>
- ErrorLog /var/log/httpd/ceilometer_error.log
- CustomLog /var/log/httpd/ceilometer_access.log combined
-</VirtualHost>
-
-WSGISocketPrefix /var/run/httpd
diff --git a/etc/ceilometer/api_paste.ini b/etc/ceilometer/api_paste.ini
deleted file mode 100644
index 4247f1bb..00000000
--- a/etc/ceilometer/api_paste.ini
+++ /dev/null
@@ -1,27 +0,0 @@
-# Ceilometer API WSGI Pipeline
-# Define the filters that make up the pipeline for processing WSGI requests
-# Note: This pipeline is PasteDeploy's term rather than Ceilometer's pipeline
-# used for processing samples
-
-# Remove authtoken from the pipeline if you don't want to use keystone authentication
-[pipeline:main]
-pipeline = cors http_proxy_to_wsgi request_id authtoken api-server
-
-[app:api-server]
-paste.app_factory = ceilometer.api.app:app_factory
-
-[filter:authtoken]
-paste.filter_factory = keystonemiddleware.auth_token:filter_factory
-oslo_config_project = ceilometer
-
-[filter:request_id]
-paste.filter_factory = oslo_middleware:RequestId.factory
-oslo_config_project = ceilometer
-
-[filter:cors]
-paste.filter_factory = oslo_middleware.cors:filter_factory
-oslo_config_project = ceilometer
-
-[filter:http_proxy_to_wsgi]
-paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
-oslo_config_project = ceilometer
diff --git a/etc/ceilometer/ceilometer-config-generator.conf b/etc/ceilometer/ceilometer-config-generator.conf
index 976385b7..205c093d 100644
--- a/etc/ceilometer/ceilometer-config-generator.conf
+++ b/etc/ceilometer/ceilometer-config-generator.conf
@@ -7,8 +7,5 @@ namespace = oslo.concurrency
namespace = oslo.db
namespace = oslo.log
namespace = oslo.messaging
-namespace = oslo.middleware.cors
-namespace = oslo.middleware.http_proxy_to_wsgi
-namespace = oslo.policy
namespace = oslo.service.service
namespace = keystonemiddleware.auth_token
diff --git a/etc/ceilometer/policy.json b/etc/ceilometer/policy.json
deleted file mode 100644
index 0aa0a3d1..00000000
--- a/etc/ceilometer/policy.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "context_is_admin": "role:admin",
- "segregation": "rule:context_is_admin",
-
- "telemetry:get_samples": "",
- "telemetry:get_sample": "",
- "telemetry:query_sample": "",
- "telemetry:create_samples": "",
-
- "telemetry:compute_statistics": "",
- "telemetry:get_meters": "",
-
- "telemetry:get_resource": "",
- "telemetry:get_resources": "",
-}
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/post.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/run.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/run.yaml
deleted file mode 100644
index 36f31640..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only/run.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only
- from old job gate-ceilometer-dsvm-tempest-plugin-mongodb-identity-v3-only-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_REGEX="^ceilometer\."
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1
- export DEVSTACK_GATE_CEILOMETER_BACKEND=mongodb
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
- if [ "mongodb" = "postgresql" ] ; then
- export DEVSTACK_GATE_POSTGRES=1
- fi
- if [ "-identity-v3-only" == "-identity-v3-only" ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_IDENTITY_V2=False"
- fi
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/post.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/run.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/run.yaml
deleted file mode 100644
index 3d0030ee..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mongodb/run.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-dsvm-tempest-plugin-mongodb from old job
- gate-ceilometer-dsvm-tempest-plugin-mongodb-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_REGEX="^ceilometer\."
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1
- export DEVSTACK_GATE_CEILOMETER_BACKEND=mongodb
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
- if [ "mongodb" = "postgresql" ] ; then
- export DEVSTACK_GATE_POSTGRES=1
- fi
- if [ "" == "-identity-v3-only" ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_IDENTITY_V2=False"
- fi
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/post.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/post.yaml
deleted file mode 100644
index dac87534..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/run.yaml b/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/run.yaml
deleted file mode 100644
index 9433c9a7..00000000
--- a/playbooks/legacy/ceilometer-dsvm-tempest-plugin-mysql/run.yaml
+++ /dev/null
@@ -1,51 +0,0 @@
-- hosts: all
- name: Autoconverted job legacy-ceilometer-dsvm-tempest-plugin-mysql from old job
- gate-ceilometer-dsvm-tempest-plugin-mysql-ubuntu-xenial
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_REGEX="^ceilometer\."
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1
- export DEVSTACK_GATE_CEILOMETER_BACKEND=mysql
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ceilometer git://git.openstack.org/openstack/ceilometer"
- export BRANCH_OVERRIDE=default
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
- if [ "mysql" = "postgresql" ] ; then
- export DEVSTACK_GATE_POSTGRES=1
- fi
- if [ "" == "-identity-v3-only" ] ; then
- export DEVSTACK_LOCAL_CONFIG+=$'\n'"ENABLE_IDENTITY_V2=False"
- fi
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst
deleted file mode 100644
index c9996a36..00000000
--- a/rally-jobs/README.rst
+++ /dev/null
@@ -1,29 +0,0 @@
-Rally job related files
-=======================
-
-This directory contains rally tasks and plugins that are run by OpenStack CI.
-
-Structure
----------
-
-* plugins - directory where you can add rally plugins. Almost everything in
-  Rally is a plugin: benchmark contexts, benchmark scenarios, SLA checks,
-  generic cleanup resources, and so on.
-
-* extra - all files from this directory will be copied to the gates, so you
-  are able to use absolute paths in rally tasks.
-  Files will be located in ~/.rally/extra/*
-
-* ceilometer is a task that is run in the gates against Ceilometer
-
-
-Useful links
-------------
-
-* More about Rally: https://rally.readthedocs.org/en/latest/
-
-* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
-
-* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
-
-* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
diff --git a/rally-jobs/ceilometer.yaml b/rally-jobs/ceilometer.yaml
deleted file mode 100644
index 32c1022f..00000000
--- a/rally-jobs/ceilometer.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
----
-
- CeilometerMeters.list_meters:
- -
- runner:
- type: "constant"
- times: 10
- concurrency: 10
- context:
- users:
- tenants: 1
- users_per_tenant: 1
- sla:
- max_failure_percent: 0
-
- CeilometerResource.list_resources:
- -
- runner:
- type: "constant"
- times: 10
- concurrency: 10
- context:
- users:
- tenants: 1
- users_per_tenant: 1
- sla:
- max_failure_percent: 0
-
- CeilometerStats.create_meter_and_get_stats:
- -
- args:
- user_id: "user-id"
- resource_id: "resource-id"
- counter_volume: 1.0
- counter_unit: ""
- counter_type: "cumulative"
- runner:
- type: "constant"
- times: 20
- concurrency: 10
- context:
- users:
- tenants: 1
- users_per_tenant: 1
- sla:
- max_failure_percent: 0
-
- CeilometerQueries.create_and_query_samples:
- -
- args:
- filter: {"=": {"counter_unit": "instance"}}
- orderby: !!null
- limit: 10
- counter_name: "cpu_util"
- counter_type: "gauge"
- counter_unit: "instance"
- counter_volume: "1.0"
- resource_id: "resource_id"
- runner:
- type: "constant"
- times: 20
- concurrency: 10
- context:
- users:
- tenants: 1
- users_per_tenant: 1
- sla:
- max_failure_percent: 0
-
diff --git a/rally-jobs/extra/README.rst b/rally-jobs/extra/README.rst
deleted file mode 100644
index aab343c5..00000000
--- a/rally-jobs/extra/README.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Extra files
-===========
-
-All files from this directory will be copied to the gates, so you are able
-to use absolute paths in rally tasks. Files will be in ~/.rally/extra/*
-
diff --git a/rally-jobs/extra/fake.img b/rally-jobs/extra/fake.img
deleted file mode 100644
index e69de29b..00000000
--- a/rally-jobs/extra/fake.img
+++ /dev/null
diff --git a/rally-jobs/plugins/README.rst b/rally-jobs/plugins/README.rst
deleted file mode 100644
index 33bec0d2..00000000
--- a/rally-jobs/plugins/README.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Rally plugins
-=============
-
-All *.py modules in this directory will be auto-loaded by Rally and the
-plugins they define will be discoverable. No extra configuration is needed,
-and there is no difference between writing them here and in the Rally code base.
-
-Note that it is better to push all interesting and useful benchmarks to the
-Rally code base, since that simplifies administration for operators.
diff --git a/rally-jobs/plugins/plugin_sample.py b/rally-jobs/plugins/plugin_sample.py
deleted file mode 100644
index bcc38783..00000000
--- a/rally-jobs/plugins/plugin_sample.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""Sample of plugin for Ceilometer.
-
-For more Ceilometer-related benchmarks, take a look here:
-github.com/openstack/rally/blob/master/rally/benchmark/scenarios/ceilometer/
-
-About plugins: https://rally.readthedocs.org/en/latest/plugins.html
-
-Rally concepts: https://wiki.openstack.org/wiki/Rally/Concepts
-"""
-
-from rally.benchmark.scenarios import base
-
-
-class CeilometerPlugin(base.Scenario):
- pass
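
The deleted sample plugin was an empty stub. For reference, a working plugin in the old `rally.benchmark` namespace looked roughly like the sketch below; the scenario method and its body are hypothetical, not taken from this repository:

    from rally.benchmark.scenarios import base


    class CeilometerPlugin(base.Scenario):

        @base.scenario()
        def list_meters_twice(self):
            """Hypothetical scenario: list meters twice per iteration."""
            ceilometer = self.clients("ceilometer")
            ceilometer.meters.list()
            ceilometer.meters.list()
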
diff --git a/releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml b/releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml
new file mode 100644
index 00000000..132fdd62
--- /dev/null
+++ b/releasenotes/notes/ceilometer-api-removal-6bd44d3eab05e593.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+ - |
+ The deprecated Ceilometer API has been removed.
diff --git a/requirements.txt b/requirements.txt
index b714a918..faa05260 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,6 @@ futures>=3.0;python_version=='2.7' or python_version=='2.6' # BSD
futurist>=0.11.0 # Apache-2.0
debtcollector>=1.2.0 # Apache-2.0
jsonpath-rw-ext>=0.1.9 # Apache-2.0
-jsonschema!=2.5.0,<3.0.0,>=2.0.0 # MIT
kafka-python>=1.3.2 # Apache-2.0
keystonemiddleware!=4.1.0,>=4.0.0 # Apache-2.0
lxml>=2.3 # BSD
@@ -19,14 +18,10 @@ oslo.config>=3.22.0 # Apache-2.0
oslo.db>=4.1.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
-oslo.policy>=0.5.0 # Apache-2.0
oslo.reports>=0.6.0 # Apache-2.0
oslo.rootwrap>=2.0.0 # Apache-2.0
-PasteDeploy>=1.5.0 # MIT
pbr>=1.6 # Apache-2.0
-pecan>=1.0.0 # BSD
oslo.messaging>=5.12.0 # Apache-2.0
-oslo.middleware>=3.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.utils>=3.5.0 # Apache-2.0
pysnmp<5.0.0,>=4.2.3 # BSD
@@ -45,9 +40,4 @@ sqlalchemy-migrate>=0.9.6 # Apache-2.0
stevedore>=1.9.0 # Apache-2.0
tenacity>=3.2.1 # Apache-2.0
tooz[zake]>=1.47.0 # Apache-2.0
-WebOb>=1.5.0 # MIT
-WSME>=0.8 # MIT
-# NOTE(jd) We do not import it directly, but WSME datetime string parsing
-# behaviour changes when this library is installed
-python-dateutil>=2.4.2 # BSD
os-xenapi>=0.1.1 # Apache-2.0
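
Every dependency dropped here existed only to serve the API: pecan (the web framework), WSME (request/response typing), WebOb, PasteDeploy and oslo.middleware (WSGI plumbing), oslo.policy (API access control), jsonschema (complex-query validation), and python-dateutil (a WSME parsing quirk, per the removed note). A quick, hypothetical post-upgrade check that a deployment no longer needs them:

    import importlib

    # These were pulled in only by the removed API layer.
    API_ONLY = ("pecan", "wsme", "webob", "paste.deploy", "jsonschema")

    for name in API_ONLY:
        try:
            importlib.import_module(name)
            print(name, "still installed (harmless, but no longer required)")
        except ImportError:
            print(name, "is gone; ceilometer itself no longer needs it")
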
diff --git a/setup.cfg b/setup.cfg
index babd0138..02c46b77 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -271,9 +271,6 @@ ceilometer.event.trait_plugin =
bitfield = ceilometer.event.trait_plugins:BitfieldTraitPlugin
timedelta = ceilometer.event.trait_plugins:TimedeltaPlugin
-wsgi_scripts =
- ceilometer-api = ceilometer.cmd.api:build_wsgi_app
-
console_scripts =
ceilometer-polling = ceilometer.cmd.polling:main
ceilometer-agent-notification = ceilometer.cmd.agent_notification:main
@@ -294,9 +291,6 @@ oslo.config.opts =
ceilometer = ceilometer.opts:list_opts
ceilometer-auth = ceilometer.opts:list_keystoneauth_opts
-oslo.config.opts.defaults =
- ceilometer = ceilometer.conf.defaults:set_cors_middleware_defaults
-
tempest.test_plugins =
ceilometer_tests = ceilometer.tests.tempest.plugin:CeilometerTempestPlugin
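
With the `wsgi_scripts` section gone, pbr stops generating the `ceilometer-api` WSGI script and the corresponding entry point disappears from the installed package metadata. A minimal sketch of how a deployment tool could detect that, assuming pbr records `wsgi_scripts` as an ordinary entry-point group:

    import pkg_resources

    # After this change the lookup comes back empty for ceilometer.
    eps = [
        ep.name
        for ep in pkg_resources.iter_entry_points("wsgi_scripts")
        if ep.dist.project_name == "ceilometer"
    ]
    print(eps)  # no longer contains "ceilometer-api"
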
diff --git a/test-requirements.txt b/test-requirements.txt
index 9bd796e7..69ecdfc6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -14,8 +14,6 @@ oslotest>=2.15.0 # Apache-2.0
oslo.vmware>=1.16.0 # Apache-2.0
pyOpenSSL>=0.14 # Apache-2.0
sphinx>=1.6.2 # BSD
-sphinxcontrib-httpdomain # BSD
-sphinxcontrib-pecanwsme>=0.8 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
@@ -23,6 +21,4 @@ gabbi>=1.30.0 # Apache-2.0
requests-aws>=0.1.4 # BSD License (3 clause)
os-testr>=0.4.1 # Apache-2.0
tempest>=14.0.0 # Apache-2.0
-WebTest>=2.0 # MIT
pifpaf>=0.0.11 # Apache-2.0
-os-api-ref>=0.1.0 # Apache-2.0
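
WebTest drops out for the same reason: it existed to exercise the API's WSGI application in-process from the functional tests. A minimal sketch of the pattern it enabled (the trivial app below is a stand-in for the removed ceilometer API application, not code from this repository):

    from webtest import TestApp


    def fake_api(environ, start_response):
        # Stand-in for the removed ceilometer API WSGI app.
        start_response("200 OK", [("Content-Type", "application/json")])
        return [b'{"meters": []}']


    app = TestApp(fake_api)
    resp = app.get("/v2/meters", status=200)
    assert resp.json == {"meters": []}
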
diff --git a/tox.ini b/tox.ini
index 10a211e4..ce95a831 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,15 +35,6 @@ passenv = {[testenv]passenv} HEAT_* CEILOMETER_* GNOCCHI_* AODH_* PANKO_* GLANCE
commands =
bash -c 'cd ceilometer/tests/integration/gabbi/gabbits-live && gabbi-run -x < autoscaling.yaml'
-# NOTE(chdent): The gabbi tests are also run under the other functional
-# tox targets. This target simply provides a target to directly run just
-# gabbi tests without needing to do discovery across the entire body of
-# tests.
-[testenv:gabbi]
-setenv = OS_TEST_PATH=ceilometer/tests/functional/gabbi
-passenv = CEILOMETER_*
-commands = pifpaf run mongodb {toxinidir}/tools/pretty_tox.sh "{posargs}"
-
[testenv:cover]
setenv = OS_TEST_PATH=ceilometer/tests
commands =
@@ -79,10 +70,3 @@ show-source = True
import_exceptions =
ceilometer.i18n
local-check-factory = ceilometer.hacking.checks.factory
-
-[testenv:api-ref]
-whitelist_externals = rm
-commands =
- rm -rf api-ref/build
- sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html
-
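
Both deleted tox targets lose their purpose with the API: `[testenv:gabbi]` ran only the HTTP-level gabbi tests against the v2 endpoints, and `[testenv:api-ref]` built the (also deleted) api-ref documentation tree. For reference, the gabbi target relied on gabbi's standard unittest loader, roughly as sketched below; the directory path mirrors the removed `OS_TEST_PATH`, and the host and port are placeholders:

    from gabbi import driver


    def load_tests(loader, tests, pattern):
        # Turn every *.yaml file in the gabbits directory into test cases
        # aimed at a running API endpoint (placeholder host/port).
        return driver.build_tests(
            "ceilometer/tests/functional/gabbi/gabbits",
            loader,
            host="127.0.0.1",
            port=8777,
        )
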