author     Kiall Mac Innes <kiall@managedit.ie>  2012-09-25 08:00:23 +0100
committer  Kiall Mac Innes <kiall@managedit.ie>  2012-09-25 08:04:13 +0100
commit     e851d552c80a450d2e564b2b20432fd656699d01 (patch)
tree       71e0386b02d320e5d716e2a33fcc8860ce0aa027
download   designate-e851d552c80a450d2e564b2b20432fd656699d01.tar.gz
Initial Public Commit.
The previous history contained sensitive information.
-rw-r--r--  .gitignore  12
-rw-r--r--  .gitreview  4
-rw-r--r--  MANIFEST.in  4
-rw-r--r--  TODO.md  9
-rwxr-xr-x  bin/moniker-agent-bind9  38
-rwxr-xr-x  bin/moniker-api  34
-rwxr-xr-x  bin/moniker-central  38
-rw-r--r--  etc/moniker-agent-bind9.conf  18
-rw-r--r--  etc/moniker-api.conf  18
-rw-r--r--  etc/moniker-central.conf  18
-rw-r--r--  moniker.sublime-project  54
-rw-r--r--  moniker/__init__.py  30
-rw-r--r--  moniker/agent/__init__.py  0
-rw-r--r--  moniker/agent/api.py  94
-rw-r--r--  moniker/agent/bind9.py  138
-rw-r--r--  moniker/api/__init__.py  45
-rw-r--r--  moniker/api/debug.py  27
-rw-r--r--  moniker/api/v1.py  24
-rw-r--r--  moniker/api/v1/__init__.py  22
-rw-r--r--  moniker/api/v1/domains.py  126
-rw-r--r--  moniker/api/v1/records.py  128
-rw-r--r--  moniker/api/v1/schemas.py  154
-rw-r--r--  moniker/api/v1/servers.py  125
-rw-r--r--  moniker/central/__init__.py  0
-rw-r--r--  moniker/central/api.py  191
-rw-r--r--  moniker/central/manager.py  148
-rw-r--r--  moniker/database/__init__.py  75
-rw-r--r--  moniker/database/sqlalchemy/__init__.py  207
-rw-r--r--  moniker/database/sqlalchemy/models.py  169
-rw-r--r--  moniker/database/sqlalchemy/session.py  54
-rw-r--r--  moniker/database/sqlalchemy/types.py  62
-rw-r--r--  moniker/database/sqlalchemy/utils.py  130
-rw-r--r--  moniker/exceptions.py  52
-rw-r--r--  moniker/openstack/__init__.py  0
-rw-r--r--  moniker/openstack/common/__init__.py  0
-rw-r--r--  moniker/openstack/common/cfg.py  1653
-rw-r--r--  moniker/openstack/common/context.py  81
-rw-r--r--  moniker/openstack/common/excutils.py  49
-rw-r--r--  moniker/openstack/common/gettextutils.py  33
-rw-r--r--  moniker/openstack/common/importutils.py  59
-rw-r--r--  moniker/openstack/common/iniparser.py  130
-rw-r--r--  moniker/openstack/common/jsonutils.py  154
-rw-r--r--  moniker/openstack/common/local.py  37
-rw-r--r--  moniker/openstack/common/log.py  453
-rw-r--r--  moniker/openstack/common/loopingcall.py  88
-rw-r--r--  moniker/openstack/common/manager.py  68
-rw-r--r--  moniker/openstack/common/notifier/__init__.py  14
-rw-r--r--  moniker/openstack/common/notifier/api.py  181
-rw-r--r--  moniker/openstack/common/notifier/log_notifier.py  35
-rw-r--r--  moniker/openstack/common/notifier/no_op_notifier.py  19
-rw-r--r--  moniker/openstack/common/notifier/rabbit_notifier.py  46
-rw-r--r--  moniker/openstack/common/notifier/test_notifier.py  22
-rw-r--r--  moniker/openstack/common/periodic_task.py  112
-rw-r--r--  moniker/openstack/common/rpc/__init__.py  266
-rw-r--r--  moniker/openstack/common/rpc/amqp.py  418
-rw-r--r--  moniker/openstack/common/rpc/common.py  311
-rw-r--r--  moniker/openstack/common/rpc/dispatcher.py  150
-rw-r--r--  moniker/openstack/common/rpc/impl_fake.py  184
-rw-r--r--  moniker/openstack/common/rpc/impl_kombu.py  751
-rw-r--r--  moniker/openstack/common/rpc/impl_qpid.py  599
-rw-r--r--  moniker/openstack/common/rpc/impl_zmq.py  718
-rw-r--r--  moniker/openstack/common/rpc/matchmaker.py  258
-rw-r--r--  moniker/openstack/common/rpc/proxy.py  165
-rw-r--r--  moniker/openstack/common/service.py  336
-rw-r--r--  moniker/openstack/common/threadgroup.py  118
-rw-r--r--  moniker/openstack/common/timeutils.py  126
-rw-r--r--  moniker/schema.py  110
-rw-r--r--  moniker/schemas.py  145
-rw-r--r--  moniker/utils.py  35
-rw-r--r--  openstack-common.conf  3
-rw-r--r--  setup.cfg  8
-rwxr-xr-x  setup.py  35
-rw-r--r--  templates/bind9-config.jinja2  3
-rw-r--r--  templates/bind9-zone.jinja2  17
-rw-r--r--  tools/pip-requires  6
-rw-r--r--  tools/test-requires  4
-rw-r--r--  tox.ini  21
77 files changed, 10269 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..1cb1b024
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,12 @@
+*.pyc
+*.dat
+TAGS
+*.egg-info
+build
+.coverage
+.tox
+cover
+venv
+*.sublime-workspace
+*.sqlite
+var/*
diff --git a/.gitreview b/.gitreview
new file mode 100644
index 00000000..0947087f
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.openstack.org
+port=29418
+project=stackforge/moniker.git
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 00000000..a17ee1f0
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+exclude .gitignore
+exclude .gitreview
+
+global-exclude *.pyc
diff --git a/TODO.md b/TODO.md
new file mode 100644
index 00000000..4723ba0f
--- /dev/null
+++ b/TODO.md
@@ -0,0 +1,9 @@
+
+TODOs:
+
+* Re-Add Keystone integration.
+* Fix up the Bind9 agent implementation so it can be considered even remotely reliable
+* Re-Add PowerDNS agent implementation.
+* Database migrations
+* Unit Tests!!
+* Integration with other OpenStack services, e.g. Nova and Quantum
diff --git a/bin/moniker-agent-bind9 b/bin/moniker-agent-bind9
new file mode 100755
index 00000000..8948030f
--- /dev/null
+++ b/bin/moniker-agent-bind9
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+import eventlet
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import service
+from moniker.agent import bind9
+
+eventlet.monkey_patch()
+
+config_files = cfg.find_config_files(project='moniker',
+ prog='moniker-agent-bind9')
+config_files.append('./etc/moniker-agent-bind9.conf')
+
+cfg.CONF(sys.argv[1:], project='moniker', prog='moniker-agent-bind9',
+ default_config_files=config_files)
+
+logging.setup('moniker')
+
+serv = service.Service(cfg.CONF.host, bind9.Manager(cfg.CONF.host), 10)
+
+launcher = service.launch(serv)
+launcher.wait()
diff --git a/bin/moniker-api b/bin/moniker-api
new file mode 100755
index 00000000..94faff91
--- /dev/null
+++ b/bin/moniker-api
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.api import app
+
+config_files = cfg.find_config_files(project='moniker', prog='moniker-api')
+config_files.append('./etc/moniker-api.conf')
+
+cfg.CONF(sys.argv[1:], project='moniker', prog='moniker-api',
+ default_config_files=config_files)
+
+logging.setup('moniker')
+
+
+if cfg.CONF.verbose or cfg.CONF.debug:
+ app.debug = True
+
+app.run(host=cfg.CONF.api_host, port=cfg.CONF.api_port)
diff --git a/bin/moniker-central b/bin/moniker-central
new file mode 100755
index 00000000..04b30f59
--- /dev/null
+++ b/bin/moniker-central
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import sys
+import eventlet
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import service
+from moniker.central import manager
+
+eventlet.monkey_patch()
+
+config_files = cfg.find_config_files(project='moniker',
+ prog='moniker-central')
+config_files.append('./etc/moniker-central.conf')
+
+cfg.CONF(sys.argv[1:], project='moniker', prog='moniker-central',
+ default_config_files=config_files)
+
+logging.setup('moniker')
+
+serv = service.Service(cfg.CONF.host, manager.Manager(cfg.CONF.host), 10)
+
+launcher = service.launch(serv)
+launcher.wait()
diff --git a/etc/moniker-agent-bind9.conf b/etc/moniker-agent-bind9.conf
new file mode 100644
index 00000000..d690246e
--- /dev/null
+++ b/etc/moniker-agent-bind9.conf
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug = False
+
+#
+logging_context_format_string = %(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s %(message)s
+
+#
+control_exchange = moniker
+
+#
+state_path = ./var/
+
+#
+templates_path = ./templates/
diff --git a/etc/moniker-api.conf b/etc/moniker-api.conf
new file mode 100644
index 00000000..3a60c531
--- /dev/null
+++ b/etc/moniker-api.conf
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug = False
+
+# Address to bind the API server
+api_host = 0.0.0.0
+
+# Port to bind the API server to
+api_port = 9001
+
+#
+logging_context_format_string = %(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s %(message)s
+
+#
+control_exchange = moniker
diff --git a/etc/moniker-central.conf b/etc/moniker-central.conf
new file mode 100644
index 00000000..70612051
--- /dev/null
+++ b/etc/moniker-central.conf
@@ -0,0 +1,18 @@
+[DEFAULT]
+# Show more verbose log output (sets INFO log level output)
+verbose = True
+
+# Show debugging output in logs (sets DEBUG log level output)
+debug = False
+
+# SQLAlchemy connection string.
+sql_connection = postgres://postgres:postgres@127.0.0.1:5432/moniker
+
+#
+notification_driver=moniker.openstack.common.notifier.rabbit_notifier
+
+#
+logging_context_format_string = %(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s %(tenant)s] %(instance)s %(message)s
+
+#
+control_exchange = moniker
diff --git a/moniker.sublime-project b/moniker.sublime-project
new file mode 100644
index 00000000..bbfc742d
--- /dev/null
+++ b/moniker.sublime-project
@@ -0,0 +1,54 @@
+{
+ "folders":
+ [
+ {
+ "file_exclude_patterns":
+ [
+ "*.pyc",
+ "*.pyo",
+ "*.exe",
+ "*.dll",
+ "*.obj",
+ "*.o",
+ "*.a",
+ "*.lib",
+ "*.so",
+ "*.dylib",
+ "*.ncb",
+ "*.sdf",
+ "*.suo",
+ "*.pdb",
+ "*.idb",
+ ".DS_Store",
+ "*.class",
+ "*.psd",
+ "*.db",
+ ".vagrant",
+ ".noseids"
+ ],
+ "folder_exclude_patterns":
+ [
+ ".svn",
+ ".git",
+ ".hg",
+ "CVS",
+ "*.egg",
+ "*.egg-info"
+ ],
+ "path": "."
+ }
+ ],
+ "settings":
+ {
+ "default_line_ending": "unix",
+ "detect_indentation": false,
+ "ensure_newline_at_eof_on_save": true,
+ "rulers":
+ [
+ 79
+ ],
+ "tab_size": 4,
+ "translate_tabs_to_spaces": true,
+ "trim_trailing_white_space_on_save": true
+ }
+}
diff --git a/moniker/__init__.py b/moniker/__init__.py
new file mode 100644
index 00000000..19f7f355
--- /dev/null
+++ b/moniker/__init__.py
@@ -0,0 +1,30 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import socket
+from moniker.openstack.common import cfg
+
+cfg.CONF.register_opts([
+ cfg.StrOpt('host', default=socket.gethostname(),
+ help='Name of this node'),
+ # TODO(kiall): Common RPC has nova as the default -_-
+ # cfg.StrOpt('control_exchange', default='moniker',
+ # help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+ cfg.StrOpt('central-topic', default='central', help='Central Topic'),
+ cfg.StrOpt('agent-topic', default='agent', help='Agent Topic'),
+ cfg.StrOpt('state-path', default='/var/lib/moniker', help='State Path'),
+    cfg.StrOpt('templates-path', default='/usr/share/moniker/templates',
+ help='Templates Path'),
+])
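The options above are registered with dashed names but read back through underscored attributes (the rest of the tree uses cfg.CONF.central_topic, cfg.CONF.state_path, and so on), because cfg converts dashes to underscores for the option destination. A minimal standalone sketch of that pattern; the 'demo-topic' name is illustrative, not part of moniker:

    # Sketch: a dashed option name becomes an underscored attribute on CONF.
    import sys
    from moniker.openstack.common import cfg

    cfg.CONF.register_opts([
        cfg.StrOpt('demo-topic', default='demo', help='Demo Topic'),
    ])

    cfg.CONF(sys.argv[1:], project='moniker')

    assert cfg.CONF.demo_topic == 'demo'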
diff --git a/moniker/agent/__init__.py b/moniker/agent/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/moniker/agent/__init__.py
diff --git a/moniker/agent/api.py b/moniker/agent/api.py
new file mode 100644
index 00000000..d045339a
--- /dev/null
+++ b/moniker/agent/api.py
@@ -0,0 +1,94 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common.rpc.proxy import RpcProxy
+
+DEFAULT_VERSION = '1.0'
+
+LOG = logging.getLogger(__name__)
+RPC = RpcProxy(cfg.CONF.agent_topic, DEFAULT_VERSION)
+
+
+# Domain Methods
+def create_domain(context, domain):
+ msg = {
+ 'method': 'create_domain',
+ 'args': {
+ 'domain': domain,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
+
+
+def update_domain(context, domain):
+ msg = {
+ 'method': 'update_domain',
+ 'args': {
+ 'domain': domain,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
+
+
+def delete_domain(context, domain_id):
+ msg = {
+ 'method': 'delete_domain',
+ 'args': {
+ 'domain_id': domain_id,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
+
+
+# Record Methods
+def create_record(context, domain, record):
+ msg = {
+ 'method': 'create_record',
+ 'args': {
+ 'domain': domain,
+ 'record': record,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
+
+
+def update_record(context, domain, record):
+ msg = {
+ 'method': 'update_record',
+ 'args': {
+ 'domain': domain,
+ 'record': record,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
+
+
+def delete_record(context, domain, record_id):
+ msg = {
+ 'method': 'delete_record',
+ 'args': {
+ 'domain': domain,
+ 'record_id': record_id,
+ },
+ }
+
+ return RPC.fanout_cast(context, msg)
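Every helper above is a fire-and-forget fanout cast on the agent topic, so all connected agents receive the message and no reply is expected. A minimal sketch of how a caller would use it, assuming a running RPC bus; the domain payload values are illustrative:

    # Sketch: fanout-cast a new domain to every agent (no reply expected).
    from moniker.openstack.common.context import get_admin_context
    from moniker.agent import api as agent_api

    context = get_admin_context()
    domain = {'id': 'a1b2c3', 'name': 'example.com.'}  # illustrative values

    agent_api.create_domain(context, domain)  # returns immediately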
diff --git a/moniker/agent/bind9.py b/moniker/agent/bind9.py
new file mode 100644
index 00000000..0ae8b8a3
--- /dev/null
+++ b/moniker/agent/bind9.py
@@ -0,0 +1,138 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import os
+import subprocess
+from jinja2 import Template
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import rpc
+from moniker.openstack.common import manager
+from moniker.openstack.common.context import get_admin_context
+from moniker.openstack.common.rpc import dispatcher as rpc_dispatcher
+from moniker.openstack.common.periodic_task import periodic_task
+from moniker.central import api as central_api
+
+LOG = logging.getLogger(__name__)
+
+cfg.CONF.register_opts([
+ cfg.StrOpt('rndc-path', default='/usr/sbin/rndc', help='RNDC Path'),
+ cfg.StrOpt('rndc-host', default='127.0.0.1', help='RNDC Host'),
+ cfg.IntOpt('rndc-port', default=953, help='RNDC Port'),
+ cfg.StrOpt('rndc-config-file', default='/etc/rndc.conf',
+ help='RNDC Config File'),
+ cfg.StrOpt('rndc-key-file', default=None, help='RNDC Key File'),
+])
+
+
+class Manager(manager.Manager):
+ def init_host(self):
+ LOG.warn('Init Host')
+
+ self.init_rpc()
+
+ def init_rpc(self):
+ self.connection = rpc.create_connection()
+ dispatcher = rpc_dispatcher.RpcDispatcher([self])
+ self.connection.create_consumer(cfg.CONF.agent_topic, dispatcher,
+ fanout=True)
+
+ self.connection.consume_in_thread()
+
+ def create_domain(self, context, domain):
+ LOG.debug('Create Domain')
+ self._sync_domain(domain=domain)
+
+ def update_domain(self, context, domain):
+ LOG.debug('Update Domain')
+ self._sync_domain(domain=domain)
+
+ def delete_domain(self, context, domain_id):
+ LOG.debug('Delete Domain')
+
+ raise NotImplementedError()
+
+ def create_record(self, context, domain, record):
+ LOG.debug('Create Record')
+        self._sync_domain(domain)
+
+ def update_record(self, context, domain, record):
+ LOG.debug('Update Record')
+ self._sync_domain(domain)
+
+ def delete_record(self, context, domain, record_id):
+ LOG.debug('Delete Record')
+ self._sync_domain(domain)
+
+ def _sync_domains(self):
+ """ Sync the list of domains this server handles """
+ # TODO: Rewrite this entire thing ASAP
+ LOG.debug('Synchronising domains')
+
+ admin_context = get_admin_context()
+
+ domains = central_api.get_domains(admin_context)
+
+ template_path = os.path.join(os.path.abspath(
+ cfg.CONF.templates_path), 'bind9-config.jinja2')
+
+ output_path = os.path.join(os.path.abspath(cfg.CONF.state_path),
+ 'bind9', 'zones.config')
+
+ self._render_template(template_path, output_path, domains=domains,
+ state_path=os.path.abspath(cfg.CONF.state_path))
+
+    def _sync_domain(self, domain):
+        """ Sync a single domain's zone file """
+        # TODO: Rewrite this entire thing ASAP
+        LOG.debug('Synchronising Domain: %s' % domain['id'])
+
+        admin_context = get_admin_context()
+
+        servers = central_api.get_servers(admin_context)
+        records = central_api.get_records(admin_context, domain['id'])
+
+ template_path = os.path.join(os.path.abspath(
+ cfg.CONF.templates_path), 'bind9-zone.jinja2')
+
+ output_path = os.path.join(os.path.abspath(cfg.CONF.state_path),
+ 'bind9', '%s.zone' % domain['id'])
+
+ self._render_template(template_path, output_path, servers=servers,
+ domain=domain, records=records)
+
+ self._sync_domains()
+
+ rndc_call = [
+ 'sudo',
+ cfg.CONF.rndc_path,
+ '-c', cfg.CONF.rndc_config_file,
+ '-s', cfg.CONF.rndc_host,
+ '-p', str(cfg.CONF.rndc_port),
+ ]
+
+ if cfg.CONF.rndc_key_file:
+        rndc_call.extend(['-k', cfg.CONF.rndc_key_file])
+
+ rndc_call.extend(['reload', domain['name']])
+
+ subprocess.call(rndc_call)
+
+ def _render_template(self, template_path, output_path, **template_context):
+ # TODO: Handle failures...
+ with open(template_path) as template_fh:
+ template = Template(template_fh.read())
+
+ content = template.render(**template_context)
+
+ with open(output_path, 'w') as output_fh:
+ output_fh.write(content)
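_sync_domain regenerates the zone file from a Jinja2 template and then shells out to rndc to reload the zone. A minimal standalone sketch of the render step, with illustrative paths and values:

    # Sketch of the Jinja2 render step performed by _render_template above.
    from jinja2 import Template

    with open('templates/bind9-zone.jinja2') as template_fh:  # illustrative
        template = Template(template_fh.read())

    content = template.render(domain={'name': 'example.com.', 'serial': 1},
                              servers=[], records=[])

    with open('var/bind9/example.zone', 'w') as output_fh:  # illustrative
        output_fh.write(content)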
diff --git a/moniker/api/__init__.py b/moniker/api/__init__.py
new file mode 100644
index 00000000..f02d5f2f
--- /dev/null
+++ b/moniker/api/__init__.py
@@ -0,0 +1,45 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+from moniker.openstack.common import cfg
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common.context import get_admin_context
+from moniker import central
+from moniker.api import v1
+from moniker.api import debug
+
+# Allows us to serialize datetime's etc
+flask.helpers.json = jsonutils
+
+cfg.CONF.register_opts([
+ cfg.StrOpt('api_host', default='0.0.0.0',
+ help='API Host'),
+ cfg.IntOpt('api_port', default=9001,
+ help='API Port Number'),
+])
+
+app = flask.Flask('moniker.api')
+
+# Blueprints
+app.register_blueprint(v1.blueprint, url_prefix='/v1')
+app.register_blueprint(debug.blueprint, url_prefix='/debug')
+
+
+@app.before_request
+def before_request():
+ flask.request.context = get_admin_context() # Temp hack
+ flask.request.context.tenant = '12345' # Temp hack
+ flask.request.context.user = '12345' # Temp hack
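Swapping flask.helpers.json for jsonutils lets flask.jsonify serialize values such as datetimes that the stdlib json module rejects. A minimal sketch of that difference (Python 2, matching the rest of this commit):

    # Sketch: jsonutils serializes datetimes, which stdlib json raises on.
    import datetime
    from moniker.openstack.common import jsonutils

    record = {'created_at': datetime.datetime(2012, 9, 25, 8, 0, 23)}
    print jsonutils.dumps(record)  # the datetime is rendered as a string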
diff --git a/moniker/api/debug.py b/moniker/api/debug.py
new file mode 100644
index 00000000..c2c19e6a
--- /dev/null
+++ b/moniker/api/debug.py
@@ -0,0 +1,27 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+blueprint = flask.Blueprint('debug', __name__)
+
+
+@blueprint.route('/config', methods=['GET'])
+def list_config():
+ return flask.jsonify(cfg.CONF)
diff --git a/moniker/api/v1.py b/moniker/api/v1.py
new file mode 100644
index 00000000..ee98b532
--- /dev/null
+++ b/moniker/api/v1.py
@@ -0,0 +1,24 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+import functools
+from moniker.openstack.common import log as logging
+from moniker.schemas import (domain_schema, domains_schema, record_schema,
+ records_schema, server_schema, servers_schema)
+
+LOG = logging.getLogger(__name__)
+
+blueprint = flask.Blueprint('v1', __name__)
diff --git a/moniker/api/v1/__init__.py b/moniker/api/v1/__init__.py
new file mode 100644
index 00000000..1638196c
--- /dev/null
+++ b/moniker/api/v1/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+
+blueprint = flask.Blueprint('v1', __name__)
+
+import moniker.api.v1.servers
+import moniker.api.v1.domains
+import moniker.api.v1.records
diff --git a/moniker/api/v1/domains.py b/moniker/api/v1/domains.py
new file mode 100644
index 00000000..7df2a51b
--- /dev/null
+++ b/moniker/api/v1/domains.py
@@ -0,0 +1,126 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+from moniker.openstack.common import log as logging
+from moniker import exceptions
+from moniker.api.v1 import blueprint
+from moniker.api.v1.schemas import domain_schema, domains_schema
+from moniker.central import api as central_api
+
+LOG = logging.getLogger(__name__)
+
+
+def _append_domain_links(values, domain_id):
+ values['self'] = flask.url_for('.get_domain', domain_id=domain_id)
+ values['records'] = flask.url_for('.get_records', domain_id=domain_id)
+ values['schema'] = flask.url_for('.get_domain_schema')
+
+ return values
+
+
+@blueprint.route('/schemas/domain', methods=['GET'])
+def get_domain_schema():
+ return flask.jsonify(domain_schema.raw())
+
+
+@blueprint.route('/schemas/domains', methods=['GET'])
+def get_domains_schema():
+ return flask.jsonify(domains_schema.raw())
+
+
+@blueprint.route('/domains', methods=['POST'])
+def create_domain():
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ domain_schema.validate(values)
+ domain = central_api.create_domain(context, values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.DuplicateDomain:
+ return flask.Response(status=409)
+ else:
+ domain = _append_domain_links(domain, domain['id'])
+
+ domain = domain_schema.filter(domain)
+
+ response = flask.jsonify(domain)
+    response.status_code = 201
+ response.location = flask.url_for('.get_domain',
+ domain_id=domain['id'])
+ return response
+
+
+@blueprint.route('/domains', methods=['GET'])
+def get_domains():
+ context = flask.request.context
+
+ domains = central_api.get_domains(context)
+
+ domains = domains_schema.filter(domains)
+
+ return flask.jsonify(domains=domains)
+
+
+@blueprint.route('/domains/<domain_id>', methods=['GET'])
+def get_domain(domain_id):
+ context = flask.request.context
+
+ try:
+ domain = central_api.get_domain(context, domain_id)
+ except exceptions.DomainNotFound:
+ return flask.Response(status=404)
+ else:
+ domain = _append_domain_links(domain, domain['id'])
+
+ domain = domain_schema.filter(domain)
+
+ return flask.jsonify(domain)
+
+
+@blueprint.route('/domains/<domain_id>', methods=['PUT'])
+def update_domain(domain_id):
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ domain_schema.validate(values)
+ domain = central_api.update_domain(context, domain_id, values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.DomainNotFound:
+ return flask.Response(status=404)
+ except exceptions.DuplicateDomain:
+ return flask.Response(status=409)
+ else:
+ domain = _append_domain_links(domain, domain['id'])
+
+ domain = domain_schema.filter(domain)
+
+ return flask.jsonify(domain)
+
+
+@blueprint.route('/domains/<domain_id>', methods=['DELETE'])
+def delete_domain(domain_id):
+ context = flask.request.context
+
+ try:
+ central_api.delete_domain(context, domain_id)
+ except exceptions.DomainNotFound:
+ return flask.Response(status=404)
+ else:
+ return flask.Response(status=200)
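The blueprint above gives the usual CRUD status mapping: 201 with a Location header on create, 200 on read/update/delete, 400 for schema violations, 404 for unknown domains, and 409 for duplicates. A minimal sketch of exercising it with Flask's test client, assuming moniker-central is running and reachable over RPC; the payload values are illustrative:

    # Sketch: drive the v1 domains endpoints through Flask's test client.
    import json
    from moniker.api import app

    client = app.test_client()

    body = json.dumps({'name': 'example.com',
                       'email': 'hostmaster@example.com'})
    resp = client.post('/v1/domains', content_type='application/json',
                       data=body)
    assert resp.status_code in (201, 400, 409)

    resp = client.get('/v1/domains')
    assert resp.status_code == 200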
diff --git a/moniker/api/v1/records.py b/moniker/api/v1/records.py
new file mode 100644
index 00000000..443531cf
--- /dev/null
+++ b/moniker/api/v1/records.py
@@ -0,0 +1,128 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+from moniker.openstack.common import log as logging
+from moniker import exceptions
+from moniker.api.v1 import blueprint
+from moniker.api.v1.schemas import record_schema, records_schema
+from moniker.central import api as central_api
+
+LOG = logging.getLogger(__name__)
+
+
+def _append_record_links(values, domain_id, record_id):
+ values['self'] = flask.url_for('.get_record', domain_id=domain_id,
+ record_id=record_id)
+ values['domain'] = flask.url_for('.get_domain', domain_id=domain_id)
+ values['schema'] = flask.url_for('.get_record_schema')
+
+ return values
+
+
+@blueprint.route('/schemas/record', methods=['GET'])
+def get_record_schema():
+ return flask.jsonify(record_schema.raw())
+
+
+@blueprint.route('/schemas/records', methods=['GET'])
+def get_records_schema():
+ return flask.jsonify(records_schema.raw())
+
+
+@blueprint.route('/domains/<domain_id>/records', methods=['POST'])
+def create_record(domain_id):
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ record_schema.validate(values)
+ record = central_api.create_record(context, domain_id, values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.DuplicateRecord:
+ return flask.Response(status=409)
+ else:
+ record = _append_record_links(record, record['domain_id'],
+ record['id'])
+ record = record_schema.filter(record)
+
+ response = flask.jsonify(record)
+        response.status_code = 201
+ response.location = flask.url_for('.get_record',
+ domain_id=domain_id,
+ record_id=record['id'])
+ return response
+
+
+@blueprint.route('/domains/<domain_id>/records', methods=['GET'])
+def get_records(domain_id):
+ context = flask.request.context
+
+ records = central_api.get_records(context, domain_id)
+
+ return flask.jsonify(records=records)
+
+
+@blueprint.route('/domains/<domain_id>/records/<record_id>', methods=['GET'])
+def get_record(domain_id, record_id):
+ context = flask.request.context
+
+ try:
+ record = central_api.get_record(context, domain_id, record_id)
+ except exceptions.RecordNotFound:
+ return flask.Response(status=404)
+ else:
+ record = _append_record_links(record, record['domain_id'],
+ record['id'])
+ record = record_schema.filter(record)
+
+ return flask.jsonify(record)
+
+
+@blueprint.route('/domains/<domain_id>/records/<record_id>', methods=['PUT'])
+def update_record(domain_id, record_id):
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ record_schema.validate(values)
+ record = central_api.update_record(context, domain_id, record_id,
+ values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.RecordNotFound:
+ return flask.Response(status=404)
+ except exceptions.DuplicateRecord:
+ return flask.Response(status=409)
+ else:
+ record = _append_record_links(record, record['domain_id'],
+ record['id'])
+ record = record_schema.filter(record)
+
+ return flask.jsonify(record)
+
+
+@blueprint.route('/domains/<domain_id>/records/<record_id>',
+ methods=['DELETE'])
+def delete_record(domain_id, record_id):
+ context = flask.request.context
+
+ try:
+ central_api.delete_record(context, domain_id, record_id)
+ except exceptions.RecordNotFound:
+ return flask.Response(status=404)
+ else:
+ return flask.Response(status=200)
diff --git a/moniker/api/v1/schemas.py b/moniker/api/v1/schemas.py
new file mode 100644
index 00000000..6d4cdcd8
--- /dev/null
+++ b/moniker/api/v1/schemas.py
@@ -0,0 +1,154 @@
+from moniker.schema import Schema, CollectionSchema
+
+SERVER_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Server identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'Server DNS name',
+ 'maxLength': 255,
+ 'pattern': '^.+[^\\.]$', # TODO: Figure out the correct regex
+ 'required': True,
+ },
+ 'ipv4': {
+ 'type': 'string',
+ 'description': 'IPv4 address of server',
+ 'format': 'ip-address',
+ 'required': True,
+ },
+ 'ipv6': {
+ 'type': 'string',
+ 'description': 'IPv6 address of server',
+ 'format': 'ipv6',
+ },
+ 'created_at': {
+ 'type': 'string',
+ 'description': 'Date and time of server creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+ 'description': 'Date and time of last server update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+SERVER_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'describedby', 'href': '{schema}'},
+]
+
+DOMAIN_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Domain identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'Domain name',
+ 'maxLength': 255,
+ 'pattern': '^.+[^\\.]$', # TODO: Figure out the correct regex
+ 'required': True,
+ },
+ 'email': {
+ 'type': 'string',
+ 'description': 'Hostmaster email address',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'ttl': {
+ 'type': 'integer',
+ 'description': 'Time to live',
+ },
+ 'serial': {
+ 'type': 'integer',
+ 'description': 'Serial Number',
+ },
+ 'created_at': {
+ 'type': 'string',
+        'description': 'Date and time of domain creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+        'description': 'Date and time of last domain update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'records': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+DOMAIN_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'records', 'href': '{records}', 'method': 'GET'},
+ {'rel': 'describedby', 'href': '{schema}', 'method': 'GET'},
+]
+
+RECORD_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Record identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'DNS Record Name',
+ 'maxLength': 255,
+ 'pattern': '^.+[^\\.]$', # TODO: Figure out the correct regex
+ 'required': True,
+ },
+ 'type': {
+ 'type': 'string',
+ 'description': 'DNS Record Type',
+ 'enum': ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS'],
+ },
+ 'data': {
+ 'type': 'string',
+ 'description': 'DNS Record Value',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'ttl': {
+ 'type': 'integer',
+ 'description': 'Time to live.',
+        'minimum': 60,  # TODO: This should be a config option
+ },
+ 'created_at': {
+ 'type': 'string',
+        'description': 'Date and time of record creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+        'description': 'Date and time of last record update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'domain': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+RECORD_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'domain', 'href': '{domain}'},
+ {'rel': 'describedby', 'href': '{schema}'},
+]
+
+server_schema = Schema('server', SERVER_PROPERTIES, SERVER_LINKS)
+servers_schema = CollectionSchema('servers', server_schema)
+
+domain_schema = Schema('domain', DOMAIN_PROPERTIES, DOMAIN_LINKS)
+domains_schema = CollectionSchema('domains', domain_schema)
+
+record_schema = Schema('record', RECORD_PROPERTIES, RECORD_LINKS)
+records_schema = CollectionSchema('records', record_schema)
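These property dictionaries follow JSON Schema draft 3 conventions ('required' is a boolean flag on each property rather than a list on the object). A minimal sketch of validating a value against them with the external jsonschema package — an assumption here, since this commit's own Schema class (moniker/schema.py) wraps validation behind validate()/filter():

    # Sketch: draft-3 style validation of a record dict. Using jsonschema
    # directly is an assumption; moniker's Schema class provides validate().
    from jsonschema import validate

    from moniker.api.v1.schemas import RECORD_PROPERTIES

    record = {'name': 'www.example.com', 'type': 'A', 'data': '192.0.2.1'}
    validate(record, {'type': 'object', 'properties': RECORD_PROPERTIES})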
diff --git a/moniker/api/v1/servers.py b/moniker/api/v1/servers.py
new file mode 100644
index 00000000..fe41cf91
--- /dev/null
+++ b/moniker/api/v1/servers.py
@@ -0,0 +1,125 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import flask
+from moniker.openstack.common import log as logging
+from moniker import exceptions
+from moniker.api.v1 import blueprint
+from moniker.api.v1.schemas import server_schema, servers_schema
+from moniker.central import api as central_api
+
+LOG = logging.getLogger(__name__)
+
+
+def _append_server_links(values, server_id):
+ values['self'] = flask.url_for('.get_server', server_id=server_id)
+ values['schema'] = flask.url_for('.get_server_schema')
+
+ return values
+
+
+@blueprint.route('/schemas/server', methods=['GET'])
+def get_server_schema():
+ return flask.jsonify(server_schema.raw())
+
+
+@blueprint.route('/schemas/servers', methods=['GET'])
+def get_servers_schema():
+ return flask.jsonify(servers_schema.raw())
+
+
+@blueprint.route('/servers', methods=['POST'])
+def create_server():
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ server_schema.validate(values)
+        server = central_api.create_server(context, values=values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.DuplicateServer:
+ return flask.Response(status=409)
+ else:
+ server = _append_server_links(server, server['id'])
+
+ server = server_schema.filter(server)
+
+ response = flask.jsonify(server)
+    response.status_code = 201
+ response.location = flask.url_for('.get_server',
+ server_id=server['id'])
+ return response
+
+
+@blueprint.route('/servers', methods=['GET'])
+def get_servers():
+ context = flask.request.context
+
+ servers = central_api.get_servers(context)
+
+ servers = servers_schema.filter(servers)
+
+ return flask.jsonify(servers=servers)
+
+
+@blueprint.route('/servers/<server_id>', methods=['GET'])
+def get_server(server_id):
+ context = flask.request.context
+
+ try:
+ server = central_api.get_server(context, server_id)
+ except exceptions.ServerNotFound:
+ return flask.Response(status=404)
+ else:
+ server = _append_server_links(server, server['id'])
+
+ server = server_schema.filter(server)
+
+ return flask.jsonify(server)
+
+
+@blueprint.route('/servers/<server_id>', methods=['PUT'])
+def update_server(server_id):
+ context = flask.request.context
+ values = flask.request.json
+
+ try:
+ server_schema.validate(values)
+ server = central_api.update_server(context, server_id, values=values)
+ except exceptions.InvalidObject, e:
+ return flask.Response(status=400, response=str(e))
+ except exceptions.ServerNotFound:
+ return flask.Response(status=404)
+ except exceptions.DuplicateServer:
+ return flask.Response(status=409)
+ else:
+ server = _append_server_links(server, server['id'])
+
+ server = server_schema.filter(server)
+
+ return flask.jsonify(server)
+
+
+@blueprint.route('/servers/<server_id>', methods=['DELETE'])
+def delete_server(server_id):
+ context = flask.request.context
+
+ try:
+ central_api.delete_server(context, server_id)
+ except exceptions.ServerNotFound:
+ return flask.Response(status=404)
+ else:
+ return flask.Response(status=200)
diff --git a/moniker/central/__init__.py b/moniker/central/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/moniker/central/__init__.py
diff --git a/moniker/central/api.py b/moniker/central/api.py
new file mode 100644
index 00000000..c1e31b82
--- /dev/null
+++ b/moniker/central/api.py
@@ -0,0 +1,191 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common.rpc.proxy import RpcProxy
+
+DEFAULT_VERSION = '1.0'
+
+LOG = logging.getLogger(__name__)
+RPC = RpcProxy(cfg.CONF.central_topic, DEFAULT_VERSION)
+
+
+def create_server(context, values):
+ msg = {
+ 'method': 'create_server',
+ 'args': {
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_servers(context):
+ msg = {
+ 'method': 'get_servers',
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_server(context, server_id):
+ msg = {
+ 'method': 'get_server',
+ 'args': {
+ 'server_id': server_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def update_server(context, server_id, values):
+ msg = {
+ 'method': 'update_server',
+ 'args': {
+ 'server_id': server_id,
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def delete_server(context, server_id):
+ msg = {
+ 'method': 'delete_server',
+ 'args': {
+ 'server_id': server_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+# Domain Methods
+def create_domain(context, values):
+ msg = {
+ 'method': 'create_domain',
+ 'args': {
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_domains(context):
+ msg = {
+ 'method': 'get_domains',
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_domain(context, domain_id):
+ msg = {
+ 'method': 'get_domain',
+ 'args': {
+ 'domain_id': domain_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def update_domain(context, domain_id, values):
+ msg = {
+ 'method': 'update_domain',
+ 'args': {
+ 'domain_id': domain_id,
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def delete_domain(context, domain_id):
+ msg = {
+ 'method': 'delete_domain',
+ 'args': {
+ 'domain_id': domain_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+# Record Methods
+def create_record(context, domain_id, values):
+ msg = {
+ 'method': 'create_record',
+ 'args': {
+ 'domain_id': domain_id,
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_records(context, domain_id):
+ msg = {
+ 'method': 'get_records',
+ 'args': {
+ 'domain_id': domain_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def get_record(context, domain_id, record_id):
+ msg = {
+ 'method': 'get_record',
+ 'args': {
+ 'domain_id': domain_id,
+ 'record_id': record_id,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def update_record(context, domain_id, record_id, values):
+ msg = {
+ 'method': 'update_record',
+ 'args': {
+ 'domain_id': domain_id,
+ 'record_id': record_id,
+ 'values': values,
+ },
+ }
+
+ return RPC.call(context, msg)
+
+
+def delete_record(context, domain_id, record_id):
+ msg = {
+ 'method': 'delete_record',
+ 'args': {
+ 'domain_id': domain_id,
+ 'record_id': record_id,
+ },
+ }
+
+ return RPC.call(context, msg)
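Unlike the agent API's fanout casts, every helper here uses RPC.call, a blocking request/response on the central topic; the return value is whatever central's manager method returns. A minimal usage sketch, assuming moniker-central is running and consuming on that topic:

    # Sketch: blocking RPC round-trip to moniker-central.
    from moniker.openstack.common.context import get_admin_context
    from moniker.central import api as central_api

    context = get_admin_context()
    domains = central_api.get_domains(context)  # blocks until central replies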
diff --git a/moniker/central/manager.py b/moniker/central/manager.py
new file mode 100644
index 00000000..f94a4a60
--- /dev/null
+++ b/moniker/central/manager.py
@@ -0,0 +1,148 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import rpc
+from moniker.openstack.common import manager
+from moniker.openstack.common.rpc import dispatcher as rpc_dispatcher
+from moniker.openstack.common.periodic_task import periodic_task
+from moniker import exceptions
+from moniker import database
+from moniker import utils
+from moniker.agent import api as agent_api
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Manager(manager.Manager):
+ def init_host(self):
+ LOG.warn('Init Host')
+
+ self.init_database()
+ self.init_rpc()
+
+ def init_database(self):
+ self.database = database.get_driver()
+
+ def init_rpc(self):
+ self.connection = rpc.create_connection()
+ dispatcher = rpc_dispatcher.RpcDispatcher([self])
+ self.connection.create_consumer(cfg.CONF.central_topic, dispatcher)
+
+ self.connection.consume_in_thread()
+
+ # @periodic_task
+ # def test(self):
+ # LOG.critical('TEST')
+
+ # Server Methods
+ def create_server(self, context, values):
+ server = self.database.create_server(context, values)
+
+ utils.notify(context, 'api', 'server.create', server)
+
+ return server
+
+ def get_servers(self, context):
+ return self.database.get_servers(context)
+
+ def get_server(self, context, server_id):
+ return self.database.get_server(context, server_id)
+
+ def update_server(self, context, server_id, values):
+ server = self.database.update_server(context, server_id, values)
+
+ utils.notify(context, 'api', 'server.update', server)
+
+ return server
+
+ def delete_server(self, context, server_id):
+ server = self.database.get_server(context, server_id)
+
+ utils.notify(context, 'api', 'server.delete', server)
+
+ return self.database.delete_server(context, server_id)
+
+ # Domain Methods
+ def create_domain(self, context, values):
+ values['tenant_id'] = context.tenant
+
+ domain = self.database.create_domain(context, values)
+
+ agent_api.create_domain(context, domain)
+ utils.notify(context, 'api', 'domain.create', domain)
+
+ return domain
+
+ def get_domains(self, context):
+ return self.database.get_domains(context)
+
+ def get_domain(self, context, domain_id):
+ return self.database.get_domain(context, domain_id)
+
+ def update_domain(self, context, domain_id, values):
+ domain = self.database.update_domain(context, domain_id, values)
+
+ agent_api.update_domain(context, domain)
+ utils.notify(context, 'api', 'domain.update', domain)
+
+ return domain
+
+ def delete_domain(self, context, domain_id):
+ domain = self.database.get_domain(context, domain_id)
+
+ agent_api.delete_domain(context, domain_id)
+ utils.notify(context, 'api', 'domain.delete', domain)
+
+ return self.database.delete_domain(context, domain_id)
+
+ # Record Methods
+ def create_record(self, context, domain_id, values):
+ record = self.database.create_record(context, domain_id, values)
+
+        domain = self.database.get_domain(context, domain_id)
+
+ agent_api.create_record(context, domain, record)
+ utils.notify(context, 'api', 'record.create', record)
+
+ return record
+
+ def get_records(self, context, domain_id):
+ return self.database.get_records(context, domain_id)
+
+ def get_record(self, context, domain_id, record_id):
+ return self.database.get_record(context, record_id)
+
+ def update_record(self, context, domain_id, record_id, values):
+ record = self.database.update_record(context, record_id, values)
+
+        domain = self.database.get_domain(context, domain_id)
+
+ agent_api.update_record(context, domain, record)
+ utils.notify(context, 'api', 'record.update', record)
+
+ return record
+
+ def delete_record(self, context, domain_id, record_id):
+ record = self.database.get_record(context, record_id)
+
+        domain = self.database.get_domain(context, domain_id)
+
+ agent_api.delete_record(context, domain, record)
+ utils.notify(context, 'api', 'record.delete', record)
+
+ return self.database.delete_record(context, record_id)
diff --git a/moniker/database/__init__.py b/moniker/database/__init__.py
new file mode 100644
index 00000000..d1904204
--- /dev/null
+++ b/moniker/database/__init__.py
@@ -0,0 +1,75 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from moniker.openstack.common import cfg
+
+cfg.CONF.register_opts([
+ cfg.StrOpt('database-driver', default='sqlalchemy',
+ help='The database driver to use'),
+])
+
+
+class BaseDatabase(object):
+ def create_server(self, context, values):
+ raise NotImplementedError()
+
+ def get_servers(self, context):
+ raise NotImplementedError()
+
+ def get_server(self, context, server_id):
+ raise NotImplementedError()
+
+ def update_server(self, context, server_id, values):
+ raise NotImplementedError()
+
+ def delete_server(self, context, server_id):
+ raise NotImplementedError()
+
+ def create_domain(self, context, values):
+ raise NotImplementedError()
+
+ def get_domains(self, context):
+ raise NotImplementedError()
+
+ def get_domain(self, context, domain_id):
+ raise NotImplementedError()
+
+ def update_domain(self, context, domain_id, values):
+ raise NotImplementedError()
+
+ def delete_domain(self, context, domain_id):
+ raise NotImplementedError()
+
+ def create_record(self, context, domain_id, values):
+ raise NotImplementedError()
+
+ def get_records(self, context, domain_id):
+ raise NotImplementedError()
+
+ def get_record(self, context, record_id):
+ raise NotImplementedError()
+
+ def update_record(self, context, record_id, values):
+ raise NotImplementedError()
+
+ def delete_record(self, context, record_id):
+ raise NotImplementedError()
+
+
+def get_driver(*args, **kwargs):
+ # TODO: Switch to the config var + entry point loading
+ from moniker.database.sqlalchemy import Sqlalchemy
+
+ return Sqlalchemy(*args, **kwargs)
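get_driver() currently hard-codes the sqlalchemy backend; the TODO is to honour the database-driver option registered above. A minimal sketch of config-driven loading using the importutils module copied into this tree — the module-path convention shown is an assumption, not part of this commit:

    # Sketch: load the backend named by cfg.CONF.database_driver. The
    # '%s.%s' class-path convention here is an assumption for illustration.
    from moniker.openstack.common import cfg
    from moniker.openstack.common import importutils

    def get_driver(*args, **kwargs):
        name = cfg.CONF.database_driver  # e.g. 'sqlalchemy'
        cls = importutils.import_class(
            'moniker.database.%s.%s' % (name, name.capitalize()))
        return cls(*args, **kwargs)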
diff --git a/moniker/database/sqlalchemy/__init__.py b/moniker/database/sqlalchemy/__init__.py
new file mode 100644
index 00000000..daa7bcb1
--- /dev/null
+++ b/moniker/database/sqlalchemy/__init__.py
@@ -0,0 +1,207 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+from moniker import exceptions
+from moniker.database import BaseDatabase
+from moniker.database.sqlalchemy import models
+from moniker.database.sqlalchemy.session import get_session
+
+LOG = logging.getLogger(__name__)
+
+cfg.CONF.register_opts([
+ cfg.StrOpt('sql-connection', default='sqlite:///test.sqlite',
+ help='The database connection string'),
+])
+
+
+class Sqlalchemy(BaseDatabase):
+ def __init__(self):
+ self.session = get_session()
+ models.Base.metadata.create_all(self.session.bind) # HACK: Remove me
+
+ # Server Methods
+ def create_server(self, context, values):
+ server = models.Server()
+
+ server.update(values)
+
+ try:
+ server.save()
+ except IntegrityError, e:
+ if ' unique ' in str(e):
+ raise exceptions.DuplicateServer()
+ else:
+ raise
+
+ return dict(server)
+
+ def get_servers(self, context):
+ query = self.session.query(models.Server)
+
+ try:
+ result = query.all()
+ except NoResultFound:
+ LOG.debug('No results found')
+ return []
+ else:
+ return [dict(o) for o in result]
+
+ def _get_server(self, context, server_id):
+ query = self.session.query(models.Server)
+
+ try:
+ server = query.filter(models.Server.id == server_id).one()
+ except NoResultFound:
+ raise exceptions.ServerNotFound(server_id)
+ else:
+ return server
+
+ def get_server(self, context, server_id):
+ server = self._get_server(context, server_id)
+
+ return dict(server)
+
+ def update_server(self, context, server_id, values):
+ server = self._get_server(context, server_id)
+
+ server.update(values)
+
+ try:
+ server.save()
+ except IntegrityError, e:
+ if ' unique ' in str(e):
+ raise exceptions.DuplicateServer()
+ else:
+ raise
+
+ return dict(server)
+
+ def delete_server(self, context, server_id):
+ server = self._get_server(context, server_id)
+
+ server.delete()
+
+ # Domain Methods
+ def create_domain(self, context, values):
+ domain = models.Domain()
+
+ domain.update(values)
+
+ try:
+ domain.save()
+ except IntegrityError, e:
+ if ' unique ' in str(e):
+ raise exceptions.DuplicateDomain()
+ else:
+ raise
+
+ return dict(domain)
+
+ def get_domains(self, context):
+ query = self.session.query(models.Domain)
+
+ try:
+ result = query.all()
+ except NoResultFound:
+ LOG.debug('No results found')
+ return []
+ else:
+ return [dict(o) for o in result]
+
+ def _get_domain(self, context, domain_id):
+ query = self.session.query(models.Domain)
+
+ try:
+ domain = query.filter(models.Domain.id == domain_id).one()
+ except NoResultFound:
+ raise exceptions.DomainNotFound(domain_id)
+ else:
+ return domain
+
+ def get_domain(self, context, domain_id):
+ domain = self._get_domain(context, domain_id)
+
+ return dict(domain)
+
+ def update_domain(self, context, domain_id, values):
+ domain = self._get_domain(context, domain_id)
+
+ domain.update(values)
+
+ try:
+ domain.save()
+ except IntegrityError, e:
+ if ' unique ' in str(e):
+ raise exceptions.DuplicateDomain()
+ else:
+ raise
+
+ return dict(domain)
+
+ def delete_domain(self, context, domain_id):
+ domain = self._get_domain(context, domain_id)
+
+ domain.delete()
+
+ # Record Methods
+ def create_record(self, context, domain_id, values):
+ domain = self._get_domain(context, domain_id)
+
+ record = models.Record()
+ record.update(values)
+
+ domain.records.append(record)
+
+ domain.save()
+
+ return dict(record)
+
+ def get_records(self, context, domain_id):
+ domain = self._get_domain(context, domain_id)
+
+ return [dict(o) for o in domain.records]
+
+ def _get_record(self, context, record_id):
+ query = self.session.query(models.Record)
+
+ try:
+ record = query.filter(models.Record.id == record_id).one()
+ except NoResultFound:
+ raise exceptions.RecordNotFound(record_id)
+ else:
+ return record
+
+ def get_record(self, context, record_id):
+ record = self._get_record(context, record_id)
+
+ return dict(record)
+
+ def update_record(self, context, record_id, values):
+ record = self._get_record(context, record_id)
+
+ record.update(values)
+
+ record.save()
+
+ return dict(record)
+
+ def delete_record(self, context, record_id):
+ record = self._get_record(context, record_id)
+
+ record.delete()
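+
+
+# NOTE: An illustrative sketch of how the storage methods above chain
+# together; `storage` stands in for an instance of this driver and all
+# values are hypothetical:
+#
+#   domain = storage.create_domain(context, {'tenant_id': '12345',
+#                                            'name': 'example.com.',
+#                                            'email': 'root@example.com'})
+#   record = storage.create_record(context, domain['id'],
+#                                  {'type': 'A',
+#                                   'name': 'www.example.com.',
+#                                   'data': '192.0.2.1'})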
diff --git a/moniker/database/sqlalchemy/models.py b/moniker/database/sqlalchemy/models.py
new file mode 100644
index 00000000..f3ffb6b5
--- /dev/null
+++ b/moniker/database/sqlalchemy/models.py
@@ -0,0 +1,169 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from calendar import timegm
+from uuid import uuid4
+from sqlalchemy import (Column, DateTime, Boolean, String, Integer, ForeignKey,
+ Enum)
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm import relationship, backref, object_mapper
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.ext.hybrid import hybrid_property
+from moniker import exceptions
+from moniker.openstack.common import timeutils
+from moniker.openstack.common import log as logging
+from moniker.database.sqlalchemy.session import get_session
+from moniker.database.sqlalchemy.types import UUID, Inet
+
+LOG = logging.getLogger(__name__)
+
+
+class Base(object):
+ __abstract__ = True
+
+ id = Column(UUID, default=uuid4, primary_key=True)
+
+ created_at = Column(DateTime, default=timeutils.utcnow)
+ updated_at = Column(DateTime, onupdate=timeutils.utcnow)
+ version = Column(Integer, default=1, nullable=False)
+
+ __mapper_args__ = {
+ 'version_id_col': version
+ }
+
+ def save(self, session=None):
+ """ Save this object """
+ if not session:
+ session = get_session()
+
+ session.add(self)
+
+ try:
+ session.flush()
+        except IntegrityError as e:
+ if 'is not unique' in str(e):
+ raise exceptions.Duplicate(str(e))
+ else:
+ raise
+
+    def delete(self, session=None):
+        """ Delete this object """
+        if not session:
+            session = get_session()
+
+        session.delete(self)
+        # Flush so the DELETE is actually issued; with an autocommit
+        # session nothing happens until the next flush otherwise.
+        session.flush()
+
+ def __setitem__(self, key, value):
+ setattr(self, key, value)
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __iter__(self):
+ columns = dict(object_mapper(self).columns).keys()
+ # NOTE(russellb): Allow models to specify other keys that can be looked
+ # up, beyond the actual db columns. An example would be the 'name'
+ # property for an Instance.
+ if hasattr(self, '_extra_keys'):
+ columns.extend(self._extra_keys())
+ self._i = iter(columns)
+ return self
+
+ def next(self):
+ n = self._i.next()
+ return n, getattr(self, n)
+
+ def update(self, values):
+ """ Make the model object behave like a dict """
+ for k, v in values.iteritems():
+ setattr(self, k, v)
+
+ def iteritems(self):
+ """
+ Make the model object behave like a dict.
+
+ Includes attributes from joins.
+ """
+ local = dict(self)
+ joined = dict([(k, v) for k, v in self.__dict__.iteritems()
+ if not k[0] == '_'])
+ local.update(joined)
+ return local.iteritems()
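+
+    # NOTE: The dict protocol implemented above (__iter__/next/__getitem__)
+    # is what allows callers to snapshot a model via dict(); illustrative
+    # values:
+    #
+    #   server = Server(name='ns1.example.com.', ipv4='192.0.2.1')
+    #   dict(server)  # -> {'name': 'ns1.example.com.', 'ipv4': ..., ...}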
+
+
+Base = declarative_base(cls=Base)
+RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'NS']
+
+
+class Server(Base):
+ __tablename__ = 'servers'
+
+ name = Column(String, nullable=False, unique=True)
+ ipv4 = Column(Inet, nullable=False, unique=True)
+ ipv6 = Column(Inet, default=None, unique=True)
+
+
+class Domain(Base):
+ __tablename__ = 'domains'
+
+ tenant_id = Column(String, nullable=False)
+ name = Column(String, nullable=False, unique=True)
+ email = Column(String, nullable=False)
+
+ ttl = Column(Integer, default=3600, nullable=False)
+ refresh = Column(Integer, default=3600, nullable=False)
+ retry = Column(Integer, default=3600, nullable=False)
+ expire = Column(Integer, default=3600, nullable=False)
+ minimum = Column(Integer, default=3600, nullable=False)
+
+ records = relationship('Record', backref=backref('domain', uselist=False))
+
+ @hybrid_property
+ def serial(self):
+ # TODO: Terrible terrible hack.. Cleanup ;)
+ last_change = self.updated_at
+
+ if last_change is None or self.created_at > last_change:
+ last_change = self.created_at
+
+ for record in self.records:
+ if (record.updated_at is not None
+ and record.updated_at > last_change):
+ last_change = record.updated_at
+ elif record.created_at > last_change:
+ last_change = record.created_at
+
+        # NOTE: timegm() treats the timestamp as UTC (matching
+        # timeutils.utcnow()) and avoids the platform-specific
+        # strftime("%s") extension.
+        return timegm(last_change.timetuple())
+
+ def _extra_keys(self):
+ return ['serial']
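+
+    # NOTE: As an illustrative example, a domain (or any of its records)
+    # last modified at 2012-09-25 08:00:00 UTC yields serial 1348560000,
+    # i.e. the UNIX timestamp of the most recent change.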
+
+
+class Record(Base):
+ __tablename__ = 'records'
+
+ type = Column(Enum(name='record_types', *RECORD_TYPES), nullable=False)
+ name = Column(String, nullable=False)
+ data = Column(String, nullable=False)
+ priority = Column(Integer, default=None)
+ ttl = Column(Integer, default=3600, nullable=False)
+
+ domain_id = Column(UUID, ForeignKey('domains.id'), nullable=False)
+
+ @hybrid_property
+ def tenant_id(self):
+ return self.domain.tenant_id
+
+ def _extra_keys(self):
+ return ['tenant_id']
diff --git a/moniker/database/sqlalchemy/session.py b/moniker/database/sqlalchemy/session.py
new file mode 100644
index 00000000..813b8921
--- /dev/null
+++ b/moniker/database/sqlalchemy/session.py
@@ -0,0 +1,54 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from sqlalchemy import create_engine
+from sqlalchemy.orm import scoped_session, sessionmaker
+from moniker.openstack.common import cfg
+from moniker.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+_ENGINE = None
+_SESSION = None
+
+
+def get_session():
+ global _ENGINE, _SESSION
+
+ if _ENGINE is None:
+ _ENGINE = get_engine()
+
+ if _SESSION is None:
+ Session = sessionmaker(bind=_ENGINE, autocommit=True,
+ expire_on_commit=False)
+ _SESSION = scoped_session(Session)
+
+ return _SESSION
+
+
+def get_engine():
+ url = cfg.CONF.sql_connection
+
+ engine_args = {
+ 'echo': False,
+ 'convert_unicode': True,
+ }
+
+    if cfg.CONF.verbose or cfg.CONF.debug:
+        engine_args['echo'] = True
+
+    engine = create_engine(url, **engine_args)
+
+ engine.connect()
+
+ return engine
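+
+
+# NOTE: Because the session is created with autocommit=True, each flush()
+# runs in (and commits) its own short transaction. Typical usage is
+# therefore simply (illustrative):
+#
+#   session = get_session()
+#   session.add(obj)
+#   session.flush()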
diff --git a/moniker/database/sqlalchemy/types.py b/moniker/database/sqlalchemy/types.py
new file mode 100644
index 00000000..8424370a
--- /dev/null
+++ b/moniker/database/sqlalchemy/types.py
@@ -0,0 +1,62 @@
+from sqlalchemy.types import TypeDecorator, CHAR, VARCHAR
+from sqlalchemy.dialects.postgresql import UUID as pgUUID
+from sqlalchemy.dialects.postgresql import INET as pgINET
+import uuid
+import ipaddr
+
+
+class UUID(TypeDecorator):
+ """Platform-independent UUID type.
+
+ Uses Postgresql's UUID type, otherwise uses
+ CHAR(32), storing as stringified hex values.
+
+    Adapted from the SQLAlchemy documentation.
+ """
+ impl = CHAR
+
+ def load_dialect_impl(self, dialect):
+ if dialect.name == 'postgresql':
+ return dialect.type_descriptor(pgUUID())
+ else:
+ return dialect.type_descriptor(CHAR(32))
+
+    def process_bind_param(self, value, dialect):
+        if value is None:
+            return value
+        elif dialect.name == 'postgresql':
+            return str(value)
+        else:
+            if not isinstance(value, uuid.UUID):
+                value = uuid.UUID(value)
+            # Store as a 32 character hex string
+            return "%.32x" % value.int
+
+ def process_result_value(self, value, dialect):
+ if value is None:
+ return value
+ else:
+ return uuid.UUID(value)
+
+
+class Inet(TypeDecorator):
+ impl = VARCHAR
+
+ def load_dialect_impl(self, dialect):
+        if dialect.name == "postgresql":
+            return dialect.type_descriptor(pgINET())
+        else:
+            # IPv6 addresses can be up to 39 characters long
+            return dialect.type_descriptor(VARCHAR(39))
+
+ def process_bind_param(self, value, dialect):
+ if value is None:
+ return value
+ else:
+ return str(value)
+
+ def process_result_value(self, value, dialect):
+ if value is None:
+ return value
+ else:
+ return ipaddr.IPAddress(value)
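+
+
+# NOTE: An illustrative round-trip through these types: binding
+# ipaddr.IPv4Address('192.0.2.1') (or the plain string '192.0.2.1') stores
+# the text '192.0.2.1', while loading the row back returns
+# ipaddr.IPAddress('192.0.2.1'), i.e. an ipaddr address object.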
diff --git a/moniker/database/sqlalchemy/utils.py b/moniker/database/sqlalchemy/utils.py
new file mode 100644
index 00000000..afa4b71b
--- /dev/null
+++ b/moniker/database/sqlalchemy/utils.py
@@ -0,0 +1,130 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010-2011 OpenStack LLC.
+# Copyright 2012 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# NOTE(kiall): Copied verbatim from Nova
+
+"""Implementation of paginate query."""
+
+import sqlalchemy
+
+from moniker.openstack.common import log as logging
+from moniker.openstack.common.gettextutils import _
+from moniker import exceptions
+
+
+LOG = logging.getLogger(__name__)
+
+
+# Copied from glance/db/sqlalchemy/api.py
+def paginate_query(query, model, limit, sort_keys, marker=None,
+ sort_dir=None, sort_dirs=None):
+ """Returns a query with sorting / pagination criteria added.
+
+ Pagination works by requiring a unique sort_key, specified by sort_keys.
+ (If sort_keys is not unique, then we risk looping through values.)
+ We use the last row in the previous page as the 'marker' for pagination.
+ So we must return values that follow the passed marker in the order.
+ With a single-valued sort_key, this would be easy: sort_key > X.
+ With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
+ the lexicographical ordering:
+ (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
+
+ We also have to cope with different sort_directions.
+
+ Typically, the id of the last row is used as the client-facing pagination
+ marker, then the actual marker object must be fetched from the db and
+ passed in to us as marker.
+
+ :param query: the query object to which we should add paging/sorting
+ :param model: the ORM model class
+ :param limit: maximum number of items to return
+ :param sort_keys: array of attributes by which results should be sorted
+    :param marker: the last item of the previous page; we return the next
+                   results after this value.
+ :param sort_dir: direction in which results should be sorted (asc, desc)
+ :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
+
+ :rtype: sqlalchemy.orm.query.Query
+ :return: The query with sorting/pagination added.
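+
+    For example (illustrative only; `session`, `Domain` and `marker_id` are
+    placeholders), fetching the next page of at most 100 domains ordered by
+    creation time then id might look like::
+
+        marker_row = session.query(Domain).get(marker_id)
+        page = paginate_query(session.query(Domain), Domain, 100,
+                              sort_keys=['created_at', 'id'],
+                              marker=marker_row).all()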
+ """
+
+ if 'id' not in sort_keys:
+ # TODO(justinsb): If this ever gives a false-positive, check
+ # the actual primary key, rather than assuming its id
+ LOG.warn(_('Id not in sort_keys; is sort_keys unique?'))
+
+ assert(not (sort_dir and sort_dirs))
+
+ # Default the sort direction to ascending
+ if sort_dirs is None and sort_dir is None:
+ sort_dir = 'asc'
+
+ # Ensure a per-column sort direction
+ if sort_dirs is None:
+ sort_dirs = [sort_dir for _sort_key in sort_keys]
+
+ assert(len(sort_dirs) == len(sort_keys))
+
+ # Add sorting
+ for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
+ sort_dir_func = {
+ 'asc': sqlalchemy.asc,
+ 'desc': sqlalchemy.desc,
+ }[current_sort_dir]
+
+ try:
+ sort_key_attr = getattr(model, current_sort_key)
+ except AttributeError:
+ raise exceptions.InvalidSortKey()
+ query = query.order_by(sort_dir_func(sort_key_attr))
+
+ # Add pagination
+ if marker is not None:
+ marker_values = []
+ for sort_key in sort_keys:
+ v = getattr(marker, sort_key)
+ marker_values.append(v)
+
+ # Build up an array of sort criteria as in the docstring
+ criteria_list = []
+ for i in xrange(0, len(sort_keys)):
+ crit_attrs = []
+ for j in xrange(0, i):
+ model_attr = getattr(model, sort_keys[j])
+ crit_attrs.append((model_attr == marker_values[j]))
+
+ model_attr = getattr(model, sort_keys[i])
+ if sort_dirs[i] == 'desc':
+ crit_attrs.append((model_attr < marker_values[i]))
+ elif sort_dirs[i] == 'asc':
+ crit_attrs.append((model_attr > marker_values[i]))
+ else:
+ raise ValueError(_("Unknown sort direction, "
+ "must be 'desc' or 'asc'"))
+
+ criteria = sqlalchemy.sql.and_(*crit_attrs)
+ criteria_list.append(criteria)
+
+ f = sqlalchemy.sql.or_(*criteria_list)
+ query = query.filter(f)
+
+ if limit is not None:
+ query = query.limit(limit)
+
+ return query
diff --git a/moniker/exceptions.py b/moniker/exceptions.py
new file mode 100644
index 00000000..d35c9456
--- /dev/null
+++ b/moniker/exceptions.py
@@ -0,0 +1,52 @@
+class Base(Exception):
+ pass
+
+
+class InvalidObject(Base):
+ pass
+
+
+class Forbidden(Base):
+ pass
+
+
+class InvalidSortKey(Base):
+ pass
+
+
+class NoServersConfigured(Base):
+ pass
+
+
+class Duplicate(Base):
+ pass
+
+
+class DuplicateServer(Duplicate):
+ pass
+
+
+class DuplicateDomain(Duplicate):
+ pass
+
+
+class DuplicateRecord(Duplicate):
+ pass
+
+
+class NotFound(Base):
+ pass
+
+
+class ServerNotFound(NotFound):
+ pass
+
+
+class DomainNotFound(NotFound):
+ pass
+
+
+class RecordNotFound(NotFound):
+ pass
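+
+
+# NOTE: The hierarchy above lets callers catch at whatever granularity they
+# need; e.g. (illustrative):
+#
+#   try:
+#       storage.get_domain(context, domain_id)
+#   except exceptions.NotFound:
+#       # DomainNotFound, ServerNotFound and RecordNotFound all land here
+#       pass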
diff --git a/moniker/openstack/__init__.py b/moniker/openstack/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/moniker/openstack/__init__.py
diff --git a/moniker/openstack/common/__init__.py b/moniker/openstack/common/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/moniker/openstack/common/__init__.py
diff --git a/moniker/openstack/common/cfg.py b/moniker/openstack/common/cfg.py
new file mode 100644
index 00000000..47392a0f
--- /dev/null
+++ b/moniker/openstack/common/cfg.py
@@ -0,0 +1,1653 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+r"""
+Configuration options which may be set on the command line or in config files.
+
+The schema for each option is defined using the Opt sub-classes, e.g.:
+
+::
+
+ common_opts = [
+ cfg.StrOpt('bind_host',
+ default='0.0.0.0',
+ help='IP address to listen on'),
+ cfg.IntOpt('bind_port',
+ default=9292,
+ help='Port number to listen on')
+ ]
+
+Options can be strings, integers, floats, booleans, lists or 'multi strings'::
+
+ enabled_apis_opt = cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute'],
+ help='List of APIs to enable by default')
+
+ DEFAULT_EXTENSIONS = [
+ 'nova.api.openstack.compute.contrib.standard_extensions'
+ ]
+ osapi_compute_extension_opt = cfg.MultiStrOpt('osapi_compute_extension',
+ default=DEFAULT_EXTENSIONS)
+
+Option schemas are registered with the config manager at runtime, but before
+the option is referenced::
+
+ class ExtensionManager(object):
+
+ enabled_apis_opt = cfg.ListOpt(...)
+
+ def __init__(self, conf):
+ self.conf = conf
+ self.conf.register_opt(enabled_apis_opt)
+ ...
+
+ def _load_extensions(self):
+ for ext_factory in self.conf.osapi_compute_extension:
+ ....
+
+A common usage pattern is for each option schema to be defined in the module or
+class which uses the option::
+
+ opts = ...
+
+ def add_common_opts(conf):
+ conf.register_opts(opts)
+
+ def get_bind_host(conf):
+ return conf.bind_host
+
+ def get_bind_port(conf):
+ return conf.bind_port
+
+An option may optionally be made available via the command line. Such options
+must be registered with the config manager before the command line is parsed
+(for the purposes of --help and CLI arg validation)::
+
+ cli_opts = [
+ cfg.BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output'),
+ cfg.BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output'),
+ ]
+
+ def add_common_opts(conf):
+ conf.register_cli_opts(cli_opts)
+
+The config manager has two CLI options defined by default, --config-file
+and --config-dir::
+
+ class ConfigOpts(object):
+
+ def __call__(self, ...):
+
+ opts = [
+ MultiStrOpt('config-file',
+ ...),
+ StrOpt('config-dir',
+ ...),
+ ]
+
+ self.register_cli_opts(opts)
+
+Option values are parsed from any supplied config files using
+openstack.common.iniparser. If none are specified, a default set is used
+e.g. glance-api.conf and glance-common.conf::
+
+ glance-api.conf:
+ [DEFAULT]
+ bind_port = 9292
+
+ glance-common.conf:
+ [DEFAULT]
+ bind_host = 0.0.0.0
+
+Option values in config files override those on the command line. Config files
+are parsed in order, with values in later files overriding those in earlier
+files.
+
+The parsing of CLI args and config files is initiated by invoking the config
+manager e.g.::
+
+ conf = ConfigOpts()
+ conf.register_opt(BoolOpt('verbose', ...))
+ conf(sys.argv[1:])
+ if conf.verbose:
+ ...
+
+Options can be registered as belonging to a group::
+
+ rabbit_group = cfg.OptGroup(name='rabbit',
+ title='RabbitMQ options')
+
+ rabbit_host_opt = cfg.StrOpt('host',
+ default='localhost',
+ help='IP/hostname to listen on'),
+ rabbit_port_opt = cfg.IntOpt('port',
+ default=5672,
+ help='Port number to listen on')
+
+ def register_rabbit_opts(conf):
+ conf.register_group(rabbit_group)
+ # options can be registered under a group in either of these ways:
+ conf.register_opt(rabbit_host_opt, group=rabbit_group)
+ conf.register_opt(rabbit_port_opt, group='rabbit')
+
+If no group attributes are required other than the group name, the group
+need not be explicitly registered, e.g.::
+
+ def register_rabbit_opts(conf):
+        # The group will automatically be created, equivalent to calling::
+ # conf.register_group(OptGroup(name='rabbit'))
+ conf.register_opt(rabbit_port_opt, group='rabbit')
+
+If no group is specified, options belong to the 'DEFAULT' section of config
+files::
+
+ glance-api.conf:
+ [DEFAULT]
+ bind_port = 9292
+ ...
+
+ [rabbit]
+ host = localhost
+ port = 5672
+ use_ssl = False
+ userid = guest
+ password = guest
+ virtual_host = /
+
+Command-line options in a group are automatically prefixed with the
+group name::
+
+ --rabbit-host localhost --rabbit-port 9999
+
+Option values in the default group are referenced as attributes/properties on
+the config manager; groups are also attributes on the config manager, with
+attributes for each of the options associated with the group::
+
+ server.start(app, conf.bind_port, conf.bind_host, conf)
+
+ self.connection = kombu.connection.BrokerConnection(
+ hostname=conf.rabbit.host,
+ port=conf.rabbit.port,
+ ...)
+
+Option values may reference other values using PEP 292 string substitution::
+
+ opts = [
+ cfg.StrOpt('state_path',
+ default=os.path.join(os.path.dirname(__file__), '../'),
+ help='Top-level directory for maintaining nova state'),
+ cfg.StrOpt('sqlite_db',
+ default='nova.sqlite',
+ help='file name for sqlite'),
+ cfg.StrOpt('sql_connection',
+ default='sqlite:///$state_path/$sqlite_db',
+ help='connection string for sql database'),
+ ]
+
+Note that interpolation can be avoided by using '$$'.
+
+For command line utilities that dispatch to other command line utilities, the
+disable_interspersed_args() method is available. If this method is called,
+then parsing e.g.::
+
+ script --verbose cmd --debug /tmp/mything
+
+will no longer return::
+
+ ['cmd', '/tmp/mything']
+
+as the leftover arguments, but will instead return::
+
+ ['cmd', '--debug', '/tmp/mything']
+
+i.e. argument parsing is stopped at the first non-option argument.
+
+Options may be declared as required so that an error is raised if the user
+does not supply a value for the option.
+
+Options may be declared as secret so that their values are not leaked into
+log files::
+
+ opts = [
+ cfg.StrOpt('s3_store_access_key', secret=True),
+ cfg.StrOpt('s3_store_secret_key', secret=True),
+ ...
+ ]
+
+This module also contains a global instance of the CommonConfigOpts class
+in order to support a common usage pattern in OpenStack::
+
+ from openstack.common import cfg
+
+ opts = [
+        cfg.StrOpt('bind_host', default='0.0.0.0'),
+ cfg.IntOpt('bind_port', default=9292),
+ ]
+
+ CONF = cfg.CONF
+ CONF.register_opts(opts)
+
+ def start(server, app):
+ server.start(app, CONF.bind_port, CONF.bind_host)
+
+"""
+
+import collections
+import copy
+import functools
+import glob
+import optparse
+import os
+import string
+import sys
+
+from moniker.openstack.common import iniparser
+
+
+class Error(Exception):
+ """Base class for cfg exceptions."""
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class ArgsAlreadyParsedError(Error):
+ """Raised if a CLI opt is registered after parsing."""
+
+ def __str__(self):
+ ret = "arguments already parsed"
+ if self.msg:
+ ret += ": " + self.msg
+ return ret
+
+
+class NoSuchOptError(Error, AttributeError):
+ """Raised if an opt which doesn't exist is referenced."""
+
+ def __init__(self, opt_name, group=None):
+ self.opt_name = opt_name
+ self.group = group
+
+ def __str__(self):
+ if self.group is None:
+ return "no such option: %s" % self.opt_name
+ else:
+ return "no such option in group %s: %s" % (self.group.name,
+ self.opt_name)
+
+
+class NoSuchGroupError(Error):
+ """Raised if a group which doesn't exist is referenced."""
+
+ def __init__(self, group_name):
+ self.group_name = group_name
+
+ def __str__(self):
+ return "no such group: %s" % self.group_name
+
+
+class DuplicateOptError(Error):
+ """Raised if multiple opts with the same name are registered."""
+
+ def __init__(self, opt_name):
+ self.opt_name = opt_name
+
+ def __str__(self):
+ return "duplicate option: %s" % self.opt_name
+
+
+class RequiredOptError(Error):
+ """Raised if an option is required but no value is supplied by the user."""
+
+ def __init__(self, opt_name, group=None):
+ self.opt_name = opt_name
+ self.group = group
+
+ def __str__(self):
+ if self.group is None:
+ return "value required for option: %s" % self.opt_name
+ else:
+ return "value required for option: %s.%s" % (self.group.name,
+ self.opt_name)
+
+
+class TemplateSubstitutionError(Error):
+ """Raised if an error occurs substituting a variable in an opt value."""
+
+ def __str__(self):
+ return "template substitution error: %s" % self.msg
+
+
+class ConfigFilesNotFoundError(Error):
+ """Raised if one or more config files are not found."""
+
+ def __init__(self, config_files):
+ self.config_files = config_files
+
+ def __str__(self):
+        return ('Failed to read some config files: %s' %
+                ','.join(self.config_files))
+
+
+class ConfigFileParseError(Error):
+ """Raised if there is an error parsing a config file."""
+
+ def __init__(self, config_file, msg):
+ self.config_file = config_file
+ self.msg = msg
+
+ def __str__(self):
+ return 'Failed to parse %s: %s' % (self.config_file, self.msg)
+
+
+class ConfigFileValueError(Error):
+ """Raised if a config file value does not match its opt type."""
+ pass
+
+
+def _fixpath(p):
+ """Apply tilde expansion and absolutization to a path."""
+ return os.path.abspath(os.path.expanduser(p))
+
+
+def _get_config_dirs(project=None):
+    """Return a list of directories where config files may be located.
+
+ :param project: an optional project name
+
+    If a project is specified, the following directories are returned::
+
+ ~/.${project}/
+ ~/
+ /etc/${project}/
+ /etc/
+
+ Otherwise, these directories::
+
+ ~/
+ /etc/
+ """
+ cfg_dirs = [
+ _fixpath(os.path.join('~', '.' + project)) if project else None,
+ _fixpath('~'),
+ os.path.join('/etc', project) if project else None,
+ '/etc'
+ ]
+
+ return filter(bool, cfg_dirs)
+
+
+def _search_dirs(dirs, basename, extension=""):
+ """Search a list of directories for a given filename.
+
+    Iterate over the supplied directories, returning the first file
+ found with the supplied name and extension.
+
+ :param dirs: a list of directories
+ :param basename: the filename, e.g. 'glance-api'
+ :param extension: the file extension, e.g. '.conf'
+ :returns: the path to a matching file, or None
+ """
+ for d in dirs:
+ path = os.path.join(d, '%s%s' % (basename, extension))
+ if os.path.exists(path):
+ return path
+
+
+def find_config_files(project=None, prog=None, extension='.conf'):
+ """Return a list of default configuration files.
+
+ :param project: an optional project name
+ :param prog: the program name, defaulting to the basename of sys.argv[0]
+ :param extension: the type of the config file
+
+ We default to two config files: [${project}.conf, ${prog}.conf]
+
+ And we look for those config files in the following directories::
+
+ ~/.${project}/
+ ~/
+ /etc/${project}/
+ /etc/
+
+    We return an absolute path for (at most) one of each of the default
+    config files, for the topmost directory it exists in.
+
+ For example, if project=foo, prog=bar and /etc/foo/foo.conf, /etc/bar.conf
+ and ~/.foo/bar.conf all exist, then we return ['/etc/foo/foo.conf',
+ '~/.foo/bar.conf']
+
+    If no project name is supplied, we only look for ${prog}.conf.
+ """
+ if prog is None:
+ prog = os.path.basename(sys.argv[0])
+
+ cfg_dirs = _get_config_dirs(project)
+
+ config_files = []
+ if project:
+ config_files.append(_search_dirs(cfg_dirs, project, extension))
+ config_files.append(_search_dirs(cfg_dirs, prog, extension))
+
+ return filter(bool, config_files)
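+
+
+# For example (illustrative paths), find_config_files(project='moniker',
+# prog='moniker-api') could return:
+#
+#   ['/etc/moniker/moniker.conf', '/etc/moniker/moniker-api.conf']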
+
+
+def _is_opt_registered(opts, opt):
+ """Check whether an opt with the same name is already registered.
+
+ The same opt may be registered multiple times, with only the first
+ registration having any effect. However, it is an error to attempt
+ to register a different opt with the same name.
+
+ :param opts: the set of opts already registered
+ :param opt: the opt to be registered
+ :returns: True if the opt was previously registered, False otherwise
+ :raises: DuplicateOptError if a naming conflict is detected
+ """
+ if opt.dest in opts:
+ if opts[opt.dest]['opt'] != opt:
+ raise DuplicateOptError(opt.name)
+ return True
+ else:
+ return False
+
+
+class Opt(object):
+
+ """Base class for all configuration options.
+
+    An Opt object has no public methods, but has a number of public
+    properties:
+
+ name:
+ the name of the option, which may include hyphens
+ dest:
+ the (hyphen-less) ConfigOpts property which contains the option value
+ short:
+ a single character CLI option name
+ default:
+ the default value of the option
+ metavar:
+ the name shown as the argument to a CLI option in --help output
+ help:
+        a string explaining how the option's value is used
+ """
+ multi = False
+
+ def __init__(self, name, dest=None, short=None, default=None,
+ metavar=None, help=None, secret=False, required=False,
+ deprecated_name=None):
+ """Construct an Opt object.
+
+ The only required parameter is the option's name. However, it is
+ common to also supply a default and help string for all options.
+
+ :param name: the option's name
+ :param dest: the name of the corresponding ConfigOpts property
+ :param short: a single character CLI option name
+ :param default: the default value of the option
+ :param metavar: the option argument to show in --help
+ :param help: an explanation of how the option is used
+ :param secret: true iff the value should be obfuscated in log output
+ :param required: true iff a value must be supplied for this option
+        :param deprecated_name: a deprecated option name which acts as an alias
+ """
+ self.name = name
+ if dest is None:
+ self.dest = self.name.replace('-', '_')
+ else:
+ self.dest = dest
+ self.short = short
+ self.default = default
+ self.metavar = metavar
+ self.help = help
+ self.secret = secret
+ self.required = required
+ if deprecated_name is not None:
+ self.deprecated_name = deprecated_name.replace('-', '_')
+ else:
+ self.deprecated_name = None
+
+ def __ne__(self, another):
+ return vars(self) != vars(another)
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieves the option value from a MultiConfigParser object.
+
+ This is the method ConfigOpts uses to look up the option value from
+ config files. Most opt types override this method in order to perform
+ type appropriate conversion of the returned value.
+
+        :param cparser: a MultiConfigParser object
+ :param section: a section name
+ """
+ return self._cparser_get_with_deprecated(cparser, section)
+
+ def _cparser_get_with_deprecated(self, cparser, section):
+        """If the option cannot be found as dest, try the deprecated_name alias."""
+ if self.deprecated_name is not None:
+ return cparser.get(section, [self.dest, self.deprecated_name])
+ return cparser.get(section, [self.dest])
+
+ def _add_to_cli(self, parser, group=None):
+ """Makes the option available in the command line interface.
+
+ This is the method ConfigOpts uses to add the opt to the CLI interface
+ as appropriate for the opt type. Some opt types may extend this method,
+ others may just extend the helper methods it uses.
+
+ :param parser: the CLI option parser
+ :param group: an optional OptGroup object
+ """
+ container = self._get_optparse_container(parser, group)
+ kwargs = self._get_optparse_kwargs(group)
+ prefix = self._get_optparse_prefix('', group)
+ self._add_to_optparse(container, self.name, self.short, kwargs, prefix,
+ self.deprecated_name)
+
+ def _add_to_optparse(self, container, name, short, kwargs, prefix='',
+ deprecated_name=None):
+ """Add an option to an optparse parser or group.
+
+ :param container: an optparse.OptionContainer object
+ :param name: the opt name
+ :param short: the short opt name
+ :param kwargs: the keyword arguments for add_option()
+ :param prefix: an optional prefix to prepend to the opt name
+        :raises: DuplicateOptError if a naming conflict is detected
+ """
+ args = ['--' + prefix + name]
+ if short:
+ args += ['-' + short]
+ if deprecated_name:
+ args += ['--' + prefix + deprecated_name]
+ for a in args:
+ if container.has_option(a):
+ raise DuplicateOptError(a)
+ container.add_option(*args, **kwargs)
+
+ def _get_optparse_container(self, parser, group):
+ """Returns an optparse.OptionContainer.
+
+ :param parser: an optparse.OptionParser
+ :param group: an (optional) OptGroup object
+ :returns: an optparse.OptionGroup if a group is given, else the parser
+ """
+ if group is not None:
+ return group._get_optparse_group(parser)
+ else:
+ return parser
+
+ def _get_optparse_kwargs(self, group, **kwargs):
+ """Build a dict of keyword arguments for optparse's add_option().
+
+ Most opt types extend this method to customize the behaviour of the
+ options added to optparse.
+
+ :param group: an optional group
+ :param kwargs: optional keyword arguments to add to
+ :returns: a dict of keyword arguments
+ """
+ dest = self.dest
+ if group is not None:
+ dest = group.name + '_' + dest
+ kwargs.update({'dest': dest,
+ 'metavar': self.metavar,
+ 'help': self.help, })
+ return kwargs
+
+ def _get_optparse_prefix(self, prefix, group):
+ """Build a prefix for the CLI option name, if required.
+
+ CLI options in a group are prefixed with the group's name in order
+ to avoid conflicts between similarly named options in different
+ groups.
+
+ :param prefix: an existing prefix to append to (e.g. 'no' or '')
+ :param group: an optional OptGroup object
+ :returns: a CLI option prefix including the group name, if appropriate
+ """
+ if group is not None:
+ return group.name + '-' + prefix
+ else:
+ return prefix
+
+
+class StrOpt(Opt):
+ """
+ String opts do not have their values transformed and are returned as
+ str objects.
+ """
+ pass
+
+
+class BoolOpt(Opt):
+
+ """
+ Bool opts are set to True or False on the command line using --optname or
+    --nooptname respectively.
+
+ In config files, boolean values are case insensitive and can be set using
+ 1/0, yes/no, true/false or on/off.
+ """
+
+ _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
+ '0': False, 'no': False, 'false': False, 'off': False}
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a boolean from ConfigParser."""
+ def convert_bool(v):
+ value = self._boolean_states.get(v.lower())
+ if value is None:
+ raise ValueError('Unexpected boolean value %r' % v)
+
+ return value
+
+ return [convert_bool(v) for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _add_to_cli(self, parser, group=None):
+ """Extends the base class method to add the --nooptname option."""
+ super(BoolOpt, self)._add_to_cli(parser, group)
+ self._add_inverse_to_optparse(parser, group)
+
+ def _add_inverse_to_optparse(self, parser, group):
+ """Add the --nooptname option to the option parser."""
+ container = self._get_optparse_container(parser, group)
+ kwargs = self._get_optparse_kwargs(group, action='store_false')
+ prefix = self._get_optparse_prefix('no', group)
+ kwargs["help"] = "The inverse of --" + self.name
+ self._add_to_optparse(container, self.name, None, kwargs, prefix,
+ self.deprecated_name)
+
+ def _get_optparse_kwargs(self, group, action='store_true', **kwargs):
+ """Extends the base optparse keyword dict for boolean options."""
+ return super(BoolOpt,
+ self)._get_optparse_kwargs(group, action=action, **kwargs)
+
+
+class IntOpt(Opt):
+
+ """Int opt values are converted to integers using the int() builtin."""
+
+ def _get_from_config_parser(self, cparser, section):
+        """Retrieve the opt value as an integer from ConfigParser."""
+ return [int(v) for v in self._cparser_get_with_deprecated(cparser,
+ section)]
+
+ def _get_optparse_kwargs(self, group, **kwargs):
+ """Extends the base optparse keyword dict for integer options."""
+ return super(IntOpt,
+ self)._get_optparse_kwargs(group, type='int', **kwargs)
+
+
+class FloatOpt(Opt):
+
+ """Float opt values are converted to floats using the float() builtin."""
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a float from ConfigParser."""
+ return [float(v) for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _get_optparse_kwargs(self, group, **kwargs):
+ """Extends the base optparse keyword dict for float options."""
+ return super(FloatOpt,
+ self)._get_optparse_kwargs(group, type='float', **kwargs)
+
+
+class ListOpt(Opt):
+
+ """
+ List opt values are simple string values separated by commas. The opt value
+ is a list containing these strings.
+ """
+
+ def _get_from_config_parser(self, cparser, section):
+ """Retrieve the opt value as a list from ConfigParser."""
+ return [v.split(',') for v in
+ self._cparser_get_with_deprecated(cparser, section)]
+
+ def _get_optparse_kwargs(self, group, **kwargs):
+ """Extends the base optparse keyword dict for list options."""
+ return super(ListOpt,
+ self)._get_optparse_kwargs(group,
+ type='string',
+ action='callback',
+ callback=self._parse_list,
+ **kwargs)
+
+ def _parse_list(self, option, opt, value, parser):
+ """An optparse callback for parsing an option value into a list."""
+ setattr(parser.values, self.dest, value.split(','))
+
+
+class MultiStrOpt(Opt):
+
+ """
+ Multistr opt values are string opts which may be specified multiple times.
+ The opt value is a list containing all the string values specified.
+ """
+ multi = True
+
+ def _get_optparse_kwargs(self, group, **kwargs):
+ """Extends the base optparse keyword dict for multi str options."""
+ return super(MultiStrOpt,
+ self)._get_optparse_kwargs(group, action='append')
+
+ def _cparser_get_with_deprecated(self, cparser, section):
+        """If the option cannot be found as dest, try the deprecated_name alias."""
+ if self.deprecated_name is not None:
+ return cparser.get(section, [self.dest, self.deprecated_name],
+ multi=True)
+ return cparser.get(section, [self.dest], multi=True)
+
+
+class OptGroup(object):
+
+ """
+ Represents a group of opts.
+
+ CLI opts in the group are automatically prefixed with the group name.
+
+ Each group corresponds to a section in config files.
+
+ An OptGroup object has no public methods, but has a number of public string
+ properties:
+
+ name:
+ the name of the group
+ title:
+ the group title as displayed in --help
+ help:
+ the group description as displayed in --help
+ """
+
+ def __init__(self, name, title=None, help=None):
+ """Constructs an OptGroup object.
+
+ :param name: the group name
+ :param title: the group title for --help
+ :param help: the group description for --help
+ """
+ self.name = name
+ if title is None:
+            self.title = "%s options" % name
+ else:
+ self.title = title
+ self.help = help
+
+ self._opts = {} # dict of dicts of (opt:, override:, default:)
+ self._optparse_group = None
+
+ def _register_opt(self, opt):
+ """Add an opt to this group.
+
+ :param opt: an Opt object
+ :returns: False if previously registered, True otherwise
+ :raises: DuplicateOptError if a naming conflict is detected
+ """
+ if _is_opt_registered(self._opts, opt):
+ return False
+
+ self._opts[opt.dest] = {'opt': opt}
+
+ return True
+
+ def _unregister_opt(self, opt):
+ """Remove an opt from this group.
+
+ :param opt: an Opt object
+ """
+ if opt.dest in self._opts:
+ del self._opts[opt.dest]
+
+ def _get_optparse_group(self, parser):
+ """Build an optparse.OptionGroup for this group."""
+ if self._optparse_group is None:
+ self._optparse_group = optparse.OptionGroup(parser, self.title,
+ self.help)
+ return self._optparse_group
+
+ def _clear(self):
+ """Clear this group's option parsing state."""
+ self._optparse_group = None
+
+
+class ParseError(iniparser.ParseError):
+ def __init__(self, msg, lineno, line, filename):
+ super(ParseError, self).__init__(msg, lineno, line)
+ self.filename = filename
+
+ def __str__(self):
+ return 'at %s:%d, %s: %r' % (self.filename, self.lineno,
+ self.msg, self.line)
+
+
+class ConfigParser(iniparser.BaseParser):
+ def __init__(self, filename, sections):
+ super(ConfigParser, self).__init__()
+ self.filename = filename
+ self.sections = sections
+ self.section = None
+
+ def parse(self):
+ with open(self.filename) as f:
+ return super(ConfigParser, self).parse(f)
+
+ def new_section(self, section):
+ self.section = section
+ self.sections.setdefault(self.section, {})
+
+ def assignment(self, key, value):
+ if not self.section:
+ raise self.error_no_section()
+
+ self.sections[self.section].setdefault(key, [])
+ self.sections[self.section][key].append('\n'.join(value))
+
+ def parse_exc(self, msg, lineno, line=None):
+ return ParseError(msg, lineno, line, self.filename)
+
+ def error_no_section(self):
+ return self.parse_exc('Section must be started before assignment',
+ self.lineno)
+
+
+class MultiConfigParser(object):
+ def __init__(self):
+ self.parsed = []
+
+ def read(self, config_files):
+ read_ok = []
+
+ for filename in config_files:
+ sections = {}
+ parser = ConfigParser(filename, sections)
+
+ try:
+ parser.parse()
+ except IOError:
+ continue
+ self.parsed.insert(0, sections)
+ read_ok.append(filename)
+
+ return read_ok
+
+ def get(self, section, names, multi=False):
+ rvalue = []
+ for sections in self.parsed:
+ if section not in sections:
+ continue
+ for name in names:
+ if name in sections[section]:
+ if multi:
+ rvalue = sections[section][name] + rvalue
+ else:
+ return sections[section][name]
+ if multi and rvalue != []:
+ return rvalue
+ raise KeyError
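+
+    # NOTE: read() inserts each parsed file at the front of self.parsed, so
+    # get() above sees values from later config files first. This is what
+    # implements the "later files win" precedence; e.g. (illustrative):
+    #
+    #   parser = MultiConfigParser()
+    #   parser.read(['base.conf', 'override.conf'])
+    #   parser.get('DEFAULT', ['port'])  # from override.conf, if set there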
+
+
+class ConfigOpts(collections.Mapping):
+
+ """
+ Config options which may be set on the command line or in config files.
+
+ ConfigOpts is a configuration option manager with APIs for registering
+ option schemas, grouping options, parsing option values and retrieving
+ the values of options.
+ """
+
+ def __init__(self):
+ """Construct a ConfigOpts object."""
+ self._opts = {} # dict of dicts of (opt:, override:, default:)
+ self._groups = {}
+
+ self._args = None
+ self._oparser = None
+ self._cparser = None
+ self._cli_values = {}
+ self.__cache = {}
+ self._config_opts = []
+ self._disable_interspersed_args = False
+
+ def _setup(self, project, prog, version, usage, default_config_files):
+ """Initialize a ConfigOpts object for option parsing."""
+ if prog is None:
+ prog = os.path.basename(sys.argv[0])
+
+ if default_config_files is None:
+ default_config_files = find_config_files(project, prog)
+
+ self._oparser = optparse.OptionParser(prog=prog,
+ version=version,
+ usage=usage)
+ if self._disable_interspersed_args:
+ self._oparser.disable_interspersed_args()
+
+ self._config_opts = [
+ MultiStrOpt('config-file',
+ default=default_config_files,
+ metavar='PATH',
+ help='Path to a config file to use. Multiple config '
+ 'files can be specified, with values in later '
+ 'files taking precedence. The default files '
+                             'used are: %s' % (default_config_files, )),
+ StrOpt('config-dir',
+ metavar='DIR',
+ help='Path to a config directory to pull *.conf '
+ 'files from. This file set is sorted, so as to '
+ 'provide a predictable parse order if individual '
+ 'options are over-ridden. The set is parsed after '
+ 'the file(s), if any, specified via --config-file, '
+ 'hence over-ridden options in the directory take '
+ 'precedence.'),
+ ]
+ self.register_cli_opts(self._config_opts)
+
+ self.project = project
+ self.prog = prog
+ self.version = version
+ self.usage = usage
+ self.default_config_files = default_config_files
+
+ def __clear_cache(f):
+ @functools.wraps(f)
+ def __inner(self, *args, **kwargs):
+ if kwargs.pop('clear_cache', True):
+ self.__cache.clear()
+ return f(self, *args, **kwargs)
+
+ return __inner
+
+ def __call__(self,
+ args=None,
+ project=None,
+ prog=None,
+ version=None,
+ usage=None,
+ default_config_files=None):
+ """Parse command line arguments and config files.
+
+ Calling a ConfigOpts object causes the supplied command line arguments
+ and config files to be parsed, causing opt values to be made available
+ as attributes of the object.
+
+ The object may be called multiple times, each time causing the previous
+ set of values to be overwritten.
+
+ Automatically registers the --config-file option with either a supplied
+ list of default config files, or a list from find_config_files().
+
+ If the --config-dir option is set, any *.conf files from this
+ directory are pulled in, after all the file(s) specified by the
+ --config-file option.
+
+ :param args: command line arguments (defaults to sys.argv[1:])
+ :param project: the toplevel project name, used to locate config files
+ :param prog: the name of the program (defaults to sys.argv[0] basename)
+ :param version: the program version (for --version)
+ :param usage: a usage string (%prog will be expanded)
+ :param default_config_files: config files to use by default
+ :returns: the list of arguments left over after parsing options
+ :raises: SystemExit, ConfigFilesNotFoundError, ConfigFileParseError,
+ RequiredOptError, DuplicateOptError
+ """
+ self.clear()
+
+ self._setup(project, prog, version, usage, default_config_files)
+
+ self._cli_values, leftovers = self._parse_cli_opts(args)
+
+ self._parse_config_files()
+
+ self._check_required_opts()
+
+ return leftovers
+
+ def __getattr__(self, name):
+ """Look up an option value and perform string substitution.
+
+ :param name: the opt name (or 'dest', more precisely)
+        :returns: the option value (after string substitution) or a GroupAttr
+ :raises: NoSuchOptError,ConfigFileValueError,TemplateSubstitutionError
+ """
+ return self._get(name)
+
+ def __getitem__(self, key):
+ """Look up an option value and perform string substitution."""
+ return self.__getattr__(key)
+
+ def __contains__(self, key):
+ """Return True if key is the name of a registered opt or group."""
+ return key in self._opts or key in self._groups
+
+ def __iter__(self):
+ """Iterate over all registered opt and group names."""
+ for key in self._opts.keys() + self._groups.keys():
+ yield key
+
+ def __len__(self):
+ """Return the number of options and option groups."""
+ return len(self._opts) + len(self._groups)
+
+ def reset(self):
+ """Clear the object state and unset overrides and defaults."""
+ self._unset_defaults_and_overrides()
+ self.clear()
+
+ @__clear_cache
+ def clear(self):
+ """Clear the state of the object to before it was called."""
+ self._args = None
+ self._cli_values.clear()
+ self._oparser = None
+ self._cparser = None
+ self.unregister_opts(self._config_opts)
+ for group in self._groups.values():
+ group._clear()
+
+ @__clear_cache
+ def register_opt(self, opt, group=None):
+ """Register an option schema.
+
+ Registering an option schema makes any option value which is previously
+ or subsequently parsed from the command line or config files available
+ as an attribute of this object.
+
+ :param opt: an instance of an Opt sub-class
+ :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+ :raises: DuplicateOptError
+ """
+ if group is not None:
+ return self._get_group(group, autocreate=True)._register_opt(opt)
+
+ if _is_opt_registered(self._opts, opt):
+ return False
+
+ self._opts[opt.dest] = {'opt': opt}
+
+ return True
+
+ @__clear_cache
+ def register_opts(self, opts, group=None):
+ """Register multiple option schemas at once."""
+ for opt in opts:
+ self.register_opt(opt, group, clear_cache=False)
+
+ @__clear_cache
+ def register_cli_opt(self, opt, group=None):
+ """Register a CLI option schema.
+
+ CLI option schemas must be registered before the command line and
+ config files are parsed. This is to ensure that all CLI options are
+        shown in --help and option validation works as expected.
+
+ :param opt: an instance of an Opt sub-class
+ :param group: an optional OptGroup object or group name
+        :return: False if the opt was already registered, True otherwise
+ :raises: DuplicateOptError, ArgsAlreadyParsedError
+ """
+ if self._args is not None:
+ raise ArgsAlreadyParsedError("cannot register CLI option")
+
+ return self.register_opt(opt, group, clear_cache=False)
+
+ @__clear_cache
+ def register_cli_opts(self, opts, group=None):
+ """Register multiple CLI option schemas at once."""
+ for opt in opts:
+ self.register_cli_opt(opt, group, clear_cache=False)
+
+ def register_group(self, group):
+ """Register an option group.
+
+ An option group must be registered before options can be registered
+ with the group.
+
+ :param group: an OptGroup object
+ """
+ if group.name in self._groups:
+ return
+
+ self._groups[group.name] = copy.copy(group)
+
+ @__clear_cache
+ def unregister_opt(self, opt, group=None):
+ """Unregister an option.
+
+ :param opt: an Opt object
+ :param group: an optional OptGroup object or group name
+ :raises: ArgsAlreadyParsedError, NoSuchGroupError
+ """
+ if self._args is not None:
+ raise ArgsAlreadyParsedError("reset before unregistering options")
+
+ if group is not None:
+ self._get_group(group)._unregister_opt(opt)
+ elif opt.dest in self._opts:
+ del self._opts[opt.dest]
+
+ @__clear_cache
+ def unregister_opts(self, opts, group=None):
+        """Unregister multiple option schemas at once."""
+ for opt in opts:
+ self.unregister_opt(opt, group, clear_cache=False)
+
+ def import_opt(self, name, module_str, group=None):
+ """Import an option definition from a module.
+
+ Import a module and check that a given option is registered.
+
+ This is intended for use with global configuration objects
+ like cfg.CONF where modules commonly register options with
+ CONF at module load time. If one module requires an option
+ defined by another module it can use this method to explicitly
+ declare the dependency.
+
+ :param name: the name/dest of the opt
+ :param module_str: the name of a module to import
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ __import__(module_str)
+ self._get_opt_info(name, group)
+
+ @__clear_cache
+ def set_override(self, name, override, group=None):
+ """Override an opt value.
+
+ Override the command line, config file and default values of a
+ given option.
+
+ :param name: the name/dest of the opt
+ :param override: the override value
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info['override'] = override
+
+ @__clear_cache
+ def set_default(self, name, default, group=None):
+ """Override an opt's default value.
+
+ Override the default value of given option. A command line or
+ config file value will still take precedence over this default.
+
+ :param name: the name/dest of the opt
+ :param default: the default value
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info['default'] = default
+
+ @__clear_cache
+ def clear_override(self, name, group=None):
+        """Clear an override of an opt value.
+
+ Clear a previously set override of the command line, config file
+ and default values of a given option.
+
+ :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('override', None)
+
+ @__clear_cache
+ def clear_default(self, name, group=None):
+        """Clear an override of an opt's default value.
+
+ Clear a previously set override of the default value of given option.
+
+ :param name: the name/dest of the opt
+        :param group: an optional OptGroup object or group name
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ opt_info = self._get_opt_info(name, group)
+ opt_info.pop('default', None)
+
+ def _all_opt_infos(self):
+        """A generator function for iterating over opt infos."""
+ for info in self._opts.values():
+ yield info, None
+ for group in self._groups.values():
+ for info in group._opts.values():
+ yield info, group
+
+ def _all_opts(self):
+        """A generator function for iterating over opts."""
+ for info, group in self._all_opt_infos():
+ yield info['opt'], group
+
+ def _unset_defaults_and_overrides(self):
+ """Unset any default or override on all options."""
+ for info, group in self._all_opt_infos():
+ info.pop('default', None)
+ info.pop('override', None)
+
+ def disable_interspersed_args(self):
+ """Set parsing to stop on the first non-option.
+
+        If this method is called, then parsing e.g.
+
+ script --verbose cmd --debug /tmp/mything
+
+ will no longer return:
+
+ ['cmd', '/tmp/mything']
+
+ as the leftover arguments, but will instead return:
+
+ ['cmd', '--debug', '/tmp/mything']
+
+ i.e. argument parsing is stopped at the first non-option argument.
+ """
+ self._disable_interspersed_args = True
+
+ def enable_interspersed_args(self):
+ """Set parsing to not stop on the first non-option.
+
+        This is the default behaviour."""
+ self._disable_interspersed_args = False
+
+ def find_file(self, name):
+        """Locate a file alongside the config files.
+
+ Search for a file with the supplied basename in the directories
+ which we have already loaded config files from and other known
+ configuration directories.
+
+ The directory, if any, supplied by the config_dir option is
+ searched first. Then the config_file option is iterated over
+ and each of the base directories of the config_files values
+ are searched. Failing both of these, the standard directories
+ searched by the module level find_config_files() function is
+ used. The first matching file is returned.
+
+        :param name: the filename, e.g. 'policy.json'
+ :returns: the path to a matching file, or None
+ """
+ dirs = []
+ if self.config_dir:
+ dirs.append(_fixpath(self.config_dir))
+
+ for cf in reversed(self.config_file):
+ dirs.append(os.path.dirname(_fixpath(cf)))
+
+ dirs.extend(_get_config_dirs(self.project))
+
+ return _search_dirs(dirs, name)
+
+ def log_opt_values(self, logger, lvl):
+ """Log the value of all registered opts.
+
+ It's often useful for an app to log its configuration to a log file at
+        startup for debugging. This method dumps the entire config state to
+ the supplied logger at a given log level.
+
+ :param logger: a logging.Logger object
+ :param lvl: the log level (e.g. logging.DEBUG) arg to logger.log()
+ """
+ logger.log(lvl, "*" * 80)
+ logger.log(lvl, "Configuration options gathered from:")
+ logger.log(lvl, "command line args: %s", self._args)
+ logger.log(lvl, "config files: %s", self.config_file)
+ logger.log(lvl, "=" * 80)
+
+ def _sanitize(opt, value):
+ """Obfuscate values of options declared secret"""
+ return value if not opt.secret else '*' * len(str(value))
+
+ for opt_name in sorted(self._opts):
+ opt = self._get_opt_info(opt_name)['opt']
+ logger.log(lvl, "%-30s = %s", opt_name,
+ _sanitize(opt, getattr(self, opt_name)))
+
+ for group_name in self._groups:
+ group_attr = self.GroupAttr(self, self._get_group(group_name))
+ for opt_name in sorted(self._groups[group_name]._opts):
+ opt = self._get_opt_info(opt_name, group_name)['opt']
+ logger.log(lvl, "%-30s = %s",
+ "%s.%s" % (group_name, opt_name),
+ _sanitize(opt, getattr(group_attr, opt_name)))
+
+ logger.log(lvl, "*" * 80)
+
+ def print_usage(self, file=None):
+ """Print the usage message for the current program."""
+ self._oparser.print_usage(file)
+
+ def print_help(self, file=None):
+ """Print the help message for the current program."""
+ self._oparser.print_help(file)
+
+ def _get(self, name, group=None):
+ if isinstance(group, OptGroup):
+ key = (group.name, name)
+ else:
+ key = (group, name)
+ try:
+ return self.__cache[key]
+ except KeyError:
+ value = self._substitute(self._do_get(name, group))
+ self.__cache[key] = value
+ return value
+
+ def _do_get(self, name, group=None):
+ """Look up an option value.
+
+ :param name: the opt name (or 'dest', more precisely)
+ :param group: an OptGroup
+ :returns: the option value, or a GroupAttr object
+ :raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
+ TemplateSubstitutionError
+ """
+ if group is None and name in self._groups:
+ return self.GroupAttr(self, self._get_group(name))
+
+ info = self._get_opt_info(name, group)
+ opt = info['opt']
+
+ if 'override' in info:
+ return info['override']
+
+ values = []
+ if self._cparser is not None:
+ section = group.name if group is not None else 'DEFAULT'
+ try:
+ value = opt._get_from_config_parser(self._cparser, section)
+ except KeyError:
+ pass
+ except ValueError as ve:
+ raise ConfigFileValueError(str(ve))
+ else:
+ if not opt.multi:
+ # No need to continue since the last value wins
+ return value[-1]
+ values.extend(value)
+
+ name = name if group is None else group.name + '_' + name
+ value = self._cli_values.get(name)
+ if value is not None:
+ if not opt.multi:
+ return value
+
+ return value + values
+
+ if values:
+ return values
+
+ if 'default' in info:
+ return info['default']
+
+ return opt.default
+
+ def _substitute(self, value):
+ """Perform string template substitution.
+
+ Substitute any template variables (e.g. $foo, ${bar}) in the supplied
+ string value(s) with opt values.
+
+ :param value: the string value, or list of string values
+ :returns: the substituted string(s)
+ """
+ if isinstance(value, list):
+ return [self._substitute(i) for i in value]
+ elif isinstance(value, str):
+ tmpl = string.Template(value)
+ return tmpl.safe_substitute(self.StrSubWrapper(self))
+ else:
+ return value
+
+ def _get_group(self, group_or_name, autocreate=False):
+        """Looks up an OptGroup object.
+
+ Helper function to return an OptGroup given a parameter which can
+ either be the group's name or an OptGroup object.
+
+ The OptGroup object returned is from the internal dict of OptGroup
+ objects, which will be a copy of any OptGroup object that users of
+ the API have access to.
+
+ :param group_or_name: the group's name or the OptGroup object itself
+ :param autocreate: whether to auto-create the group if it's not found
+ :raises: NoSuchGroupError
+ """
+ group = group_or_name if isinstance(group_or_name, OptGroup) else None
+ group_name = group.name if group else group_or_name
+
+        if group_name not in self._groups:
+            if group is not None or not autocreate:
+ raise NoSuchGroupError(group_name)
+
+ self.register_group(OptGroup(name=group_name))
+
+ return self._groups[group_name]
+
+ def _get_opt_info(self, opt_name, group=None):
+ """Return the (opt, override, default) dict for an opt.
+
+ :param opt_name: an opt name/dest
+ :param group: an optional group name or OptGroup object
+ :raises: NoSuchOptError, NoSuchGroupError
+ """
+ if group is None:
+ opts = self._opts
+ else:
+ group = self._get_group(group)
+ opts = group._opts
+
+        if opt_name not in opts:
+ raise NoSuchOptError(opt_name, group)
+
+ return opts[opt_name]
+
+ def _parse_config_files(self):
+ """Parse the config files from --config-file and --config-dir.
+
+ :raises: ConfigFilesNotFoundError, ConfigFileParseError
+ """
+ config_files = list(self.config_file)
+
+ if self.config_dir:
+ config_dir_glob = os.path.join(self.config_dir, '*.conf')
+ config_files += sorted(glob.glob(config_dir_glob))
+
+ config_files = [_fixpath(p) for p in config_files]
+
+ self._cparser = MultiConfigParser()
+
+ try:
+ read_ok = self._cparser.read(config_files)
+ except iniparser.ParseError as pe:
+ raise ConfigFileParseError(pe.filename, str(pe))
+
+ if read_ok != config_files:
+ not_read_ok = filter(lambda f: f not in read_ok, config_files)
+ raise ConfigFilesNotFoundError(not_read_ok)
+
+ def _check_required_opts(self):
+ """Check that all opts marked as required have values specified.
+
+ :raises: RequiredOptError
+ """
+ for info, group in self._all_opt_infos():
+ opt = info['opt']
+
+ if opt.required:
+ if ('default' in info or 'override' in info):
+ continue
+
+ if self._get(opt.name, group) is None:
+ raise RequiredOptError(opt.name, group)
+
+ def _parse_cli_opts(self, args):
+ """Parse command line options.
+
+ Initializes the command line option parser and parses the supplied
+ command line arguments.
+
+ :param args: the command line arguments
+ :returns: a dict of parsed option values
+ :raises: SystemExit, DuplicateOptError
+
+ """
+ self._args = args
+
+ for opt, group in self._all_opts():
+ opt._add_to_cli(self._oparser, group)
+
+ values, leftovers = self._oparser.parse_args(args)
+
+ return vars(values), leftovers
+
+ class GroupAttr(collections.Mapping):
+
+ """
+        A helper class that exposes the option values of a group both as a
+        mapping and as attributes.
+ """
+
+ def __init__(self, conf, group):
+ """Construct a GroupAttr object.
+
+ :param conf: a ConfigOpts object
+ :param group: an OptGroup object
+ """
+ self.conf = conf
+ self.group = group
+
+ def __getattr__(self, name):
+ """Look up an option value and perform template substitution."""
+ return self.conf._get(name, self.group)
+
+ def __getitem__(self, key):
+ """Look up an option value and perform string substitution."""
+ return self.__getattr__(key)
+
+ def __contains__(self, key):
+ """Return True if key is the name of a registered opt or group."""
+ return key in self.group._opts
+
+ def __iter__(self):
+ """Iterate over all registered opt and group names."""
+ for key in self.group._opts.keys():
+ yield key
+
+ def __len__(self):
+ """Return the number of options and option groups."""
+ return len(self.group._opts)
+
+ class StrSubWrapper(object):
+
+ """
+ A helper class exposing opt values as a dict for string substitution.
+ """
+
+ def __init__(self, conf):
+ """Construct a StrSubWrapper object.
+
+ :param conf: a ConfigOpts object
+ """
+ self.conf = conf
+
+ def __getitem__(self, key):
+ """Look up an opt value from the ConfigOpts object.
+
+ :param key: an opt name
+ :returns: an opt value
+ :raises: TemplateSubstitutionError if attribute is a group
+ """
+ value = getattr(self.conf, key)
+ if isinstance(value, self.conf.GroupAttr):
+ raise TemplateSubstitutionError(
+ 'substituting group %s not supported' % key)
+ return value
+
+
+class CommonConfigOpts(ConfigOpts):
+
+ DEFAULT_LOG_FORMAT = "%(asctime)s %(levelname)8s [%(name)s] %(message)s"
+ DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
+
+ common_cli_opts = [
+ BoolOpt('debug',
+ short='d',
+ default=False,
+ help='Print debugging output'),
+ BoolOpt('verbose',
+ short='v',
+ default=False,
+ help='Print more verbose output'),
+ ]
+
+ logging_cli_opts = [
+ StrOpt('log-config',
+ metavar='PATH',
+ help='If this option is specified, the logging configuration '
+ 'file specified is used and overrides any other logging '
+ 'options specified. Please see the Python logging module '
+ 'documentation for details on logging configuration '
+ 'files.'),
+ StrOpt('log-format',
+ default=DEFAULT_LOG_FORMAT,
+ metavar='FORMAT',
+ help='A logging.Formatter log message format string which may '
+ 'use any of the available logging.LogRecord attributes. '
+ 'Default: %default'),
+ StrOpt('log-date-format',
+ default=DEFAULT_LOG_DATE_FORMAT,
+ metavar='DATE_FORMAT',
+ help='Format string for %(asctime)s in log records. '
+ 'Default: %default'),
+ StrOpt('log-file',
+ metavar='PATH',
+ help='(Optional) Name of log file to output to. '
+ 'If not set, logging will go to stdout.'),
+ StrOpt('log-dir',
+ help='(Optional) The directory to keep log files in '
+                    '(will be prepended to --log-file)'),
+ BoolOpt('use-syslog',
+ default=False,
+ help='Use syslog for logging.'),
+ StrOpt('syslog-log-facility',
+ default='LOG_USER',
+ help='syslog facility to receive log lines')
+ ]
+
+ def __init__(self):
+ super(CommonConfigOpts, self).__init__()
+ self.register_cli_opts(self.common_cli_opts)
+ self.register_cli_opts(self.logging_cli_opts)
+
+
+CONF = CommonConfigOpts()
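+
+
+# A minimal usage sketch for the module-level CONF object; the opt name
+# 'bind-port' and the argv values below are hypothetical:
+#
+#     from moniker.openstack.common import cfg
+#
+#     cfg.CONF.register_cli_opts([cfg.IntOpt('bind-port', default=9001)])
+#     cfg.CONF(['--bind-port', '9101'])  # parse CLI args and config files
+#     assert cfg.CONF.bind_port == 9101
+#     assert cfg.CONF.debug is False     # registered by common_cli_opts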
diff --git a/moniker/openstack/common/context.py b/moniker/openstack/common/context.py
new file mode 100644
index 00000000..dd7dd04c
--- /dev/null
+++ b/moniker/openstack/common/context.py
@@ -0,0 +1,81 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Simple class that stores security context information in the web request.
+
+Projects should subclass this class if they wish to enhance the request
+context or provide additional information in their specific WSGI pipeline.
+"""
+
+import itertools
+import uuid
+
+
+def generate_request_id():
+ return 'req-' + str(uuid.uuid4())
+
+
+class RequestContext(object):
+
+ """
+ Stores information about the security context under which the user
+ accesses the system, as well as additional request information.
+ """
+
+ def __init__(self, auth_tok=None, user=None, tenant=None, is_admin=False,
+ read_only=False, show_deleted=False, request_id=None):
+ self.auth_tok = auth_tok
+ self.user = user
+ self.tenant = tenant
+ self.is_admin = is_admin
+ self.read_only = read_only
+ self.show_deleted = show_deleted
+ if not request_id:
+ request_id = generate_request_id()
+ self.request_id = request_id
+
+ def to_dict(self):
+ return {'user': self.user,
+ 'tenant': self.tenant,
+ 'is_admin': self.is_admin,
+ 'read_only': self.read_only,
+ 'show_deleted': self.show_deleted,
+ 'auth_token': self.auth_tok,
+ 'request_id': self.request_id}
+
+
+def get_admin_context(show_deleted="no"):
+ context = RequestContext(None,
+ tenant=None,
+ is_admin=True,
+ show_deleted=show_deleted)
+ return context
+
+
+def get_context_from_function_and_args(function, args, kwargs):
+ """Find an arg of type RequestContext and return it.
+
+ This is useful in a couple of decorators where we don't
+ know much about the function we're wrapping.
+ """
+
+ for arg in itertools.chain(kwargs.values(), args):
+ if isinstance(arg, RequestContext):
+ return arg
+
+ return None
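+
+
+# A short usage sketch; the token/user/tenant values below are hypothetical:
+#
+#     ctxt = RequestContext(auth_tok='token', user='alice', tenant='acme')
+#     ctxt.to_dict()['request_id']  # -> 'req-<uuid4>'
+#
+#     admin = get_admin_context()
+#     assert admin.is_admin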
diff --git a/moniker/openstack/common/excutils.py b/moniker/openstack/common/excutils.py
new file mode 100644
index 00000000..5dd48301
--- /dev/null
+++ b/moniker/openstack/common/excutils.py
@@ -0,0 +1,49 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception related utilities.
+"""
+
+import contextlib
+import logging
+import sys
+import traceback
+
+
+@contextlib.contextmanager
+def save_and_reraise_exception():
+ """Save current exception, run some code and then re-raise.
+
+ In some cases the exception context can be cleared, resulting in None
+ being attempted to be re-raised after an exception handler is run. This
+ can happen when eventlet switches greenthreads or when running an
+ exception handler, code raises and catches an exception. In both
+ cases the exception context will be cleared.
+
+ To work around this, we save the exception state, run handler code, and
+ then re-raise the original exception. If another exception occurs, the
+ saved exception is logged and the new exception is re-raised.
+ """
+ type_, value, tb = sys.exc_info()
+ try:
+ yield
+ except Exception:
+ logging.error('Original exception being dropped: %s' %
+ (traceback.format_exception(type_, value, tb)))
+ raise
+ raise type_, value, tb
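+
+
+# A minimal sketch of the intended usage; do_work() and cleanup() are
+# hypothetical:
+#
+#     try:
+#         do_work()
+#     except Exception:
+#         with save_and_reraise_exception():
+#             cleanup()  # if this raises, the original exception is
+#                        # logged and the new one propagates; otherwise
+#                        # the original exception is re-raised on exit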
diff --git a/moniker/openstack/common/gettextutils.py b/moniker/openstack/common/gettextutils.py
new file mode 100644
index 00000000..235350cc
--- /dev/null
+++ b/moniker/openstack/common/gettextutils.py
@@ -0,0 +1,33 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+gettext for openstack-common modules.
+
+Usual usage in an openstack.common module:
+
+ from openstack.common.gettextutils import _
+"""
+
+import gettext
+
+
+t = gettext.translation('openstack-common', 'locale', fallback=True)
+
+
+def _(msg):
+ return t.ugettext(msg)
diff --git a/moniker/openstack/common/importutils.py b/moniker/openstack/common/importutils.py
new file mode 100644
index 00000000..f45372b4
--- /dev/null
+++ b/moniker/openstack/common/importutils.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Import related utilities and helper functions.
+"""
+
+import sys
+import traceback
+
+
+def import_class(import_str):
+ """Returns a class from a string including module and class"""
+ mod_str, _sep, class_str = import_str.rpartition('.')
+ try:
+ __import__(mod_str)
+ return getattr(sys.modules[mod_str], class_str)
+ except (ValueError, AttributeError), exc:
+ raise ImportError('Class %s cannot be found (%s)' %
+ (class_str,
+ traceback.format_exception(*sys.exc_info())))
+
+
+def import_object(import_str, *args, **kwargs):
+ """Import a class and return an instance of it."""
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_object_ns(name_space, import_str, *args, **kwargs):
+ """
+ Import a class and return an instance of it, first by trying
+    to find the class in a default namespace, then falling back to
+ a full path if not found in the default namespace.
+ """
+ import_value = "%s.%s" % (name_space, import_str)
+ try:
+ return import_class(import_value)(*args, **kwargs)
+ except ImportError:
+ return import_class(import_str)(*args, **kwargs)
+
+
+def import_module(import_str):
+ """Import a module."""
+ __import__(import_str)
+ return sys.modules[import_str]
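+
+
+# Usage sketch; the dotted paths below are illustrative, not real modules:
+#
+#     cls = import_class('moniker.backend.impl_fake.FakeBackend')
+#     backend = import_object('moniker.backend.impl_fake.FakeBackend',
+#                             host='localhost')
+#     utils = import_module('moniker.utils')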
diff --git a/moniker/openstack/common/iniparser.py b/moniker/openstack/common/iniparser.py
new file mode 100644
index 00000000..24128444
--- /dev/null
+++ b/moniker/openstack/common/iniparser.py
@@ -0,0 +1,130 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+class ParseError(Exception):
+ def __init__(self, message, lineno, line):
+ self.msg = message
+ self.line = line
+ self.lineno = lineno
+
+ def __str__(self):
+ return 'at line %d, %s: %r' % (self.lineno, self.msg, self.line)
+
+
+class BaseParser(object):
+ lineno = 0
+ parse_exc = ParseError
+
+ def _assignment(self, key, value):
+ self.assignment(key, value)
+ return None, []
+
+ def _get_section(self, line):
+ if line[-1] != ']':
+ return self.error_no_section_end_bracket(line)
+ if len(line) <= 2:
+ return self.error_no_section_name(line)
+
+ return line[1:-1]
+
+ def _split_key_value(self, line):
+ colon = line.find(':')
+ equal = line.find('=')
+ if colon < 0 and equal < 0:
+ return self.error_invalid_assignment(line)
+
+ if colon < 0 or (equal >= 0 and equal < colon):
+ key, value = line[:equal], line[equal + 1:]
+ else:
+ key, value = line[:colon], line[colon + 1:]
+
+ value = value.strip()
+ if ((value and value[0] == value[-1]) and
+ (value[0] == "\"" or value[0] == "'")):
+ value = value[1:-1]
+ return key.strip(), [value]
+
+ def parse(self, lineiter):
+ key = None
+ value = []
+
+ for line in lineiter:
+ self.lineno += 1
+
+ line = line.rstrip()
+ if not line:
+ # Blank line, ends multi-line values
+ if key:
+ key, value = self._assignment(key, value)
+ continue
+ elif line[0] in (' ', '\t'):
+ # Continuation of previous assignment
+ if key is None:
+ self.error_unexpected_continuation(line)
+ else:
+ value.append(line.lstrip())
+ continue
+
+ if key:
+ # Flush previous assignment, if any
+ key, value = self._assignment(key, value)
+
+ if line[0] == '[':
+ # Section start
+ section = self._get_section(line)
+ if section:
+ self.new_section(section)
+ elif line[0] in '#;':
+ self.comment(line[1:].lstrip())
+ else:
+ key, value = self._split_key_value(line)
+ if not key:
+ return self.error_empty_key(line)
+
+ if key:
+ # Flush previous assignment, if any
+ self._assignment(key, value)
+
+ def assignment(self, key, value):
+ """Called when a full assignment is parsed"""
+ raise NotImplementedError()
+
+ def new_section(self, section):
+ """Called when a new section is started"""
+ raise NotImplementedError()
+
+ def comment(self, comment):
+ """Called when a comment is parsed"""
+ pass
+
+ def error_invalid_assignment(self, line):
+ raise self.parse_exc("No ':' or '=' found in assignment",
+ self.lineno, line)
+
+ def error_empty_key(self, line):
+ raise self.parse_exc('Key cannot be empty', self.lineno, line)
+
+ def error_unexpected_continuation(self, line):
+ raise self.parse_exc('Unexpected continuation line',
+ self.lineno, line)
+
+ def error_no_section_end_bracket(self, line):
+ raise self.parse_exc('Invalid section (must end with ])',
+ self.lineno, line)
+
+ def error_no_section_name(self, line):
+ raise self.parse_exc('Empty section name', self.lineno, line)
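+
+
+# A minimal concrete parser, sketched; 'DictParser' is a hypothetical name:
+#
+#     class DictParser(BaseParser):
+#         def __init__(self):
+#             self.values = {}
+#
+#         def assignment(self, key, value):
+#             self.values[key] = value
+#
+#         def new_section(self, section):
+#             pass
+#
+#     parser = DictParser()
+#     parser.parse(['a = 1', 'b: two'])
+#     parser.values  # -> {'a': ['1'], 'b': ['two']}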
diff --git a/moniker/openstack/common/jsonutils.py b/moniker/openstack/common/jsonutils.py
new file mode 100644
index 00000000..8df2718f
--- /dev/null
+++ b/moniker/openstack/common/jsonutils.py
@@ -0,0 +1,154 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+'''
+JSON related utilities.
+
+This module provides a few things:
+
+ 1) A handy function for getting an object down to something that can be
+ JSON serialized. See to_primitive().
+
+ 2) Wrappers around loads() and dumps(). The dumps() wrapper will
+ automatically use to_primitive() for you if needed.
+
+ 3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
+ is available.
+'''
+
+
+import datetime
+import inspect
+import itertools
+import json
+import xmlrpclib
+import uuid
+import ipaddr
+
+from moniker.openstack.common import timeutils
+
+
+def to_primitive(value, convert_instances=False, level=0):
+ """Convert a complex object into primitives.
+
+ Handy for JSON serialization. We can optionally handle instances,
+ but since this is a recursive function, we could have cyclical
+ data structures.
+
+ To handle cyclical data structures we could track the actual objects
+ visited in a set, but not all objects are hashable. Instead we just
+ track the depth of the object inspections and don't go too deep.
+
+ Therefore, convert_instances=True is lossy ... be aware.
+
+ """
+ nasty = [inspect.ismodule, inspect.isclass, inspect.ismethod,
+ inspect.isfunction, inspect.isgeneratorfunction,
+ inspect.isgenerator, inspect.istraceback, inspect.isframe,
+ inspect.iscode, inspect.isbuiltin, inspect.isroutine,
+ inspect.isabstract]
+ for test in nasty:
+ if test(value):
+ return unicode(value)
+
+    # A value of itertools.count isn't caught by the inspect checks
+    # above and results in an infinite loop when list(value) is called.
+ if type(value) == itertools.count:
+ return unicode(value)
+
+ # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
+ # tests that raise an exception in a mocked method that
+ # has a @wrap_exception with a notifier will fail. If
+ # we up the dependency to 0.5.4 (when it is released) we
+ # can remove this workaround.
+ if getattr(value, '__module__', None) == 'mox':
+ return 'mock'
+
+ if level > 3:
+ return '?'
+
+ # The try block may not be necessary after the class check above,
+ # but just in case ...
+ try:
+ # It's not clear why xmlrpclib created their own DateTime type, but
+ # for our purposes, make it a datetime type which is explicitly
+ # handled
+ if isinstance(value, xmlrpclib.DateTime):
+ value = datetime.datetime(*tuple(value.timetuple())[:6])
+
+ if isinstance(value, (list, tuple)):
+ o = []
+ for v in value:
+ o.append(to_primitive(v, convert_instances=convert_instances,
+ level=level))
+ return o
+ elif isinstance(value, dict):
+ o = {}
+ for k, v in value.iteritems():
+ o[k] = to_primitive(v, convert_instances=convert_instances,
+ level=level)
+ return o
+ elif isinstance(value, datetime.datetime):
+ return timeutils.strtime(value)
+ elif isinstance(value, uuid.UUID):
+ return str(value)
+ elif isinstance(value, ipaddr.IPAddress):
+ return str(value)
+ elif hasattr(value, 'iteritems'):
+ return to_primitive(dict(value.iteritems()),
+ convert_instances=convert_instances,
+ level=level + 1)
+ elif hasattr(value, '__iter__'):
+ return to_primitive(list(value),
+ convert_instances=convert_instances,
+ level=level)
+ elif convert_instances and hasattr(value, '__dict__'):
+ # Likely an instance of something. Watch for cycles.
+ # Ignore class member vars.
+ return to_primitive(value.__dict__,
+ convert_instances=convert_instances,
+ level=level + 1)
+ else:
+ return value
+ except TypeError, e:
+        # Class objects are tricky since they may define something like
+        # __iter__, but it isn't callable as list().
+ return unicode(value)
+
+
+def dumps(value, default=to_primitive, **kwargs):
+ return json.dumps(value, default=default, **kwargs)
+
+
+def loads(s):
+ return json.loads(s)
+
+
+def load(s):
+ return json.load(s)
+
+
+try:
+ import anyjson
+except ImportError:
+ pass
+else:
+ anyjson._modules.append((__name__, 'dumps', TypeError,
+ 'loads', ValueError, 'load'))
+ anyjson.force_implementation(__name__)
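+
+
+# A short sketch of the dumps() wrapper handling a non-JSON type through
+# to_primitive(); the exact timestamp format is whatever timeutils.strtime
+# produces:
+#
+#     import datetime
+#     dumps({'when': datetime.datetime(2012, 9, 25, 8, 0, 23)})
+#     # -> '{"when": "2012-09-25T08:00:23.000000"}'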
diff --git a/moniker/openstack/common/local.py b/moniker/openstack/common/local.py
new file mode 100644
index 00000000..19d96273
--- /dev/null
+++ b/moniker/openstack/common/local.py
@@ -0,0 +1,37 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Greenthread local storage of variables using weak references"""
+
+import weakref
+
+from eventlet import corolocal
+
+
+class WeakLocal(corolocal.local):
+ def __getattribute__(self, attr):
+ rval = corolocal.local.__getattribute__(self, attr)
+ if rval:
+ rval = rval()
+ return rval
+
+ def __setattr__(self, attr, value):
+ value = weakref.ref(value)
+ return corolocal.local.__setattr__(self, attr, value)
+
+
+store = WeakLocal()
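+
+
+# Usage sketch: values are stored as weak references, so callers must keep a
+# strong reference alive; 'context' stands for a hypothetical request context:
+#
+#     store.context = context  # stores weakref.ref(context)
+#     store.context            # -> context, or None once it is collected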
diff --git a/moniker/openstack/common/log.py b/moniker/openstack/common/log.py
new file mode 100644
index 00000000..f3c179a7
--- /dev/null
+++ b/moniker/openstack/common/log.py
@@ -0,0 +1,453 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Openstack logging handler.
+
+This module adds to logging functionality by adding the option to specify
+a context object when calling the various log methods. If the context object
+is not specified, default formatting is used. Additionally, an instance uuid
+may be passed as part of the log message, which is intended to make it easier
+for admins to find messages related to a specific instance.
+
+It also allows setting of formatting information through conf.
+
+"""
+
+import cStringIO
+import inspect
+import itertools
+import logging
+import logging.config
+import logging.handlers
+import os
+import stat
+import sys
+import traceback
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common import local
+from moniker.openstack.common import notifier
+
+
+log_opts = [
+ cfg.StrOpt('logging_context_format_string',
+ default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
+ '%(user_id)s %(project_id)s] %(instance)s'
+ '%(message)s',
+ help='format string to use for log messages with context'),
+ cfg.StrOpt('logging_default_format_string',
+ default='%(asctime)s %(levelname)s %(name)s [-] %(instance)s'
+ '%(message)s',
+ help='format string to use for log messages without context'),
+ cfg.StrOpt('logging_debug_format_suffix',
+ default='from (pid=%(process)d) %(funcName)s '
+ '%(pathname)s:%(lineno)d',
+ help='data to append to log format when level is DEBUG'),
+ cfg.StrOpt('logging_exception_prefix',
+ default='%(asctime)s TRACE %(name)s %(instance)s',
+ help='prefix each line of exception output with this format'),
+ cfg.ListOpt('default_log_levels',
+ default=[
+ 'amqplib=WARN',
+ 'sqlalchemy=WARN',
+ 'boto=WARN',
+ 'suds=INFO',
+ 'keystone=INFO',
+ 'eventlet.wsgi.server=WARN'
+ ],
+ help='list of logger=LEVEL pairs'),
+ cfg.BoolOpt('publish_errors',
+ default=False,
+ help='publish error events'),
+
+ # NOTE(mikal): there are two options here because sometimes we are handed
+ # a full instance (and could include more information), and other times we
+ # are just handed a UUID for the instance.
+ cfg.StrOpt('instance_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance is passed with the log message, format '
+ 'it like this'),
+ cfg.StrOpt('instance_uuid_format',
+ default='[instance: %(uuid)s] ',
+ help='If an instance UUID is passed with the log message, '
+ 'format it like this'),
+]
+
+
+generic_log_opts = [
+ cfg.StrOpt('logdir',
+ default=None,
+ help='Log output to a per-service log file in named directory'),
+ cfg.StrOpt('logfile',
+ default=None,
+ help='Log output to a named file'),
+ cfg.BoolOpt('use_stderr',
+ default=True,
+ help='Log output to standard error'),
+ cfg.StrOpt('logfile_mode',
+ default='0644',
+ help='Default file mode used when creating log files'),
+]
+
+
+CONF = cfg.CONF
+CONF.register_opts(generic_log_opts)
+CONF.register_opts(log_opts)
+
+# our new audit level
+# NOTE(jkoelker) Since we synthesized an audit level, make the logging
+# module aware of it so it acts like other levels.
+logging.AUDIT = logging.INFO + 1
+logging.addLevelName(logging.AUDIT, 'AUDIT')
+
+
+try:
+ NullHandler = logging.NullHandler
+except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
+ class NullHandler(logging.Handler):
+ def handle(self, record):
+ pass
+
+ def emit(self, record):
+ pass
+
+ def createLock(self):
+ self.lock = None
+
+
+def _dictify_context(context):
+ if context is None:
+ return None
+ if not isinstance(context, dict) and getattr(context, 'to_dict', None):
+ context = context.to_dict()
+ return context
+
+
+def _get_binary_name():
+ return os.path.basename(inspect.stack()[-1][1])
+
+
+def _get_log_file_path(binary=None):
+ logfile = CONF.log_file or CONF.logfile
+ logdir = CONF.log_dir or CONF.logdir
+
+ if logfile and not logdir:
+ return logfile
+
+ if logfile and logdir:
+ return os.path.join(logdir, logfile)
+
+ if logdir:
+ binary = binary or _get_binary_name()
+ return '%s.log' % (os.path.join(logdir, binary),)
+
+
+class ContextAdapter(logging.LoggerAdapter):
+ warn = logging.LoggerAdapter.warning
+
+ def __init__(self, logger, project_name, version_string):
+ self.logger = logger
+ self.project = project_name
+ self.version = version_string
+
+ def audit(self, msg, *args, **kwargs):
+ self.log(logging.AUDIT, msg, *args, **kwargs)
+
+ def process(self, msg, kwargs):
+ if 'extra' not in kwargs:
+ kwargs['extra'] = {}
+ extra = kwargs['extra']
+
+ context = kwargs.pop('context', None)
+ if not context:
+ context = getattr(local.store, 'context', None)
+ if context:
+ extra.update(_dictify_context(context))
+
+ instance = kwargs.pop('instance', None)
+ instance_extra = ''
+ if instance:
+ instance_extra = CONF.instance_format % instance
+ else:
+ instance_uuid = kwargs.pop('instance_uuid', None)
+ if instance_uuid:
+ instance_extra = (CONF.instance_uuid_format
+ % {'uuid': instance_uuid})
+ extra.update({'instance': instance_extra})
+
+ extra.update({"project": self.project})
+ extra.update({"version": self.version})
+ extra['extra'] = extra.copy()
+ return msg, kwargs
+
+
+class JSONFormatter(logging.Formatter):
+ def __init__(self, fmt=None, datefmt=None):
+        # NOTE(jkoelker) we ignore the fmt argument, but it's still there
+ # since logging.config.fileConfig passes it.
+ self.datefmt = datefmt
+
+ def formatException(self, ei, strip_newlines=True):
+ lines = traceback.format_exception(*ei)
+ if strip_newlines:
+ lines = [itertools.ifilter(
+ lambda x: x,
+ line.rstrip().splitlines()) for line in lines]
+ lines = list(itertools.chain(*lines))
+ return lines
+
+ def format(self, record):
+ message = {'message': record.getMessage(),
+ 'asctime': self.formatTime(record, self.datefmt),
+ 'name': record.name,
+ 'msg': record.msg,
+ 'args': record.args,
+ 'levelname': record.levelname,
+ 'levelno': record.levelno,
+ 'pathname': record.pathname,
+ 'filename': record.filename,
+ 'module': record.module,
+ 'lineno': record.lineno,
+ 'funcname': record.funcName,
+ 'created': record.created,
+ 'msecs': record.msecs,
+ 'relative_created': record.relativeCreated,
+ 'thread': record.thread,
+ 'thread_name': record.threadName,
+ 'process_name': record.processName,
+ 'process': record.process,
+ 'traceback': None}
+
+ if hasattr(record, 'extra'):
+ message['extra'] = record.extra
+
+ if record.exc_info:
+ message['traceback'] = self.formatException(record.exc_info)
+
+ return jsonutils.dumps(message)
+
+
+class PublishErrorsHandler(logging.Handler):
+ def emit(self, record):
+ if ('moniker.openstack.common.notifier.log_notifier' in
+ CONF.notification_driver):
+ return
+ notifier.api.notify(None, 'error.publisher',
+ 'error_notification',
+ notifier.api.ERROR,
+ dict(error=record.msg))
+
+
+def _create_logging_excepthook(product_name):
+ def logging_excepthook(type, value, tb):
+ extra = {}
+ if CONF.verbose:
+ extra['exc_info'] = (type, value, tb)
+ getLogger(product_name).critical(str(value), **extra)
+ return logging_excepthook
+
+
+def setup(product_name):
+ """Setup logging."""
+ sys.excepthook = _create_logging_excepthook(product_name)
+
+ if CONF.log_config:
+ try:
+ logging.config.fileConfig(CONF.log_config)
+ except Exception:
+ traceback.print_exc()
+ raise
+ else:
+ _setup_logging_from_conf(product_name)
+
+
+def _find_facility_from_conf():
+ facility_names = logging.handlers.SysLogHandler.facility_names
+ facility = getattr(logging.handlers.SysLogHandler,
+ CONF.syslog_log_facility,
+ None)
+
+ if facility is None and CONF.syslog_log_facility in facility_names:
+ facility = facility_names.get(CONF.syslog_log_facility)
+
+ if facility is None:
+ valid_facilities = facility_names.keys()
+ consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
+ 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
+                  'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
+ 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
+ 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
+ valid_facilities.extend(consts)
+ raise TypeError(_('syslog facility must be one of: %s') %
+ ', '.join("'%s'" % fac
+ for fac in valid_facilities))
+
+ return facility
+
+
+def _setup_logging_from_conf(product_name):
+ log_root = getLogger(product_name).logger
+ for handler in log_root.handlers:
+ log_root.removeHandler(handler)
+
+ if CONF.use_syslog:
+ facility = _find_facility_from_conf()
+ syslog = logging.handlers.SysLogHandler(address='/dev/log',
+ facility=facility)
+ log_root.addHandler(syslog)
+
+ logpath = _get_log_file_path()
+ if logpath:
+ filelog = logging.handlers.WatchedFileHandler(logpath)
+ log_root.addHandler(filelog)
+
+ mode = int(CONF.logfile_mode, 8)
+ st = os.stat(logpath)
+ if st.st_mode != (stat.S_IFREG | mode):
+ os.chmod(logpath, mode)
+
+ if CONF.use_stderr:
+ streamlog = ColorHandler()
+ log_root.addHandler(streamlog)
+
+ elif not CONF.log_file:
+ # pass sys.stdout as a positional argument
+ # python2.6 calls the argument strm, in 2.7 it's stream
+ streamlog = logging.StreamHandler(sys.stdout)
+ log_root.addHandler(streamlog)
+
+ if CONF.publish_errors:
+ log_root.addHandler(PublishErrorsHandler(logging.ERROR))
+
+ for handler in log_root.handlers:
+ datefmt = CONF.log_date_format
+        if CONF.log_format:
+            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
+                                                   datefmt=datefmt))
+        else:
+            # Fall back to the context-aware formatter when no explicit
+            # log_format is configured.
+            handler.setFormatter(LegacyFormatter(datefmt=datefmt))
+
+ if CONF.verbose or CONF.debug:
+ log_root.setLevel(logging.DEBUG)
+ else:
+ log_root.setLevel(logging.INFO)
+
+ level = logging.NOTSET
+ for pair in CONF.default_log_levels:
+ mod, _sep, level_name = pair.partition('=')
+ level = logging.getLevelName(level_name)
+ logger = logging.getLogger(mod)
+ logger.setLevel(level)
+ for handler in log_root.handlers:
+ logger.addHandler(handler)
+
+_loggers = {}
+
+
+def getLogger(name='unknown', version='unknown'):
+ if name not in _loggers:
+ _loggers[name] = ContextAdapter(logging.getLogger(name),
+ name,
+ version)
+ return _loggers[name]
+
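+# Typical usage sketch within a module; the messages and the ctxt object are
+# hypothetical:
+#
+#     LOG = getLogger(__name__)
+#     LOG.info('starting up')
+#     LOG.audit('privileged operation', context=ctxt)  # AUDIT == INFO + 1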
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.INFO):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
+
+
+class LegacyFormatter(logging.Formatter):
+ """A context.RequestContext aware formatter configured through flags.
+
+ The flags used to set format strings are: logging_context_format_string
+ and logging_default_format_string. You can also specify
+ logging_debug_format_suffix to append extra formatting if the log level is
+ debug.
+
+ For information about what variables are available for the formatter see:
+ http://docs.python.org/library/logging.html#formatter
+
+ """
+
+ def format(self, record):
+ """Uses contextstring if request_id is set, otherwise default."""
+        # NOTE(sdague): default the fancier formatting params
+ # to an empty string so we don't throw an exception if
+ # they get used
+ for key in ('instance', 'color'):
+ if key not in record.__dict__:
+ record.__dict__[key] = ''
+
+ if record.__dict__.get('request_id', None):
+ self._fmt = CONF.logging_context_format_string
+ else:
+ self._fmt = CONF.logging_default_format_string
+
+ if (record.levelno == logging.DEBUG and
+ CONF.logging_debug_format_suffix):
+ self._fmt += " " + CONF.logging_debug_format_suffix
+
+        # Cache this on the record, Logger will respect our formatted copy
+ if record.exc_info:
+ record.exc_text = self.formatException(record.exc_info, record)
+ return logging.Formatter.format(self, record)
+
+ def formatException(self, exc_info, record=None):
+ """Format exception output with CONF.logging_exception_prefix."""
+ if not record:
+ return logging.Formatter.formatException(self, exc_info)
+
+ stringbuffer = cStringIO.StringIO()
+ traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
+ None, stringbuffer)
+ lines = stringbuffer.getvalue().split('\n')
+ stringbuffer.close()
+
+ if CONF.logging_exception_prefix.find('%(asctime)') != -1:
+ record.asctime = self.formatTime(record, self.datefmt)
+
+ formatted_lines = []
+ for line in lines:
+ pl = CONF.logging_exception_prefix % record.__dict__
+ fl = '%s%s' % (pl, line)
+ formatted_lines.append(fl)
+ return '\n'.join(formatted_lines)
+
+
+class ColorHandler(logging.StreamHandler):
+ LEVEL_COLORS = {
+ logging.DEBUG: '\033[00;32m', # GREEN
+ logging.INFO: '\033[00;36m', # CYAN
+ logging.AUDIT: '\033[01;36m', # BOLD CYAN
+ logging.WARN: '\033[01;33m', # BOLD YELLOW
+ logging.ERROR: '\033[01;31m', # BOLD RED
+ logging.CRITICAL: '\033[01;31m', # BOLD RED
+ }
+
+ def format(self, record):
+ record.color = self.LEVEL_COLORS[record.levelno]
+ return logging.StreamHandler.format(self, record)
diff --git a/moniker/openstack/common/loopingcall.py b/moniker/openstack/common/loopingcall.py
new file mode 100644
index 00000000..adbc6268
--- /dev/null
+++ b/moniker/openstack/common/loopingcall.py
@@ -0,0 +1,88 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import sys
+
+from eventlet import event
+from eventlet import greenthread
+
+from moniker.openstack.common import log as logging
+from moniker.openstack.common.gettextutils import _
+
+LOG = logging.getLogger(__name__)
+
+
+class LoopingCallDone(Exception):
+ """Exception to break out and stop a LoopingCall.
+
+ The poll-function passed to LoopingCall can raise this exception to
+ break out of the loop normally. This is somewhat analogous to
+ StopIteration.
+
+ An optional return-value can be included as the argument to the exception;
+ this return-value will be returned by LoopingCall.wait()
+
+ """
+
+ def __init__(self, retvalue=True):
+ """:param retvalue: Value that LoopingCall.wait() should return."""
+ self.retvalue = retvalue
+
+
+class LoopingCall(object):
+ def __init__(self, f=None, *args, **kw):
+ self.args = args
+ self.kw = kw
+ self.f = f
+ self._running = False
+
+ def start(self, interval, initial_delay=None):
+ self._running = True
+ done = event.Event()
+
+ def _inner():
+ if initial_delay:
+ greenthread.sleep(initial_delay)
+
+ try:
+ while self._running:
+ self.f(*self.args, **self.kw)
+ if not self._running:
+ break
+ greenthread.sleep(interval)
+ except LoopingCallDone, e:
+ self.stop()
+ done.send(e.retvalue)
+ except Exception:
+ LOG.exception(_('in looping call'))
+ done.send_exception(*sys.exc_info())
+ return
+ else:
+ done.send(True)
+
+ self.done = done
+
+ greenthread.spawn(_inner)
+ return self.done
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
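+
+
+# A minimal sketch of driving a LoopingCall; _heartbeat() is hypothetical:
+#
+#     def _heartbeat():
+#         LOG.debug('tick')
+#
+#     timer = LoopingCall(_heartbeat)
+#     timer.start(interval=1.0)
+#     # ... later, from another greenthread ...
+#     timer.stop()
+#     timer.wait()  # returns once the loop has exited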
diff --git a/moniker/openstack/common/manager.py b/moniker/openstack/common/manager.py
new file mode 100644
index 00000000..190afe07
--- /dev/null
+++ b/moniker/openstack/common/manager.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Base Manager class.
+
+Managers are responsible for a certain aspect of the system. It is a logical
+grouping of code relating to a portion of the system. In general other
+components should be using the manager to make changes to the components that
+it is responsible for.
+
+For example, other components that need to deal with volumes in some way,
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database. This allows us to keep all of the code
+relating to volumes in the same place.
+
+We have adopted a basic strategy of Smart managers and dumb data, which means
+rather than attaching methods to data objects, components should call manager
+methods that act on the data.
+
+Methods on managers that can be executed locally should be called directly.
+
+Managers should be responsible for most of the db access, and
+non-implementation specific data. Anything implementation specific that can't
+be generalized should be done by the Driver.
+
+In general, we prefer to have one manager with multiple drivers for different
+implementations, but sometimes it makes sense to have multiple managers. You
+can think of it this way: Abstract different overall strategies at the manager
+level(FlatNetwork vs VlanNetwork), and different implementations at the driver
+level(LinuxNetDriver vs CiscoNetDriver).
+
+Managers will often provide methods for initial setup of a host or periodic
+tasks to a wrapping service.
+
+This module provides Manager, a base class for managers.
+
+"""
+
+from moniker.openstack.common import periodic_task
+
+
+class Manager(periodic_task.PeriodicTasks):
+
+ def __init__(self, host):
+ self.host = host
+
+ def init_host(self):
+ """Handle initialization if this is a standalone service.
+
+ Child classes should override this method.
+
+ """
+ pass
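+
+
+# A minimal sketch of a concrete manager with a periodic task; the names
+# below are hypothetical:
+#
+#     class CentralManager(Manager):
+#         @periodic_task.periodic_task
+#         def refresh_zones(self, *args, **kwargs):
+#             pass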
diff --git a/moniker/openstack/common/notifier/__init__.py b/moniker/openstack/common/notifier/__init__.py
new file mode 100644
index 00000000..482d54e4
--- /dev/null
+++ b/moniker/openstack/common/notifier/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/moniker/openstack/common/notifier/api.py b/moniker/openstack/common/notifier/api.py
new file mode 100644
index 00000000..07ce7cc3
--- /dev/null
+++ b/moniker/openstack/common/notifier/api.py
@@ -0,0 +1,181 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import uuid
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common import context
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import importutils
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import timeutils
+
+
+LOG = logging.getLogger(__name__)
+
+notifier_opts = [
+ cfg.MultiStrOpt('notification_driver',
+ default=[],
+ deprecated_name='list_notifier_drivers',
+ help='Driver or drivers to handle sending notifications'),
+ cfg.StrOpt('default_notification_level',
+ default='INFO',
+ help='Default notification level for outgoing notifications'),
+ cfg.StrOpt('default_publisher_id',
+ default='$host',
+ help='Default publisher_id for outgoing notifications'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(notifier_opts)
+
+WARN = 'WARN'
+INFO = 'INFO'
+ERROR = 'ERROR'
+CRITICAL = 'CRITICAL'
+DEBUG = 'DEBUG'
+
+log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL)
+
+
+class BadPriorityException(Exception):
+ pass
+
+
+def notify_decorator(name, fn):
+ """ decorator for notify which is used from utils.monkey_patch()
+
+ :param name: name of the function
+ :param function: - object of the function
+ :returns: function -- decorated function
+
+ """
+ def wrapped_func(*args, **kwarg):
+ body = {}
+ body['args'] = []
+ body['kwarg'] = {}
+ for arg in args:
+ body['args'].append(arg)
+ for key in kwarg:
+ body['kwarg'][key] = kwarg[key]
+
+ ctxt = context.get_context_from_function_and_args(fn, args, kwarg)
+ notify(ctxt,
+ CONF.default_publisher_id,
+ name,
+ CONF.default_notification_level,
+ body)
+ return fn(*args, **kwarg)
+ return wrapped_func
+
+
+def publisher_id(service, host=None):
+ if not host:
+ host = CONF.host
+ return "%s.%s" % (service, host)
+
+
+def notify(context, publisher_id, event_type, priority, payload):
+ """Sends a notification using the specified driver
+
+ :param publisher_id: the source worker_type.host of the message
+ :param event_type: the literal type of event (ex. Instance Creation)
+ :param priority: patterned after the enumeration of Python logging
+ levels in the set (DEBUG, WARN, INFO, ERROR, CRITICAL)
+ :param payload: A python dictionary of attributes
+
+ Outgoing message format includes the above parameters, and appends the
+ following:
+
+ message_id
+ a UUID representing the id for this notification
+
+ timestamp
+ the GMT timestamp the notification was sent at
+
+ The composite message will be constructed as a dictionary of the above
+ attributes, which will then be sent via the transport mechanism defined
+ by the driver.
+
+ Message example::
+
+ {'message_id': str(uuid.uuid4()),
+ 'publisher_id': 'compute.host1',
+ 'timestamp': timeutils.utcnow(),
+ 'priority': 'WARN',
+ 'event_type': 'compute.create_instance',
+ 'payload': {'instance_id': 12, ... }}
+
+ """
+ if priority not in log_levels:
+ raise BadPriorityException(
+ _('%s not in valid priorities') % priority)
+
+ # Ensure everything is JSON serializable.
+ payload = jsonutils.to_primitive(payload, convert_instances=True)
+
+ msg = dict(message_id=str(uuid.uuid4()),
+ publisher_id=publisher_id,
+ event_type=event_type,
+ priority=priority,
+ payload=payload,
+ timestamp=str(timeutils.utcnow()))
+
+ for driver in _get_drivers():
+ try:
+ driver.notify(context, msg)
+ except Exception, e:
+ LOG.exception(_("Problem '%(e)s' attempting to "
+ "send to notification system. Payload=%(payload)s") %
+ locals())
+
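+# A short usage sketch; the publisher, event type and payload below are
+# hypothetical:
+#
+#     notify(ctxt,
+#            publisher_id('central', host='node1'),  # -> 'central.node1'
+#            'dns.domain.create',
+#            INFO,
+#            {'domain': 'example.com.'})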
+
+_drivers = None
+
+
+def _get_drivers():
+ """Instantiate, cache, and return drivers based on the CONF."""
+ global _drivers
+ if _drivers is None:
+ _drivers = {}
+ for notification_driver in CONF.notification_driver:
+ add_driver(notification_driver)
+
+ return _drivers.values()
+
+
+def add_driver(notification_driver):
+ """Add a notification driver at runtime."""
+ # Make sure the driver list is initialized.
+ _get_drivers()
+ if isinstance(notification_driver, basestring):
+ # Load and add
+ try:
+ driver = importutils.import_module(notification_driver)
+ _drivers[notification_driver] = driver
+ except ImportError as e:
+ LOG.exception(_("Failed to load notifier %s. "
+ "These notifications will not be sent.") %
+ notification_driver)
+ else:
+ # Driver is already loaded; just add the object.
+ _drivers[notification_driver] = notification_driver
+
+
+def _reset_drivers():
+ """Used by unit tests to reset the drivers."""
+ global _drivers
+ _drivers = None
diff --git a/moniker/openstack/common/notifier/log_notifier.py b/moniker/openstack/common/notifier/log_notifier.py
new file mode 100644
index 00000000..f9ff7753
--- /dev/null
+++ b/moniker/openstack/common/notifier/log_notifier.py
@@ -0,0 +1,35 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common import log as logging
+
+
+CONF = cfg.CONF
+
+
+def notify(_context, message):
+ """Notifies the recipient of the desired event given the model.
+ Log notifications using openstack's default logging system"""
+
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ logger = logging.getLogger(
+ 'moniker.openstack.common.notification.%s' %
+ message['event_type'])
+ getattr(logger, priority)(jsonutils.dumps(message))
diff --git a/moniker/openstack/common/notifier/no_op_notifier.py b/moniker/openstack/common/notifier/no_op_notifier.py
new file mode 100644
index 00000000..ee1ddbdc
--- /dev/null
+++ b/moniker/openstack/common/notifier/no_op_notifier.py
@@ -0,0 +1,19 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+def notify(_context, message):
+ """Notifies the recipient of the desired event given the model"""
+ pass
diff --git a/moniker/openstack/common/notifier/rabbit_notifier.py b/moniker/openstack/common/notifier/rabbit_notifier.py
new file mode 100644
index 00000000..75a17f51
--- /dev/null
+++ b/moniker/openstack/common/notifier/rabbit_notifier.py
@@ -0,0 +1,46 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common import context as req_context
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import rpc
+
+LOG = logging.getLogger(__name__)
+
+notification_topic_opt = cfg.ListOpt(
+ 'notification_topics', default=['notifications', ],
+ help='AMQP topic used for openstack notifications')
+
+CONF = cfg.CONF
+CONF.register_opt(notification_topic_opt)
+
+
+def notify(context, message):
+ """Sends a notification to the RabbitMQ"""
+ if not context:
+ context = req_context.get_admin_context()
+ priority = message.get('priority',
+ CONF.default_notification_level)
+ priority = priority.lower()
+ for topic in CONF.notification_topics:
+ topic = '%s.%s' % (topic, priority)
+ try:
+ rpc.notify(context, topic, message)
+ except Exception, e:
+ LOG.exception(_("Could not send notification to %(topic)s. "
+ "Payload=%(message)s"), locals())
diff --git a/moniker/openstack/common/notifier/test_notifier.py b/moniker/openstack/common/notifier/test_notifier.py
new file mode 100644
index 00000000..5e348803
--- /dev/null
+++ b/moniker/openstack/common/notifier/test_notifier.py
@@ -0,0 +1,22 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+NOTIFICATIONS = []
+
+
+def notify(_context, message):
+ """Test notifier, stores notifications in memory for unittests."""
+ NOTIFICATIONS.append(message)
diff --git a/moniker/openstack/common/periodic_task.py b/moniker/openstack/common/periodic_task.py
new file mode 100644
index 00000000..eb070863
--- /dev/null
+++ b/moniker/openstack/common/periodic_task.py
@@ -0,0 +1,112 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from moniker.openstack.common import log as logging
+from moniker.openstack.common.gettextutils import _
+
+LOG = logging.getLogger(__name__)
+
+
+def periodic_task(*args, **kwargs):
+ """Decorator to indicate that a method is a periodic task.
+
+ This decorator can be used in two ways:
+
+ 1. Without arguments '@periodic_task', this will be run on every tick
+ of the periodic scheduler.
+
+ 2. With arguments, @periodic_task(ticks_between_runs=N), this will be
+ run on every N ticks of the periodic scheduler.
+ """
+ def decorator(f):
+ f._periodic_task = True
+ f._ticks_between_runs = kwargs.pop('ticks_between_runs', 0)
+ return f
+
+ # NOTE(sirp): The `if` is necessary to allow the decorator to be used with
+ # and without parens.
+ #
+ # In the 'with-parens' case (with kwargs present), this function needs to
+ # return a decorator function since the interpreter will invoke it like:
+ #
+ # periodic_task(*args, **kwargs)(f)
+ #
+ # In the 'without-parens' case, the original function will be passed
+ # in as the first argument, like:
+ #
+ # periodic_task(f)
+ if kwargs:
+ return decorator
+ else:
+ return decorator(args[0])
+
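+# Both decorator forms, sketched; the class and method names below are
+# hypothetical:
+#
+#     class MyTasks(PeriodicTasks):
+#         @periodic_task
+#         def every_tick(self, *args, **kwargs):
+#             pass
+#
+#         @periodic_task(ticks_between_runs=10)
+#         def every_tenth_tick(self, *args, **kwargs):
+#             pass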
+
+class _PeriodicTasksMeta(type):
+ def __init__(cls, names, bases, dict_):
+ """Metaclass that allows us to collect decorated periodic tasks."""
+ super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_)
+
+ # NOTE(sirp): if the attribute is not present then we must be the base
+ # class, so, go ahead and initialize it. If the attribute is present,
+ # then we're a subclass so make a copy of it so we don't step on our
+ # parent's toes.
+ try:
+ cls._periodic_tasks = cls._periodic_tasks[:]
+ except AttributeError:
+ cls._periodic_tasks = []
+
+ try:
+ cls._ticks_to_skip = cls._ticks_to_skip.copy()
+ except AttributeError:
+ cls._ticks_to_skip = {}
+
+ # This uses __dict__ instead of
+ # inspect.getmembers(cls, inspect.ismethod) so only the methods of the
+ # current class are added when this class is scanned, and base classes
+ # are not added redundantly.
+ for value in cls.__dict__.values():
+ if getattr(value, '_periodic_task', False):
+ task = value
+ name = task.__name__
+ cls._periodic_tasks.append((name, task))
+ cls._ticks_to_skip[name] = task._ticks_between_runs
+
+
+class PeriodicTasks(object):
+ __metaclass__ = _PeriodicTasksMeta
+
+ def run_periodic_tasks(self, *args, **kwargs):
+ """Tasks to be run at a periodic interval."""
+ raise_on_error = kwargs.get('raise_on_error', False)
+ for task_name, task in self._periodic_tasks:
+ full_task_name = '.'.join([self.__class__.__name__, task_name])
+
+ ticks_to_skip = self._ticks_to_skip[task_name]
+ if ticks_to_skip > 0:
+ LOG.debug(_("Skipping %(full_task_name)s, %(ticks_to_skip)s"
+ " ticks left until next run"), locals())
+ self._ticks_to_skip[task_name] -= 1
+ continue
+
+ self._ticks_to_skip[task_name] = task._ticks_between_runs
+ LOG.debug(_("Running periodic task %(full_task_name)s"), locals())
+
+ try:
+ task(self, *args, **kwargs)
+ except Exception as e:
+ if raise_on_error:
+ raise
+                LOG.exception(_("Error during %(full_task_name)s: %(e)s"),
+                              locals())
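+
+
+# NOTE: a minimal, hypothetical driver loop for a PeriodicTasks subclass;
+# real services typically trigger this from a timer such as
+# loopingcall.LoopingCall rather than sleeping inline:
+#
+#     manager = MyManager()      # MyManager subclasses PeriodicTasks
+#     while True:
+#         manager.run_periodic_tasks(context, raise_on_error=False)
+#         time.sleep(60)         # one scheduler 'tick' per minute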
diff --git a/moniker/openstack/common/rpc/__init__.py b/moniker/openstack/common/rpc/__init__.py
new file mode 100644
index 00000000..5fcc3366
--- /dev/null
+++ b/moniker/openstack/common/rpc/__init__.py
@@ -0,0 +1,266 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A remote procedure call (rpc) abstraction.
+
+For some wrappers that add message versioning to rpc, see:
+ rpc.dispatcher
+ rpc.proxy
+"""
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common import importutils
+
+
+rpc_opts = [
+ cfg.StrOpt('rpc_backend',
+ default='%s.impl_kombu' % __package__,
+ help="The messaging module to use, defaults to kombu."),
+ cfg.IntOpt('rpc_thread_pool_size',
+ default=64,
+ help='Size of RPC thread pool'),
+ cfg.IntOpt('rpc_conn_pool_size',
+ default=30,
+ help='Size of RPC connection pool'),
+ cfg.IntOpt('rpc_response_timeout',
+ default=60,
+ help='Seconds to wait for a response from call or multicall'),
+ cfg.IntOpt('rpc_cast_timeout',
+ default=30,
+ help='Seconds to wait before a cast expires (TTL). '
+ 'Only supported by impl_zmq.'),
+ cfg.ListOpt('allowed_rpc_exception_modules',
+ default=['moniker.exceptions',
+ 'moniker.openstack.common.exception',
+ 'nova.exception',
+ 'cinder.exception',
+ ],
+                help='Modules of exceptions that are permitted to be '
+                'recreated upon receiving exception data from an rpc call.'),
+ cfg.StrOpt('control_exchange',
+ default='nova',
+ help='AMQP exchange to connect to if using RabbitMQ or Qpid'),
+ cfg.BoolOpt('fake_rabbit',
+ default=False,
+ help='If passed, use a fake RabbitMQ provider'),
+]
+
+cfg.CONF.register_opts(rpc_opts)
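+
+
+# NOTE: the options above map onto a service configuration file; a
+# hypothetical snippet (values shown are the defaults):
+#
+#     [DEFAULT]
+#     rpc_backend = moniker.openstack.common.rpc.impl_kombu
+#     rpc_thread_pool_size = 64
+#     rpc_conn_pool_size = 30
+#     rpc_response_timeout = 60
+#     control_exchange = nova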
+
+
+def create_connection(new=True):
+ """Create a connection to the message bus used for rpc.
+
+ For some example usage of creating a connection and some consumers on that
+ connection, see nova.service.
+
+ :param new: Whether or not to create a new connection. A new connection
+ will be created by default. If new is False, the
+ implementation is free to return an existing connection from a
+ pool.
+
+ :returns: An instance of openstack.common.rpc.common.Connection
+ """
+ return _get_impl().create_connection(cfg.CONF, new=new)
+
+
+def call(context, topic, msg, timeout=None):
+ """Invoke a remote method that returns something.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param topic: The topic to send the rpc message to. This correlates to the
+ topic argument of
+ openstack.common.rpc.common.Connection.create_consumer()
+ and only applies when the consumer was created with
+ fanout=False.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+ :param timeout: int, number of seconds to use for a response timeout.
+ If set, this overrides the rpc_response_timeout option.
+
+ :returns: A dict from the remote method.
+
+ :raises: openstack.common.rpc.common.Timeout if a complete response
+ is not received before the timeout is reached.
+ """
+ return _get_impl().call(cfg.CONF, context, topic, msg, timeout)
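+
+
+# NOTE: illustrative only -- a hypothetical caller invoking a remote
+# 'get_domain' method; the topic, method and argument names are made up:
+#
+#     from moniker.openstack.common import rpc
+#
+#     result = rpc.call(context, 'central',
+#                       {'method': 'get_domain',
+#                        'args': {'domain_id': domain_id}},
+#                       timeout=30)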
+
+
+def cast(context, topic, msg):
+ """Invoke a remote method that does not return anything.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param topic: The topic to send the rpc message to. This correlates to the
+ topic argument of
+ openstack.common.rpc.common.Connection.create_consumer()
+ and only applies when the consumer was created with
+ fanout=False.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+
+ :returns: None
+ """
+ return _get_impl().cast(cfg.CONF, context, topic, msg)
+
+
+def fanout_cast(context, topic, msg):
+ """Broadcast a remote method invocation with no return.
+
+ This method will get invoked on all consumers that were set up with this
+ topic name and fanout=True.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param topic: The topic to send the rpc message to. This correlates to the
+ topic argument of
+ openstack.common.rpc.common.Connection.create_consumer()
+ and only applies when the consumer was created with
+ fanout=True.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+
+ :returns: None
+ """
+ return _get_impl().fanout_cast(cfg.CONF, context, topic, msg)
+
+
+def multicall(context, topic, msg, timeout=None):
+ """Invoke a remote method and get back an iterator.
+
+ In this case, the remote method will be returning multiple values in
+    separate messages, so the return values can be processed as they come in
+    via an iterator.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param topic: The topic to send the rpc message to. This correlates to the
+ topic argument of
+ openstack.common.rpc.common.Connection.create_consumer()
+ and only applies when the consumer was created with
+ fanout=False.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+ :param timeout: int, number of seconds to use for a response timeout.
+ If set, this overrides the rpc_response_timeout option.
+
+    :returns: An iterator.  The iterator will yield each value returned by
+              the remote method, in the order the remote method returned them.
+
+ :raises: openstack.common.rpc.common.Timeout if a complete response
+ is not received before the timeout is reached.
+ """
+ return _get_impl().multicall(cfg.CONF, context, topic, msg, timeout)
+
+
+def notify(context, topic, msg):
+ """Send notification event.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param topic: The topic to send the notification to.
+ :param msg: This is a dict of content of event.
+
+ :returns: None
+ """
+ return _get_impl().notify(cfg.CONF, context, topic, msg)
+
+
+def cleanup():
+ """Clean up resoruces in use by implementation.
+
+ Clean up any resources that have been allocated by the RPC implementation.
+ This is typically open connections to a messaging service. This function
+ would get called before an application using this API exits to allow
+ connections to get torn down cleanly.
+
+ :returns: None
+ """
+ return _get_impl().cleanup()
+
+
+def cast_to_server(context, server_params, topic, msg):
+ """Invoke a remote method that does not return anything.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param server_params: Connection information
+    :param topic: The topic to send the rpc message to.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+
+ :returns: None
+ """
+ return _get_impl().cast_to_server(cfg.CONF, context, server_params, topic,
+ msg)
+
+
+def fanout_cast_to_server(context, server_params, topic, msg):
+ """Broadcast to a remote method invocation with no return.
+
+ :param context: Information that identifies the user that has made this
+ request.
+ :param server_params: Connection information
+    :param topic: The topic to send the rpc message to.
+ :param msg: This is a dict in the form { "method" : "method_to_invoke",
+ "args" : dict_of_kwargs }
+
+ :returns: None
+ """
+ return _get_impl().fanout_cast_to_server(cfg.CONF, context, server_params,
+ topic, msg)
+
+
+def queue_get_for(context, topic, host):
+ """Get a queue name for a given topic + host.
+
+ This function only works if this naming convention is followed on the
+ consumer side, as well. For example, in nova, every instance of the
+ nova-foo service calls create_consumer() for two topics:
+
+ foo
+ foo.<host>
+
+ Messages sent to the 'foo' topic are distributed to exactly one instance of
+ the nova-foo service. The services are chosen in a round-robin fashion.
+ Messages sent to the 'foo.<host>' topic are sent to the nova-foo service on
+ <host>.
+ """
+ return '%s.%s' % (topic, host)
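+
+
+# NOTE: for example (hypothetical names), queue_get_for(context, 'compute',
+# 'host1') returns 'compute.host1', the queue consumed by the service
+# instance running on 'host1'.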
+
+
+_RPCIMPL = None
+
+
+def _get_impl():
+ """Delay import of rpc_backend until configuration is loaded."""
+ global _RPCIMPL
+ if _RPCIMPL is None:
+ try:
+ _RPCIMPL = importutils.import_module(cfg.CONF.rpc_backend)
+ except ImportError:
+ # For backwards compatibility with older nova config.
+ impl = cfg.CONF.rpc_backend.replace('nova.rpc',
+ 'nova.openstack.common.rpc')
+ _RPCIMPL = importutils.import_module(impl)
+ return _RPCIMPL
diff --git a/moniker/openstack/common/rpc/amqp.py b/moniker/openstack/common/rpc/amqp.py
new file mode 100644
index 00000000..cb052409
--- /dev/null
+++ b/moniker/openstack/common/rpc/amqp.py
@@ -0,0 +1,418 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 - 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Shared code between AMQP based openstack.common.rpc implementations.
+
+The code in this module is shared between the rpc implementations based on
+AMQP.
+Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses
+AMQP, but is deprecated and predates this code.
+"""
+
+import inspect
+import logging
+import sys
+import uuid
+
+from eventlet import greenpool
+from eventlet import pools
+from eventlet import semaphore
+
+from moniker.openstack.common import excutils
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import local
+from moniker.openstack.common.rpc import common as rpc_common
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Pool(pools.Pool):
+ """Class that implements a Pool of Connections."""
+ def __init__(self, conf, connection_cls, *args, **kwargs):
+ self.connection_cls = connection_cls
+ self.conf = conf
+ kwargs.setdefault("max_size", self.conf.rpc_conn_pool_size)
+ kwargs.setdefault("order_as_stack", True)
+ super(Pool, self).__init__(*args, **kwargs)
+
+ # TODO(comstud): Timeout connections not used in a while
+ def create(self):
+ LOG.debug('Pool creating new connection')
+ return self.connection_cls(self.conf)
+
+ def empty(self):
+ while self.free_items:
+ self.get().close()
+
+
+_pool_create_sem = semaphore.Semaphore()
+
+
+def get_connection_pool(conf, connection_cls):
+ with _pool_create_sem:
+ # Make sure only one thread tries to create the connection pool.
+ if not connection_cls.pool:
+ connection_cls.pool = Pool(conf, connection_cls)
+ return connection_cls.pool
+
+
+class ConnectionContext(rpc_common.Connection):
+ """The class that is actually returned to the caller of
+ create_connection(). This is essentially a wrapper around
+ Connection that supports 'with'. It can also return a new
+    Connection, or one from a pool.  The class will also catch
+    when an instance of this class is about to be deleted.  With that
+    we can return Connections to the pool on exceptions and so
+    forth without making the caller responsible for catching
+    them.  If possible the class makes sure to return a
+ connection to the pool.
+ """
+
+ def __init__(self, conf, connection_pool, pooled=True, server_params=None):
+ """Create a new connection, or get one from the pool"""
+ self.connection = None
+ self.conf = conf
+ self.connection_pool = connection_pool
+ if pooled:
+ self.connection = connection_pool.get()
+ else:
+ self.connection = connection_pool.connection_cls(
+ conf,
+ server_params=server_params)
+ self.pooled = pooled
+
+ def __enter__(self):
+ """When with ConnectionContext() is used, return self"""
+ return self
+
+ def _done(self):
+ """If the connection came from a pool, clean it up and put it back.
+ If it did not come from a pool, close it.
+ """
+ if self.connection:
+ if self.pooled:
+ # Reset the connection so it's ready for the next caller
+ # to grab from the pool
+ self.connection.reset()
+ self.connection_pool.put(self.connection)
+ else:
+ try:
+ self.connection.close()
+ except Exception:
+ pass
+ self.connection = None
+
+ def __exit__(self, exc_type, exc_value, tb):
+ """End of 'with' statement. We're done here."""
+ self._done()
+
+ def __del__(self):
+ """Caller is done with this connection. Make sure we cleaned up."""
+ self._done()
+
+ def close(self):
+ """Caller is done with this connection."""
+ self._done()
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ self.connection.create_consumer(topic, proxy, fanout)
+
+ def create_worker(self, topic, proxy, pool_name):
+ self.connection.create_worker(topic, proxy, pool_name)
+
+ def consume_in_thread(self):
+ self.connection.consume_in_thread()
+
+ def __getattr__(self, key):
+ """Proxy all other calls to the Connection instance"""
+ if self.connection:
+ return getattr(self.connection, key)
+ else:
+ raise rpc_common.InvalidRPCConnectionReuse()
+
+
+def msg_reply(conf, msg_id, connection_pool, reply=None, failure=None,
+ ending=False):
+ """Sends a reply or an error on the channel signified by msg_id.
+
+ Failure should be a sys.exc_info() tuple.
+
+ """
+ with ConnectionContext(conf, connection_pool) as conn:
+ if failure:
+ failure = rpc_common.serialize_remote_exception(failure)
+
+ try:
+ msg = {'result': reply, 'failure': failure}
+ except TypeError:
+ msg = {'result': dict((k, repr(v))
+ for k, v in reply.__dict__.iteritems()),
+ 'failure': failure}
+ if ending:
+ msg['ending'] = True
+ conn.direct_send(msg_id, msg)
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+ """Context that supports replying to a rpc.call"""
+ def __init__(self, **kwargs):
+ self.msg_id = kwargs.pop('msg_id', None)
+ self.conf = kwargs.pop('conf')
+ super(RpcContext, self).__init__(**kwargs)
+
+ def deepcopy(self):
+ values = self.to_dict()
+ values['conf'] = self.conf
+ values['msg_id'] = self.msg_id
+ return self.__class__(**values)
+
+ def reply(self, reply=None, failure=None, ending=False,
+ connection_pool=None):
+ if self.msg_id:
+ msg_reply(self.conf, self.msg_id, connection_pool, reply, failure,
+ ending)
+ if ending:
+ self.msg_id = None
+
+
+def unpack_context(conf, msg):
+ """Unpack context from msg."""
+ context_dict = {}
+ for key in list(msg.keys()):
+ # NOTE(vish): Some versions of python don't like unicode keys
+ # in kwargs.
+ key = str(key)
+ if key.startswith('_context_'):
+ value = msg.pop(key)
+ context_dict[key[9:]] = value
+ context_dict['msg_id'] = msg.pop('_msg_id', None)
+ context_dict['conf'] = conf
+ ctx = RpcContext.from_dict(context_dict)
+ rpc_common._safe_log(LOG.debug, _('unpacked context: %s'), ctx.to_dict())
+ return ctx
+
+
+def pack_context(msg, context):
+ """Pack context into msg.
+
+ Values for message keys need to be less than 255 chars, so we pull
+ context out into a bunch of separate keys. If we want to support
+ more arguments in rabbit messages, we may want to do the same
+ for args at some point.
+
+ """
+ context_d = dict([('_context_%s' % key, value)
+ for (key, value) in context.to_dict().iteritems()])
+ msg.update(context_d)
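+
+
+# NOTE: a sketch of the round trip (hypothetical values): pack_context()
+# flattens the context into the message, unpack_context() rebuilds it:
+#
+#     msg = {'method': 'echo', 'args': {'value': 42}}
+#     pack_context(msg, context)
+#     # msg now also carries, e.g., '_context_user' and '_context_tenant'
+#     ctxt = unpack_context(conf, msg)  # strips the '_context_' keys again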
+
+
+class ProxyCallback(object):
+ """Calls methods on a proxy object based on method and args."""
+
+ def __init__(self, conf, proxy, connection_pool):
+ self.proxy = proxy
+ self.pool = greenpool.GreenPool(conf.rpc_thread_pool_size)
+ self.connection_pool = connection_pool
+ self.conf = conf
+
+ def __call__(self, message_data):
+ """Consumer callback to call a method on a proxy object.
+
+ Parses the message for validity and fires off a thread to call the
+ proxy object method.
+
+ Message data should be a dictionary with two keys:
+ method: string representing the method to call
+ args: dictionary of arg: value
+
+ Example: {'method': 'echo', 'args': {'value': 42}}
+
+ """
+ # It is important to clear the context here, because at this point
+ # the previous context is stored in local.store.context
+ if hasattr(local.store, 'context'):
+ del local.store.context
+ rpc_common._safe_log(LOG.debug, _('received %s'), message_data)
+ ctxt = unpack_context(self.conf, message_data)
+ method = message_data.get('method')
+ args = message_data.get('args', {})
+ version = message_data.get('version', None)
+ if not method:
+ LOG.warn(_('no method for message: %s') % message_data)
+ ctxt.reply(_('No method for message: %s') % message_data,
+ connection_pool=self.connection_pool)
+ return
+ self.pool.spawn_n(self._process_data, ctxt, version, method, args)
+
+ def _process_data(self, ctxt, version, method, args):
+ """Process a message in a new thread.
+
+ If the proxy object we have has a dispatch method
+ (see rpc.dispatcher.RpcDispatcher), pass it the version,
+ method, and args and let it dispatch as appropriate. If not, use
+ the old behavior of magically calling the specified method on the
+ proxy we have here.
+ """
+ ctxt.update_store()
+ try:
+ rval = self.proxy.dispatch(ctxt, version, method, **args)
+ # Check if the result was a generator
+ if inspect.isgenerator(rval):
+ for x in rval:
+ ctxt.reply(x, None, connection_pool=self.connection_pool)
+ else:
+ ctxt.reply(rval, None, connection_pool=self.connection_pool)
+ # This final None tells multicall that it is done.
+ ctxt.reply(ending=True, connection_pool=self.connection_pool)
+ except Exception as e:
+ LOG.exception('Exception during message handling')
+ ctxt.reply(None, sys.exc_info(),
+ connection_pool=self.connection_pool)
+
+
+class MulticallWaiter(object):
+ def __init__(self, conf, connection, timeout):
+ self._connection = connection
+ self._iterator = connection.iterconsume(timeout=timeout or
+ conf.rpc_response_timeout)
+ self._result = None
+ self._done = False
+ self._got_ending = False
+ self._conf = conf
+
+ def done(self):
+ if self._done:
+ return
+ self._done = True
+ self._iterator.close()
+ self._iterator = None
+ self._connection.close()
+
+ def __call__(self, data):
+ """The consume() callback will call this. Store the result."""
+ if data['failure']:
+ failure = data['failure']
+ self._result = rpc_common.deserialize_remote_exception(self._conf,
+ failure)
+
+ elif data.get('ending', False):
+ self._got_ending = True
+ else:
+ self._result = data['result']
+
+ def __iter__(self):
+ """Return a result until we get a 'None' response from consumer"""
+ if self._done:
+ raise StopIteration
+ while True:
+ try:
+ self._iterator.next()
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ self.done()
+ if self._got_ending:
+ self.done()
+ raise StopIteration
+ result = self._result
+ if isinstance(result, Exception):
+ self.done()
+ raise result
+ yield result
+
+
+def create_connection(conf, new, connection_pool):
+ """Create a connection"""
+ return ConnectionContext(conf, connection_pool, pooled=not new)
+
+
+def multicall(conf, context, topic, msg, timeout, connection_pool):
+ """Make a call that returns multiple times."""
+ # Can't use 'with' for multicall, as it returns an iterator
+ # that will continue to use the connection. When it's done,
+ # connection.close() will get called which will put it back into
+ # the pool
+ LOG.debug(_('Making asynchronous call on %s ...'), topic)
+ msg_id = uuid.uuid4().hex
+ msg.update({'_msg_id': msg_id})
+ LOG.debug(_('MSG_ID is %s') % (msg_id))
+ pack_context(msg, context)
+
+ conn = ConnectionContext(conf, connection_pool)
+ wait_msg = MulticallWaiter(conf, conn, timeout)
+ conn.declare_direct_consumer(msg_id, wait_msg)
+ conn.topic_send(topic, msg)
+ return wait_msg
+
+
+def call(conf, context, topic, msg, timeout, connection_pool):
+ """Sends a message on a topic and wait for a response."""
+ rv = multicall(conf, context, topic, msg, timeout, connection_pool)
+ # NOTE(vish): return the last result from the multicall
+ rv = list(rv)
+ if not rv:
+ return
+ return rv[-1]
+
+
+def cast(conf, context, topic, msg, connection_pool):
+ """Sends a message on a topic without waiting for a response."""
+ LOG.debug(_('Making asynchronous cast on %s...'), topic)
+ pack_context(msg, context)
+ with ConnectionContext(conf, connection_pool) as conn:
+ conn.topic_send(topic, msg)
+
+
+def fanout_cast(conf, context, topic, msg, connection_pool):
+ """Sends a message on a fanout exchange without waiting for a response."""
+ LOG.debug(_('Making asynchronous fanout cast...'))
+ pack_context(msg, context)
+ with ConnectionContext(conf, connection_pool) as conn:
+ conn.fanout_send(topic, msg)
+
+
+def cast_to_server(conf, context, server_params, topic, msg, connection_pool):
+ """Sends a message on a topic to a specific server."""
+ pack_context(msg, context)
+ with ConnectionContext(conf, connection_pool, pooled=False,
+ server_params=server_params) as conn:
+ conn.topic_send(topic, msg)
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg,
+ connection_pool):
+ """Sends a message on a fanout exchange to a specific server."""
+ pack_context(msg, context)
+ with ConnectionContext(conf, connection_pool, pooled=False,
+ server_params=server_params) as conn:
+ conn.fanout_send(topic, msg)
+
+
+def notify(conf, context, topic, msg, connection_pool):
+ """Sends a notification event on a topic."""
+ event_type = msg.get('event_type')
+ LOG.debug(_('Sending %(event_type)s on %(topic)s'), locals())
+ pack_context(msg, context)
+ with ConnectionContext(conf, connection_pool) as conn:
+ conn.notify_send(topic, msg)
+
+
+def cleanup(connection_pool):
+ if connection_pool:
+ connection_pool.empty()
diff --git a/moniker/openstack/common/rpc/common.py b/moniker/openstack/common/rpc/common.py
new file mode 100644
index 00000000..2c1ba517
--- /dev/null
+++ b/moniker/openstack/common/rpc/common.py
@@ -0,0 +1,311 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2011 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+import logging
+import traceback
+
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import importutils
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common import local
+
+
+LOG = logging.getLogger(__name__)
+
+
+class RPCException(Exception):
+ message = _("An unknown RPC related exception occurred.")
+
+ def __init__(self, message=None, **kwargs):
+ self.kwargs = kwargs
+
+ if not message:
+ try:
+ message = self.message % kwargs
+
+ except Exception as e:
+ # kwargs doesn't match a variable in the message
+ # log the issue and the kwargs
+ LOG.exception(_('Exception in string format operation'))
+ for name, value in kwargs.iteritems():
+ LOG.error("%s: %s" % (name, value))
+ # at least get the core message out if something happened
+ message = self.message
+
+ super(RPCException, self).__init__(message)
+
+
+class RemoteError(RPCException):
+ """Signifies that a remote class has raised an exception.
+
+ Contains a string representation of the type of the original exception,
+ the value of the original exception, and the traceback. These are
+ sent to the parent as a joined string so printing the exception
+ contains all of the relevant info.
+
+ """
+ message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
+
+ def __init__(self, exc_type=None, value=None, traceback=None):
+ self.exc_type = exc_type
+ self.value = value
+ self.traceback = traceback
+ super(RemoteError, self).__init__(exc_type=exc_type,
+ value=value,
+ traceback=traceback)
+
+
+class Timeout(RPCException):
+ """Signifies that a timeout has occurred.
+
+ This exception is raised if the rpc_response_timeout is reached while
+ waiting for a response from the remote side.
+ """
+ message = _("Timeout while waiting on RPC response.")
+
+
+class InvalidRPCConnectionReuse(RPCException):
+ message = _("Invalid reuse of an RPC connection.")
+
+
+class UnsupportedRpcVersion(RPCException):
+ message = _("Specified RPC version, %(version)s, not supported by "
+ "this endpoint.")
+
+
+class Connection(object):
+ """A connection, returned by rpc.create_connection().
+
+ This class represents a connection to the message bus used for rpc.
+ An instance of this class should never be created by users of the rpc API.
+ Use rpc.create_connection() instead.
+ """
+ def close(self):
+ """Close the connection.
+
+ This method must be called when the connection will no longer be used.
+ It will ensure that any resources associated with the connection, such
+        as a network connection, are cleaned up.
+ """
+ raise NotImplementedError()
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer on this connection.
+
+ A consumer is associated with a message queue on the backend message
+ bus. The consumer will read messages from the queue, unpack them, and
+ dispatch them to the proxy object. The contents of the message pulled
+ off of the queue will determine which method gets called on the proxy
+ object.
+
+ :param topic: This is a name associated with what to consume from.
+ Multiple instances of a service may consume from the same
+ topic. For example, all instances of nova-compute consume
+ from a queue called "compute". In that case, the
+ messages will get distributed amongst the consumers in a
+ round-robin fashion if fanout=False. If fanout=True,
+ every consumer associated with this topic will get a
+ copy of every message.
+ :param proxy: The object that will handle all incoming messages.
+ :param fanout: Whether or not this is a fanout topic. See the
+ documentation for the topic parameter for some
+ additional comments on this.
+ """
+ raise NotImplementedError()
+
+ def create_worker(self, topic, proxy, pool_name):
+ """Create a worker on this connection.
+
+ A worker is like a regular consumer of messages directed to a
+ topic, except that it is part of a set of such consumers (the
+ "pool") which may run in parallel. Every pool of workers will
+ receive a given message, but only one worker in the pool will
+ be asked to process it. Load is distributed across the members
+ of the pool in round-robin fashion.
+
+ :param topic: This is a name associated with what to consume from.
+ Multiple instances of a service may consume from the same
+ topic.
+ :param proxy: The object that will handle all incoming messages.
+ :param pool_name: String containing the name of the pool of workers
+ """
+ raise NotImplementedError()
+
+ def consume_in_thread(self):
+ """Spawn a thread to handle incoming messages.
+
+ Spawn a thread that will be responsible for handling all incoming
+ messages for consumers that were set up on this connection.
+
+ Message dispatching inside of this is expected to be implemented in a
+ non-blocking manner. An example implementation would be having this
+ thread pull messages in for all of the consumers, but utilize a thread
+ pool for dispatching the messages to the proxy objects.
+ """
+ raise NotImplementedError()
+
+
+def _safe_log(log_func, msg, msg_data):
+ """Sanitizes the msg_data field before logging."""
+ SANITIZE = {'set_admin_password': ('new_pass',),
+ 'run_instance': ('admin_password',), }
+
+ has_method = 'method' in msg_data and msg_data['method'] in SANITIZE
+ has_context_token = '_context_auth_token' in msg_data
+ has_token = 'auth_token' in msg_data
+
+ if not any([has_method, has_context_token, has_token]):
+ return log_func(msg, msg_data)
+
+ msg_data = copy.deepcopy(msg_data)
+
+ if has_method:
+ method = msg_data['method']
+ if method in SANITIZE:
+ args_to_sanitize = SANITIZE[method]
+ for arg in args_to_sanitize:
+ try:
+ msg_data['args'][arg] = "<SANITIZED>"
+ except KeyError:
+ pass
+
+ if has_context_token:
+ msg_data['_context_auth_token'] = '<SANITIZED>'
+
+ if has_token:
+ msg_data['auth_token'] = '<SANITIZED>'
+
+ return log_func(msg, msg_data)
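+
+
+# NOTE: illustrative only -- given a hypothetical message for a method listed
+# in SANITIZE, the sensitive argument is masked before logging:
+#
+#     _safe_log(LOG.debug, _('received %s'),
+#               {'method': 'run_instance',
+#                'args': {'admin_password': 'secret'}})
+#     # logs roughly: received {'method': 'run_instance',
+#     #                         'args': {'admin_password': '<SANITIZED>'}}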
+
+
+def serialize_remote_exception(failure_info):
+ """Prepares exception data to be sent over rpc.
+
+ Failure_info should be a sys.exc_info() tuple.
+
+ """
+ tb = traceback.format_exception(*failure_info)
+ failure = failure_info[1]
+ LOG.error(_("Returning exception %s to caller"), unicode(failure))
+ LOG.error(tb)
+
+ kwargs = {}
+ if hasattr(failure, 'kwargs'):
+ kwargs = failure.kwargs
+
+ data = {
+ 'class': str(failure.__class__.__name__),
+ 'module': str(failure.__class__.__module__),
+ 'message': unicode(failure),
+ 'tb': tb,
+ 'args': failure.args,
+ 'kwargs': kwargs
+ }
+
+ json_data = jsonutils.dumps(data)
+
+ return json_data
+
+
+def deserialize_remote_exception(conf, data):
+ failure = jsonutils.loads(str(data))
+
+ trace = failure.get('tb', [])
+ message = failure.get('message', "") + "\n" + "\n".join(trace)
+ name = failure.get('class')
+ module = failure.get('module')
+
+ # NOTE(ameade): We DO NOT want to allow just any module to be imported, in
+ # order to prevent arbitrary code execution.
+    if module not in conf.allowed_rpc_exception_modules:
+ return RemoteError(name, failure.get('message'), trace)
+
+ try:
+ mod = importutils.import_module(module)
+ klass = getattr(mod, name)
+ if not issubclass(klass, Exception):
+ raise TypeError("Can only deserialize Exceptions")
+
+ failure = klass(**failure.get('kwargs', {}))
+ except (AttributeError, TypeError, ImportError):
+ return RemoteError(name, failure.get('message'), trace)
+
+ ex_type = type(failure)
+ str_override = lambda self: message
+ new_ex_type = type(ex_type.__name__ + "_Remote", (ex_type,),
+ {'__str__': str_override, '__unicode__': str_override})
+ try:
+ # NOTE(ameade): Dynamically create a new exception type and swap it in
+ # as the new type for the exception. This only works on user defined
+ # Exceptions and not core python exceptions. This is important because
+ # we cannot necessarily change an exception message so we must override
+ # the __str__ method.
+ failure.__class__ = new_ex_type
+ except TypeError as e:
+ # NOTE(ameade): If a core exception then just add the traceback to the
+ # first exception argument.
+ failure.args = (message,) + failure.args[1:]
+ return failure
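+
+
+# NOTE: illustrative round trip (sys must be imported by the caller) --
+# serialize on the server side, deserialize on the client side; whether the
+# original type is recreated depends on allowed_rpc_exception_modules:
+#
+#     try:
+#         raise ValueError('boom')
+#     except Exception:
+#         data = serialize_remote_exception(sys.exc_info())
+#     exc = deserialize_remote_exception(conf, data)
+#     # -> ValueError if its module is allowed, otherwise RemoteError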
+
+
+class CommonRpcContext(object):
+ def __init__(self, **kwargs):
+ self.values = kwargs
+
+ def __getattr__(self, key):
+ try:
+ return self.values[key]
+ except KeyError:
+ raise AttributeError(key)
+
+ def to_dict(self):
+ return copy.deepcopy(self.values)
+
+ @classmethod
+ def from_dict(cls, values):
+ return cls(**values)
+
+ def deepcopy(self):
+ return self.from_dict(self.to_dict())
+
+ def update_store(self):
+ local.store.context = self
+
+ def elevated(self, read_deleted=None, overwrite=False):
+ """Return a version of this context with admin flag set."""
+ # TODO(russellb) This method is a bit of a nova-ism. It makes
+ # some assumptions about the data in the request context sent
+ # across rpc, while the rest of this class does not. We could get
+ # rid of this if we changed the nova code that uses this to
+ # convert the RpcContext back to its native RequestContext doing
+ # something like nova.context.RequestContext.from_dict(ctxt.to_dict())
+
+ context = self.deepcopy()
+ context.values['is_admin'] = True
+
+ context.values.setdefault('roles', [])
+
+ if 'admin' not in context.values['roles']:
+ context.values['roles'].append('admin')
+
+ if read_deleted is not None:
+ context.values['read_deleted'] = read_deleted
+
+ return context
diff --git a/moniker/openstack/common/rpc/dispatcher.py b/moniker/openstack/common/rpc/dispatcher.py
new file mode 100644
index 00000000..fa951c43
--- /dev/null
+++ b/moniker/openstack/common/rpc/dispatcher.py
@@ -0,0 +1,150 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Code for rpc message dispatching.
+
+Messages that come in have a version number associated with them. RPC API
+version numbers are in the form:
+
+ Major.Minor
+
+For a given message with version X.Y, the receiver must be marked as able to
+handle messages of version A.B, where:
+
+ A = X
+
+ B >= Y
+
+The Major version number would be incremented for an almost completely new API.
+The Minor version number would be incremented for backwards compatible changes
+to an existing API. A backwards compatible change could be something like
+adding a new method, adding an argument to an existing method (but not
+requiring it), or changing the type for an existing argument (but still
+handling the old type as well).
+
+The conversion over to a versioned API must be done on both the client side and
+server side of the API at the same time. However, as the code stands today,
+there can be both versioned and unversioned APIs implemented in the same code
+base.
+
+
+EXAMPLES:
+
+Nova was the first project to use versioned rpc APIs. Consider the compute rpc
+API as an example. The client side is in nova/compute/rpcapi.py and the server
+side is in nova/compute/manager.py.
+
+
+Example 1) Adding a new method.
+
+Adding a new method is a backwards compatible change. It should be added to
+nova/compute/manager.py, and RPC_API_VERSION should be bumped from X.Y to
+X.Y+1. On the client side, the new method in nova/compute/rpcapi.py should
+have a specific version specified to indicate the minimum API version that must
+be implemented for the method to be supported. For example:
+
+ def get_host_uptime(self, ctxt, host):
+ topic = _compute_topic(self.topic, ctxt, host, None)
+ return self.call(ctxt, self.make_msg('get_host_uptime'), topic,
+ version='1.1')
+
+In this case, version '1.1' is the first version that supported the
+get_host_uptime() method.
+
+
+Example 2) Adding a new parameter.
+
+Adding a new parameter to an rpc method can be made backwards compatible. The
+RPC_API_VERSION on the server side (nova/compute/manager.py) should be bumped.
+The implementation of the method must not expect the parameter to be present.
+
+ def some_remote_method(self, arg1, arg2, newarg=None):
+ # The code needs to deal with newarg=None for cases
+ # where an older client sends a message without it.
+ pass
+
+On the client side, the same changes should be made as in example 1. The
+minimum version that supports the new parameter should be specified.
+"""
+
+from moniker.openstack.common.rpc import common as rpc_common
+
+
+class RpcDispatcher(object):
+ """Dispatch rpc messages according to the requested API version.
+
+ This class can be used as the top level 'manager' for a service. It
+ contains a list of underlying managers that have an API_VERSION attribute.
+ """
+
+ def __init__(self, callbacks):
+ """Initialize the rpc dispatcher.
+
+ :param callbacks: List of proxy objects that are an instance
+ of a class with rpc methods exposed. Each proxy
+ object should have an RPC_API_VERSION attribute.
+ """
+ self.callbacks = callbacks
+ super(RpcDispatcher, self).__init__()
+
+ @staticmethod
+ def _is_compatible(mversion, version):
+ """Determine whether versions are compatible.
+
+ :param mversion: The API version implemented by a callback.
+ :param version: The API version requested by an incoming message.
+ """
+ version_parts = version.split('.')
+ mversion_parts = mversion.split('.')
+ if int(version_parts[0]) != int(mversion_parts[0]): # Major
+ return False
+ if int(version_parts[1]) > int(mversion_parts[1]): # Minor
+ return False
+ return True
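+
+    # NOTE: for example, a callback implementing '1.2' can handle messages
+    # of version '1.0', '1.1' and '1.2' (same major, minor <= 2), but not
+    # '1.3' or '2.0':
+    #
+    #     _is_compatible('1.2', '1.1')  # True
+    #     _is_compatible('1.2', '1.3')  # False (minor too new)
+    #     _is_compatible('1.2', '2.0')  # False (major mismatch)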
+
+ def dispatch(self, ctxt, version, method, **kwargs):
+ """Dispatch a message based on a requested version.
+
+ :param ctxt: The request context
+ :param version: The requested API version from the incoming message
+ :param method: The method requested to be called by the incoming
+ message.
+ :param kwargs: A dict of keyword arguments to be passed to the method.
+
+ :returns: Whatever is returned by the underlying method that gets
+ called.
+ """
+ if not version:
+ version = '1.0'
+
+ had_compatible = False
+ for proxyobj in self.callbacks:
+ if hasattr(proxyobj, 'RPC_API_VERSION'):
+ rpc_api_version = proxyobj.RPC_API_VERSION
+ else:
+ rpc_api_version = '1.0'
+ is_compatible = self._is_compatible(rpc_api_version, version)
+ had_compatible = had_compatible or is_compatible
+ if not hasattr(proxyobj, method):
+ continue
+ if is_compatible:
+ return getattr(proxyobj, method)(ctxt, **kwargs)
+
+ if had_compatible:
+ raise AttributeError("No such RPC function '%s'" % method)
+ else:
+ raise rpc_common.UnsupportedRpcVersion(version=version)
diff --git a/moniker/openstack/common/rpc/impl_fake.py b/moniker/openstack/common/rpc/impl_fake.py
new file mode 100644
index 00000000..41e4d0f3
--- /dev/null
+++ b/moniker/openstack/common/rpc/impl_fake.py
@@ -0,0 +1,184 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Fake RPC implementation which calls proxy methods directly with no
+queues. Casts will block, but this is very useful for tests.
+"""
+
+import inspect
+import time
+
+import eventlet
+
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common.rpc import common as rpc_common
+
+CONSUMERS = {}
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+ def __init__(self, **kwargs):
+ super(RpcContext, self).__init__(**kwargs)
+ self._response = []
+ self._done = False
+
+ def deepcopy(self):
+ values = self.to_dict()
+ new_inst = self.__class__(**values)
+ new_inst._response = self._response
+ new_inst._done = self._done
+ return new_inst
+
+ def reply(self, reply=None, failure=None, ending=False):
+ if ending:
+ self._done = True
+ if not self._done:
+ self._response.append((reply, failure))
+
+
+class Consumer(object):
+ def __init__(self, topic, proxy):
+ self.topic = topic
+ self.proxy = proxy
+
+ def call(self, context, version, method, args, timeout):
+ done = eventlet.event.Event()
+
+ def _inner():
+ ctxt = RpcContext.from_dict(context.to_dict())
+ try:
+                # Dispatch with the local RpcContext so that any manual
+                # ctxt.reply() calls are captured in ctxt._response below.
+                rval = self.proxy.dispatch(ctxt, version, method, **args)
+ res = []
+ # Caller might have called ctxt.reply() manually
+ for (reply, failure) in ctxt._response:
+ if failure:
+ raise failure[0], failure[1], failure[2]
+ res.append(reply)
+ # if ending not 'sent'...we might have more data to
+ # return from the function itself
+ if not ctxt._done:
+ if inspect.isgenerator(rval):
+ for val in rval:
+ res.append(val)
+ else:
+ res.append(rval)
+ done.send(res)
+ except Exception as e:
+ done.send_exception(e)
+
+ thread = eventlet.greenthread.spawn(_inner)
+
+ if timeout:
+ start_time = time.time()
+ while not done.ready():
+ eventlet.greenthread.sleep(1)
+ cur_time = time.time()
+ if (cur_time - start_time) > timeout:
+ thread.kill()
+ raise rpc_common.Timeout()
+
+ return done.wait()
+
+
+class Connection(object):
+ """Connection object."""
+
+ def __init__(self):
+ self.consumers = []
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ consumer = Consumer(topic, proxy)
+ self.consumers.append(consumer)
+ if topic not in CONSUMERS:
+ CONSUMERS[topic] = []
+ CONSUMERS[topic].append(consumer)
+
+ def close(self):
+ for consumer in self.consumers:
+ CONSUMERS[consumer.topic].remove(consumer)
+ self.consumers = []
+
+ def consume_in_thread(self):
+ pass
+
+
+def create_connection(conf, new=True):
+ """Create a connection"""
+ return Connection()
+
+
+def check_serialize(msg):
+ """Make sure a message intended for rpc can be serialized."""
+ jsonutils.dumps(msg)
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+ """Make a call that returns multiple times."""
+
+ check_serialize(msg)
+
+ method = msg.get('method')
+ if not method:
+ return
+ args = msg.get('args', {})
+ version = msg.get('version', None)
+
+ try:
+ consumer = CONSUMERS[topic][0]
+ except (KeyError, IndexError):
+ return iter([None])
+ else:
+ return consumer.call(context, version, method, args, timeout)
+
+
+def call(conf, context, topic, msg, timeout=None):
+ """Sends a message on a topic and wait for a response."""
+ rv = multicall(conf, context, topic, msg, timeout)
+ # NOTE(vish): return the last result from the multicall
+ rv = list(rv)
+ if not rv:
+ return
+ return rv[-1]
+
+
+def cast(conf, context, topic, msg):
+ try:
+ call(conf, context, topic, msg)
+ except Exception:
+ pass
+
+
+def notify(conf, context, topic, msg):
+ check_serialize(msg)
+
+
+def cleanup():
+ pass
+
+
+def fanout_cast(conf, context, topic, msg):
+ """Cast to all consumers of a topic"""
+ check_serialize(msg)
+ method = msg.get('method')
+ if not method:
+ return
+ args = msg.get('args', {})
+ version = msg.get('version', None)
+
+ for consumer in CONSUMERS.get(topic, []):
+ try:
+ consumer.call(context, version, method, args, None)
+ except Exception:
+ pass
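+
+
+# NOTE: a hypothetical unit-test flow with this fake driver (selected by
+# pointing the rpc_backend option at this module); names are illustrative:
+#
+#     conn = create_connection(conf)
+#     conn.create_consumer('test', dispatcher)  # e.g. an RpcDispatcher
+#     reply = call(conf, context, 'test',
+#                  {'method': 'echo', 'args': {'value': 42}})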
diff --git a/moniker/openstack/common/rpc/impl_kombu.py b/moniker/openstack/common/rpc/impl_kombu.py
new file mode 100644
index 00000000..828638b2
--- /dev/null
+++ b/moniker/openstack/common/rpc/impl_kombu.py
@@ -0,0 +1,751 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import itertools
+import socket
+import ssl
+import sys
+import time
+import uuid
+
+import eventlet
+import greenlet
+import kombu
+import kombu.connection
+import kombu.entity
+import kombu.messaging
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common.rpc import amqp as rpc_amqp
+from moniker.openstack.common.rpc import common as rpc_common
+
+kombu_opts = [
+ cfg.StrOpt('kombu_ssl_version',
+ default='',
+ help='SSL version to use (valid only if SSL enabled)'),
+ cfg.StrOpt('kombu_ssl_keyfile',
+ default='',
+ help='SSL key file (valid only if SSL enabled)'),
+ cfg.StrOpt('kombu_ssl_certfile',
+ default='',
+ help='SSL cert file (valid only if SSL enabled)'),
+ cfg.StrOpt('kombu_ssl_ca_certs',
+ default='',
+ help=('SSL certification authority file '
+ '(valid only if SSL enabled)')),
+ cfg.StrOpt('rabbit_host',
+ default='localhost',
+ help='the RabbitMQ host'),
+ cfg.IntOpt('rabbit_port',
+ default=5672,
+ help='the RabbitMQ port'),
+ cfg.BoolOpt('rabbit_use_ssl',
+ default=False,
+ help='connect over SSL for RabbitMQ'),
+ cfg.StrOpt('rabbit_userid',
+ default='guest',
+ help='the RabbitMQ userid'),
+ cfg.StrOpt('rabbit_password',
+ default='guest',
+ help='the RabbitMQ password'),
+ cfg.StrOpt('rabbit_virtual_host',
+ default='/',
+ help='the RabbitMQ virtual host'),
+ cfg.IntOpt('rabbit_retry_interval',
+ default=1,
+ help='how frequently to retry connecting with RabbitMQ'),
+ cfg.IntOpt('rabbit_retry_backoff',
+ default=2,
+ help='how long to backoff for between retries when connecting '
+ 'to RabbitMQ'),
+ cfg.IntOpt('rabbit_max_retries',
+ default=0,
+ help='maximum retries with trying to connect to RabbitMQ '
+ '(the default of 0 implies an infinite retry count)'),
+ cfg.BoolOpt('rabbit_durable_queues',
+ default=False,
+ help='use durable queues in RabbitMQ'),
+
+]
+
+cfg.CONF.register_opts(kombu_opts)
+
+LOG = rpc_common.LOG
+
+
+class ConsumerBase(object):
+ """Consumer base class."""
+
+ def __init__(self, channel, callback, tag, **kwargs):
+ """Declare a queue on an amqp channel.
+
+ 'channel' is the amqp channel to use
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ queue name, exchange name, and other kombu options are
+ passed in here as a dictionary.
+ """
+ self.callback = callback
+ self.tag = str(tag)
+ self.kwargs = kwargs
+ self.queue = None
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-declare the queue after a rabbit reconnect"""
+ self.channel = channel
+ self.kwargs['channel'] = channel
+ self.queue = kombu.entity.Queue(**self.kwargs)
+ self.queue.declare()
+
+ def consume(self, *args, **kwargs):
+ """Actually declare the consumer on the amqp channel. This will
+ start the flow of messages from the queue. Using the
+ Connection.iterconsume() iterator will process the messages,
+ calling the appropriate callback.
+
+ If a callback is specified in kwargs, use that. Otherwise,
+ use the callback passed during __init__()
+
+        If kwargs['nowait'] is False, then this call will block until
+        the broker has confirmed the consumer registration.
+
+ Messages will automatically be acked if the callback doesn't
+ raise an exception
+ """
+
+ options = {'consumer_tag': self.tag}
+ options['nowait'] = kwargs.get('nowait', False)
+ callback = kwargs.get('callback', self.callback)
+ if not callback:
+ raise ValueError("No callback defined")
+
+ def _callback(raw_message):
+ message = self.channel.message_to_python(raw_message)
+ try:
+ callback(message.payload)
+ message.ack()
+ except Exception:
+ LOG.exception(_("Failed to process message... skipping it."))
+
+ self.queue.consume(*args, callback=_callback, **options)
+
+ def cancel(self):
+ """Cancel the consuming from the queue, if it has started"""
+ try:
+ self.queue.cancel(self.tag)
+ except KeyError, e:
+ # NOTE(comstud): Kludge to get around a amqplib bug
+ if str(e) != "u'%s'" % self.tag:
+ raise
+ self.queue = None
+
+
+class DirectConsumer(ConsumerBase):
+ """Queue/consumer class for 'direct'"""
+
+ def __init__(self, conf, channel, msg_id, callback, tag, **kwargs):
+ """Init a 'direct' queue.
+
+ 'channel' is the amqp channel to use
+ 'msg_id' is the msg_id to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(name=msg_id,
+ type='direct',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(DirectConsumer, self).__init__(channel,
+ callback,
+ tag,
+ name=msg_id,
+ exchange=exchange,
+ routing_key=msg_id,
+ **options)
+
+
+class TopicConsumer(ConsumerBase):
+ """Consumer class for 'topic'"""
+
+ def __init__(self, conf, channel, topic, callback, tag, name=None,
+ **kwargs):
+ """Init a 'topic' queue.
+
+ :param channel: the amqp channel to use
+ :param topic: the topic to listen on
+ :paramtype topic: str
+ :param callback: the callback to call when messages are received
+ :param tag: a unique ID for the consumer on the channel
+ :param name: optional queue name, defaults to topic
+ :paramtype name: str
+
+ Other kombu options may be passed as keyword arguments
+ """
+ # Default options
+ options = {'durable': conf.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(name=conf.control_exchange,
+ type='topic',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(TopicConsumer, self).__init__(channel,
+ callback,
+ tag,
+ name=name or topic,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class FanoutConsumer(ConsumerBase):
+ """Consumer class for 'fanout'"""
+
+ def __init__(self, conf, channel, topic, callback, tag, **kwargs):
+ """Init a 'fanout' queue.
+
+ 'channel' is the amqp channel to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ 'tag' is a unique ID for the consumer on the channel
+
+ Other kombu options may be passed
+ """
+ unique = uuid.uuid4().hex
+ exchange_name = '%s_fanout' % topic
+ queue_name = '%s_fanout_%s' % (topic, unique)
+
+ # Default options
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ exchange = kombu.entity.Exchange(name=exchange_name, type='fanout',
+ durable=options['durable'],
+ auto_delete=options['auto_delete'])
+ super(FanoutConsumer, self).__init__(channel, callback, tag,
+ name=queue_name,
+ exchange=exchange,
+ routing_key=topic,
+ **options)
+
+
+class Publisher(object):
+ """Base Publisher class"""
+
+ def __init__(self, channel, exchange_name, routing_key, **kwargs):
+ """Init the Publisher class with the exchange_name, routing_key,
+ and other options
+ """
+ self.exchange_name = exchange_name
+ self.routing_key = routing_key
+ self.kwargs = kwargs
+ self.reconnect(channel)
+
+ def reconnect(self, channel):
+ """Re-establish the Producer after a rabbit reconnection"""
+ self.exchange = kombu.entity.Exchange(name=self.exchange_name,
+ **self.kwargs)
+ self.producer = kombu.messaging.Producer(exchange=self.exchange,
+ channel=channel,
+ routing_key=self.routing_key)
+
+ def send(self, msg):
+ """Send a message"""
+ self.producer.publish(msg)
+
+
+class DirectPublisher(Publisher):
+ """Publisher class for 'direct'"""
+ def __init__(self, conf, channel, msg_id, **kwargs):
+ """init a 'direct' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(DirectPublisher, self).__init__(channel, msg_id, msg_id,
+ type='direct', **options)
+
+
+class TopicPublisher(Publisher):
+ """Publisher class for 'topic'"""
+ def __init__(self, conf, channel, topic, **kwargs):
+ """init a 'topic' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': conf.rabbit_durable_queues,
+ 'auto_delete': False,
+ 'exclusive': False}
+ options.update(kwargs)
+ super(TopicPublisher, self).__init__(channel, conf.control_exchange,
+ topic, type='topic', **options)
+
+
+class FanoutPublisher(Publisher):
+ """Publisher class for 'fanout'"""
+ def __init__(self, conf, channel, topic, **kwargs):
+ """init a 'fanout' publisher.
+
+ Kombu options may be passed as keyword args to override defaults
+ """
+ options = {'durable': False,
+ 'auto_delete': True,
+ 'exclusive': True}
+ options.update(kwargs)
+ super(FanoutPublisher, self).__init__(channel, '%s_fanout' % topic,
+ None, type='fanout', **options)
+
+
+class NotifyPublisher(TopicPublisher):
+ """Publisher class for 'notify'"""
+
+ def __init__(self, conf, channel, topic, **kwargs):
+ self.durable = kwargs.pop('durable', conf.rabbit_durable_queues)
+ super(NotifyPublisher, self).__init__(conf, channel, topic, **kwargs)
+
+ def reconnect(self, channel):
+ super(NotifyPublisher, self).reconnect(channel)
+
+ # NOTE(jerdfelt): Normally the consumer would create the queue, but
+ # we do this to ensure that messages don't get dropped if the
+ # consumer is started after we do
+ queue = kombu.entity.Queue(channel=channel,
+ exchange=self.exchange,
+ durable=self.durable,
+ name=self.routing_key,
+ routing_key=self.routing_key)
+ queue.declare()
+
+
+class Connection(object):
+ """Connection object."""
+
+ pool = None
+
+ def __init__(self, conf, server_params=None):
+ self.consumers = []
+ self.consumer_thread = None
+ self.conf = conf
+ self.max_retries = self.conf.rabbit_max_retries
+ # Try forever?
+ if self.max_retries <= 0:
+ self.max_retries = None
+ self.interval_start = self.conf.rabbit_retry_interval
+ self.interval_stepping = self.conf.rabbit_retry_backoff
+ # max retry-interval = 30 seconds
+ self.interval_max = 30
+ self.memory_transport = False
+
+ if server_params is None:
+ server_params = {}
+
+ # Keys to translate from server_params to kombu params
+ server_params_to_kombu_params = {'username': 'userid'}
+
+ params = {}
+ for sp_key, value in server_params.iteritems():
+ p_key = server_params_to_kombu_params.get(sp_key, sp_key)
+ params[p_key] = value
+
+ params.setdefault('hostname', self.conf.rabbit_host)
+ params.setdefault('port', self.conf.rabbit_port)
+ params.setdefault('userid', self.conf.rabbit_userid)
+ params.setdefault('password', self.conf.rabbit_password)
+ params.setdefault('virtual_host', self.conf.rabbit_virtual_host)
+
+ self.params = params
+
+ if self.conf.fake_rabbit:
+ self.params['transport'] = 'memory'
+ self.memory_transport = True
+ else:
+ self.memory_transport = False
+
+ if self.conf.rabbit_use_ssl:
+ self.params['ssl'] = self._fetch_ssl_params()
+
+ self.connection = None
+ self.reconnect()
+
+ def _fetch_ssl_params(self):
+ """Handles fetching what ssl params
+ should be used for the connection (if any)"""
+ ssl_params = dict()
+
+ # http://docs.python.org/library/ssl.html - ssl.wrap_socket
+ if self.conf.kombu_ssl_version:
+ ssl_params['ssl_version'] = self.conf.kombu_ssl_version
+ if self.conf.kombu_ssl_keyfile:
+ ssl_params['keyfile'] = self.conf.kombu_ssl_keyfile
+ if self.conf.kombu_ssl_certfile:
+ ssl_params['certfile'] = self.conf.kombu_ssl_certfile
+ if self.conf.kombu_ssl_ca_certs:
+ ssl_params['ca_certs'] = self.conf.kombu_ssl_ca_certs
+ # We might want to allow variations in the
+ # future with this?
+ ssl_params['cert_reqs'] = ssl.CERT_REQUIRED
+
+ if not ssl_params:
+ # Just have the default behavior
+ return True
+ else:
+ # Return the extended behavior
+ return ssl_params
+
+ def _connect(self):
+ """Connect to rabbit. Re-establish any queues that may have
+ been declared before if we are reconnecting. Exceptions should
+ be handled by the caller.
+ """
+ if self.connection:
+ LOG.info(_("Reconnecting to AMQP server on "
+ "%(hostname)s:%(port)d") % self.params)
+ try:
+ self.connection.close()
+ except self.connection_errors:
+ pass
+ # Setting this in case the next statement fails, though
+ # it shouldn't be doing any network operations, yet.
+ self.connection = None
+ self.connection = kombu.connection.BrokerConnection(**self.params)
+ self.connection_errors = self.connection.connection_errors
+ if self.memory_transport:
+ # Kludge to speed up tests.
+ self.connection.transport.polling_interval = 0.0
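+        # Restart consumer tag numbering for the new channel.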
+ self.consumer_num = itertools.count(1)
+ self.connection.connect()
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ for consumer in self.consumers:
+ consumer.reconnect(self.channel)
+ LOG.info(_('Connected to AMQP server on %(hostname)s:%(port)d'),
+ self.params)
+
+ def reconnect(self):
+ """Handles reconnecting and re-establishing queues.
+ Will retry up to self.max_retries number of times.
+ self.max_retries = 0 means to retry forever.
+ Sleep between tries, starting at self.interval_start
+ seconds, backing off self.interval_stepping number of seconds
+ each attempt.
+ """
+
+ attempt = 0
+ while True:
+ attempt += 1
+ try:
+ self._connect()
+ return
+ except (self.connection_errors, IOError), e:
+ pass
+ except Exception, e:
+ # NOTE(comstud): Unfortunately it's possible for amqplib
+ # to return an error not covered by its transport
+ # connection_errors in the case of a timeout waiting for
+ # a protocol response. (See paste link in LP888621)
+ # So, we check all exceptions for 'timeout' in them
+ # and try to reconnect in this case.
+ if 'timeout' not in str(e):
+ raise
+
+ log_info = {}
+ log_info['err_str'] = str(e)
+ log_info['max_retries'] = self.max_retries
+ log_info.update(self.params)
+
+ if self.max_retries and attempt == self.max_retries:
+ LOG.exception(_('Unable to connect to AMQP server on '
+ '%(hostname)s:%(port)d after %(max_retries)d '
+ 'tries: %(err_str)s') % log_info)
+ # NOTE(comstud): Copied from original code. There's
+ # really no better recourse because if this was a queue we
+ # need to consume on, we have no way to consume anymore.
+ sys.exit(1)
+
+ if attempt == 1:
+ sleep_time = self.interval_start or 1
+ elif attempt > 1:
+ sleep_time += self.interval_stepping
+ if self.interval_max:
+ sleep_time = min(sleep_time, self.interval_max)
+
+ log_info['sleep_time'] = sleep_time
+ LOG.exception(_('AMQP server on %(hostname)s:%(port)d is'
+ ' unreachable: %(err_str)s. Trying again in '
+ '%(sleep_time)d seconds.') % log_info)
+ time.sleep(sleep_time)
+
+ def ensure(self, error_callback, method, *args, **kwargs):
+ while True:
+ try:
+ return method(*args, **kwargs)
+ except (self.connection_errors, socket.timeout, IOError), e:
+ pass
+ except Exception, e:
+ # NOTE(comstud): Unfortunately it's possible for amqplib
+ # to return an error not covered by its transport
+ # connection_errors in the case of a timeout waiting for
+ # a protocol response. (See paste link in LP888621)
+ # So, we check all exceptions for 'timeout' in them
+ # and try to reconnect in this case.
+ if 'timeout' not in str(e):
+ raise
+ if error_callback:
+ error_callback(e)
+ self.reconnect()
+
+ def get_channel(self):
+ """Convenience call for bin/clear_rabbit_queues"""
+ return self.channel
+
+ def close(self):
+ """Close/release this connection"""
+ self.cancel_consumer_thread()
+ self.connection.release()
+ self.connection = None
+
+ def reset(self):
+ """Reset a connection so it can be used again"""
+ self.cancel_consumer_thread()
+ self.channel.close()
+ self.channel = self.connection.channel()
+ # work around 'memory' transport bug in 1.1.3
+ if self.memory_transport:
+ self.channel._new_queue('ae.undeliver')
+ self.consumers = []
+
+ def declare_consumer(self, consumer_cls, topic, callback):
+ """Create a Consumer using the class that was passed in and
+ add it to our list of consumers
+ """
+
+ def _connect_error(exc):
+ log_info = {'topic': topic, 'err_str': str(exc)}
+ LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+ "%(err_str)s") % log_info)
+
+ def _declare_consumer():
+ consumer = consumer_cls(self.conf, self.channel, topic, callback,
+ self.consumer_num.next())
+ self.consumers.append(consumer)
+ return consumer
+
+ return self.ensure(_connect_error, _declare_consumer)
+
+ def iterconsume(self, limit=None, timeout=None):
+ """Return an iterator that will consume from all queues/consumers"""
+
+ info = {'do_consume': True}
+
+ def _error_callback(exc):
+ if isinstance(exc, socket.timeout):
+ LOG.exception(_('Timed out waiting for RPC response: %s') %
+ str(exc))
+ raise rpc_common.Timeout()
+ else:
+ LOG.exception(_('Failed to consume message from queue: %s') %
+ str(exc))
+ info['do_consume'] = True
+
+ def _consume():
+ if info['do_consume']:
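+                # Register every consumer with the broker, but only
+                # wait for the reply (nowait=False) on the last one, so
+                # all consumers are in place before drain_events() blocks.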
+ queues_head = self.consumers[:-1]
+ queues_tail = self.consumers[-1]
+ for queue in queues_head:
+ queue.consume(nowait=True)
+ queues_tail.consume(nowait=False)
+ info['do_consume'] = False
+ return self.connection.drain_events(timeout=timeout)
+
+ for iteration in itertools.count(0):
+ if limit and iteration >= limit:
+ raise StopIteration
+ yield self.ensure(_error_callback, _consume)
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self.consumer_thread is not None:
+ self.consumer_thread.kill()
+ try:
+ self.consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self.consumer_thread = None
+
+ def publisher_send(self, cls, topic, msg, **kwargs):
+ """Send to a publisher based on the publisher class"""
+
+ def _error_callback(exc):
+ log_info = {'topic': topic, 'err_str': str(exc)}
+ LOG.exception(_("Failed to publish message to topic "
+ "'%(topic)s': %(err_str)s") % log_info)
+
+ def _publish():
+ publisher = cls(self.conf, self.channel, topic, **kwargs)
+ publisher.send(msg)
+
+ self.ensure(_error_callback, _publish)
+
+ def declare_direct_consumer(self, topic, callback):
+ """Create a 'direct' queue.
+ In nova's use, this is generally a msg_id queue used for
+ responses for call/multicall
+ """
+ self.declare_consumer(DirectConsumer, topic, callback)
+
+ def declare_topic_consumer(self, topic, callback=None, queue_name=None):
+ """Create a 'topic' consumer."""
+ self.declare_consumer(functools.partial(TopicConsumer,
+ name=queue_name,
+ ),
+ topic, callback)
+
+ def declare_fanout_consumer(self, topic, callback):
+ """Create a 'fanout' consumer"""
+ self.declare_consumer(FanoutConsumer, topic, callback)
+
+ def direct_send(self, msg_id, msg):
+ """Send a 'direct' message"""
+ self.publisher_send(DirectPublisher, msg_id, msg)
+
+ def topic_send(self, topic, msg):
+ """Send a 'topic' message"""
+ self.publisher_send(TopicPublisher, topic, msg)
+
+ def fanout_send(self, topic, msg):
+ """Send a 'fanout' message"""
+ self.publisher_send(FanoutPublisher, topic, msg)
+
+ def notify_send(self, topic, msg, **kwargs):
+ """Send a notify message on a topic"""
+ self.publisher_send(NotifyPublisher, topic, msg, **kwargs)
+
+ def consume(self, limit=None):
+ """Consume from all queues/consumers"""
+ it = self.iterconsume(limit=limit)
+ while True:
+ try:
+ it.next()
+ except StopIteration:
+ return
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+ def _consumer_thread():
+ try:
+ self.consume()
+ except greenlet.GreenletExit:
+ return
+ if self.consumer_thread is None:
+ self.consumer_thread = eventlet.spawn(_consumer_thread)
+ return self.consumer_thread
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls a method in a proxy object"""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+
+ if fanout:
+ self.declare_fanout_consumer(topic, proxy_cb)
+ else:
+ self.declare_topic_consumer(topic, proxy_cb)
+
+ def create_worker(self, topic, proxy, pool_name):
+ """Create a worker that calls a method in a proxy object"""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+ self.declare_topic_consumer(topic, proxy_cb, pool_name)
+
+
+def create_connection(conf, new=True):
+ """Create a connection"""
+ return rpc_amqp.create_connection(
+ conf, new,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+ """Make a call that returns multiple times."""
+ return rpc_amqp.multicall(
+ conf, context, topic, msg, timeout,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+ """Sends a message on a topic and wait for a response."""
+ return rpc_amqp.call(
+ conf, context, topic, msg, timeout,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+ """Sends a message on a topic without waiting for a response."""
+ return rpc_amqp.cast(
+ conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response."""
+ return rpc_amqp.fanout_cast(
+ conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+ """Sends a message on a topic to a specific server."""
+ return rpc_amqp.cast_to_server(
+ conf, context, server_params, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+ """Sends a message on a fanout exchange to a specific server."""
+ return rpc_amqp.cast_to_server(
+ conf, context, server_params, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg):
+ """Sends a notification event on a topic."""
+ return rpc_amqp.notify(
+ conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cleanup():
+ return rpc_amqp.cleanup(Connection.pool)
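
The retry schedule implemented by Connection.reconnect() above is easiest to
see standalone. A minimal sketch (not part of the module), with
interval_start=1 and interval_stepping=2 assumed for illustration and the
hard-coded interval_max of 30:

    # Illustrative only; the parameter values are assumptions, not defaults.
    interval_start, interval_stepping, interval_max = 1, 2, 30

    sleep_time = 0
    for attempt in range(1, 20):
        if attempt == 1:
            sleep_time = interval_start or 1
        else:
            sleep_time += interval_stepping
        if interval_max:
            sleep_time = min(sleep_time, interval_max)
        print("attempt %d: sleep %ds" % (attempt, sleep_time))
    # -> 1, 3, 5, ... rising by interval_stepping and capped at 30
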
diff --git a/moniker/openstack/common/rpc/impl_qpid.py b/moniker/openstack/common/rpc/impl_qpid.py
new file mode 100644
index 00000000..5a2c002a
--- /dev/null
+++ b/moniker/openstack/common/rpc/impl_qpid.py
@@ -0,0 +1,599 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+# Copyright 2011 - 2012, Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import functools
+import itertools
+import logging
+import time
+import uuid
+
+import eventlet
+import greenlet
+import qpid.messaging
+import qpid.messaging.exceptions
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common.rpc import amqp as rpc_amqp
+from moniker.openstack.common.rpc import common as rpc_common
+
+LOG = logging.getLogger(__name__)
+
+qpid_opts = [
+ cfg.StrOpt('qpid_hostname',
+ default='localhost',
+ help='Qpid broker hostname'),
+ cfg.StrOpt('qpid_port',
+ default='5672',
+ help='Qpid broker port'),
+ cfg.StrOpt('qpid_username',
+ default='',
+ help='Username for qpid connection'),
+ cfg.StrOpt('qpid_password',
+ default='',
+ help='Password for qpid connection'),
+ cfg.StrOpt('qpid_sasl_mechanisms',
+ default='',
+ help='Space separated list of SASL mechanisms to use for auth'),
+ cfg.BoolOpt('qpid_reconnect',
+ default=True,
+ help='Automatically reconnect'),
+ cfg.IntOpt('qpid_reconnect_timeout',
+ default=0,
+ help='Reconnection timeout in seconds'),
+ cfg.IntOpt('qpid_reconnect_limit',
+ default=0,
+ help='Max reconnections before giving up'),
+ cfg.IntOpt('qpid_reconnect_interval_min',
+ default=0,
+ help='Minimum seconds between reconnection attempts'),
+ cfg.IntOpt('qpid_reconnect_interval_max',
+ default=0,
+ help='Maximum seconds between reconnection attempts'),
+ cfg.IntOpt('qpid_reconnect_interval',
+ default=0,
+ help='Equivalent to setting max and min to the same value'),
+ cfg.IntOpt('qpid_heartbeat',
+ default=5,
+ help='Seconds between connection keepalive heartbeats'),
+ cfg.StrOpt('qpid_protocol',
+ default='tcp',
+ help="Transport to use, either 'tcp' or 'ssl'"),
+ cfg.BoolOpt('qpid_tcp_nodelay',
+ default=True,
+ help='Disable Nagle algorithm'),
+]
+
+cfg.CONF.register_opts(qpid_opts)
+
+
+class ConsumerBase(object):
+ """Consumer base class."""
+
+ def __init__(self, session, callback, node_name, node_opts,
+ link_name, link_opts):
+ """Declare a queue on an amqp session.
+
+ 'session' is the amqp session to use
+ 'callback' is the callback to call when messages are received
+ 'node_name' is the first part of the Qpid address string, before ';'
+ 'node_opts' will be applied to the "x-declare" section of "node"
+ in the address string.
+ 'link_name' goes into the "name" field of the "link" in the address
+ string
+ 'link_opts' will be applied to the "x-declare" section of "link"
+ in the address string.
+ """
+ self.callback = callback
+ self.receiver = None
+ self.session = None
+
+ addr_opts = {
+ "create": "always",
+ "node": {
+ "type": "topic",
+ "x-declare": {
+ "durable": True,
+ "auto-delete": True,
+ },
+ },
+ "link": {
+ "name": link_name,
+ "durable": True,
+ "x-declare": {
+ "durable": False,
+ "auto-delete": True,
+ "exclusive": False,
+ },
+ },
+ }
+ addr_opts["node"]["x-declare"].update(node_opts)
+ addr_opts["link"]["x-declare"].update(link_opts)
+
+ self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+ self.reconnect(session)
+
+ def reconnect(self, session):
+ """Re-declare the receiver after a qpid reconnect"""
+ self.session = session
+ self.receiver = session.receiver(self.address)
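+        # Prefetch at most one message at a time.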
+ self.receiver.capacity = 1
+
+ def consume(self):
+ """Fetch the message and pass it to the callback object"""
+ message = self.receiver.fetch()
+ try:
+ self.callback(message.content)
+ except Exception:
+ LOG.exception(_("Failed to process message... skipping it."))
+ finally:
+ self.session.acknowledge(message)
+
+ def get_receiver(self):
+ return self.receiver
+
+
+class DirectConsumer(ConsumerBase):
+ """Queue/consumer class for 'direct'"""
+
+ def __init__(self, conf, session, msg_id, callback):
+ """Init a 'direct' queue.
+
+ 'session' is the amqp session to use
+ 'msg_id' is the msg_id to listen on
+ 'callback' is the callback to call when messages are received
+ """
+
+ super(DirectConsumer, self).__init__(session, callback,
+ "%s/%s" % (msg_id, msg_id),
+ {"type": "direct"},
+ msg_id,
+ {"exclusive": True})
+
+
+class TopicConsumer(ConsumerBase):
+ """Consumer class for 'topic'"""
+
+ def __init__(self, conf, session, topic, callback, name=None):
+ """Init a 'topic' queue.
+
+ :param session: the amqp session to use
+ :param topic: is the topic to listen on
+        :type topic: str
+ :param callback: the callback to call when messages are received
+ :param name: optional queue name, defaults to topic
+ """
+
+ super(TopicConsumer, self).__init__(session, callback,
+ "%s/%s" % (conf.control_exchange,
+ topic),
+ {}, name or topic, {})
+
+
+class FanoutConsumer(ConsumerBase):
+ """Consumer class for 'fanout'"""
+
+ def __init__(self, conf, session, topic, callback):
+ """Init a 'fanout' queue.
+
+ 'session' is the amqp session to use
+ 'topic' is the topic to listen on
+ 'callback' is the callback to call when messages are received
+ """
+
+ super(FanoutConsumer, self).__init__(
+ session, callback,
+ "%s_fanout" % topic,
+ {"durable": False, "type": "fanout"},
+ "%s_fanout_%s" % (topic, uuid.uuid4().hex),
+ {"exclusive": True})
+
+
+class Publisher(object):
+ """Base Publisher class"""
+
+ def __init__(self, session, node_name, node_opts=None):
+ """Init the Publisher class with the exchange_name, routing_key,
+ and other options
+ """
+ self.sender = None
+ self.session = session
+
+ addr_opts = {
+ "create": "always",
+ "node": {
+ "type": "topic",
+ "x-declare": {
+ "durable": False,
+ # auto-delete isn't implemented for exchanges in qpid,
+ # but put in here anyway
+ "auto-delete": True,
+ },
+ },
+ }
+ if node_opts:
+ addr_opts["node"]["x-declare"].update(node_opts)
+
+ self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
+
+ self.reconnect(session)
+
+ def reconnect(self, session):
+ """Re-establish the Sender after a reconnection"""
+ self.sender = session.sender(self.address)
+
+ def send(self, msg):
+ """Send a message"""
+ self.sender.send(msg)
+
+
+class DirectPublisher(Publisher):
+ """Publisher class for 'direct'"""
+ def __init__(self, conf, session, msg_id):
+ """Init a 'direct' publisher."""
+ super(DirectPublisher, self).__init__(session, msg_id,
+                                              {"type": "direct"})
+
+
+class TopicPublisher(Publisher):
+ """Publisher class for 'topic'"""
+ def __init__(self, conf, session, topic):
+ """init a 'topic' publisher.
+ """
+ super(TopicPublisher, self).__init__(
+ session,
+ "%s/%s" % (conf.control_exchange, topic))
+
+
+class FanoutPublisher(Publisher):
+ """Publisher class for 'fanout'"""
+ def __init__(self, conf, session, topic):
+ """init a 'fanout' publisher.
+ """
+ super(FanoutPublisher, self).__init__(
+ session,
+ "%s_fanout" % topic, {"type": "fanout"})
+
+
+class NotifyPublisher(Publisher):
+ """Publisher class for notifications"""
+ def __init__(self, conf, session, topic):
+ """init a 'topic' publisher.
+ """
+ super(NotifyPublisher, self).__init__(
+ session,
+ "%s/%s" % (conf.control_exchange, topic),
+ {"durable": True})
+
+
+class Connection(object):
+ """Connection object."""
+
+ pool = None
+
+ def __init__(self, conf, server_params=None):
+ self.session = None
+ self.consumers = {}
+ self.consumer_thread = None
+ self.conf = conf
+
+ if server_params is None:
+ server_params = {}
+
+ default_params = dict(hostname=self.conf.qpid_hostname,
+ port=self.conf.qpid_port,
+ username=self.conf.qpid_username,
+ password=self.conf.qpid_password)
+
+ params = server_params
+ for key in default_params.keys():
+ params.setdefault(key, default_params[key])
+
+ self.broker = params['hostname'] + ":" + str(params['port'])
+ # Create the connection - this does not open the connection
+ self.connection = qpid.messaging.Connection(self.broker)
+
+ # Check if flags are set and if so set them for the connection
+ # before we call open
+ self.connection.username = params['username']
+ self.connection.password = params['password']
+ self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
+ self.connection.reconnect = self.conf.qpid_reconnect
+ if self.conf.qpid_reconnect_timeout:
+ self.connection.reconnect_timeout = (
+ self.conf.qpid_reconnect_timeout)
+ if self.conf.qpid_reconnect_limit:
+ self.connection.reconnect_limit = self.conf.qpid_reconnect_limit
+ if self.conf.qpid_reconnect_interval_max:
+ self.connection.reconnect_interval_max = (
+ self.conf.qpid_reconnect_interval_max)
+ if self.conf.qpid_reconnect_interval_min:
+ self.connection.reconnect_interval_min = (
+ self.conf.qpid_reconnect_interval_min)
+ if self.conf.qpid_reconnect_interval:
+ self.connection.reconnect_interval = (
+ self.conf.qpid_reconnect_interval)
+ self.connection.heartbeat = self.conf.qpid_heartbeat
+ self.connection.protocol = self.conf.qpid_protocol
+ self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
+
+ # Open is part of reconnect -
+ # NOTE(WGH) not sure we need this with the reconnect flags
+ self.reconnect()
+
+ def _register_consumer(self, consumer):
+ self.consumers[str(consumer.get_receiver())] = consumer
+
+ def _lookup_consumer(self, receiver):
+ return self.consumers[str(receiver)]
+
+ def reconnect(self):
+ """Handles reconnecting and re-establishing sessions and queues"""
+ if self.connection.opened():
+ try:
+ self.connection.close()
+ except qpid.messaging.exceptions.ConnectionError:
+ pass
+
+ while True:
+ try:
+ self.connection.open()
+ except qpid.messaging.exceptions.ConnectionError, e:
+ LOG.error(_('Unable to connect to AMQP server: %s'), e)
+ time.sleep(self.conf.qpid_reconnect_interval or 1)
+ else:
+ break
+
+ LOG.info(_('Connected to AMQP server on %s'), self.broker)
+
+ self.session = self.connection.session()
+
+ for consumer in self.consumers.itervalues():
+ consumer.reconnect(self.session)
+
+ if self.consumers:
+ LOG.debug(_("Re-established AMQP queues"))
+
+ def ensure(self, error_callback, method, *args, **kwargs):
+ while True:
+ try:
+ return method(*args, **kwargs)
+ except (qpid.messaging.exceptions.Empty,
+ qpid.messaging.exceptions.ConnectionError), e:
+ if error_callback:
+ error_callback(e)
+ self.reconnect()
+
+ def close(self):
+ """Close/release this connection"""
+ self.cancel_consumer_thread()
+ self.connection.close()
+ self.connection = None
+
+ def reset(self):
+ """Reset a connection so it can be used again"""
+ self.cancel_consumer_thread()
+ self.session.close()
+ self.session = self.connection.session()
+ self.consumers = {}
+
+ def declare_consumer(self, consumer_cls, topic, callback):
+ """Create a Consumer using the class that was passed in and
+ add it to our list of consumers
+ """
+ def _connect_error(exc):
+ log_info = {'topic': topic, 'err_str': str(exc)}
+ LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
+ "%(err_str)s") % log_info)
+
+ def _declare_consumer():
+ consumer = consumer_cls(self.conf, self.session, topic, callback)
+ self._register_consumer(consumer)
+ return consumer
+
+ return self.ensure(_connect_error, _declare_consumer)
+
+ def iterconsume(self, limit=None, timeout=None):
+ """Return an iterator that will consume from all queues/consumers"""
+
+ def _error_callback(exc):
+ if isinstance(exc, qpid.messaging.exceptions.Empty):
+ LOG.exception(_('Timed out waiting for RPC response: %s') %
+ str(exc))
+ raise rpc_common.Timeout()
+ else:
+ LOG.exception(_('Failed to consume message from queue: %s') %
+ str(exc))
+
+ def _consume():
+ nxt_receiver = self.session.next_receiver(timeout=timeout)
+ try:
+ self._lookup_consumer(nxt_receiver).consume()
+ except Exception:
+ LOG.exception(_("Error processing message. Skipping it."))
+
+ for iteration in itertools.count(0):
+ if limit and iteration >= limit:
+ raise StopIteration
+ yield self.ensure(_error_callback, _consume)
+
+ def cancel_consumer_thread(self):
+ """Cancel a consumer thread"""
+ if self.consumer_thread is not None:
+ self.consumer_thread.kill()
+ try:
+ self.consumer_thread.wait()
+ except greenlet.GreenletExit:
+ pass
+ self.consumer_thread = None
+
+ def publisher_send(self, cls, topic, msg):
+ """Send to a publisher based on the publisher class"""
+
+ def _connect_error(exc):
+ log_info = {'topic': topic, 'err_str': str(exc)}
+ LOG.exception(_("Failed to publish message to topic "
+ "'%(topic)s': %(err_str)s") % log_info)
+
+ def _publisher_send():
+ publisher = cls(self.conf, self.session, topic)
+ publisher.send(msg)
+
+ return self.ensure(_connect_error, _publisher_send)
+
+ def declare_direct_consumer(self, topic, callback):
+ """Create a 'direct' queue.
+ In nova's use, this is generally a msg_id queue used for
+ responses for call/multicall
+ """
+ self.declare_consumer(DirectConsumer, topic, callback)
+
+ def declare_topic_consumer(self, topic, callback=None, queue_name=None):
+ """Create a 'topic' consumer."""
+ self.declare_consumer(functools.partial(TopicConsumer,
+ name=queue_name,
+ ),
+ topic, callback)
+
+ def declare_fanout_consumer(self, topic, callback):
+ """Create a 'fanout' consumer"""
+ self.declare_consumer(FanoutConsumer, topic, callback)
+
+ def direct_send(self, msg_id, msg):
+ """Send a 'direct' message"""
+ self.publisher_send(DirectPublisher, msg_id, msg)
+
+ def topic_send(self, topic, msg):
+ """Send a 'topic' message"""
+ self.publisher_send(TopicPublisher, topic, msg)
+
+ def fanout_send(self, topic, msg):
+ """Send a 'fanout' message"""
+ self.publisher_send(FanoutPublisher, topic, msg)
+
+ def notify_send(self, topic, msg, **kwargs):
+ """Send a notify message on a topic"""
+ self.publisher_send(NotifyPublisher, topic, msg)
+
+ def consume(self, limit=None):
+ """Consume from all queues/consumers"""
+ it = self.iterconsume(limit=limit)
+ while True:
+ try:
+ it.next()
+ except StopIteration:
+ return
+
+ def consume_in_thread(self):
+ """Consumer from all queues/consumers in a greenthread"""
+ def _consumer_thread():
+ try:
+ self.consume()
+ except greenlet.GreenletExit:
+ return
+ if self.consumer_thread is None:
+ self.consumer_thread = eventlet.spawn(_consumer_thread)
+ return self.consumer_thread
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ """Create a consumer that calls a method in a proxy object"""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+
+ if fanout:
+ consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
+ else:
+ consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
+
+ self._register_consumer(consumer)
+
+ return consumer
+
+ def create_worker(self, topic, proxy, pool_name):
+ """Create a worker that calls a method in a proxy object"""
+ proxy_cb = rpc_amqp.ProxyCallback(
+ self.conf, proxy,
+ rpc_amqp.get_connection_pool(self.conf, Connection))
+
+ consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
+ name=pool_name)
+
+ self._register_consumer(consumer)
+
+ return consumer
+
+
+def create_connection(conf, new=True):
+ """Create a connection"""
+ return rpc_amqp.create_connection(
+ conf, new,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def multicall(conf, context, topic, msg, timeout=None):
+ """Make a call that returns multiple times."""
+ return rpc_amqp.multicall(
+ conf, context, topic, msg, timeout,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def call(conf, context, topic, msg, timeout=None):
+ """Sends a message on a topic and wait for a response."""
+ return rpc_amqp.call(
+ conf, context, topic, msg, timeout,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast(conf, context, topic, msg):
+ """Sends a message on a topic without waiting for a response."""
+ return rpc_amqp.cast(
+ conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast(conf, context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response."""
+ return rpc_amqp.fanout_cast(
+ conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cast_to_server(conf, context, server_params, topic, msg):
+ """Sends a message on a topic to a specific server."""
+ return rpc_amqp.cast_to_server(
+ conf, context, server_params, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def fanout_cast_to_server(conf, context, server_params, topic, msg):
+ """Sends a message on a fanout exchange to a specific server."""
+ return rpc_amqp.fanout_cast_to_server(
+ conf, context, server_params, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def notify(conf, context, topic, msg):
+ """Sends a notification event on a topic."""
+ return rpc_amqp.notify(conf, context, topic, msg,
+ rpc_amqp.get_connection_pool(conf, Connection))
+
+
+def cleanup():
+ return rpc_amqp.cleanup(Connection.pool)
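
Both ConsumerBase and Publisher above reduce to building a Qpid address
string of the form "node_name ; {options}". A standalone sketch of what a
topic consumer's address looks like, using the stdlib json module in place
of jsonutils (the exchange and topic names here are hypothetical):

    import json

    node_name = "moniker/central"  # control_exchange/topic
    addr_opts = {
        "create": "always",
        "node": {
            "type": "topic",
            "x-declare": {"durable": True, "auto-delete": True},
        },
        "link": {
            "name": "central",
            "durable": True,
            "x-declare": {"durable": False, "auto-delete": True,
                          "exclusive": False},
        },
    }
    address = "%s ; %s" % (node_name, json.dumps(addr_opts))
    print(address)  # handed to session.receiver() / session.sender()
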
diff --git a/moniker/openstack/common/rpc/impl_zmq.py b/moniker/openstack/common/rpc/impl_zmq.py
new file mode 100644
index 00000000..64c0efc9
--- /dev/null
+++ b/moniker/openstack/common/rpc/impl_zmq.py
@@ -0,0 +1,718 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pprint
+import socket
+import string
+import sys
+import types
+import uuid
+
+import eventlet
+from eventlet.green import zmq
+import greenlet
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import importutils
+from moniker.openstack.common import jsonutils
+from moniker.openstack.common.rpc import common as rpc_common
+
+
+# Aliases, for convenience; they are not modified.
+pformat = pprint.pformat
+Timeout = eventlet.timeout.Timeout
+LOG = rpc_common.LOG
+RemoteError = rpc_common.RemoteError
+RPCException = rpc_common.RPCException
+
+zmq_opts = [
+ cfg.StrOpt('rpc_zmq_bind_address', default='*',
+ help='ZeroMQ bind address. Should be a wildcard (*), '
+ 'an ethernet interface, or IP. '
+ 'The "host" option should point or resolve to this '
+ 'address.'),
+
+ # The module.Class to use for matchmaking.
+ cfg.StrOpt(
+ 'rpc_zmq_matchmaker',
+ default=('moniker.openstack.common.rpc.'
+ 'matchmaker.MatchMakerLocalhost'),
+ help='MatchMaker driver',
+ ),
+
+ # The following port is unassigned by IANA as of 2012-05-21
+ cfg.IntOpt('rpc_zmq_port', default=9501,
+ help='ZeroMQ receiver listening port'),
+
+ cfg.IntOpt('rpc_zmq_contexts', default=1,
+ help='Number of ZeroMQ contexts, defaults to 1'),
+
+ cfg.StrOpt('rpc_zmq_ipc_dir', default='/var/run/openstack',
+ help='Directory for holding IPC sockets'),
+
+ cfg.StrOpt('rpc_zmq_host', default=socket.gethostname(),
+ help='Name of this node. Must be a valid hostname, FQDN, or '
+ 'IP address. Must match "host" option, if running Nova.')
+]
+
+
+# These globals are defined in register_opts(conf),
+# a mandatory initialization call
+CONF = None
+ZMQ_CTX = None # ZeroMQ Context, must be global.
+matchmaker = None # memoized matchmaker object
+
+
+def _serialize(data):
+ """
+    Serialization wrapper.
+    We prefer using JSON, but it cannot encode all types.
+    Raises if a developer passes us bad data.
+ """
+ try:
+ return str(jsonutils.dumps(data, ensure_ascii=True))
+ except TypeError:
+ LOG.error(_("JSON serialization failed."))
+ raise
+
+
+def _deserialize(data):
+ """
+ Deserialization wrapper
+ """
+ LOG.debug(_("Deserializing: %s"), data)
+ return jsonutils.loads(data)
+
+
+class ZmqSocket(object):
+ """
+ A tiny wrapper around ZeroMQ to simplify the send/recv protocol
+ and connection management.
+
+ Can be used as a Context (supports the 'with' statement).
+ """
+
+ def __init__(self, addr, zmq_type, bind=True, subscribe=None):
+ self.sock = ZMQ_CTX.socket(zmq_type)
+ self.addr = addr
+ self.type = zmq_type
+ self.subscriptions = []
+
+ # Support failures on sending/receiving on wrong socket type.
+ self.can_recv = zmq_type in (zmq.PULL, zmq.SUB)
+ self.can_send = zmq_type in (zmq.PUSH, zmq.PUB)
+ self.can_sub = zmq_type in (zmq.SUB, )
+
+ # Support list, str, & None for subscribe arg (cast to list)
+ do_sub = {
+ list: subscribe,
+ str: [subscribe],
+ type(None): []
+ }[type(subscribe)]
+
+ for f in do_sub:
+ self.subscribe(f)
+
+ str_data = {'addr': addr, 'type': self.socket_s(),
+ 'subscribe': subscribe, 'bind': bind}
+
+ LOG.debug(_("Connecting to %(addr)s with %(type)s"), str_data)
+ LOG.debug(_("-> Subscribed to %(subscribe)s"), str_data)
+ LOG.debug(_("-> bind: %(bind)s"), str_data)
+
+ try:
+ if bind:
+ self.sock.bind(addr)
+ else:
+ self.sock.connect(addr)
+ except Exception:
+ raise RPCException(_("Could not open socket."))
+
+ def socket_s(self):
+ """Get socket type as string."""
+ t_enum = ('PUSH', 'PULL', 'PUB', 'SUB', 'REP', 'REQ', 'ROUTER',
+ 'DEALER')
+ return dict(map(lambda t: (getattr(zmq, t), t), t_enum))[self.type]
+
+ def subscribe(self, msg_filter):
+ """Subscribe."""
+ if not self.can_sub:
+ raise RPCException("Cannot subscribe on this socket.")
+ LOG.debug(_("Subscribing to %s"), msg_filter)
+
+ try:
+ self.sock.setsockopt(zmq.SUBSCRIBE, msg_filter)
+ except Exception:
+ return
+
+ self.subscriptions.append(msg_filter)
+
+ def unsubscribe(self, msg_filter):
+ """Unsubscribe."""
+ if msg_filter not in self.subscriptions:
+ return
+ self.sock.setsockopt(zmq.UNSUBSCRIBE, msg_filter)
+ self.subscriptions.remove(msg_filter)
+
+ def close(self):
+ if self.sock is None or self.sock.closed:
+ return
+
+ # We must unsubscribe, or we'll leak descriptors.
+ if len(self.subscriptions) > 0:
+ for f in self.subscriptions:
+ try:
+ self.sock.setsockopt(zmq.UNSUBSCRIBE, f)
+ except Exception:
+ pass
+ self.subscriptions = []
+
+ # Linger -1 prevents lost/dropped messages
+ try:
+ self.sock.close(linger=-1)
+ except Exception:
+ pass
+ self.sock = None
+
+ def recv(self):
+ if not self.can_recv:
+ raise RPCException(_("You cannot recv on this socket."))
+ return self.sock.recv_multipart()
+
+ def send(self, data):
+ if not self.can_send:
+ raise RPCException(_("You cannot send on this socket."))
+ self.sock.send_multipart(data)
+
+
+class ZmqClient(object):
+ """Client for ZMQ sockets."""
+
+ def __init__(self, addr, socket_type=zmq.PUSH, bind=False):
+ self.outq = ZmqSocket(addr, socket_type, bind=bind)
+
+ def cast(self, msg_id, topic, data):
+ self.outq.send([str(msg_id), str(topic), str('cast'),
+ _serialize(data)])
+
+ def close(self):
+ self.outq.close()
+
+
+class RpcContext(rpc_common.CommonRpcContext):
+ """Context that supports replying to a rpc.call."""
+ def __init__(self, **kwargs):
+ self.replies = []
+ super(RpcContext, self).__init__(**kwargs)
+
+ def deepcopy(self):
+ values = self.to_dict()
+ values['replies'] = self.replies
+ return self.__class__(**values)
+
+ def reply(self, reply=None, failure=None, ending=False):
+ if ending:
+ return
+ self.replies.append(reply)
+
+ @classmethod
+    def marshal(cls, ctx):
+ ctx_data = ctx.to_dict()
+ return _serialize(ctx_data)
+
+ @classmethod
+    def unmarshal(cls, data):
+ return RpcContext.from_dict(_deserialize(data))
+
+
+class InternalContext(object):
+ """Used by ConsumerBase as a private context for - methods."""
+
+ def __init__(self, proxy):
+ self.proxy = proxy
+ self.msg_waiter = None
+
+ def _get_response(self, ctx, proxy, topic, data):
+ """Process a curried message and cast the result to topic."""
+ LOG.debug(_("Running func with context: %s"), ctx.to_dict())
+ data.setdefault('version', None)
+ data.setdefault('args', [])
+
+ try:
+ result = proxy.dispatch(
+ ctx, data['version'], data['method'], **data['args'])
+ return ConsumerBase.normalize_reply(result, ctx.replies)
+ except greenlet.GreenletExit:
+ # ignore these since they are just from shutdowns
+ pass
+ except Exception:
+ return {'exc':
+ rpc_common.serialize_remote_exception(sys.exc_info())}
+
+ def reply(self, ctx, proxy,
+ msg_id=None, context=None, topic=None, msg=None):
+ """Reply to a casted call."""
+ # Our real method is curried into msg['args']
+
+ child_ctx = RpcContext.unmarshal(msg[0])
+ response = ConsumerBase.normalize_reply(
+ self._get_response(child_ctx, proxy, topic, msg[1]),
+ ctx.replies)
+
+ LOG.debug(_("Sending reply"))
+ cast(CONF, ctx, topic, {
+ 'method': '-process_reply',
+ 'args': {
+ 'msg_id': msg_id,
+ 'response': response
+ }
+ })
+
+
+class ConsumerBase(object):
+ """Base Consumer."""
+
+ def __init__(self):
+ self.private_ctx = InternalContext(None)
+
+ @classmethod
+    def normalize_reply(cls, result, replies):
+ #TODO(ewindisch): re-evaluate and document this method.
+ if isinstance(result, types.GeneratorType):
+ return list(result)
+ elif replies:
+ return replies
+ else:
+ return [result]
+
+ def process(self, style, target, proxy, ctx, data):
+        # Methods starting with '-' are processed internally,
+        # since '-' is not valid in a user-supplied method name.
+ method = data['method']
+
+ # Internal method
+ # uses internal context for safety.
+ if data['method'][0] == '-':
+ # For reply / process_reply
+ method = method[1:]
+ if method == 'reply':
+ self.private_ctx.reply(ctx, proxy, **data['args'])
+ return
+
+ data.setdefault('version', None)
+ data.setdefault('args', [])
+ proxy.dispatch(ctx, data['version'],
+ data['method'], **data['args'])
+
+
+class ZmqBaseReactor(ConsumerBase):
+ """
+ A consumer class implementing a
+ centralized casting broker (PULL-PUSH)
+ for RoundRobin requests.
+ """
+
+ def __init__(self, conf):
+ super(ZmqBaseReactor, self).__init__()
+
+ self.mapping = {}
+ self.proxies = {}
+ self.threads = []
+ self.sockets = []
+ self.subscribe = {}
+
+ self.pool = eventlet.greenpool.GreenPool(conf.rpc_thread_pool_size)
+
+ def register(self, proxy, in_addr, zmq_type_in, out_addr=None,
+ zmq_type_out=None, in_bind=True, out_bind=True,
+ subscribe=None):
+
+ LOG.info(_("Registering reactor"))
+
+ if zmq_type_in not in (zmq.PULL, zmq.SUB):
+ raise RPCException("Bad input socktype")
+
+ # Items push in.
+ inq = ZmqSocket(in_addr, zmq_type_in, bind=in_bind,
+ subscribe=subscribe)
+
+ self.proxies[inq] = proxy
+ self.sockets.append(inq)
+
+ LOG.info(_("In reactor registered"))
+
+ if not out_addr:
+ return
+
+ if zmq_type_out not in (zmq.PUSH, zmq.PUB):
+ raise RPCException("Bad output socktype")
+
+ # Items push out.
+ outq = ZmqSocket(out_addr, zmq_type_out, bind=out_bind)
+
+ self.mapping[inq] = outq
+ self.mapping[outq] = inq
+ self.sockets.append(outq)
+
+ LOG.info(_("Out reactor registered"))
+
+ def consume_in_thread(self):
+ def _consume(sock):
+ LOG.info(_("Consuming socket"))
+ while True:
+ self.consume(sock)
+
+ for k in self.proxies.keys():
+ self.threads.append(
+ self.pool.spawn(_consume, k)
+ )
+
+ def wait(self):
+ for t in self.threads:
+ t.wait()
+
+ def close(self):
+ for s in self.sockets:
+ s.close()
+
+ for t in self.threads:
+ t.kill()
+
+
+class ZmqProxy(ZmqBaseReactor):
+ """
+ A consumer class implementing a
+ topic-based proxy, forwarding to
+ IPC sockets.
+ """
+
+ def __init__(self, conf):
+ super(ZmqProxy, self).__init__(conf)
+
+ self.topic_proxy = {}
+ ipc_dir = CONF.rpc_zmq_ipc_dir
+
+ self.topic_proxy['zmq_replies'] = \
+ ZmqSocket("ipc://%s/zmq_topic_zmq_replies" % (ipc_dir, ),
+ zmq.PUB, bind=True)
+ self.sockets.append(self.topic_proxy['zmq_replies'])
+
+ def consume(self, sock):
+ ipc_dir = CONF.rpc_zmq_ipc_dir
+
+ #TODO(ewindisch): use zero-copy (i.e. references, not copying)
+ data = sock.recv()
+ msg_id, topic, style, in_msg = data
+ topic = topic.split('.', 1)[0]
+
+ LOG.debug(_("CONSUMER GOT %s"), ' '.join(map(pformat, data)))
+
+ # Handle zmq_replies magic
+ if topic.startswith('fanout~'):
+ sock_type = zmq.PUB
+ elif topic.startswith('zmq_replies'):
+ sock_type = zmq.PUB
+ inside = _deserialize(in_msg)
+ msg_id = inside[-1]['args']['msg_id']
+ response = inside[-1]['args']['response']
+ LOG.debug(_("->response->%s"), response)
+ data = [str(msg_id), _serialize(response)]
+ else:
+ sock_type = zmq.PUSH
+
+        if topic not in self.topic_proxy:
+ outq = ZmqSocket("ipc://%s/zmq_topic_%s" % (ipc_dir, topic),
+ sock_type, bind=True)
+ self.topic_proxy[topic] = outq
+ self.sockets.append(outq)
+ LOG.info(_("Created topic proxy: %s"), topic)
+
+ # It takes some time for a pub socket to open,
+ # before we can have any faith in doing a send() to it.
+ if sock_type == zmq.PUB:
+ eventlet.sleep(.5)
+
+ LOG.debug(_("ROUTER RELAY-OUT START %(data)s") % {'data': data})
+ self.topic_proxy[topic].send(data)
+ LOG.debug(_("ROUTER RELAY-OUT SUCCEEDED %(data)s") % {'data': data})
+
+
+class ZmqReactor(ZmqBaseReactor):
+ """
+ A consumer class implementing a
+ consumer for messages. Can also be
+ used as a 1:1 proxy
+ """
+
+ def __init__(self, conf):
+ super(ZmqReactor, self).__init__(conf)
+
+ def consume(self, sock):
+ #TODO(ewindisch): use zero-copy (i.e. references, not copying)
+ data = sock.recv()
+ LOG.debug(_("CONSUMER RECEIVED DATA: %s"), data)
+ if sock in self.mapping:
+ LOG.debug(_("ROUTER RELAY-OUT %(data)s") % {
+ 'data': data})
+ self.mapping[sock].send(data)
+ return
+
+ msg_id, topic, style, in_msg = data
+
+ ctx, request = _deserialize(in_msg)
+ ctx = RpcContext.unmarshal(ctx)
+
+ proxy = self.proxies[sock]
+
+ self.pool.spawn_n(self.process, style, topic,
+ proxy, ctx, request)
+
+
+class Connection(rpc_common.Connection):
+ """Manages connections and threads."""
+
+ def __init__(self, conf):
+ self.reactor = ZmqReactor(conf)
+
+ def create_consumer(self, topic, proxy, fanout=False):
+ # Only consume on the base topic name.
+ topic = topic.split('.', 1)[0]
+
+ LOG.info(_("Create Consumer for topic (%(topic)s)") %
+ {'topic': topic})
+
+ # Subscription scenarios
+ if fanout:
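+            # A string fanout value doubles as the subscription filter;
+            # a bare True subscribes to everything ('').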
+ subscribe = ('', fanout)[type(fanout) == str]
+ sock_type = zmq.SUB
+ topic = 'fanout~' + topic
+ else:
+ sock_type = zmq.PULL
+ subscribe = None
+
+ # Receive messages from (local) proxy
+ inaddr = "ipc://%s/zmq_topic_%s" % \
+ (CONF.rpc_zmq_ipc_dir, topic)
+
+ LOG.debug(_("Consumer is a zmq.%s"),
+ ['PULL', 'SUB'][sock_type == zmq.SUB])
+
+ self.reactor.register(proxy, inaddr, sock_type,
+ subscribe=subscribe, in_bind=False)
+
+ def close(self):
+ self.reactor.close()
+
+ def wait(self):
+ self.reactor.wait()
+
+ def consume_in_thread(self):
+ self.reactor.consume_in_thread()
+
+
+def _cast(addr, context, msg_id, topic, msg, timeout=None):
+ timeout_cast = timeout or CONF.rpc_cast_timeout
+ payload = [RpcContext.marshal(context), msg]
+
+ with Timeout(timeout_cast, exception=rpc_common.Timeout):
+ try:
+ conn = ZmqClient(addr)
+
+ # assumes cast can't return an exception
+ conn.cast(msg_id, topic, payload)
+ except zmq.ZMQError:
+ raise RPCException("Cast failed. ZMQ Socket Exception")
+ finally:
+ if 'conn' in vars():
+ conn.close()
+
+
+def _call(addr, context, msg_id, topic, msg, timeout=None):
+ # timeout_response is how long we wait for a response
+ timeout = timeout or CONF.rpc_response_timeout
+
+ # The msg_id is used to track replies.
+ msg_id = str(uuid.uuid4().hex)
+
+ # Replies always come into the reply service.
+ reply_topic = "zmq_replies.%s" % CONF.rpc_zmq_host
+
+ LOG.debug(_("Creating payload"))
+ # Curry the original request into a reply method.
+ mcontext = RpcContext.marshal(context)
+ payload = {
+ 'method': '-reply',
+ 'args': {
+ 'msg_id': msg_id,
+ 'context': mcontext,
+ 'topic': reply_topic,
+ 'msg': [mcontext, msg]
+ }
+ }
+
+ LOG.debug(_("Creating queue socket for reply waiter"))
+
+ # Messages arriving async.
+ # TODO(ewindisch): have reply consumer with dynamic subscription mgmt
+ with Timeout(timeout, exception=rpc_common.Timeout):
+ try:
+ msg_waiter = ZmqSocket(
+ "ipc://%s/zmq_topic_zmq_replies" % CONF.rpc_zmq_ipc_dir,
+ zmq.SUB, subscribe=msg_id, bind=False
+ )
+
+ LOG.debug(_("Sending cast"))
+ _cast(addr, context, msg_id, topic, payload)
+
+ LOG.debug(_("Cast sent; Waiting reply"))
+ # Blocks until receives reply
+ msg = msg_waiter.recv()
+ LOG.debug(_("Received message: %s"), msg)
+ LOG.debug(_("Unpacking response"))
+ responses = _deserialize(msg[-1])
+ # ZMQError trumps the Timeout error.
+ except zmq.ZMQError:
+ raise RPCException("ZMQ Socket Error")
+ finally:
+ if 'msg_waiter' in vars():
+ msg_waiter.close()
+
+ # It seems we don't need to do all of the following,
+ # but perhaps it would be useful for multicall?
+ # One effect of this is that we're checking all
+ # responses for Exceptions.
+ for resp in responses:
+ if isinstance(resp, types.DictType) and 'exc' in resp:
+ raise rpc_common.deserialize_remote_exception(CONF, resp['exc'])
+
+ return responses[-1]
+
+
+def _multi_send(method, context, topic, msg, timeout=None):
+ """
+    Wraps the sending of messages: dispatches to the matchmaker
+    and sends the message to all relevant hosts.
+ """
+ conf = CONF
+ LOG.debug(_("%(msg)s") % {'msg': ' '.join(map(pformat, (topic, msg)))})
+
+ queues = matchmaker.queues(topic)
+ LOG.debug(_("Sending message(s) to: %s"), queues)
+
+ # Don't stack if we have no matchmaker results
+ if len(queues) == 0:
+ LOG.warn(_("No matchmaker results. Not casting."))
+ # While not strictly a timeout, callers know how to handle
+ # this exception and a timeout isn't too big a lie.
+ raise rpc_common.Timeout, "No match from matchmaker."
+
+ # This supports brokerless fanout (addresses > 1)
+ for queue in queues:
+ (_topic, ip_addr) = queue
+ _addr = "tcp://%s:%s" % (ip_addr, conf.rpc_zmq_port)
+
+        if method.__name__ == '_cast':
+            # Casts are fire-and-forget; spawn one green thread per
+            # matched address so a fanout reaches every host.
+            eventlet.spawn_n(method, _addr, context,
+                             _topic, _topic, msg, timeout)
+        else:
+            return method(_addr, context, _topic, _topic, msg, timeout)
+
+
+def create_connection(conf, new=True):
+ return Connection(conf)
+
+
+def multicall(conf, *args, **kwargs):
+ """Multiple calls."""
+ return _multi_send(_call, *args, **kwargs)
+
+
+def call(conf, *args, **kwargs):
+ """Send a message, expect a response."""
+ data = _multi_send(_call, *args, **kwargs)
+ return data[-1]
+
+
+def cast(conf, *args, **kwargs):
+ """Send a message expecting no reply."""
+ _multi_send(_cast, *args, **kwargs)
+
+
+def fanout_cast(conf, context, topic, msg, **kwargs):
+ """Send a message to all listening and expect no reply."""
+    # NOTE(ewindisch): fanout~ is used because it avoids splitting on '.'
+ # and acts as a non-subtle hint to the matchmaker and ZmqProxy.
+ _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)
+
+
+def notify(conf, context, topic, msg, **kwargs):
+ """
+ Send notification event.
+ Notifications are sent to topic-priority.
+ This differs from the AMQP drivers which send to topic.priority.
+ """
+ # NOTE(ewindisch): dot-priority in rpc notifier does not
+ # work with our assumptions.
+    topic = topic.replace('.', '-')
+ cast(conf, context, topic, msg, **kwargs)
+
+
+def cleanup():
+ """Clean up resources in use by implementation."""
+ global ZMQ_CTX
+ global matchmaker
+ matchmaker = None
+ ZMQ_CTX.term()
+ ZMQ_CTX = None
+
+
+def register_opts(conf):
+ """Registration of options for this driver."""
+ #NOTE(ewindisch): ZMQ_CTX and matchmaker
+ # are initialized here as this is as good
+ # an initialization method as any.
+
+ # We memoize through these globals
+ global ZMQ_CTX
+ global matchmaker
+ global CONF
+
+ if not CONF:
+ conf.register_opts(zmq_opts)
+ CONF = conf
+ # Don't re-set, if this method is called twice.
+ if not ZMQ_CTX:
+ ZMQ_CTX = zmq.Context(conf.rpc_zmq_contexts)
+ if not matchmaker:
+ # rpc_zmq_matchmaker should be set to a 'module.Class'
+ mm_path = conf.rpc_zmq_matchmaker.split('.')
+ mm_module = '.'.join(mm_path[:-1])
+ mm_class = mm_path[-1]
+
+ # Only initialize a class.
+ if mm_path[-1][0] not in string.ascii_uppercase:
+ LOG.error(_("Matchmaker could not be loaded.\n"
+ "rpc_zmq_matchmaker is not a class."))
+ raise RPCException(_("Error loading Matchmaker."))
+
+ mm_impl = importutils.import_module(mm_module)
+ mm_constructor = getattr(mm_impl, mm_class)
+ matchmaker = mm_constructor()
+
+
+register_opts(cfg.CONF)
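
On the wire, every message in this driver is a four-part multipart frame of
[msg_id, topic, style, serialized payload] (see ZmqClient.cast() and the
reactors' consume() methods). A standalone sketch of the framing, using the
stdlib json module in place of jsonutils; the id and topic are made up:

    import json

    mcontext = json.dumps({"user": "demo"})  # RpcContext.marshal() analogue
    msg = {"method": "ping", "args": {}}

    frames = ["some-msg-id", "central", "cast",
              json.dumps([mcontext, msg], ensure_ascii=True)]
    # ZmqSocket.send() pushes these with sock.send_multipart(frames);
    # the receiving reactor unpacks them the same way:
    msg_id, topic, style, in_msg = frames
    ctx, request = json.loads(in_msg)
    print(request["method"])  # -> ping
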
diff --git a/moniker/openstack/common/rpc/matchmaker.py b/moniker/openstack/common/rpc/matchmaker.py
new file mode 100644
index 00000000..2d86032f
--- /dev/null
+++ b/moniker/openstack/common/rpc/matchmaker.py
@@ -0,0 +1,258 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Cloudscaling Group, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The MatchMaker classes should accept a Topic or Fanout exchange key and
+return keys for direct exchanges, per (approximate) AMQP parlance.
+"""
+
+import contextlib
+import itertools
+import json
+import logging
+
+from moniker.openstack.common import cfg
+from moniker.openstack.common.gettextutils import _
+
+
+matchmaker_opts = [
+ # Matchmaker ring file
+ cfg.StrOpt('matchmaker_ringfile',
+ default='/etc/nova/matchmaker_ring.json',
+ help='Matchmaker ring file (JSON)'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(matchmaker_opts)
+LOG = logging.getLogger(__name__)
+contextmanager = contextlib.contextmanager
+
+
+class MatchMakerException(Exception):
+ """Signified a match could not be found."""
+ message = _("Match not found by MatchMaker.")
+
+
+class Exchange(object):
+ """
+ Implements lookups.
+ Subclass this to support hashtables, dns, etc.
+ """
+ def __init__(self):
+ pass
+
+ def run(self, key):
+ raise NotImplementedError()
+
+
+class Binding(object):
+ """
+ A binding on which to perform a lookup.
+ """
+ def __init__(self):
+ pass
+
+ def test(self, key):
+ raise NotImplementedError()
+
+
+class MatchMakerBase(object):
+ """Match Maker Base Class."""
+
+ def __init__(self):
+ # Array of tuples. Index [2] toggles negation, [3] is last-if-true
+ self.bindings = []
+
+ def add_binding(self, binding, rule, last=True):
+ self.bindings.append((binding, rule, False, last))
+
+ #NOTE(ewindisch): kept the following method in case we implement the
+ # underlying support.
+ #def add_negate_binding(self, binding, rule, last=True):
+ # self.bindings.append((binding, rule, True, last))
+
+ def queues(self, key):
+ workers = []
+
+ # bit is for negate bindings - if we choose to implement it.
+ # last stops processing rules if this matches.
+ for (binding, exchange, bit, last) in self.bindings:
+ if binding.test(key):
+ workers.extend(exchange.run(key))
+
+ # Support last.
+ if last:
+ return workers
+ return workers
+
+
+class DirectBinding(Binding):
+ """
+ Specifies a host in the key via a '.' character
+ Although dots are used in the key, the behavior here is
+ that it maps directly to a host, thus direct.
+ """
+ def test(self, key):
+ if '.' in key:
+ return True
+ return False
+
+
+class TopicBinding(Binding):
+ """
+    Matches a 'bare' key without dots.
+    AMQP generally considers topic exchanges to be those *with* dots,
+    but we deviate here in terminology, as the behavior here matches
+    that of a topic exchange (whereas where there are dots, behavior
+    matches that of a direct exchange).
+ """
+ def test(self, key):
+ if '.' not in key:
+ return True
+ return False
+
+
+class FanoutBinding(Binding):
+ """Match on fanout keys, where key starts with 'fanout.' string."""
+ def test(self, key):
+ if key.startswith('fanout~'):
+ return True
+ return False
+
+
+class StubExchange(Exchange):
+ """Exchange that does nothing."""
+ def run(self, key):
+ return [(key, None)]
+
+
+class RingExchange(Exchange):
+ """
+ Match Maker where hosts are loaded from a static file containing
+ a hashmap (JSON formatted).
+
+    __init__ takes an optional ring dictionary argument, otherwise
+    loads the ringfile from CONF.matchmaker_ringfile.
+ """
+ def __init__(self, ring=None):
+ super(RingExchange, self).__init__()
+
+ if ring:
+ self.ring = ring
+ else:
+            with open(CONF.matchmaker_ringfile, 'r') as fh:
+                self.ring = json.load(fh)
+
+ self.ring0 = {}
+ for k in self.ring.keys():
+ self.ring0[k] = itertools.cycle(self.ring[k])
+
+ def _ring_has(self, key):
+ if key in self.ring0:
+ return True
+ return False
+
+
+class RoundRobinRingExchange(RingExchange):
+ """A Topic Exchange based on a hashmap."""
+ def __init__(self, ring=None):
+ super(RoundRobinRingExchange, self).__init__(ring)
+
+ def run(self, key):
+ if not self._ring_has(key):
+ LOG.warn(
+ _("No key defining hosts for topic '%s', "
+ "see ringfile") % (key, )
+ )
+ return []
+ host = next(self.ring0[key])
+ return [(key + '.' + host, host)]
+
+
+class FanoutRingExchange(RingExchange):
+ """Fanout Exchange based on a hashmap."""
+ def __init__(self, ring=None):
+ super(FanoutRingExchange, self).__init__(ring)
+
+ def run(self, key):
+ # Assume starts with "fanout~", strip it for lookup.
+ nkey = key.split('fanout~')[1:][0]
+ if not self._ring_has(nkey):
+ LOG.warn(
+ _("No key defining hosts for topic '%s', "
+ "see ringfile") % (nkey, )
+ )
+ return []
+ return map(lambda x: (key + '.' + x, x), self.ring[nkey])
+
+
+class LocalhostExchange(Exchange):
+ """Exchange where all direct topics are local."""
+ def __init__(self):
+        super(LocalhostExchange, self).__init__()
+
+ def run(self, key):
+ return [(key.split('.')[0] + '.localhost', 'localhost')]
+
+
+class DirectExchange(Exchange):
+ """
+ Exchange where all topic keys are split, sending to second half.
+ i.e. "compute.host" sends a message to "compute" running on "host"
+ """
+ def __init__(self):
+        super(DirectExchange, self).__init__()
+
+ def run(self, key):
+ b, e = key.split('.', 1)
+ return [(b, e)]
+
+
+class MatchMakerRing(MatchMakerBase):
+ """
+ Match Maker where hosts are loaded from a static hashmap.
+ """
+ def __init__(self, ring=None):
+ super(MatchMakerRing, self).__init__()
+ self.add_binding(FanoutBinding(), FanoutRingExchange(ring))
+ self.add_binding(DirectBinding(), DirectExchange())
+ self.add_binding(TopicBinding(), RoundRobinRingExchange(ring))
+
+
+class MatchMakerLocalhost(MatchMakerBase):
+ """
+ Match Maker where all bare topics resolve to localhost.
+ Useful for testing.
+ """
+ def __init__(self):
+ super(MatchMakerLocalhost, self).__init__()
+ self.add_binding(FanoutBinding(), LocalhostExchange())
+ self.add_binding(DirectBinding(), DirectExchange())
+ self.add_binding(TopicBinding(), LocalhostExchange())
+
+
+class MatchMakerStub(MatchMakerBase):
+ """
+ Match Maker where topics are untouched.
+ Useful for testing, or for AMQP/brokered queues.
+    Will not work where knowledge of hosts is required (i.e. zeromq).
+ """
+ def __init__(self):
+        super(MatchMakerStub, self).__init__()
+
+ self.add_binding(FanoutBinding(), StubExchange())
+ self.add_binding(DirectBinding(), StubExchange())
+ self.add_binding(TopicBinding(), StubExchange())
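
The effect of these bindings is easiest to see through a few lookups. A
hedged sketch, assuming the module imports cleanly on its own:

    from moniker.openstack.common.rpc import matchmaker

    mm = matchmaker.MatchMakerLocalhost()
    print(mm.queues('central'))         # [('central.localhost', 'localhost')]
    print(mm.queues('central.host1'))   # [('central', 'host1')] - direct

    ring = {'central': ['host1', 'host2']}
    rr = matchmaker.MatchMakerRing(ring=ring)
    print(rr.queues('central'))         # [('central.host1', 'host1')]
    print(rr.queues('central'))         # [('central.host2', 'host2')]
    print(rr.queues('fanout~central'))  # both hosts at once
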
diff --git a/moniker/openstack/common/rpc/proxy.py b/moniker/openstack/common/rpc/proxy.py
new file mode 100644
index 00000000..83e17cea
--- /dev/null
+++ b/moniker/openstack/common/rpc/proxy.py
@@ -0,0 +1,165 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A helper class for proxy objects to remote APIs.
+
+For more information about rpc API version numbers, see:
+ rpc/dispatcher.py
+"""
+
+
+from moniker.openstack.common import rpc
+
+
+class RpcProxy(object):
+ """A helper class for rpc clients.
+
+ This class is a wrapper around the RPC client API. It allows you to
+ specify the topic and API version in a single place. This is intended to
+ be used as a base class for a class that implements the client side of an
+ rpc API.
+ """
+
+ def __init__(self, topic, default_version):
+ """Initialize an RpcProxy.
+
+ :param topic: The topic to use for all messages.
+ :param default_version: The default API version to request in all
+ outgoing messages. This can be overridden on a per-message
+ basis.
+ """
+ self.topic = topic
+ self.default_version = default_version
+ super(RpcProxy, self).__init__()
+
+ def _set_version(self, msg, vers):
+ """Helper method to set the version in a message.
+
+ :param msg: The message having a version added to it.
+ :param vers: The version number to add to the message.
+ """
+ msg['version'] = vers if vers else self.default_version
+
+ def _get_topic(self, topic):
+ """Return the topic to use for a message."""
+ return topic if topic else self.topic
+
+ @staticmethod
+ def make_msg(method, **kwargs):
+ return {'method': method, 'args': kwargs}
+
+ def call(self, context, msg, topic=None, version=None, timeout=None):
+ """rpc.call() a remote method.
+
+ :param context: The request context
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param timeout: (Optional) A timeout to use when waiting for the
+ response. If no timeout is specified, a default timeout will be
+ used that is usually sufficient.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: The return value from the remote method.
+ """
+ self._set_version(msg, version)
+ return rpc.call(context, self._get_topic(topic), msg, timeout)
+
+ def multicall(self, context, msg, topic=None, version=None, timeout=None):
+ """rpc.multicall() a remote method.
+
+ :param context: The request context
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param timeout: (Optional) A timeout to use when waiting for the
+ response. If no timeout is specified, a default timeout will be
+ used that is usually sufficient.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: An iterator that lets you process each of the returned values
+ from the remote method as they arrive.
+ """
+ self._set_version(msg, version)
+ return rpc.multicall(context, self._get_topic(topic), msg, timeout)
+
+ def cast(self, context, msg, topic=None, version=None):
+ """rpc.cast() a remote method.
+
+ :param context: The request context
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: None. rpc.cast() does not wait on any return value from the
+ remote method.
+ """
+ self._set_version(msg, version)
+ rpc.cast(context, self._get_topic(topic), msg)
+
+ def fanout_cast(self, context, msg, topic=None, version=None):
+ """rpc.fanout_cast() a remote method.
+
+ :param context: The request context
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: None. rpc.fanout_cast() does not wait on any return value
+ from the remote method.
+ """
+ self._set_version(msg, version)
+ rpc.fanout_cast(context, self._get_topic(topic), msg)
+
+ def cast_to_server(self, context, server_params, msg, topic=None,
+ version=None):
+ """rpc.cast_to_server() a remote method.
+
+ :param context: The request context
+ :param server_params: Server parameters. See rpc.cast_to_server() for
+ details.
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: None. rpc.cast_to_server() does not wait on any
+ return values.
+ """
+ self._set_version(msg, version)
+ rpc.cast_to_server(context, server_params, self._get_topic(topic), msg)
+
+ def fanout_cast_to_server(self, context, server_params, msg, topic=None,
+ version=None):
+ """rpc.fanout_cast_to_server() a remote method.
+
+ :param context: The request context
+ :param server_params: Server parameters. See rpc.cast_to_server() for
+ details.
+ :param msg: The message to send, including the method and args.
+ :param topic: Override the topic for this message.
+ :param version: (Optional) Override the requested API version in this
+ message.
+
+ :returns: None. rpc.fanout_cast_to_server() does not wait on any
+ return values.
+ """
+ self._set_version(msg, version)
+ rpc.fanout_cast_to_server(context, server_params,
+ self._get_topic(topic), msg)
diff --git a/moniker/openstack/common/service.py b/moniker/openstack/common/service.py
new file mode 100644
index 00000000..26309197
--- /dev/null
+++ b/moniker/openstack/common/service.py
@@ -0,0 +1,336 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Generic Node base class for all workers that run on hosts."""
+
+import errno
+import os
+import random
+import signal
+import sys
+import time
+
+import eventlet
+import greenlet
+
+from moniker.openstack.common import log as logging
+from moniker.openstack.common import threadgroup
+from moniker.openstack.common.gettextutils import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Launcher(object):
+ """Launch one or more services and wait for them to complete."""
+
+ def __init__(self):
+ """Initialize the service launcher.
+
+ :returns: None
+
+ """
+ self._services = []
+
+ @staticmethod
+ def run_service(service):
+ """Start and wait for a service to finish.
+
+ :param service: service to run and wait for.
+ :returns: None
+
+ """
+ service.start()
+ service.wait()
+
+ def launch_service(self, service):
+ """Load and start the given service.
+
+ :param service: The service you would like to start.
+ :returns: None
+
+ """
+ gt = eventlet.spawn(self.run_service, service)
+ self._services.append(gt)
+
+ def stop(self):
+ """Stop all services which are currently running.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ service.kill()
+
+ def wait(self):
+        """Wait until all services have been stopped, then return.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ try:
+ service.wait()
+ except greenlet.GreenletExit:
+ pass
+
+
+class SignalExit(SystemExit):
+ def __init__(self, signo, exccode=1):
+ super(SignalExit, self).__init__(exccode)
+ self.signo = signo
+
+
+class ServiceLauncher(Launcher):
+ def _handle_signal(self, signo, frame):
+ # Allow the process to be killed again and die from natural causes
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+ raise SignalExit(signo)
+
+ def wait(self):
+ signal.signal(signal.SIGTERM, self._handle_signal)
+ signal.signal(signal.SIGINT, self._handle_signal)
+
+ status = None
+ try:
+ super(ServiceLauncher, self).wait()
+ except SignalExit as exc:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[exc.signo]
+ LOG.info(_('Caught %s, exiting'), signame)
+ status = exc.code
+ except SystemExit as exc:
+ status = exc.code
+ finally:
+ self.stop()
+ return status
+
+
+class ServiceWrapper(object):
+ def __init__(self, service, workers):
+ self.service = service
+ self.workers = workers
+ self.children = set()
+ self.forktimes = []
+
+
+class ProcessLauncher(object):
+ def __init__(self):
+ self.children = {}
+ self.sigcaught = None
+ self.running = True
+ rfd, self.writepipe = os.pipe()
+ self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
+
+ signal.signal(signal.SIGTERM, self._handle_signal)
+ signal.signal(signal.SIGINT, self._handle_signal)
+
+ def _handle_signal(self, signo, frame):
+ self.sigcaught = signo
+ self.running = False
+
+ # Allow the process to be killed again and die from natural causes
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+
+ def _pipe_watcher(self):
+ # This will block until the write end is closed when the parent
+ # dies unexpectedly
+ self.readpipe.read()
+
+ LOG.info(_('Parent process has died unexpectedly, exiting'))
+
+ sys.exit(1)
+
+ def _child_process(self, service):
+ # Setup child signal handlers differently
+ def _sigterm(*args):
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ raise SignalExit(signal.SIGTERM)
+
+ signal.signal(signal.SIGTERM, _sigterm)
+ # Block SIGINT and let the parent send us a SIGTERM
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ # Reopen the eventlet hub to make sure we don't share an epoll
+ # fd with parent and/or siblings, which would be bad
+ eventlet.hubs.use_hub()
+
+ # Close write to ensure only parent has it open
+ os.close(self.writepipe)
+ # Create greenthread to watch for parent to close pipe
+ eventlet.spawn(self._pipe_watcher)
+
+ # Reseed random number generator
+ random.seed()
+
+ launcher = Launcher()
+ launcher.run_service(service)
+
+ def _start_child(self, wrap):
+ if len(wrap.forktimes) > wrap.workers:
+            # Limit ourselves to one fork per second, averaged over a
+            # window of (workers) seconds. This lets all workers start
+            # quickly at launch while preventing a tight respawn loop
+            # when children die immediately after forking.
+ if time.time() - wrap.forktimes[0] < wrap.workers:
+ LOG.info(_('Forking too fast, sleeping'))
+ time.sleep(1)
+
+ wrap.forktimes.pop(0)
+
+ wrap.forktimes.append(time.time())
+
+ pid = os.fork()
+ if pid == 0:
+ # NOTE(johannes): All exceptions are caught to ensure this
+ # doesn't fallback into the loop spawning children. It would
+ # be bad for a child to spawn more children.
+ status = 0
+ try:
+ self._child_process(wrap.service)
+ except SignalExit as exc:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[exc.signo]
+ LOG.info(_('Caught %s, exiting'), signame)
+ status = exc.code
+ except SystemExit as exc:
+ status = exc.code
+ except BaseException:
+ LOG.exception(_('Unhandled exception'))
+ status = 2
+ finally:
+ wrap.service.stop()
+
+ os._exit(status)
+
+ LOG.info(_('Started child %d'), pid)
+
+ wrap.children.add(pid)
+ self.children[pid] = wrap
+
+ return pid
+
+ def launch_service(self, service, workers=1):
+ wrap = ServiceWrapper(service, workers)
+
+ LOG.info(_('Starting %d workers'), wrap.workers)
+ while self.running and len(wrap.children) < wrap.workers:
+ self._start_child(wrap)
+
+ def _wait_child(self):
+ try:
+ pid, status = os.wait()
+ except OSError as exc:
+ if exc.errno not in (errno.EINTR, errno.ECHILD):
+ raise
+ return None
+
+ if os.WIFSIGNALED(status):
+ sig = os.WTERMSIG(status)
+ LOG.info(_('Child %(pid)d killed by signal %(sig)d'), locals())
+ else:
+ code = os.WEXITSTATUS(status)
+ LOG.info(_('Child %(pid)d exited with status %(code)d'), locals())
+
+ if pid not in self.children:
+ LOG.warning(_('pid %d not in child list'), pid)
+ return None
+
+ wrap = self.children.pop(pid)
+ wrap.children.remove(pid)
+ return wrap
+
+ def wait(self):
+ """Loop waiting on children to die and respawning as necessary"""
+ while self.running:
+ wrap = self._wait_child()
+ if not wrap:
+ continue
+
+ while self.running and len(wrap.children) < wrap.workers:
+ self._start_child(wrap)
+
+ if self.sigcaught:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[self.sigcaught]
+ LOG.info(_('Caught %s, stopping children'), signame)
+
+ for pid in self.children:
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as exc:
+ if exc.errno != errno.ESRCH:
+ raise
+
+ # Wait for children to die
+ if self.children:
+ LOG.info(_('Waiting on %d children to exit'), len(self.children))
+ while self.children:
+ self._wait_child()
+
+
+class Service(object):
+ """Service object for binaries running on hosts.
+
+    A service takes a manager and periodically runs tasks on the manager.
+    """
+
+ def __init__(self, host, manager,
+ periodic_interval=None,
+ periodic_fuzzy_delay=None):
+ self.host = host
+ self.manager = manager
+ self.periodic_interval = periodic_interval
+ self.periodic_fuzzy_delay = periodic_fuzzy_delay
+ self.tg = threadgroup.ThreadGroup('service')
+ self.periodic_args = []
+ self.periodic_kwargs = {}
+
+ def start(self):
+ if self.manager:
+ self.manager.init_host()
+
+ if self.periodic_interval and self.manager:
+ if self.periodic_fuzzy_delay:
+ initial_delay = random.randint(0, self.periodic_fuzzy_delay)
+ else:
+ initial_delay = 0
+ self.tg.add_timer(self.periodic_interval,
+ self.manager.run_periodic_tasks,
+ initial_delay,
+ *self.periodic_args,
+ **self.periodic_kwargs)
+
+ def stop(self):
+ self.tg.stop()
+
+ def wait(self):
+ self.tg.wait()
+
+
+def launch(service, workers=None):
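+    """Launch a service, either in this process or as forked workers.
+
+    Illustrative usage (the host name and manager object are
+    placeholders):
+
+        service = Service('hostname', manager, periodic_interval=60)
+        launcher = launch(service, workers=4)
+        launcher.wait()
+    """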
+ if workers:
+ launcher = ProcessLauncher()
+ launcher.launch_service(service, workers=workers)
+ else:
+ launcher = ServiceLauncher()
+ launcher.launch_service(service)
+ return launcher
diff --git a/moniker/openstack/common/threadgroup.py b/moniker/openstack/common/threadgroup.py
new file mode 100644
index 00000000..7b66ba48
--- /dev/null
+++ b/moniker/openstack/common/threadgroup.py
@@ -0,0 +1,118 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenpool
+from eventlet import greenthread
+
+from moniker.openstack.common import loopingcall
+from moniker.openstack.common.gettextutils import _
+from moniker.openstack.common import log as logging
+
+
+LOG = logging.getLogger(__name__)
+
+
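+# Callback attached via GreenThread.link(); eventlet calls it with the
+# finished greenthread plus the extra link() arguments, here (group, thread).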
+def _thread_done(gt, *args, **kwargs):
+ args[0].thread_done(args[1])
+
+
+class Thread(object):
+    """
+    Wrapper around a greenthread that holds a reference to the
+    ThreadGroup. The Thread will notify the ThreadGroup when it
+    finishes, so that it can be removed from the group's list of
+    threads.
+    """
+ def __init__(self, name, thread, group):
+ self.name = name
+ self.thread = thread
+ self.thread.link(_thread_done, group, self)
+
+ def stop(self):
+ self.thread.cancel()
+
+ def wait(self):
+ return self.thread.wait()
+
+
+class ThreadGroup(object):
+ """
+ The point of this class is to:
+ - keep track of timers and greenthreads (making it easier to stop them
+ when need be).
+ - provide an easy API to add timers.
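+
+    Illustrative usage ('handle_client' and 'report_stats' are
+    hypothetical callables):
+
+        tg = ThreadGroup('example')
+        tg.add_thread(handle_client, sock)
+        tg.add_timer(60, report_stats)
+        ...
+        tg.stop()
+        tg.wait()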
+ """
+ def __init__(self, name, thread_pool_size=10):
+ self.name = name
+ self.pool = greenpool.GreenPool(thread_pool_size)
+ self.threads = []
+ self.timers = []
+
+ def add_timer(self, interval, callback, initial_delay=None,
+ *args, **kwargs):
+ pulse = loopingcall.LoopingCall(callback, *args, **kwargs)
+ pulse.start(interval=interval,
+ initial_delay=initial_delay)
+ self.timers.append(pulse)
+
+ def add_thread(self, callback, *args, **kwargs):
+ gt = self.pool.spawn(callback, *args, **kwargs)
+ th = Thread(callback.__name__, gt, self)
+ self.threads.append(th)
+
+ def thread_done(self, thread):
+ try:
+ thread.wait()
+ except Exception as ex:
+ LOG.exception(ex)
+ finally:
+ self.threads.remove(thread)
+
+ def stop(self):
+ current = greenthread.getcurrent()
+ for x in self.threads:
+ if x is current:
+ # don't kill the current thread.
+ continue
+ try:
+ x.stop()
+ except Exception as ex:
+ LOG.exception(ex)
+
+ for x in self.timers:
+ try:
+ x.stop()
+ except Exception as ex:
+ LOG.exception(ex)
+ self.timers = []
+
+ def wait(self):
+ for x in self.timers:
+ try:
+ x.wait()
+ except Exception as ex:
+ LOG.exception(ex)
+ current = greenthread.getcurrent()
+ for x in self.threads:
+ if x is current:
+ continue
+ try:
+ x.wait()
+ except Exception as ex:
+ LOG.exception(ex)
diff --git a/moniker/openstack/common/timeutils.py b/moniker/openstack/common/timeutils.py
new file mode 100644
index 00000000..c4f6cf04
--- /dev/null
+++ b/moniker/openstack/common/timeutils.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Time related utilities and helper functions.
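+
+Illustrative usage, e.g. freezing and advancing the clock in tests:
+
+    set_time_override(datetime.datetime(2012, 9, 25))
+    advance_time_seconds(60)
+    # utcnow() now returns datetime.datetime(2012, 9, 25, 0, 1)
+    clear_time_override()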
+"""
+
+import calendar
+import datetime
+
+import iso8601
+
+
+TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
+PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
+
+
+def isotime(at=None):
+ """Stringify time in ISO 8601 format"""
+ if not at:
+ at = utcnow()
+    st = at.strftime(TIME_FORMAT)
+    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+    st += ('Z' if tz == 'UTC' else tz)
+    return st
+
+
+def parse_isotime(timestr):
+ """Parse time from ISO 8601 format"""
+ try:
+ return iso8601.parse_date(timestr)
+ except iso8601.ParseError as e:
+ raise ValueError(e.message)
+ except TypeError as e:
+ raise ValueError(e.message)
+
+
+def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
+    """Return the given time (defaulting to utcnow) as a formatted string."""
+ if not at:
+ at = utcnow()
+ return at.strftime(fmt)
+
+
+def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
+ """Turn a formatted time back into a datetime."""
+ return datetime.datetime.strptime(timestr, fmt)
+
+
+def normalize_time(timestamp):
+ """Normalize time in arbitrary timezone to UTC"""
+ offset = timestamp.utcoffset()
+ return timestamp.replace(tzinfo=None) - offset if offset else timestamp
+
+
+def is_older_than(before, seconds):
+ """Return True if before is older than seconds."""
+ return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
+def utcnow_ts():
+ """Timestamp version of our utcnow function."""
+ return calendar.timegm(utcnow().timetuple())
+
+
+def utcnow():
+ """Overridable version of utils.utcnow."""
+ if utcnow.override_time:
+ return utcnow.override_time
+ return datetime.datetime.utcnow()
+
+
+utcnow.override_time = None
+
+
+def set_time_override(override_time=None):
+    """Override utils.utcnow to return a constant time (by default, the
+    time at which the override is set)."""
+    utcnow.override_time = override_time or datetime.datetime.utcnow()
+
+
+def advance_time_delta(timedelta):
+ """Advance overridden time using a datetime.timedelta."""
+    assert utcnow.override_time is not None
+ utcnow.override_time += timedelta
+
+
+def advance_time_seconds(seconds):
+ """Advance overridden time by seconds."""
+ advance_time_delta(datetime.timedelta(0, seconds))
+
+
+def clear_time_override():
+ """Remove the overridden time."""
+ utcnow.override_time = None
+
+
+def marshall_now(now=None):
+ """Make an rpc-safe datetime with microseconds.
+
+    Note: tzinfo is stripped, but not required for relative times.
+    """
+ if not now:
+ now = utcnow()
+ return dict(day=now.day, month=now.month, year=now.year, hour=now.hour,
+ minute=now.minute, second=now.second,
+ microsecond=now.microsecond)
+
+
+def unmarshall_time(tyme):
+ """Unmarshall a datetime dict."""
+ return datetime.datetime(day=tyme['day'], month=tyme['month'],
+ year=tyme['year'], hour=tyme['hour'], minute=tyme['minute'],
+ second=tyme['second'], microsecond=tyme['microsecond'])
diff --git a/moniker/schema.py b/moniker/schema.py
new file mode 100644
index 00000000..ab6d2d9b
--- /dev/null
+++ b/moniker/schema.py
@@ -0,0 +1,110 @@
+# Copyright 2012 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# NOTE(kiall): Copied from Glance
+import jsonschema
+
+from moniker import exceptions
+from moniker.openstack.common.gettextutils import _
+
+
+class Schema(object):
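+    """Describes the structure of an API resource, and validates and
+    filters objects against that structure.
+
+    Illustrative usage:
+
+        schema = Schema('server', {'name': {'type': 'string'}})
+        schema.validate({'name': 'ns1.example.org.'})
+        schema.filter({'name': 'ns1.example.org.', 'junk': 1})
+        # -> {'name': 'ns1.example.org.'}
+    """
+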
+ def __init__(self, name, properties=None, links=None):
+ self.name = name
+ if properties is None:
+ properties = {}
+ self.properties = properties
+ self.links = links
+
+ def validate(self, obj):
+ try:
+ jsonschema.validate(obj, self.raw())
+ except jsonschema.ValidationError as e:
+ raise exceptions.InvalidObject("Provided object does not match "
+ "schema '%s': %s"
+ % (self.name, str(e)))
+
+ def filter(self, obj):
+ filtered = {}
+ for key, value in obj.iteritems():
+ if self._filter_func(self.properties, key) and value is not None:
+ filtered[key] = value
+ return filtered
+
+ @staticmethod
+ def _filter_func(properties, key):
+ return key in properties
+
+ def merge_properties(self, properties):
+ # Ensure custom props aren't attempting to override base props
+ original_keys = set(self.properties.keys())
+ new_keys = set(properties.keys())
+ intersecting_keys = original_keys.intersection(new_keys)
+ conflicting_keys = [k for k in intersecting_keys
+ if self.properties[k] != properties[k]]
+ if len(conflicting_keys) > 0:
+ props = ', '.join(conflicting_keys)
+ reason = _("custom properties (%(props)s) conflict "
+ "with base properties")
+            raise exceptions.SchemaLoadError(reason=reason % {'props': props})
+
+ self.properties.update(properties)
+
+ def raw(self):
+ raw = {
+ 'name': self.name,
+ 'properties': self.properties,
+ 'additionalProperties': False,
+ }
+ if self.links:
+ raw['links'] = self.links
+ return raw
+
+
+class PermissiveSchema(Schema):
+ @staticmethod
+ def _filter_func(properties, key):
+ return True
+
+ def raw(self):
+ raw = super(PermissiveSchema, self).raw()
+ raw['additionalProperties'] = {'type': 'string'}
+ return raw
+
+
+class CollectionSchema(object):
+ def __init__(self, name, item_schema):
+ self.name = name
+ self.item_schema = item_schema
+
+ def filter(self, obj):
+ return [self.item_schema.filter(o) for o in obj]
+
+ def raw(self):
+ return {
+ 'name': self.name,
+ 'properties': {
+ self.name: {
+ 'type': 'array',
+ 'items': self.item_schema.raw(),
+ },
+ 'first': {'type': 'string'},
+ 'next': {'type': 'string'},
+ 'schema': {'type': 'string'},
+ },
+ 'links': [
+ {'rel': 'first', 'href': '{first}'},
+ {'rel': 'next', 'href': '{next}'},
+ {'rel': 'describedby', 'href': '{schema}'},
+ ],
+ }
diff --git a/moniker/schemas.py b/moniker/schemas.py
new file mode 100644
index 00000000..7433dab0
--- /dev/null
+++ b/moniker/schemas.py
@@ -0,0 +1,145 @@
+from moniker.schema import Schema, CollectionSchema
+
+SERVER_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Server identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'Server DNS name',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'ipv4': {
+ 'type': 'string',
+ 'description': 'IPv4 address of server',
+ 'maxLength': 15,
+ 'required': True,
+ },
+ 'ipv6': {
+ 'type': 'string',
+ 'description': 'IPv6 address of server',
+        # Long enough for a full IPv6 address, including IPv4-mapped forms
+        'maxLength': 45,
+ },
+ 'created_at': {
+ 'type': 'string',
+ 'description': 'Date and time of server creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+ 'description': 'Date and time of last server update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+SERVER_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'describedby', 'href': '{schema}'},
+]
+
+DOMAIN_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Domain identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'Domain name',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'serial': {
+ 'type': 'integer',
+ 'description': 'Zone serial number',
+ },
+ 'ttl': {
+ 'type': 'integer',
+ 'description': 'Time to live',
+ },
+ 'created_at': {
+ 'type': 'string',
+        'description': 'Date and time of domain creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+        'description': 'Date and time of last domain update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'records': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+DOMAIN_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'records', 'href': '{records}', 'method': 'GET'},
+ {'rel': 'describedby', 'href': '{schema}', 'method': 'GET'},
+]
+
+RECORD_PROPERTIES = {
+ 'id': {
+ 'type': 'string',
+ 'description': 'Record identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'domain_id': {
+ 'type': 'string',
+ 'description': 'Domain identifier',
+ 'pattern': ('^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
+ '-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$'),
+ },
+ 'name': {
+ 'type': 'string',
+ 'description': 'DNS Record Name',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'data': {
+ 'type': 'string',
+ 'description': 'DNS Record Value',
+ 'maxLength': 255,
+ 'required': True,
+ },
+ 'ttl': {
+ 'type': 'integer',
+ 'description': 'Time to live.',
+ },
+ 'created_at': {
+ 'type': 'string',
+        'description': 'Date and time of record creation',
+ 'format': 'date-time',
+ },
+ 'updated_at': {
+ 'type': 'string',
+        'description': 'Date and time of last record update',
+ 'format': 'date-time',
+ },
+ 'self': {'type': 'string'},
+ 'domain': {'type': 'string'},
+ 'schema': {'type': 'string'},
+}
+
+RECORD_LINKS = [
+ {'rel': 'self', 'href': '{self}'},
+ {'rel': 'domain', 'href': '{domain}'},
+ {'rel': 'describedby', 'href': '{schema}'},
+]
+
+server_schema = Schema('server', SERVER_PROPERTIES, SERVER_LINKS)
+servers_schema = CollectionSchema('servers', server_schema)
+
+domain_schema = Schema('domain', DOMAIN_PROPERTIES, DOMAIN_LINKS)
+domains_schema = CollectionSchema('domains', domain_schema)
+
+record_schema = Schema('record', RECORD_PROPERTIES, RECORD_LINKS)
+records_schema = CollectionSchema('records', record_schema)
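+
+# Illustrative usage: an API handler might validate an incoming request
+# body and filter an outgoing representation with these schemas, e.g.:
+#
+#     server_schema.validate(values)
+#     response = server_schema.filter(server)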
diff --git a/moniker/utils.py b/moniker/utils.py
new file mode 100644
index 00000000..9b3d8696
--- /dev/null
+++ b/moniker/utils.py
@@ -0,0 +1,35 @@
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from moniker.openstack.common import rpc
+from moniker.openstack.common.notifier import api as notifier_api
+
+
+def notify(context, service, event_type, payload):
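+    """Emit a notification of 'event_type' with the given payload, at
+    INFO priority, on behalf of the named service."""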
+ priority = 'INFO'
+ publisher_id = notifier_api.publisher_id(service)
+
+ notifier_api.notify(context, publisher_id, event_type, priority, payload)
+
+
+def fanout_cast(context, topic, method, **kwargs):
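+    """Build an rpc message for 'method' with 'kwargs' as arguments and
+    fanout-cast it to all consumers of 'topic'."""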
+ msg = {
+ 'method': method,
+ 'args': kwargs
+ }
+
+ rpc.fanout_cast(context, topic, msg)
diff --git a/openstack-common.conf b/openstack-common.conf
new file mode 100644
index 00000000..f1e655e0
--- /dev/null
+++ b/openstack-common.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+modules=cfg,iniparser,rpc,importutils,excutils,local,jsonutils,gettextutils,timeutils,notifier,context,log,service,threadgroup,loopingcall,manager,periodic_task
+base=moniker
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..3f8281e5
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,8 @@
+[nosetests]
+cover-package=moniker
+cover-html=true
+cover-erase=true
+cover-inclusive=true
+verbosity=2
+detailed-errors=1
+where=tests
diff --git a/setup.py b/setup.py
new file mode 100755
index 00000000..40eff187
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+# Copyright 2012 Managed I.T.
+#
+# Author: Kiall Mac Innes <kiall@managedit.ie>
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from setuptools import setup, find_packages
+
+setup(
+ name='moniker',
+ version='0.0',
+ description='DNS as a Service',
+ author='Kiall Mac Innes',
+ author_email='kiall@managedit.ie',
+ url='https://launchpad.net/moniker',
+ packages=find_packages(exclude=['bin']),
+ include_package_data=True,
+ test_suite='nose.collector',
+ setup_requires=['setuptools-git>=0.4'],
+ scripts=[
+ 'bin/moniker-agent-bind9',
+ 'bin/moniker-api',
+ 'bin/moniker-central',
+ ],
+)
diff --git a/templates/bind9-config.jinja2 b/templates/bind9-config.jinja2
new file mode 100644
index 00000000..66140086
--- /dev/null
+++ b/templates/bind9-config.jinja2
@@ -0,0 +1,3 @@
+{% for domain in domains %}
+zone "{{domain.name}}" { type master; file "{{state_path}}/bind9/{{domain.id}}.zone"; };
+{%- endfor %}
diff --git a/templates/bind9-zone.jinja2 b/templates/bind9-zone.jinja2
new file mode 100644
index 00000000..655ee466
--- /dev/null
+++ b/templates/bind9-zone.jinja2
@@ -0,0 +1,17 @@
+$TTL {{ domain.ttl }}
+
+{{ domain.name }}. IN SOA {{ servers[0].name }}. {{ domain.email | replace("@", ".") }}. (
+ {{ domain.serial }} ; serial
+ {{ domain.refresh }} ; refresh
+ {{ domain.retry }} ; retry
+ {{ domain.expire }} ; expire
+ {{ domain.minimum }} ; minimum
+)
+
+{% for server in servers %}
+{{domain.name}}. IN NS {{server.name}}.
+{%- endfor %}
+
+{% for record in records %}
+{{record.name}}. {{record.ttl or ''}} IN {{record.type}} {{record.priority or ''}} {{record.data}}
+{%- endfor %}
diff --git a/tools/pip-requires b/tools/pip-requires
new file mode 100644
index 00000000..a679a008
--- /dev/null
+++ b/tools/pip-requires
@@ -0,0 +1,6 @@
+Flask==0.9
+iso8601>=0.1.4
+eventlet
+sqlalchemy>=0.7
+jsonschema
+ipaddr
diff --git a/tools/test-requires b/tools/test-requires
new file mode 100644
index 00000000..5de2b4ae
--- /dev/null
+++ b/tools/test-requires
@@ -0,0 +1,4 @@
+nose
+coverage
+pep8>=1.0
+setuptools-git>=0.4
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 00000000..2401d10d
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,21 @@
+[tox]
+envlist = py26,py27,pep8
+
+[testenv]
+deps = -r{toxinidir}/tools/pip-requires
+ -r{toxinidir}/tools/test-requires
+setenv = VIRTUAL_ENV={envdir}
+ NOSE_WITH_OPENSTACK=1
+ NOSE_OPENSTACK_COLOR=1
+ NOSE_OPENSTACK_RED=0.05
+ NOSE_OPENSTACK_YELLOW=0.025
+ NOSE_OPENSTACK_SHOW_ELAPSED=1
+commands = {toxinidir}/run_tests.sh --no-path-adjustment []
+sitepackages = True
+
+[testenv:cover]
+commands = {toxinidir}/run_tests.sh --no-path-adjustment --with-coverage --cover-erase --cover-package=moniker --cover-inclusive []
+
+[testenv:pep8]
+deps = pep8==1.1
+commands = pep8 --repeat --show-source moniker setup.py bin/moniker-api