summaryrefslogtreecommitdiff
path: root/qpid/extras
diff options
context:
space:
mode:
Diffstat (limited to 'qpid/extras')
-rw-r--r--qpid/extras/qmf/.gitignore20
-rw-r--r--qpid/extras/qmf/LICENSE.txt203
-rw-r--r--qpid/extras/qmf/NOTICE.txt20
-rwxr-xr-xqpid/extras/qmf/setup.py30
-rw-r--r--qpid/extras/qmf/src/py/qmf/__init__.py18
-rw-r--r--qpid/extras/qmf/src/py/qmf/console.py4043
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/__init__.py18
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/agent.py1380
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/common.py1738
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/console.py2626
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/__init__.py30
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_discovery.py464
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_test.py167
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/async_method.py353
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/async_query.py444
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_method.py391
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_query.py492
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/console_test.py175
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/events.py202
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/multi_response.py280
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/obj_gets.py581
-rw-r--r--qpid/extras/qmf/src/py/qmf2-prototype/tests/subscriptions.py983
-rw-r--r--qpid/extras/sasl/LICENSE234
-rw-r--r--qpid/extras/sasl/Makefile.am30
-rwxr-xr-xqpid/extras/sasl/bootstrap33
-rwxr-xr-xqpid/extras/sasl/build-aux/compile142
-rwxr-xr-xqpid/extras/sasl/build-aux/config.guess1501
-rwxr-xr-xqpid/extras/sasl/build-aux/config.rpath614
-rwxr-xr-xqpid/extras/sasl/build-aux/config.sub1619
-rwxr-xr-xqpid/extras/sasl/build-aux/depcomp584
-rwxr-xr-xqpid/extras/sasl/build-aux/install-sh507
-rwxr-xr-xqpid/extras/sasl/build-aux/mdate-sh201
-rwxr-xr-xqpid/extras/sasl/build-aux/missing367
l---------qpid/extras/sasl/build-aux/py-compile1
-rw-r--r--qpid/extras/sasl/configure.ac317
-rw-r--r--qpid/extras/sasl/include/saslwrapper.h146
-rw-r--r--qpid/extras/sasl/m4/ac_pkg_swig.m4120
-rw-r--r--qpid/extras/sasl/m4/compiler-flags.m423
-rw-r--r--qpid/extras/sasl/python/Makefile.am43
-rw-r--r--qpid/extras/sasl/python/python.i169
-rw-r--r--qpid/extras/sasl/ruby/Makefile.am44
-rw-r--r--qpid/extras/sasl/ruby/ruby.i124
-rw-r--r--qpid/extras/sasl/src/Makefile.am40
-rw-r--r--qpid/extras/sasl/src/cyrus/saslwrapper.cpp383
-rw-r--r--qpid/extras/sasl/src/saslwrapper.i40
45 files changed, 21940 insertions, 0 deletions
diff --git a/qpid/extras/qmf/.gitignore b/qpid/extras/qmf/.gitignore
new file mode 100644
index 0000000000..846f2ac5b0
--- /dev/null
+++ b/qpid/extras/qmf/.gitignore
@@ -0,0 +1,20 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+/build
diff --git a/qpid/extras/qmf/LICENSE.txt b/qpid/extras/qmf/LICENSE.txt
new file mode 100644
index 0000000000..6b0b1270ff
--- /dev/null
+++ b/qpid/extras/qmf/LICENSE.txt
@@ -0,0 +1,203 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/qpid/extras/qmf/NOTICE.txt b/qpid/extras/qmf/NOTICE.txt
new file mode 100644
index 0000000000..32ccdb70c4
--- /dev/null
+++ b/qpid/extras/qmf/NOTICE.txt
@@ -0,0 +1,20 @@
+=========================================================================
+== NOTICE file corresponding to the section 4 d of ==
+== the Apache License, Version 2.0, ==
+== in this case for the Apache Qpid distribution. ==
+=========================================================================
+
+This product includes software developed by the Apache Software Foundation
+(http://www.apache.org/).
+
+Please read the LICENSE.txt file present in the root directory of this
+distribution.
+
+
+Aside from contributions to the Apache Qpid project, this software also
+includes (binary only):
+
+ - None at this time
+
+
+
diff --git a/qpid/extras/qmf/setup.py b/qpid/extras/qmf/setup.py
new file mode 100755
index 0000000000..e7bf4b9717
--- /dev/null
+++ b/qpid/extras/qmf/setup.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from distutils.core import setup
+
+setup(name="qpid-qmf",
+ version="0.11",
+ author="Apache Qpid",
+ author_email="dev@qpid.apache.org",
+ packages=["qmf"],
+ package_dir={"": "src/py"},
+ url="http://qpid.apache.org/",
+ license="Apache Software License",
+ description="QMF Library")
diff --git a/qpid/extras/qmf/src/py/qmf/__init__.py b/qpid/extras/qmf/src/py/qmf/__init__.py
new file mode 100644
index 0000000000..31d5a2ef58
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf/__init__.py
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/qpid/extras/qmf/src/py/qmf/console.py b/qpid/extras/qmf/src/py/qmf/console.py
new file mode 100644
index 0000000000..ecb0e1d9d0
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf/console.py
@@ -0,0 +1,4043 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+""" Console API for Qpid Management Framework """
+
+import os
+import platform
+import qpid
+import struct
+import socket
+import re
+from qpid.datatypes import UUID
+from qpid.datatypes import timestamp
+from qpid.datatypes import datetime
+from qpid.exceptions import Closed
+from qpid.session import SessionDetached
+from qpid.connection import Connection, ConnectionFailed, Timeout
+from qpid.datatypes import Message, RangedSet, UUID
+from qpid.util import connect, ssl, URL
+from qpid.codec010 import StringCodec as Codec
+from threading import Lock, Condition, Thread, Semaphore
+from Queue import Queue, Empty
+from time import time, strftime, gmtime, sleep
+from cStringIO import StringIO
+
+#import qpid.log
+#qpid.log.enable(name="qpid.io.cmd", level=qpid.log.DEBUG)
+
+#===================================================================================================
+# CONSOLE
+#===================================================================================================
+class Console:
+ """ To access the asynchronous operations, a class must be derived from
+ Console with overrides of any combination of the available methods. """
+
+ def brokerConnected(self, broker):
+ """ Invoked when a connection is established to a broker """
+ pass
+
+ def brokerConnectionFailed(self, broker):
+ """ Invoked when a connection to a broker fails """
+ pass
+
+ def brokerDisconnected(self, broker):
+ """ Invoked when the connection to a broker is lost """
+ pass
+
+ def newPackage(self, name):
+ """ Invoked when a QMF package is discovered. """
+ pass
+
+ def newClass(self, kind, classKey):
+ """ Invoked when a new class is discovered. Session.getSchema can be
+ used to obtain details about the class."""
+ pass
+
+ def newAgent(self, agent):
+ """ Invoked when a QMF agent is discovered. """
+ pass
+
+ def delAgent(self, agent):
+    """ Invoked when a QMF agent disconnects. """
+ pass
+
+ def objectProps(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def objectStats(self, broker, record):
+ """ Invoked when an object is updated. """
+ pass
+
+ def event(self, broker, event):
+ """ Invoked when an event is raised. """
+ pass
+
+ def heartbeat(self, agent, timestamp):
+ """ Invoked when an agent heartbeat is received. """
+ pass
+
+ def brokerInfo(self, broker):
+ """ Invoked when the connection sequence reaches the point where broker information is available. """
+ pass
+
+ def methodResponse(self, broker, seq, response):
+ """ Invoked when a method response from an asynchronous method call is received. """
+ pass
+
+
+#===================================================================================================
+# BrokerURL
+#===================================================================================================
+class BrokerURL(URL):
+ def __init__(self, text):
+ URL.__init__(self, text)
+ if self.port is None:
+ if self.scheme == URL.AMQPS:
+ self.port = 5671
+ else:
+ self.port = 5672
+ self.authName = None
+ self.authPass = None
+ if self.user:
+ self.authName = str(self.user)
+ if self.password:
+ self.authPass = str(self.password)
+
+ def name(self):
+ return self.host + ":" + str(self.port)
+
+ def match(self, host, port):
+ return socket.getaddrinfo(self.host, self.port)[0][4] == socket.getaddrinfo(host, port)[0][4]
+
+
+#===================================================================================================
+# Object
+#===================================================================================================
+class Object(object):
+ """
+ This class defines a 'proxy' object representing a real managed object on an agent.
+  Actions taken on this proxy are remotely effected on the real managed object.
+ """
+ def __init__(self, agent, schema, codec=None, prop=None, stat=None, v2Map=None, agentName=None, kwargs={}):
+ self._agent = agent
+ self._session = None
+ self._broker = None
+ if agent:
+ self._session = agent.session
+ self._broker = agent.broker
+ self._schema = schema
+ self._properties = []
+ self._statistics = []
+ self._currentTime = None
+ self._createTime = None
+ self._deleteTime = 0
+ self._objectId = None
+ if v2Map:
+ self.v2Init(v2Map, agentName)
+ return
+
+ if self._agent:
+ self._currentTime = codec.read_uint64()
+ self._createTime = codec.read_uint64()
+ self._deleteTime = codec.read_uint64()
+ self._objectId = ObjectId(codec)
+ if codec:
+ if prop:
+ notPresent = self._parsePresenceMasks(codec, schema)
+ for property in schema.getProperties():
+ if property.name in notPresent:
+ self._properties.append((property, None))
+ else:
+ self._properties.append((property, self._session._decodeValue(codec, property.type, self._broker)))
+ if stat:
+ for statistic in schema.getStatistics():
+ self._statistics.append((statistic, self._session._decodeValue(codec, statistic.type, self._broker)))
+ else:
+ for property in schema.getProperties():
+ if property.optional:
+ self._properties.append((property, None))
+ else:
+ self._properties.append((property, self._session._defaultValue(property, self._broker, kwargs)))
+ for statistic in schema.getStatistics():
+ self._statistics.append((statistic, self._session._defaultValue(statistic, self._broker, kwargs)))
+
+ def v2Init(self, omap, agentName):
+ if omap.__class__ != dict:
+ raise Exception("QMFv2 object data must be a map/dict")
+ if '_values' not in omap:
+ raise Exception("QMFv2 object must have '_values' element")
+
+ values = omap['_values']
+ for prop in self._schema.getProperties():
+ if prop.name in values:
+ if prop.type == 10: # Reference
+ self._properties.append((prop, ObjectId(values[prop.name], agentName=agentName)))
+ else:
+ self._properties.append((prop, values[prop.name]))
+ for stat in self._schema.getStatistics():
+ if stat.name in values:
+ self._statistics.append((stat, values[stat.name]))
+ if '_subtypes' in omap:
+ self._subtypes = omap['_subtypes']
+ if '_object_id' in omap:
+ self._objectId = ObjectId(omap['_object_id'], agentName=agentName)
+ else:
+ self._objectId = None
+
+ self._currentTime = omap.get("_update_ts", 0)
+ self._createTime = omap.get("_create_ts", 0)
+ self._deleteTime = omap.get("_delete_ts", 0)
+
+ def getAgent(self):
+ """ Return the agent from which this object was sent """
+ return self._agent
+
+ def getBroker(self):
+ """ Return the broker from which this object was sent """
+ return self._broker
+
+ def getV2RoutingKey(self):
+ """ Get the QMFv2 routing key to address this object """
+ return self._agent.getV2RoutingKey()
+
+ def getObjectId(self):
+ """ Return the object identifier for this object """
+ return self._objectId
+
+ def getClassKey(self):
+ """ Return the class-key that references the schema describing this object. """
+ return self._schema.getKey()
+
+ def getSchema(self):
+ """ Return the schema that describes this object. """
+ return self._schema
+
+ def getMethods(self):
+ """ Return a list of methods available for this object. """
+ return self._schema.getMethods()
+
+ def getTimestamps(self):
+ """ Return the current, creation, and deletion times for this object. """
+ return self._currentTime, self._createTime, self._deleteTime
+
+ def isDeleted(self):
+ """ Return True iff this object has been deleted. """
+ return self._deleteTime != 0
+
+ def isManaged(self):
+ """ Return True iff this object is a proxy for a managed object on an agent. """
+ return self._objectId and self._agent
+
+ def getIndex(self):
+ """ Return a string describing this object's primary key. """
+ if self._objectId.isV2:
+ return self._objectId.getObject()
+ result = u""
+ for prop, value in self._properties:
+ if prop.index:
+ if result != u"":
+ result += u":"
+ try:
+ valstr = unicode(self._session._displayValue(value, prop.type))
+ except Exception, e:
+ valstr = u"<undecodable>"
+ result += valstr
+ return result
+
+ def getProperties(self):
+ """ Return a list of object properties """
+ return self._properties
+
+ def getStatistics(self):
+ """ Return a list of object statistics """
+ return self._statistics
+
+ def mergeUpdate(self, newer):
+ """ Replace properties and/or statistics with a newly received update """
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ if self._objectId != newer._objectId:
+ raise Exception("Objects with different object-ids")
+ if len(newer.getProperties()) > 0:
+ self._properties = newer.getProperties()
+ if len(newer.getStatistics()) > 0:
+ self._statistics = newer.getStatistics()
+ self._currentTime = newer._currentTime
+ self._deleteTime = newer._deleteTime
+
+ def update(self):
+    """ Contact the agent and retrieve the latest property and statistic values for this object. """
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ obj = self._agent.getObjects(_objectId=self._objectId)
+ if obj:
+ self.mergeUpdate(obj[0])
+ else:
+ raise Exception("Underlying object no longer exists")
+
+ def __repr__(self):
+ if self.isManaged():
+ id = self.getObjectId().__repr__()
+ else:
+ id = "unmanaged"
+ key = self.getClassKey()
+ return key.getPackageName() + ":" + key.getClassName() +\
+ "[" + id + "] " + self.getIndex().encode("utf8")
+
+ def __getattr__(self, name):
+ for method in self._schema.getMethods():
+ if name == method.name:
+ return lambda *args, **kwargs : self._invoke(name, args, kwargs)
+ for prop, value in self._properties:
+ if name == prop.name:
+ return value
+ if name == "_" + prop.name + "_" and prop.type == 10: # Dereference references
+ deref = self._agent.getObjects(_objectId=value)
+ if len(deref) != 1:
+ return None
+ else:
+ return deref[0]
+ for stat, value in self._statistics:
+ if name == stat.name:
+ return value
+
+ #
+ # Check to see if the name is in the schema. If so, return None (i.e. this is a not-present attribute)
+ #
+ for prop in self._schema.getProperties():
+ if name == prop.name:
+ return None
+ for stat in self._schema.getStatistics():
+ if name == stat.name:
+ return None
+ raise Exception("Type Object has no attribute '%s'" % name)
+
+ def __setattr__(self, name, value):
+ if name[0] == '_':
+ super.__setattr__(self, name, value)
+ return
+
+ for prop, unusedValue in self._properties:
+ if name == prop.name:
+ newprop = (prop, value)
+ newlist = []
+ for old, val in self._properties:
+ if name == old.name:
+ newlist.append(newprop)
+ else:
+ newlist.append((old, val))
+ self._properties = newlist
+ return
+ super.__setattr__(self, name, value)
+
+ def _sendMethodRequest(self, name, args, kwargs, synchronous=False, timeWait=None):
+ for method in self._schema.getMethods():
+ if name == method.name:
+ aIdx = 0
+ sendCodec = Codec()
+ seq = self._session.seqMgr._reserve((method, synchronous))
+
+ count = 0
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ count += 1
+ if count != len(args):
+ raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(args)))
+
+ if self._agent.isV2:
+ #
+ # Compose and send a QMFv2 method request
+ #
+ call = {}
+ call['_object_id'] = self._objectId.asMap()
+ call['_method_name'] = name
+ argMap = {}
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ argMap[arg.name] = args[aIdx]
+ aIdx += 1
+ call['_arguments'] = argMap
+
+ dp = self._broker.amqpSession.delivery_properties()
+ dp.routing_key = self.getV2RoutingKey()
+ mp = self._broker.amqpSession.message_properties()
+ mp.content_type = "amqp/map"
+ if self._broker.saslUser:
+ mp.user_id = self._broker.saslUser
+ mp.correlation_id = str(seq)
+ mp.app_id = "qmf2"
+ mp.reply_to = self._broker.amqpSession.reply_to("qmf.default.direct", self._broker.v2_direct_queue)
+ mp.application_headers = {'qmf.opcode':'_method_request'}
+ sendCodec.write_map(call)
+ smsg = Message(dp, mp, sendCodec.encoded)
+ exchange = "qmf.default.direct"
+
+ else:
+ #
+ # Associate this sequence with the agent hosting the object so we can correctly
+ # route the method-response
+ #
+ agent = self._broker.getAgent(self._broker.getBrokerBank(), self._objectId.getAgentBank())
+ self._broker._setSequence(seq, agent)
+
+ #
+ # Compose and send a QMFv1 method request
+ #
+ self._broker._setHeader(sendCodec, 'M', seq)
+ self._objectId.encode(sendCodec)
+ self._schema.getKey().encode(sendCodec)
+ sendCodec.write_str8(name)
+
+ for arg in method.arguments:
+ if arg.dir.find("I") != -1:
+ self._session._encodeValue(sendCodec, args[aIdx], arg.type)
+ aIdx += 1
+ smsg = self._broker._message(sendCodec.encoded, "agent.%d.%s" %
+ (self._objectId.getBrokerBank(), self._objectId.getAgentBank()))
+ exchange = "qpid.management"
+
+ if synchronous:
+ try:
+ self._broker.cv.acquire()
+ self._broker.syncInFlight = True
+ finally:
+ self._broker.cv.release()
+ self._broker._send(smsg, exchange)
+ return seq
+ return None
+
+ def _invoke(self, name, args, kwargs):
+ # Invoke method 'name' on this managed object.
+ # Recognized keyword arguments:
+ # _timeout - seconds to wait for the reply (default: broker.SYNC_TIME)
+ # _async - if true, return the sequence id immediately; the reply is
+ # delivered via the console callback instead of being waited on.
+ # Raises if the object is unmanaged, the call times out, or the broker
+ # posts an error for this request.
+ if not self.isManaged():
+ raise Exception("Object is not managed")
+ if "_timeout" in kwargs:
+ timeout = kwargs["_timeout"]
+ else:
+ timeout = self._broker.SYNC_TIME
+
+ if "_async" in kwargs and kwargs["_async"]:
+ sync = False
+ if "_timeout" not in kwargs:
+ timeout = None
+ else:
+ sync = True
+
+ seq = self._sendMethodRequest(name, args, kwargs, sync, timeout)
+ if seq:
+ if not sync:
+ return seq
+ # Synchronous call: block on the broker's condition variable until
+ # the response clears syncInFlight, an error is posted, or we time out.
+ self._broker.cv.acquire()
+ try:
+ starttime = time()
+ while self._broker.syncInFlight and self._broker.error == None:
+ self._broker.cv.wait(timeout)
+ if time() - starttime > timeout:
+ raise RuntimeError("Timed out waiting for method to respond")
+ finally:
+ # Always release the sequence number, even on timeout/error.
+ self._session.seqMgr._release(seq)
+ self._broker.cv.release()
+ if self._broker.error != None:
+ errorText = self._broker.error
+ self._broker.error = None
+ raise Exception(errorText)
+ return self._broker.syncResult
+ raise Exception("Invalid Method (software defect) [%s]" % name)
+
+ def _encodeUnmanaged(self, codec):
+ # Encode this object as a QMFv1 "unmanaged object" value (typecode 20):
+ # schema identity (package, class, hash), presence masks for optional
+ # properties, then property values and statistic values.
+ codec.write_uint8(20)
+ codec.write_str8(self._schema.getKey().getPackageName())
+ codec.write_str8(self._schema.getKey().getClassName())
+ codec.write_bin128(self._schema.getKey().getHash())
+
+ # emit presence masks for optional properties
+ # One bit per optional property; a full mask byte is flushed after
+ # every 8 optional properties (bit wraps at 256), and any partially
+ # filled mask byte is flushed at the end.
+ mask = 0
+ bit = 0
+ for prop, value in self._properties:
+ if prop.optional:
+ if bit == 0:
+ bit = 1
+ if value:
+ mask |= bit
+ bit = bit << 1
+ if bit == 256:
+ bit = 0
+ codec.write_uint8(mask)
+ mask = 0
+ if bit != 0:
+ codec.write_uint8(mask)
+
+ # encode properties
+ # Absent (None) optional properties are skipped; their absence is
+ # recorded in the presence masks above.
+ for prop, value in self._properties:
+ if value != None:
+ self._session._encodeValue(codec, value, prop.type)
+
+ # encode statistics
+ for stat, value in self._statistics:
+ self._session._encodeValue(codec, value, stat.type)
+
+ def _parsePresenceMasks(self, codec, schema):
+ # Read the presence-mask bytes that precede an encoded object's
+ # properties and return the list of optional property names that are
+ # NOT present in the encoding (mirror of _encodeUnmanaged's masks).
+ excludeList = []
+ bit = 0
+ for property in schema.getProperties():
+ if property.optional:
+ if bit == 0:
+ mask = codec.read_uint8()
+ bit = 1
+ if (mask & bit) == 0:
+ excludeList.append(property.name)
+ bit *= 2
+ if bit == 256:
+ bit = 0
+ return excludeList
+
+
+#===================================================================================================
+# Session
+#===================================================================================================
+class Session:
+ """
+ An instance of the Session class represents a console session running
+ against one or more QMF brokers. A single instance of Session is needed
+ to interact with the management framework as a console.
+ """
+ # Context codes stored in the sequence manager to classify replies:
+ _CONTEXT_SYNC = 1
+ _CONTEXT_STARTUP = 2
+ _CONTEXT_MULTIGET = 3
+
+ # Default timeout (seconds) for synchronous getObjects calls.
+ DEFAULT_GET_WAIT_TIME = 60
+
+ # Map of Python types to QMF wire typecodes (see _encodeValue/_decodeValue).
+ ENCODINGS = {
+ str: 7,
+ timestamp: 8,
+ datetime: 8,
+ int: 9,
+ long: 9,
+ float: 13,
+ UUID: 14,
+ Object: 20,
+ list: 21
+ }
+
+
+ def __init__(self, console=None, rcvObjects=True, rcvEvents=True, rcvHeartbeats=True,
+ manageConnections=False, userBindings=False):
+ """
+ Initialize a session. If the console argument is provided, the
+ more advanced asynchronous features are available. If console is
+ defaulted, the session will operate in a simpler, synchronous manner.
+
+ The rcvObjects, rcvEvents, and rcvHeartbeats arguments are meaningful only if 'console'
+ is provided. They control whether object updates, events, and agent-heartbeats are
+ subscribed to. If the console is not interested in receiving one or more of the above,
+ setting the argument to False will reduce the bandwidth used by the API.
+
+ If manageConnections is set to True, the Session object will manage connections to
+ the brokers. This means that if a broker is unreachable, it will retry until a connection
+ can be established. If a connection is lost, the Session will attempt to reconnect.
+
+ If manageConnections is set to False, the user is responsible for handling failures. In
+ this case, an unreachable broker will cause addBroker to raise an exception.
+
+ If userBindings is set to False (the default) and rcvObjects is True, the console will
+ receive data for all object classes. If userBindings is set to True, the user must select
+ which classes the console shall receive by invoking the bindPackage or bindClass methods.
+ This allows the console to be configured to receive only information that is relevant to
+ a particular application. If rcvObjects is False, userBindings has no meaning.
+ """
+ self.console = console
+ self.brokers = []
+ self.schemaCache = SchemaCache()
+ self.seqMgr = SequenceManager()
+ self.cv = Condition()
+ self.syncSequenceList = []
+ self.getResult = []
+ self.getSelect = []
+ self.error = None
+ self.rcvObjects = rcvObjects
+ self.rcvEvents = rcvEvents
+ self.rcvHeartbeats = rcvHeartbeats
+ self.userBindings = userBindings
+ # Without a console there is nowhere to deliver async indications, so
+ # force all subscription flags off.
+ if self.console == None:
+ self.rcvObjects = False
+ self.rcvEvents = False
+ self.rcvHeartbeats = False
+ self.v1BindingKeyList, self.v2BindingKeyList = self._bindingKeys()
+ self.manageConnections = manageConnections
+ # callback filters:
+ self.agent_filter = [] # (vendor, product, instance) || v1-agent-label-str
+ self.class_filter = [] # (pkg, class)
+ self.event_filter = [] # (pkg, event)
+ self.agent_heartbeat_min = 10 # minimum agent heartbeat timeout interval
+ self.agent_heartbeat_miss = 3 # # of heartbeats to miss before deleting agent
+
+ if self.userBindings and not self.console:
+ raise Exception("userBindings can't be set unless a console is provided.")
+
+ def close(self):
+ """ Releases all resources held by the session. Must be called by the
+ application when it is done with the Session object.
+ """
+ self.cv.acquire()
+ try:
+ while len(self.brokers):
+ b = self.brokers.pop()
+ try:
+ b._shutdown()
+ except:
+ # Best-effort shutdown: ignore per-broker failures so every
+ # broker gets a shutdown attempt.
+ pass
+ finally:
+ self.cv.release()
+
+ def _getBrokerForAgentAddr(self, agent_addr):
+ # Find the broker that hosts the V1 agent with the given address
+ # (agents are keyed by (brokerBank=1, addr)). Returns None if unknown.
+ try:
+ self.cv.acquire()
+ key = (1, agent_addr)
+ for b in self.brokers:
+ if key in b.agents:
+ return b
+ finally:
+ self.cv.release()
+ return None
+
+
+ def _getAgentForAgentAddr(self, agent_addr):
+ # Find the agent object registered under the given address on any
+ # connected broker. Returns None if unknown.
+ try:
+ self.cv.acquire()
+ key = agent_addr
+ for b in self.brokers:
+ if key in b.agents:
+ return b.agents[key]
+ finally:
+ self.cv.release()
+ return None
+
+
+ def __repr__(self):
+ return "QMF Console Session Manager (brokers: %d)" % len(self.brokers)
+
+
+ def addBroker(self, target="localhost", timeout=None, mechanisms=None):
+ """ Connect to a Qpid broker. Returns an object of type Broker.
+ Will raise an exception if the session is not managing the connection and
+ the connection setup to the broker fails.
+ """
+ # 'target' is a broker URL string; credentials and scheme (amqp/amqps)
+ # are parsed out of it.
+ url = BrokerURL(target)
+ broker = Broker(self, url.host, url.port, mechanisms, url.authName, url.authPass,
+ ssl = url.scheme == URL.AMQPS, connTimeout=timeout)
+
+ self.brokers.append(broker)
+ return broker
+
+
+ def delBroker(self, broker):
+ """ Disconnect from a broker, and deallocate the broker proxy object. The
+ 'broker' argument is the object returned from the addBroker call. Errors
+ are ignored.
+ """
+ broker._shutdown()
+ self.brokers.remove(broker)
+ del broker
+
+
+ def getPackages(self):
+ """ Get the list of known QMF packages """
+ # Wait for each broker's startup schema exchange to complete so the
+ # cache is fully populated before answering.
+ for broker in self.brokers:
+ broker._waitForStable()
+ return self.schemaCache.getPackages()
+
+
+ def getClasses(self, packageName):
+ """ Get the list of known classes within a QMF package """
+ for broker in self.brokers:
+ broker._waitForStable()
+ return self.schemaCache.getClasses(packageName)
+
+
+ def getSchema(self, classKey):
+ """ Get the schema for a QMF class """
+ for broker in self.brokers:
+ broker._waitForStable()
+ return self.schemaCache.getSchema(classKey)
+
+
+ def bindPackage(self, packageName):
+ """ Filter object and event callbacks to only those elements of the
+ specified package. Also filters newPackage and newClass callbacks to the
+ given package. Only valid if userBindings is True.
+ """
+ if not self.userBindings:
+ raise Exception("userBindings option must be set for this Session.")
+ if not self.rcvObjects and not self.rcvEvents:
+ raise Exception("Session needs to be configured to receive events or objects.")
+ # V2 routing keys escape '.' in package names as '_'.
+ v1keys = ["console.obj.*.*.%s.#" % packageName, "console.event.*.*.%s.#" % packageName]
+ v2keys = ["agent.ind.data.%s.#" % packageName.replace(".", "_"),
+ "agent.ind.event.%s.#" % packageName.replace(".", "_"),]
+ # (pkg, None) means "all classes/events in pkg" in the callback filters.
+ if (packageName, None) not in self.class_filter:
+ self.class_filter.append((packageName, None))
+ if (packageName, None) not in self.event_filter:
+ self.event_filter.append((packageName, None))
+ self.v1BindingKeyList.extend(v1keys)
+ self.v2BindingKeyList.extend(v2keys)
+ # Apply the new bindings immediately to already-connected brokers;
+ # the key lists above cover brokers that connect later.
+ for broker in self.brokers:
+ if broker.isConnected():
+ for v1key in v1keys:
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
+ if broker.brokerSupportsV2:
+ for v2key in v2keys:
+ # data indications should arrive on the unsolicited indication queue
+ broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
+
+
+ def bindClass(self, pname, cname=None):
+ """ Filter object callbacks to only those objects of the specified package
+ and optional class. Will also filter newPackage/newClass callbacks to the
+ specified package and class. Only valid if userBindings is True and
+ rcvObjects is True.
+ """
+ if not self.userBindings:
+ raise Exception("userBindings option must be set for this Session.")
+ if not self.rcvObjects:
+ raise Exception("Session needs to be configured with rcvObjects=True.")
+ if cname is not None:
+ v1key = "console.obj.*.*.%s.%s.#" % (pname, cname)
+ v2key = "agent.ind.data.%s.%s.#" % (pname.replace(".", "_"), cname.replace(".", "_"))
+ else:
+ v1key = "console.obj.*.*.%s.#" % pname
+ v2key = "agent.ind.data.%s.#" % pname.replace(".", "_")
+ self.v1BindingKeyList.append(v1key)
+ self.v2BindingKeyList.append(v2key)
+ if (pname, cname) not in self.class_filter:
+ self.class_filter.append((pname, cname))
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
+ if broker.brokerSupportsV2:
+ # data indications should arrive on the unsolicited indication queue
+ broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
+
+
+ def bindClassKey(self, classKey):
+ """ Filter object callbacks to only those objects of the specified
+ class. Will also filter newPackage/newClass callbacks to the specified
+ package and class. Only valid if userBindings is True and rcvObjects is
+ True.
+ """
+ # Convenience wrapper: unpack the class key and delegate to bindClass.
+ pname = classKey.getPackageName()
+ cname = classKey.getClassName()
+ self.bindClass(pname, cname)
+
+ def bindEvent(self, pname, ename=None):
+ """ Filter event callbacks only from a particular class by package and
+ event name, or all events in a package if ename=None. Will also filter
+ newPackage/newClass callbacks to the specified package and class. Only
+ valid if userBindings is True and rcvEvents is True.
+ """
+ if not self.userBindings:
+ raise Exception("userBindings option must be set for this Session.")
+ if not self.rcvEvents:
+ raise Exception("Session needs to be configured with rcvEvents=True.")
+ if ename is not None:
+ v1key = "console.event.*.*.%s.%s.#" % (pname, ename)
+ v2key = "agent.ind.event.%s.%s.#" % (pname.replace(".", "_"), ename.replace(".", "_"))
+ else:
+ v1key = "console.event.*.*.%s.#" % pname
+ v2key = "agent.ind.event.%s.#" % pname.replace(".", "_")
+ self.v1BindingKeyList.append(v1key)
+ self.v2BindingKeyList.append(v2key)
+ if (pname, ename) not in self.event_filter:
+ self.event_filter.append((pname, ename))
+ # Apply bindings to brokers that are already connected.
+ for broker in self.brokers:
+ if broker.isConnected():
+ broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key)
+ if broker.brokerSupportsV2:
+ # event indications should arrive on the unsolicited indication queue
+ broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key)
+
+ def bindEventKey(self, eventKey):
+ """ Filter event callbacks only from a particular class key. Will also
+ filter newPackage/newClass callbacks to the specified package and
+ class. Only valid if userBindings is True and rcvEvents is True.
+ """
+ # Convenience wrapper: unpack the event key and delegate to bindEvent.
+ pname = eventKey.getPackageName()
+ ename = eventKey.getClassName()
+ self.bindEvent(pname, ename)
+
+ def bindAgent(self, vendor=None, product=None, instance=None, label=None):
+ """ Receive heartbeats, newAgent and delAgent callbacks only for those
+ agent(s) that match the passed identification criteria:
+ V2 agents: vendor, optionally product and instance strings
+ V1 agents: the label string.
+ Only valid if userBindings is True.
+ """
+ if not self.userBindings:
+ raise Exception("Session not configured for binding specific agents.")
+ if vendor is None and label is None:
+ raise Exception("Must specify at least a vendor (V2 agents)"
+ " or label (V1 agents).")
+
+ if vendor: # V2 agent identification
+ if product is not None:
+ v2key = "agent.ind.heartbeat.%s.%s.#" % (vendor.replace(".", "_"), product.replace(".", "_"))
+ else:
+ v2key = "agent.ind.heartbeat.%s.#" % vendor.replace(".", "_")
+ self.v2BindingKeyList.append(v2key)
+
+ # allow wildcards - only add filter if a non-wildcarded component is given
+ if vendor == "*":
+ vendor = None
+ if product == "*":
+ product = None
+ if instance == "*":
+ instance = None
+ if vendor or product or instance:
+ if (vendor, product, instance) not in self.agent_filter:
+ self.agent_filter.append((vendor, product, instance))
+
+ for broker in self.brokers:
+ if broker.isConnected():
+ if broker.brokerSupportsV2:
+ # heartbeats should arrive on the heartbeat queue
+ broker.amqpSession.exchange_bind(exchange="qmf.default.topic",
+ queue=broker.v2_topic_queue_hb,
+ binding_key=v2key)
+ elif label != "*": # non-wildcard V1 agent label
+ # V1 format heartbeats do not have any agent identifier in the routing
+ # key, so we cannot filter them by bindings.
+ if label not in self.agent_filter:
+ self.agent_filter.append(label)
+
+
+ def getAgents(self, broker=None):
+ """ Get a list of currently known agents """
+ # If no broker is specified, collect agents from all of them.
+ brokerList = []
+ if broker == None:
+ for b in self.brokers:
+ brokerList.append(b)
+ else:
+ brokerList.append(broker)
+
+ # Let each broker finish its startup exchange before reading its
+ # agent table.
+ for b in brokerList:
+ b._waitForStable()
+ agentList = []
+ for b in brokerList:
+ for a in b.getAgents():
+ agentList.append(a)
+ return agentList
+
+
+ def makeObject(self, classKey, **kwargs):
+ """ Create a new, unmanaged object of the schema indicated by classKey """
+ schema = self.getSchema(classKey)
+ if schema == None:
+ raise Exception("Schema not found for classKey")
+ return Object(None, schema, None, True, True, kwargs)
+
+
+ def getObjects(self, **kwargs):
+ """ Get a list of objects from QMF agents.
+ All arguments are passed by name(keyword).
+
+ The class for queried objects may be specified in one of the following ways:
+
+ _schema = <schema> - supply a schema object returned from getSchema.
+ _key = <key> - supply a classKey from the list returned by getClasses.
+ _class = <name> - supply a class name as a string. If the class name exists
+ in multiple packages, a _package argument may also be supplied.
+ _objectId = <id> - get the object referenced by the object-id
+
+ If objects should be obtained from only one agent, use the following argument.
+ Otherwise, the query will go to all agents.
+
+ _agent = <agent> - supply an agent from the list returned by getAgents.
+
+ If the get query is to be restricted to one broker (as opposed to all connected brokers),
+ add the following argument:
+
+ _broker = <broker> - supply a broker as returned by addBroker.
+
+ The default timeout for this synchronous operation is 60 seconds. To change the timeout,
+ use the following argument:
+
+ _timeout = <time in seconds>
+
+ If additional arguments are supplied, they are used as property selectors. For example,
+ if the argument name="test" is supplied, only objects whose "name" property is "test"
+ will be returned in the result.
+ """
+ if "_broker" in kwargs:
+ brokerList = []
+ brokerList.append(kwargs["_broker"])
+ else:
+ brokerList = self.brokers
+ for broker in brokerList:
+ broker._waitForStable()
+ if broker.isConnected():
+ # Refresh the broker's agent list first (recursive call), unless
+ # this query is itself the agent-list query.
+ if "_package" not in kwargs or "_class" not in kwargs or \
+ kwargs["_package"] != "org.apache.qpid.broker" or \
+ kwargs["_class"] != "agent":
+ self.getObjects(_package = "org.apache.qpid.broker", _class = "agent",
+ _agent = broker.getAgent(1,0))
+
+ # Build the list of agents to query: an explicit _agent, the agent
+ # owning _objectId, or every connected agent.
+ agentList = []
+ if "_agent" in kwargs:
+ agent = kwargs["_agent"]
+ if agent.broker not in brokerList:
+ raise Exception("Supplied agent is not accessible through the supplied broker")
+ if agent.broker.isConnected():
+ agentList.append(agent)
+ else:
+ if "_objectId" in kwargs:
+ oid = kwargs["_objectId"]
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.getBrokerBank() == oid.getBrokerBank() and agent.getAgentBank() == oid.getAgentBank():
+ agentList.append(agent)
+ else:
+ for broker in brokerList:
+ for agent in broker.getAgents():
+ if agent.broker.isConnected():
+ agentList.append(agent)
+
+ if len(agentList) == 0:
+ return []
+
+ #
+ # We now have a list of agents to query, start the queries and gather the results.
+ #
+ request = SessionGetRequest(len(agentList))
+ for agent in agentList:
+ agent.getObjects(request, **kwargs)
+ timeout = 60
+ if '_timeout' in kwargs:
+ timeout = kwargs['_timeout']
+ request.wait(timeout)
+ return request.result
+
+
+ def addEventFilter(self, **kwargs):
+ """Filter unsolicited events based on package and event name.
+ QMF v2 also can filter on vendor, product, and severity values.
+
+ By default, a console receives unsolicited events by binding to:
+
+ qpid.management/console.event.# (v1)
+
+ qmf.default.topic/agent.ind.event.# (v2)
+
+ A V1 event filter binding uses the pattern:
+
+ qpid.management/console.event.*.*[.<package>[.<event>]].#
+
+ A V2 event filter binding uses the pattern:
+
+ qmf.default.topic/agent.ind.event.<Vendor|*>.<Product|*>.<severity|*>.<package|*>.<event|*>.#
+ """
+ package = kwargs.get("package", "*")
+ event = kwargs.get("event", "*")
+ vendor = kwargs.get("vendor", "*")
+ product = kwargs.get("product", "*")
+ severity = kwargs.get("severity", "*")
+
+ if package == "*" and event != "*":
+ raise Exception("'package' parameter required if 'event' parameter"
+ " supplied")
+
+ # V1 key - can only filter on package (and event)
+ # NOTE(review): this branch only fires when package == "*", which
+ # contradicts the comment above and the guard that just required a
+ # package when an event is given - likely should be `package != "*"`;
+ # verify against upstream before changing.
+ if package == "*":
+ key = "console.event.*.*." + str(package)
+ if event != "*":
+ key += "." + str(event)
+ key += ".#"
+
+ if key not in self.v1BindingKeyList:
+ self.v1BindingKeyList.append(key)
+ try:
+ # remove default wildcard binding
+ self.v1BindingKeyList.remove("console.event.#")
+ except:
+ pass
+
+ # V2 key - escape any "." in the filter strings
+
+ key = "agent.ind.event." + str(package).replace(".", "_") \
+ + "." + str(event).replace(".", "_") \
+ + "." + str(severity).replace(".", "_") \
+ + "." + str(vendor).replace(".", "_") \
+ + "." + str(product).replace(".", "_") \
+ + ".#"
+
+ if key not in self.v2BindingKeyList:
+ self.v2BindingKeyList.append(key)
+ try:
+ # remove default wildcard binding
+ self.v2BindingKeyList.remove("agent.ind.event.#")
+ except:
+ pass
+
+ # Record the (package, event) pair for callback-side filtering.
+ if package != "*":
+ if event != "*":
+ f = (package, event)
+ else:
+ f = (package, None)
+ if f not in self.event_filter:
+ self.event_filter.append(f)
+
+
+ def addAgentFilter(self, vendor, product=None):
+ """ Deprecate - use bindAgent() instead
+ """
+ self.addHeartbeatFilter(vendor=vendor, product=product)
+
+ def addHeartbeatFilter(self, **kwargs):
+ """ Deprecate - use bindAgent() instead.
+ """
+ vendor = kwargs.get("vendor")
+ product = kwargs.get("product")
+ if vendor is None:
+ raise Exception("vendor parameter required!")
+
+ # V1 heartbeats do not have any agent identifier - we cannot
+ # filter them by agent.
+
+ # build the binding key - escape "."s...
+ key = "agent.ind.heartbeat." + str(vendor).replace(".", "_")
+ if product is not None:
+ key += "." + str(product).replace(".", "_")
+ key += ".#"
+
+ if key not in self.v2BindingKeyList:
+ self.v2BindingKeyList.append(key)
+ self.agent_filter.append((vendor, product, None))
+
+ # be sure we don't ever filter the local broker
+ local_broker_key = "agent.ind.heartbeat." + "org.apache".replace(".", "_") \
+ + "." + "qpidd".replace(".", "_") + ".#"
+ if local_broker_key not in self.v2BindingKeyList:
+ self.v2BindingKeyList.append(local_broker_key)
+
+ # remove the wildcard key if present
+ try:
+ self.v2BindingKeyList.remove("agent.ind.heartbeat.#")
+ except:
+ pass
+
+ def _bindingKeys(self):
+ # Compute the initial (v1, v2) binding-key lists based on the
+ # rcvObjects/rcvEvents/rcvHeartbeats/userBindings settings chosen at
+ # construction time. Returns a (v1KeyList, v2KeyList) tuple.
+ v1KeyList = []
+ v2KeyList = []
+ v1KeyList.append("schema.#")
+ # note well: any binding that starts with 'agent.ind.heartbeat' will be
+ # bound to the heartbeat queue, otherwise it will be bound to the
+ # unsolicited indication queue. See _decOutstanding() for the binding.
+ if not self.userBindings:
+ if self.rcvObjects and self.rcvEvents and self.rcvHeartbeats:
+ v1KeyList.append("console.#")
+ v2KeyList.append("agent.ind.data.#")
+ v2KeyList.append("agent.ind.event.#")
+ v2KeyList.append("agent.ind.heartbeat.#")
+ else:
+ # need heartbeats for V2 newAgent()/delAgent()
+ v2KeyList.append("agent.ind.heartbeat.#")
+ if self.rcvObjects:
+ v1KeyList.append("console.obj.#")
+ v2KeyList.append("agent.ind.data.#")
+ else:
+ v1KeyList.append("console.obj.*.*.org.apache.qpid.broker.agent")
+ if self.rcvEvents:
+ v1KeyList.append("console.event.#")
+ v2KeyList.append("agent.ind.event.#")
+ else:
+ v1KeyList.append("console.event.*.*.org.apache.qpid.broker.agent")
+ if self.rcvHeartbeats:
+ v1KeyList.append("console.heartbeat.#")
+ else:
+ # mandatory bindings
+ v1KeyList.append("console.obj.*.*.org.apache.qpid.broker.agent")
+ v1KeyList.append("console.event.*.*.org.apache.qpid.broker.agent")
+ v1KeyList.append("console.heartbeat.#") # no way to turn this on later
+ v2KeyList.append("agent.ind.heartbeat.org_apache.qpidd.#")
+
+ return (v1KeyList, v2KeyList)
+
+
+ def _handleBrokerConnect(self, broker):
+ # Notify the console of a new broker connection and of every agent
+ # already known on that broker.
+ if self.console:
+ for agent in broker.getAgents():
+ self._newAgentCallback(agent)
+ self.console.brokerConnected(broker)
+
+
+ def _handleBrokerDisconnect(self, broker):
+ # Mirror of _handleBrokerConnect for a lost connection.
+ if self.console:
+ for agent in broker.getAgents():
+ self._delAgentCallback(agent)
+ self.console.brokerDisconnected(broker)
+
+
+ def _handleBrokerResp(self, broker, codec, seq):
+ # Handle the broker-response message: record the broker id, then kick
+ # off the startup schema download with a package request.
+ broker.brokerId = codec.read_uuid()
+ if self.console != None:
+ self.console.brokerInfo(broker)
+
+ # Send a package request
+ # (effectively inc and dec outstanding by not doing anything)
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'P', seq)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+
+ def _handlePackageInd(self, broker, codec, seq):
+ # Handle a package indication: declare the package in the cache
+ # (notifying the console if new) and request its class list.
+ pname = str(codec.read_str8())
+ notify = self.schemaCache.declarePackage(pname)
+ if notify and self.console != None:
+ self._newPackageCallback(pname)
+
+ # Send a class request
+ broker._incOutstanding()
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'Q', seq)
+ sendCodec.write_str8(pname)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+
+ def _handleCommandComplete(self, broker, codec, seq, agent):
+ # Handle a V1 command-complete: release the sequence and wake whichever
+ # waiter (startup, sync call, or multi-get) the context identifies.
+ code = codec.read_uint32()
+ text = codec.read_str8()
+ context = self.seqMgr._release(seq)
+ if context == self._CONTEXT_STARTUP:
+ broker._decOutstanding()
+ elif context == self._CONTEXT_SYNC and seq == broker.syncSequence:
+ try:
+ broker.cv.acquire()
+ broker.syncInFlight = False
+ broker.cv.notify()
+ finally:
+ broker.cv.release()
+ elif context == self._CONTEXT_MULTIGET and seq in self.syncSequenceList:
+ try:
+ self.cv.acquire()
+ self.syncSequenceList.remove(seq)
+ # Notify only when the last outstanding get has completed.
+ if len(self.syncSequenceList) == 0:
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ if agent:
+ agent._handleV1Completion(seq, code, text)
+
+
+ def _handleClassInd(self, broker, codec, seq):
+ # Handle a class indication: if the schema is not already cached,
+ # request it from the broker.
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ classKey._setType(kind)
+ schema = self.schemaCache.getSchema(classKey)
+
+ if not schema:
+ # Send a schema request for the unknown class
+ broker._incOutstanding()
+ sendCodec = Codec()
+ seq = self.seqMgr._reserve(self._CONTEXT_STARTUP)
+ broker._setHeader(sendCodec, 'S', seq)
+ classKey.encode(sendCodec)
+ smsg = broker._message(sendCodec.encoded)
+ broker._send(smsg)
+
+
+ def _handleHeartbeatInd(self, broker, codec, seq, msg):
+ # Handle a V1 heartbeat indication. The broker/agent banks are parsed
+ # from the routing key ("console.heartbeat.<brokerBank>.<agentBank>");
+ # absent a usable key, the heartbeat is attributed to the broker itself.
+ brokerBank = 1
+ agentBank = 0
+ dp = msg.get("delivery_properties")
+ if dp:
+ key = dp["routing_key"]
+ if key:
+ keyElements = key.split(".")
+ if len(keyElements) == 4:
+ brokerBank = int(keyElements[2])
+ agentBank = int(keyElements[3])
+ else:
+ # If there's no routing key in the delivery properties,
+ # assume the message is from the broker.
+ brokerBank = 1
+ agentBank = 0
+
+ agent = broker.getAgent(brokerBank, agentBank)
+ if self.rcvHeartbeats and self.console != None and agent != None:
+ timestamp = codec.read_uint64()
+ self._heartbeatCallback(agent, timestamp)
+
+
+ def _handleSchemaResp(self, broker, codec, seq, agent_addr):
+ # Handle a schema response: decode and cache the class, fire the
+ # new-package/new-class callbacks, and (for V2 agents) let the owning
+ # agent continue its schema download.
+ kind = codec.read_uint8()
+ classKey = ClassKey(codec)
+ classKey._setType(kind)
+ _class = SchemaClass(kind, classKey, codec, self)
+ new_pkg, new_cls = self.schemaCache.declareClass(classKey, _class)
+ ctx = self.seqMgr._release(seq)
+ if ctx:
+ broker._decOutstanding()
+ if self.console != None:
+ if new_pkg:
+ self._newPackageCallback(classKey.getPackageName())
+ if new_cls:
+ self._newClassCallback(kind, classKey)
+
+ if agent_addr and (agent_addr.__class__ == str or agent_addr.__class__ == unicode):
+ agent = self._getAgentForAgentAddr(agent_addr)
+ if agent:
+ agent._schemaInfoFromV2Agent()
+
+
+ def _v2HandleHeartbeatInd(self, broker, mp, ah, content):
+ # Handle a QMFv2 heartbeat indication: create/refresh the Agent entry
+ # for the sender and fire the heartbeat callback. Both the "_name" and
+ # legacy (no underscore) key spellings are accepted in the map.
+ try:
+ agentName = ah["qmf.agent"]
+ values = content["_values"]
+
+ if '_timestamp' in values:
+ timestamp = values["_timestamp"]
+ else:
+ timestamp = values['timestamp']
+
+ if '_heartbeat_interval' in values:
+ interval = values['_heartbeat_interval']
+ else:
+ interval = values['heartbeat_interval']
+
+ epoch = 0
+ if '_epoch' in values:
+ epoch = values['_epoch']
+ elif 'epoch' in values:
+ epoch = values['epoch']
+ except Exception,e:
+ # Malformed heartbeat - silently ignore it.
+ return
+
+ ##
+ ## For now, ignore heartbeats from messaging brokers. We already have the "local-broker"
+ ## agent in our list.
+ ##
+ if '_vendor' in values and values['_vendor'] == 'apache.org' and \
+ '_product' in values and values['_product'] == 'qpidd':
+ return
+
+ if self.agent_filter:
+ # only allow V2 agents that satisfy the filter
+ # Agent names are "vendor:product:instance"; match at any specificity.
+ v = agentName.split(":", 2)
+ if len(v) != 3 or ((v[0], None, None) not in self.agent_filter
+ and (v[0], v[1], None) not in self.agent_filter
+ and (v[0], v[1], v[2]) not in self.agent_filter):
+ return
+
+ agent = broker.getAgent(1, agentName)
+ if agent == None:
+ agent = Agent(broker, agentName, "QMFv2 Agent", True, interval)
+ agent.setEpoch(epoch)
+ broker._addAgent(agentName, agent)
+ else:
+ # Known agent: just refresh its liveness timer.
+ agent.touch()
+ if self.rcvHeartbeats and self.console and agent:
+ self._heartbeatCallback(agent, timestamp)
+ agent.update_schema_timestamp(values.get("_schema_updated", 0))
+
+
+ def _v2HandleAgentLocateRsp(self, broker, mp, ah, content):
+ # An agent-locate response carries the same payload as a heartbeat.
+ self._v2HandleHeartbeatInd(broker, mp, ah, content)
+
+
+ def _handleError(self, error):
+ # Record a broker error and wake any threads blocked in a multi-get.
+ try:
+ self.cv.acquire()
+ if len(self.syncSequenceList) > 0:
+ self.error = error
+ self.syncSequenceList = []
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+
+ def _selectMatch(self, object):
+ """ Check the object against self.getSelect to check for a match """
+ # Every (key, value) selector must match the object's property of the
+ # same name; selectors naming no property are effectively ignored.
+ for key, value in self.getSelect:
+ for prop, propval in object.getProperties():
+ if key == prop.name and value != propval:
+ return False
+ return True
+
+
+ def _decodeValue(self, codec, typecode, broker=None):
+ """ Decode, from the codec, a value based on its typecode. """
+ # Typecodes follow the QMF wire encoding; see ENCODINGS above for the
+ # reverse (Python type -> typecode) mapping.
+ if typecode == 1: data = codec.read_uint8() # U8
+ elif typecode == 2: data = codec.read_uint16() # U16
+ elif typecode == 3: data = codec.read_uint32() # U32
+ elif typecode == 4: data = codec.read_uint64() # U64
+ elif typecode == 6: data = codec.read_str8() # SSTR
+ elif typecode == 7: data = codec.read_str16() # LSTR
+ elif typecode == 8: data = codec.read_int64() # ABSTIME
+ elif typecode == 9: data = codec.read_uint64() # DELTATIME
+ elif typecode == 10: data = ObjectId(codec) # REF
+ elif typecode == 11: data = codec.read_uint8() != 0 # BOOL
+ elif typecode == 12: data = codec.read_float() # FLOAT
+ elif typecode == 13: data = codec.read_double() # DOUBLE
+ elif typecode == 14: data = codec.read_uuid() # UUID
+ elif typecode == 16: data = codec.read_int8() # S8
+ elif typecode == 17: data = codec.read_int16() # S16
+ elif typecode == 18: data = codec.read_int32() # S32
+ elif typecode == 19: data = codec.read_int64() # S63
+ elif typecode == 15: data = codec.read_map() # FTABLE
+ elif typecode == 20: # OBJECT
+ # Peek at the type, and if it is still 20 pull it decode. If
+ # Not, call back into self.
+ inner_type_code = codec.read_uint8()
+ if inner_type_code == 20:
+ classKey = ClassKey(codec)
+ schema = self.schemaCache.getSchema(classKey)
+ if not schema:
+ # Unknown schema: cannot decode the embedded object.
+ return None
+ data = Object(self, broker, schema, codec, True, True, False)
+ else:
+ data = self._decodeValue(codec, inner_type_code, broker)
+ elif typecode == 21: data = codec.read_list() # List
+ elif typecode == 22: #Array
+ #taken from codec10.read_array
+ # An array is a size-prefixed sub-codec: count, element type, then
+ # 'count' homogeneous elements decoded recursively.
+ sc = Codec(codec.read_vbin32())
+ count = sc.read_uint32()
+ type = sc.read_uint8()
+ data = []
+ while count > 0:
+ data.append(self._decodeValue(sc,type,broker))
+ count -= 1
+ else:
+ raise ValueError("Invalid type code: %d" % typecode)
+ return data
+
+
+ def _encodeValue(self, codec, value, typecode):
+ """ Encode, into the codec, a value based on its typecode. """
+ # Inverse of _decodeValue; typecodes follow the QMF wire encoding.
+ if typecode == 1: codec.write_uint8 (int(value)) # U8
+ elif typecode == 2: codec.write_uint16 (int(value)) # U16
+ elif typecode == 3: codec.write_uint32 (long(value)) # U32
+ elif typecode == 4: codec.write_uint64 (long(value)) # U64
+ elif typecode == 6: codec.write_str8 (value) # SSTR
+ elif typecode == 7: codec.write_str16 (value) # LSTR
+ elif typecode == 8: codec.write_int64 (long(value)) # ABSTIME
+ elif typecode == 9: codec.write_uint64 (long(value)) # DELTATIME
+ elif typecode == 10: value.encode (codec) # REF
+ elif typecode == 11: codec.write_uint8 (int(value)) # BOOL
+ elif typecode == 12: codec.write_float (float(value)) # FLOAT
+ elif typecode == 13: codec.write_double (float(value)) # DOUBLE
+ elif typecode == 14: codec.write_uuid (value.bytes) # UUID
+ elif typecode == 16: codec.write_int8 (int(value)) # S8
+ elif typecode == 17: codec.write_int16 (int(value)) # S16
+ elif typecode == 18: codec.write_int32 (int(value)) # S32
+ elif typecode == 19: codec.write_int64 (int(value)) # S64
+ elif typecode == 20: value._encodeUnmanaged(codec) # OBJECT
+ elif typecode == 15: codec.write_map (value) # FTABLE
+ elif typecode == 21: codec.write_list (value) # List
+ elif typecode == 22: # Array
+ # Arrays are written as a size-prefixed sub-codec: count (U32),
+ # element typecode (U8, inferred from the first element), elements.
+ sc = Codec()
+ self._encodeValue(sc, len(value), 3)
+ if len(value) > 0:
+ ltype = self.encoding(value[0])
+ self._encodeValue(sc,ltype,1)
+ for o in value:
+ self._encodeValue(sc, o, ltype)
+ codec.write_vbin32(sc.encoded)
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+
+ def encoding(self, value):
+ # Return the QMF typecode for a Python value (by its class).
+ return self._encoding(value.__class__)
+
+
+ def _encoding(self, klass):
+ # Look up klass in ENCODINGS, walking base classes until a match is
+ # found; returns None implicitly if no base class is mapped.
+ if Session.ENCODINGS.has_key(klass):
+ return self.ENCODINGS[klass]
+ for base in klass.__bases__:
+ result = self._encoding(base)
+ if result != None:
+ return result
+
+
+ def _displayValue(self, value, typecode):
+ """ """
+ if typecode == 1: return unicode(value)
+ elif typecode == 2: return unicode(value)
+ elif typecode == 3: return unicode(value)
+ elif typecode == 4: return unicode(value)
+ elif typecode == 6: return value
+ elif typecode == 7: return value
+ elif typecode == 8: return unicode(strftime("%c", gmtime(value / 1000000000)))
+ elif typecode == 9: return unicode(value)
+ elif typecode == 10: return unicode(value.__repr__())
+ elif typecode == 11:
+ if value: return u"T"
+ else: return u"F"
+ elif typecode == 12: return unicode(value)
+ elif typecode == 13: return unicode(value)
+ elif typecode == 14: return unicode(value.__repr__())
+ elif typecode == 15: return unicode(value.__repr__())
+ elif typecode == 16: return unicode(value)
+ elif typecode == 17: return unicode(value)
+ elif typecode == 18: return unicode(value)
+ elif typecode == 19: return unicode(value)
+ elif typecode == 20: return unicode(value.__repr__())
+ elif typecode == 21: return unicode(value.__repr__())
+ elif typecode == 22: return unicode(value.__repr__())
+ else:
+ raise ValueError ("Invalid type code: %d" % typecode)
+
+
+    def _defaultValue(self, stype, broker=None, kwargs={}):
+        """ Return a neutral default value for schema type 'stype' (zero,
+        empty string/map/list, False, null ObjectId/UUID, etc., selected by
+        stype.type).  For object references (typecode 20) an attempt is made
+        to construct a real object via makeObject(); any failure yields None.
+        NOTE(review): 'kwargs={}' is a shared mutable default; it is only
+        read here, but confirm makeObject() does not mutate it. """
+        typecode = stype.type
+        if typecode == 1: return 0
+        elif typecode == 2: return 0
+        elif typecode == 3: return 0
+        elif typecode == 4: return 0
+        elif typecode == 6: return ""
+        elif typecode == 7: return ""
+        elif typecode == 8: return 0
+        elif typecode == 9: return 0
+        elif typecode == 10: return ObjectId(None)
+        elif typecode == 11: return False
+        elif typecode == 12: return 0.0
+        elif typecode == 13: return 0.0
+        elif typecode == 14: return UUID(bytes=[0 for i in range(16)])
+        elif typecode == 15: return {}
+        elif typecode == 16: return 0
+        elif typecode == 17: return 0
+        elif typecode == 18: return 0
+        elif typecode == 19: return 0
+        elif typecode == 21: return []
+        elif typecode == 22: return []
+        elif typecode == 20:
+            try:
+                # honor a caller-supplied list of preferred class keys
+                if "classKeys" in kwargs:
+                    keyList = kwargs["classKeys"]
+                else:
+                    keyList = None
+                classKey = self._bestClassKey(stype.refPackage, stype.refClass, keyList)
+                if classKey:
+                    return self.makeObject(classKey, broker, kwargs)
+            except:
+                pass  # best-effort: fall through to None on any failure
+            return None
+        else:
+            raise ValueError ("Invalid type code: %d" % typecode)
+
+
+ def _bestClassKey(self, pname, cname, preferredList):
+ """ """
+ if pname == None or cname == None:
+ if len(preferredList) == 0:
+ return None
+ return preferredList[0]
+ for p in preferredList:
+ if p.getPackageName() == pname and p.getClassName() == cname:
+ return p
+ clist = self.getClasses(pname)
+ for c in clist:
+ if c.getClassName() == cname:
+ return c
+ return None
+
+
+    def _sendMethodRequest(self, broker, schemaKey, objectId, name, argList):
+        """ This function can be used to send a method request to an object given only the
+        broker, schemaKey, and objectId. This is an uncommon usage pattern as methods are
+        normally invoked on the object itself.
+
+        Returns the reserved sequence number of the request, or None if the
+        named method is not found in the schema.  Raises an Exception when
+        the schema is not cached or the argument count does not match it.
+        """
+        schema = self.getSchema(schemaKey)
+        if not schema:
+            raise Exception("Schema not present (Key=%s)" % str(schemaKey))
+        for method in schema.getMethods():
+            if name == method.name:
+                #
+                # Count the arguments supplied and validate that the number is what is expected
+                # based on the schema.
+                #
+                count = 0
+                for arg in method.arguments:
+                    if arg.dir.find("I") != -1:
+                        count += 1
+                if count != len(argList):
+                    raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(argList)))
+
+                aIdx = 0
+                sendCodec = Codec()
+                # Reserve a sequence number bound to (method, False) so the
+                # response handler can correlate the reply with this request.
+                seq = self.seqMgr._reserve((method, False))
+
+                if objectId.isV2:
+                    #
+                    # Compose and send a QMFv2 method request
+                    #
+                    call = {}
+                    call['_object_id'] = objectId.asMap()
+                    call['_method_name'] = name
+                    # only input ("I") arguments are sent, in schema order
+                    args = {}
+                    for arg in method.arguments:
+                        if arg.dir.find("I") != -1:
+                            args[arg.name] = argList[aIdx]
+                            aIdx += 1
+                    call['_arguments'] = args
+
+                    dp = broker.amqpSession.delivery_properties()
+                    dp.routing_key = objectId.getV2RoutingKey()
+                    mp = broker.amqpSession.message_properties()
+                    mp.content_type = "amqp/map"
+                    if broker.saslUser:
+                        mp.user_id = broker.saslUser
+                    # the sequence number travels as the correlation id;
+                    # replies come back on the console's private direct queue
+                    mp.correlation_id = str(seq)
+                    mp.app_id = "qmf2"
+                    mp.reply_to = broker.amqpSession.reply_to("qmf.default.direct", broker.v2_direct_queue)
+                    mp.application_headers = {'qmf.opcode':'_method_request'}
+                    sendCodec.write_map(call)
+                    msg = Message(dp, mp, sendCodec.encoded)
+                    broker._send(msg, "qmf.default.direct")
+
+                else:
+                    #
+                    # Associate this sequence with the agent hosting the object so we can correctly
+                    # route the method-response
+                    #
+                    agent = broker.getAgent(broker.getBrokerBank(), objectId.getAgentBank())
+                    broker._setSequence(seq, agent)
+
+                    #
+                    # Compose and send a QMFv1 method request
+                    #
+                    broker._setHeader(sendCodec, 'M', seq)
+                    objectId.encode(sendCodec)
+                    schemaKey.encode(sendCodec)
+                    sendCodec.write_str8(name)
+
+                    # input arguments are binary-encoded per their schema type
+                    for arg in method.arguments:
+                        if arg.dir.find("I") != -1:
+                            self._encodeValue(sendCodec, argList[aIdx], arg.type)
+                            aIdx += 1
+                    smsg = broker._message(sendCodec.encoded, "agent.%d.%s" %
+                                           (objectId.getBrokerBank(), objectId.getAgentBank()))
+                    broker._send(smsg)
+                return seq
+        return None
+
+ def _newPackageCallback(self, pname):
+ """
+ Invokes the console.newPackage() callback if the callback is present and
+ the package is not filtered.
+ """
+ if self.console:
+ if len(self.class_filter) == 0 and len(self.event_filter) == 0:
+ self.console.newPackage(pname)
+ else:
+ for x in self.class_filter:
+ if x[0] == pname:
+ self.console.newPackage(pname)
+ return
+
+ for x in self.event_filter:
+ if x[0] == pname:
+ self.console.newPackage(pname)
+ return
+
+
+    def _newClassCallback(self, ctype, ckey):
+        """
+        Invokes the console.newClass() callback if the callback is present and the
+        class is not filtered.  An empty filter list means "allow all".
+        """
+        if self.console:
+            if ctype == ClassKey.TYPE_DATA:
+                # data classes are screened against the class filter only
+                if (len(self.class_filter) == 0
+                    or (ckey.getPackageName(), ckey.getClassName()) in self.class_filter):
+                    self.console.newClass(ctype, ckey)
+            elif ctype == ClassKey.TYPE_EVENT:
+                # event classes are screened against the event filter only
+                if (len(self.event_filter) == 0
+                    or (ckey.getPackageName(), ckey.getClassName()) in self.event_filter):
+                    self.console.newClass(ctype, ckey)
+            else: # old class keys did not contain type info, check both filters
+                if ((len(self.class_filter) == 0 and len(self.event_filter) == 0)
+                    or (ckey.getPackageName(), ckey.getClassName()) in self.class_filter
+                    or (ckey.getPackageName(), ckey.getClassName()) in self.event_filter):
+                    self.console.newClass(ctype, ckey)
+
+ def _agentAllowed(self, agentName, isV2):
+ """ True if the agent is NOT filtered.
+ """
+ if self.agent_filter:
+ if isV2:
+ v = agentName.split(":", 2)
+ return ((len(v) > 2 and (v[0], v[1], v[2]) in self.agent_filter)
+ or (len(v) > 1 and (v[0], v[1], None) in self.agent_filter)
+ or (v and (v[0], None, None) in self.agent_filter));
+ else:
+ return agentName in self.agent_filter
+ return True
+
+ def _heartbeatCallback(self, agent, timestamp):
+ """
+ Invokes the console.heartbeat() callback if the callback is present and the
+ agent is not filtered.
+ """
+ if self.console and self.rcvHeartbeats:
+ if ((agent.isV2 and self._agentAllowed(agent.agentBank, True))
+ or ((not agent.isV2) and self._agentAllowed(agent.label, False))):
+ self.console.heartbeat(agent, timestamp)
+
+ def _newAgentCallback(self, agent):
+ """
+ Invokes the console.newAgent() callback if the callback is present and the
+ agent is not filtered.
+ """
+ if self.console:
+ if ((agent.isV2 and self._agentAllowed(agent.agentBank, True))
+ or ((not agent.isV2) and self._agentAllowed(agent.label, False))):
+ self.console.newAgent(agent)
+
+ def _delAgentCallback(self, agent):
+ """
+ Invokes the console.delAgent() callback if the callback is present and the
+ agent is not filtered.
+ """
+ if self.console:
+ if ((agent.isV2 and self._agentAllowed(agent.agentBank, True))
+ or ((not agent.isV2) and self._agentAllowed(agent.label, False))):
+ self.console.delAgent(agent)
+
+#===================================================================================================
+# SessionGetRequest
+#===================================================================================================
+class SessionGetRequest(object):
+ """
+ This class is used to track get-object queries at the Session level.
+ """
+ def __init__(self, agentCount):
+ self.agentCount = agentCount
+ self.result = []
+ self.cv = Condition()
+ self.waiting = True
+
+ def __call__(self, **kwargs):
+ """
+ Callable entry point for gathering collected objects.
+ """
+ try:
+ self.cv.acquire()
+ if 'qmf_object' in kwargs:
+ self.result.append(kwargs['qmf_object'])
+ elif 'qmf_complete' in kwargs or 'qmf_exception' in kwargs:
+ self.agentCount -= 1
+ if self.agentCount == 0:
+ self.waiting = None
+ self.cv.notify()
+ finally:
+ self.cv.release()
+
+ def wait(self, timeout):
+ starttime = time()
+ try:
+ self.cv.acquire()
+ while self.waiting:
+ if (time() - starttime) > timeout:
+ raise Exception("Timed out after %d seconds" % timeout)
+ self.cv.wait(1)
+ finally:
+ self.cv.release()
+
+
+#===================================================================================================
+# SchemaCache
+#===================================================================================================
+class SchemaCache(object):
+ """
+ The SchemaCache is a data structure that stores learned schema information.
+ """
+ def __init__(self):
+ """
+ Create a map of schema packages and a lock to protect this data structure.
+ Note that this lock is at the bottom of any lock hierarchy. If it is held, no other
+ lock in the system should attempt to be acquired.
+ """
+ self.packages = {}
+ self.lock = Lock()
+
+ def getPackages(self):
+ """ Get the list of known QMF packages """
+ list = []
+ try:
+ self.lock.acquire()
+ for package in self.packages:
+ list.append(package)
+ finally:
+ self.lock.release()
+ return list
+
+ def getClasses(self, packageName):
+ """ Get the list of known classes within a QMF package """
+ list = []
+ try:
+ self.lock.acquire()
+ if packageName in self.packages:
+ for pkey in self.packages[packageName]:
+ if isinstance(self.packages[packageName][pkey], SchemaClass):
+ list.append(self.packages[packageName][pkey].getKey())
+ elif self.packages[packageName][pkey] is not None:
+ # schema not present yet, but we have schema type
+ list.append(ClassKey({"_package_name": packageName,
+ "_class_name": pkey[0],
+ "_hash": pkey[1],
+ "_type": self.packages[packageName][pkey]}))
+ finally:
+ self.lock.release()
+ return list
+
+ def getSchema(self, classKey):
+ """ Get the schema for a QMF class, return None if schema not available """
+ pname = classKey.getPackageName()
+ pkey = classKey.getPackageKey()
+ try:
+ self.lock.acquire()
+ if pname in self.packages:
+ if (pkey in self.packages[pname] and
+ isinstance(self.packages[pname][pkey], SchemaClass)):
+ # hack: value may be schema type info if schema not available
+ return self.packages[pname][pkey]
+ finally:
+ self.lock.release()
+ return None
+
+ def declarePackage(self, pname):
+ """ Maybe add a package to the cache. Return True if package was added, None if it pre-existed. """
+ try:
+ self.lock.acquire()
+ if pname in self.packages:
+ return None
+ self.packages[pname] = {}
+ finally:
+ self.lock.release()
+ return True
+
+ def declareClass(self, classKey, classDef=None):
+ """ Add a class definition to the cache, if supplied. Return a pair
+ indicating if the package or class is new.
+ """
+ new_package = False
+ new_class = False
+ pname = classKey.getPackageName()
+ pkey = classKey.getPackageKey()
+ try:
+ self.lock.acquire()
+ if pname not in self.packages:
+ self.packages[pname] = {}
+ new_package = True
+ packageMap = self.packages[pname]
+ if pkey not in packageMap or not isinstance(packageMap[pkey], SchemaClass):
+ if classDef is not None:
+ new_class = True
+ packageMap[pkey] = classDef
+ elif classKey.getType() is not None:
+ # hack: don't indicate "new_class" to caller unless the classKey type
+ # information is present. "new_class" causes the console.newClass()
+ # callback to be invoked, which -requires- a valid classKey type!
+ new_class = True
+ # store the type for the getClasses() method:
+ packageMap[pkey] = classKey.getType()
+
+ finally:
+ self.lock.release()
+ return (new_package, new_class)
+
+
+#===================================================================================================
+# ClassKey
+#===================================================================================================
+class ClassKey:
+    """ A ClassKey uniquely identifies a class from the schema.  It can be
+    constructed from a __repr__ string, a QMFv2 map, or a QMFv1 codec; its
+    textual forms are 'P:C(H)' and, with type info, 'P:C:T(H)'. """
+
+    TYPE_DATA = "_data"
+    TYPE_EVENT = "_event"
+
+    def __init__(self, constructor):
+        if constructor.__class__ == str:
+            # construct from __repr__ string
+            try:
+                # supports two formats:
+                # type present = P:C:T(H)
+                # no type present = P:C(H)
+                tmp = constructor.split(":")
+                if len(tmp) == 3:
+                    self.pname, self.cname, rem = tmp
+                    self.type, hsh = rem.split("(")
+                else:
+                    self.pname, rem = tmp
+                    self.cname, hsh = rem.split("(")
+                    self.type = None
+                hsh = hsh.strip(")")
+                # the hash renders as a dashed UUID string; the fifth group
+                # is split into 16-bit and 32-bit pieces for repacking
+                hexValues = hsh.split("-")
+                h0 = int(hexValues[0], 16)
+                h1 = int(hexValues[1], 16)
+                h2 = int(hexValues[2], 16)
+                h3 = int(hexValues[3], 16)
+                h4 = int(hexValues[4][0:4], 16)
+                h5 = int(hexValues[4][4:12], 16)
+                self.hash = UUID(bytes=struct.pack("!LHHHHL", h0, h1, h2, h3, h4, h5))
+            except:
+                raise Exception("Invalid ClassKey format")
+        elif constructor.__class__ == dict:
+            # construct from QMFv2 map
+            try:
+                self.pname = constructor['_package_name']
+                self.cname = constructor['_class_name']
+                self.hash = constructor['_hash']
+                # '_type' is optional - older peers may omit it
+                self.type = constructor.get('_type')
+            except:
+                raise Exception("Invalid ClassKey map format %s" % str(constructor))
+        else:
+            # construct from codec
+            codec = constructor
+            self.pname = str(codec.read_str8())
+            self.cname = str(codec.read_str8())
+            self.hash = UUID(bytes=codec.read_bin128())
+            # old V1 codec did not include "type"
+            self.type = None
+
+    def encode(self, codec):
+        # old V1 codec did not include "type"
+        codec.write_str8(self.pname)
+        codec.write_str8(self.cname)
+        codec.write_bin128(self.hash.bytes)
+
+    def asMap(self):
+        """ Return the QMFv2 map form; '_type' is included only when known. """
+        m = {'_package_name': self.pname,
+             '_class_name': self.cname,
+             '_hash': self.hash}
+        if self.type is not None:
+            m['_type'] = self.type
+        return m
+
+    def getPackageName(self):
+        return self.pname
+
+    def getClassName(self):
+        return self.cname
+
+    def getHash(self):
+        return self.hash
+
+    def getType(self):
+        return self.type
+
+    def getHashString(self):
+        return str(self.hash)
+
+    def getPackageKey(self):
+        """ (class name, hash) tuple used as the per-package cache key. """
+        return (self.cname, self.hash)
+
+    def __repr__(self):
+        if self.type is None:
+            return self.pname + ":" + self.cname + "(" + self.getHashString() + ")"
+        return self.pname + ":" + self.cname + ":" + self.type + "(" + self.getHashString() + ")"
+
+    def _setType(self, _type):
+        # accepts the numeric event kind (2) or the string tag
+        if _type == 2 or _type == ClassKey.TYPE_EVENT:
+            self.type = ClassKey.TYPE_EVENT
+        else:
+            self.type = ClassKey.TYPE_DATA
+
+    def __hash__(self):
+        ss = self.pname + self.cname + self.getHashString()
+        return ss.__hash__()
+
+    def __eq__(self, other):
+        # equality by canonical textual identity
+        return self.__repr__() == other.__repr__()
+
+#===================================================================================================
+# SchemaClass
+#===================================================================================================
+class SchemaClass:
+ """ """
+ CLASS_KIND_TABLE = 1
+ CLASS_KIND_EVENT = 2
+
+ def __init__(self, kind, key, codec, session):
+ self.kind = kind
+ self.classKey = key
+ self.properties = []
+ self.statistics = []
+ self.methods = []
+ self.arguments = []
+ self.session = session
+
+ hasSupertype = 0 #codec.read_uint8()
+ if self.kind == self.CLASS_KIND_TABLE:
+ propCount = codec.read_uint16()
+ statCount = codec.read_uint16()
+ methodCount = codec.read_uint16()
+ if hasSupertype == 1:
+ self.superTypeKey = ClassKey(codec)
+ else:
+ self.superTypeKey = None ;
+ for idx in range(propCount):
+ self.properties.append(SchemaProperty(codec))
+ for idx in range(statCount):
+ self.statistics.append(SchemaStatistic(codec))
+ for idx in range(methodCount):
+ self.methods.append(SchemaMethod(codec))
+
+ elif self.kind == self.CLASS_KIND_EVENT:
+ argCount = codec.read_uint16()
+ if (hasSupertype):
+ self.superTypeKey = ClassKey(codec)
+ else:
+ self.superTypeKey = None ;
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=False))
+
+ def __repr__(self):
+ if self.kind == self.CLASS_KIND_TABLE:
+ kindStr = "Table"
+ elif self.kind == self.CLASS_KIND_EVENT:
+ kindStr = "Event"
+ else:
+ kindStr = "Unsupported"
+ result = "%s Class: %s " % (kindStr, self.classKey.__repr__())
+ return result
+
+ def getKey(self):
+ """ Return the class-key for this class. """
+ return self.classKey
+
+ def getProperties(self):
+ """ Return the list of properties for the class. """
+ if (self.superTypeKey == None):
+ return self.properties
+ else:
+ return self.properties + self.session.getSchema(self.superTypeKey).getProperties()
+
+ def getStatistics(self):
+ """ Return the list of statistics for the class. """
+ if (self.superTypeKey == None):
+ return self.statistics
+ else:
+ return self.statistics + self.session.getSchema(self.superTypeKey).getStatistics()
+
+ def getMethods(self):
+ """ Return the list of methods for the class. """
+ if (self.superTypeKey == None):
+ return self.methods
+ else:
+ return self.methods + self.session.getSchema(self.superTypeKey).getMethods()
+
+ def getArguments(self):
+ """ Return the list of events for the class. """
+ """ Return the list of methods for the class. """
+ if (self.superTypeKey == None):
+ return self.arguments
+ else:
+ return self.arguments + self.session.getSchema(self.superTypeKey).getArguments()
+
+
+#===================================================================================================
+# SchemaProperty
+#===================================================================================================
+class SchemaProperty:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.access = str(map["access"])
+ self.index = map["index"] != 0
+ self.optional = map["optional"] != 0
+ self.refPackage = None
+ self.refClass = None
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+ elif key == "refPackage" : self.refPackage = value
+ elif key == "refClass" : self.refClass = value
+
+ def __repr__(self):
+ return self.name
+
+
+#===================================================================================================
+# SchemaStatistic
+#===================================================================================================
+class SchemaStatistic:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ self.unit = None
+ self.desc = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "desc" : self.desc = value
+
+ def __repr__(self):
+ return self.name
+
+
+#===================================================================================================
+# SchemaMethod
+#===================================================================================================
+class SchemaMethod:
+ """ """
+ def __init__(self, codec):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ argCount = map["argCount"]
+ if "desc" in map:
+ self.desc = map["desc"]
+ else:
+ self.desc = None
+ self.arguments = []
+
+ for idx in range(argCount):
+ self.arguments.append(SchemaArgument(codec, methodArg=True))
+
+ def __repr__(self):
+ result = self.name + "("
+ first = True
+ for arg in self.arguments:
+ if arg.dir.find("I") != -1:
+ if first:
+ first = False
+ else:
+ result += ", "
+ result += arg.name
+ result += ")"
+ return result
+
+
+#===================================================================================================
+# SchemaArgument
+#===================================================================================================
+class SchemaArgument:
+ """ """
+ def __init__(self, codec, methodArg):
+ map = codec.read_map()
+ self.name = str(map["name"])
+ self.type = map["type"]
+ if methodArg:
+ self.dir = str(map["dir"]).upper()
+ self.unit = None
+ self.min = None
+ self.max = None
+ self.maxlen = None
+ self.desc = None
+ self.default = None
+ self.refPackage = None
+ self.refClass = None
+
+ for key, value in map.items():
+ if key == "unit" : self.unit = value
+ elif key == "min" : self.min = value
+ elif key == "max" : self.max = value
+ elif key == "maxlen" : self.maxlen = value
+ elif key == "desc" : self.desc = value
+ elif key == "default" : self.default = value
+ elif key == "refPackage" : self.refPackage = value
+ elif key == "refClass" : self.refClass = value
+
+
+#===================================================================================================
+# ObjectId
+#===================================================================================================
+class ObjectId:
+ """ Object that represents QMF object identifiers """
+ def __init__(self, constructor, first=0, second=0, agentName=None):
+ if constructor.__class__ == dict:
+ self.isV2 = True
+ self.agentName = agentName
+ self.agentEpoch = 0
+ if '_agent_name' in constructor: self.agentName = constructor['_agent_name']
+ if '_agent_epoch' in constructor: self.agentEpoch = constructor['_agent_epoch']
+ if '_object_name' not in constructor:
+ raise Exception("QMFv2 OBJECT_ID must have the '_object_name' field.")
+ self.objectName = constructor['_object_name']
+ else:
+ self.isV2 = None
+ if not constructor:
+ first = first
+ second = second
+ else:
+ first = constructor.read_uint64()
+ second = constructor.read_uint64()
+ self.agentName = str(first & 0x000000000FFFFFFF)
+ self.agentEpoch = (first & 0x0FFF000000000000) >> 48
+ self.objectName = str(second)
+
+ def _create(cls, agent_name, object_name, epoch=0):
+ oid = {"_agent_name": agent_name,
+ "_object_name": object_name,
+ "_agent_epoch": epoch}
+ return cls(oid)
+ create = classmethod(_create)
+
+ def __cmp__(self, other):
+ if other == None or not isinstance(other, ObjectId) :
+ return 1
+
+ if self.objectName < other.objectName:
+ return -1
+ if self.objectName > other.objectName:
+ return 1
+
+ if self.agentName < other.agentName:
+ return -1
+ if self.agentName > other.agentName:
+ return 1
+
+ if self.agentEpoch < other.agentEpoch:
+ return -1
+ if self.agentEpoch > other.agentEpoch:
+ return 1
+ return 0
+
+ def __repr__(self):
+ return "%d-%d-%d-%s-%s" % (self.getFlags(), self.getSequence(),
+ self.getBrokerBank(), self.getAgentBank(), self.getObject())
+
+ def index(self):
+ return self.__repr__()
+
+ def getFlags(self):
+ return 0
+
+ def getSequence(self):
+ return self.agentEpoch
+
+ def getBrokerBank(self):
+ return 1
+
+ def getAgentBank(self):
+ return self.agentName
+
+ def getV2RoutingKey(self):
+ if self.agentName == '0':
+ return "broker"
+ return self.agentName
+
+ def getObject(self):
+ return self.objectName
+
+ def isDurable(self):
+ return self.getSequence() == 0
+
+ def encode(self, codec):
+ first = (self.agentEpoch << 48) + (1 << 28)
+ second = 0
+
+ try:
+ first += int(self.agentName)
+ except:
+ pass
+
+ try:
+ second = int(self.objectName)
+ except:
+ pass
+
+ codec.write_uint64(first)
+ codec.write_uint64(second)
+
+ def asMap(self):
+ omap = {'_agent_name': self.agentName, '_object_name': self.objectName}
+ if self.agentEpoch != 0:
+ omap['_agent_epoch'] = self.agentEpoch
+ return omap
+
+ def __hash__(self):
+ return self.__repr__().__hash__()
+
+ def __eq__(self, other):
+ return self.__repr__().__eq__(other)
+
+
+#===================================================================================================
+# MethodResult
+#===================================================================================================
+class MethodResult(object):
+ """ """
+ def __init__(self, status, text, outArgs):
+ """ """
+ self.status = status
+ self.text = text
+ self.outArgs = outArgs
+
+ def __getattr__(self, name):
+ if name in self.outArgs:
+ return self.outArgs[name]
+
+ def __repr__(self):
+ return "%s (%d) - %s" % (self.text, self.status, self.outArgs)
+
+
+#===================================================================================================
+# Broker
+#===================================================================================================
+class Broker(Thread):
+ """ This object represents a connection (or potential connection) to a QMF broker. """
+ SYNC_TIME = 60
+ nextSeq = 1
+
+ # for connection recovery
+ DELAY_MIN = 1
+ DELAY_MAX = 128
+ DELAY_FACTOR = 2
+
+    class _q_item:
+        """ Broker-private class to encapsulate data sent to the broker thread
+        queue.
+        """
+        # item type tags carried in 'typecode' (wakeup / QMFv1 message /
+        # QMFv2 message, per the names - consumer not visible in this view)
+        type_wakeup = 0
+        type_v1msg = 1
+        type_v2msg = 2
+
+        def __init__(self, typecode, data):
+            # typecode: one of the type_* tags above; data: associated payload
+            self.typecode = typecode
+            self.data = data
+
+    def __init__(self, session, host, port, authMechs, authUser, authPass, ssl=False, connTimeout=None):
+        """ Create a broker proxy and setup a connection to the broker. Will raise
+        an exception if the connection fails and the session is not configured to
+        retry connection setup (manageConnections = False).
+
+        Spawns a thread to manage the broker connection. Call _shutdown() to
+        shutdown the thread when releasing the broker.
+        """
+        Thread.__init__(self)
+        self.session = session
+        self.host = host
+        self.port = port
+        self.mechanisms = authMechs
+        self.ssl = ssl
+        if connTimeout is not None:
+            connTimeout = float(connTimeout)
+        self.connTimeout = connTimeout
+        self.authUser = authUser
+        self.authPass = authPass
+        self.saslUser = None
+        self.cv = Condition()
+        self.seqToAgentMap = {}
+        self.error = None
+        self.conn_exc = None # exception hit by _tryToConnect()
+        self.brokerId = None
+        self.connected = False
+        self.brokerAgent = None
+        self.brokerSupportsV2 = None
+        self.rcv_queue = Queue() # for msg received on session
+        self.conn = None
+        self.amqpSession = None
+        # session id is unique per host/process/broker-instance
+        self.amqpSessionId = "%s.%d.%d" % (platform.uname()[1], os.getpid(), Broker.nextSeq)
+        Broker.nextSeq += 1
+        self.last_age_check = time()
+
+        # thread control
+        self.setDaemon(True)
+        self.setName("Thread for broker: %s:%d" % (host, port))
+        self.canceled = False
+        # NOTE(review): 'ready' is acquired below, so it must be released by
+        # the broker thread once connection setup finishes - confirm in run()
+        self.ready = Semaphore(0)
+        self.start()
+        if not self.session.manageConnections:
+            # wait for connection setup to complete in subthread.
+            # On failure, propagate exception to caller
+            self.ready.acquire()
+            if self.conn_exc:
+                self._shutdown() # wait for the subthread to clean up...
+                raise self.conn_exc
+            # connection up - wait for stable...
+            try:
+                self._waitForStable()
+                agent = self.getBrokerAgent()
+                if agent:
+                    agent.getObjects(_class="agent")
+            except:
+                self._shutdown() # wait for the subthread to clean up...
+                raise
+
+
+ def isConnected(self):
+ """ Return True if there is an active connection to the broker. """
+ return self.connected
+
+ def getError(self):
+ """ Return the last error message seen while trying to connect to the broker. """
+ return self.error
+
+ def getBrokerId(self):
+ """ Get broker's unique identifier (UUID) """
+ return self.brokerId
+
+ def getBrokerBank(self):
+ """ Return the broker-bank value. This is the value that the broker assigns to
+ objects within its control. This value appears as a field in the ObjectId
+ of objects created by agents controlled by this broker. """
+ return 1
+
+    def getAgent(self, brokerBank, agentBank):
+        """ Return the agent object associated with a particular broker and agent bank value.
+        Returns None when no such agent is known.
+        NOTE(review): brokerBank is not used in the lookup - agents are keyed
+        by agent bank only; confirm this is intentional. """
+        bankKey = str(agentBank)
+        try:
+            self.cv.acquire()
+            if bankKey in self.agents:
+                return self.agents[bankKey]
+        finally:
+            self.cv.release()
+        return None
+
+    def getBrokerAgent(self):
+        """ Return the embedded Agent that represents the broker itself. """
+        return self.brokerAgent
+
+ def getSessionId(self):
+ """ Get the identifier of the AMQP session to the broker """
+ return self.amqpSessionId
+
+ def getAgents(self):
+ """ Get the list of agents reachable via this broker """
+ try:
+ self.cv.acquire()
+ return self.agents.values()
+ finally:
+ self.cv.release()
+
+ def getAmqpSession(self):
+ """ Get the AMQP session object for this connected broker. """
+ return self.amqpSession
+
+ def getUrl(self):
+ """ """
+ return "%s:%d" % (self.host, self.port)
+
+ def getFullUrl(self, noAuthIfGuestDefault=True):
+ """ """
+ ssl = ""
+ if self.ssl:
+ ssl = "s"
+ auth = "%s/%s@" % (self.authUser, self.authPass)
+ if self.authUser == "" or \
+ (noAuthIfGuestDefault and self.authUser == "guest" and self.authPass == "guest"):
+ auth = ""
+ return "amqp%s://%s%s:%d" % (ssl, auth, self.host, self.port or 5672)
+
+ def __repr__(self):
+ if self.connected:
+ return "Broker connected at: %s" % self.getUrl()
+ else:
+ return "Disconnected Broker"
+
+ def _setSequence(self, sequence, agent):
+ try:
+ self.cv.acquire()
+ self.seqToAgentMap[sequence] = agent
+ finally:
+ self.cv.release()
+
+ def _clearSequence(self, sequence):
+ try:
+ self.cv.acquire()
+ self.seqToAgentMap.pop(sequence)
+ finally:
+ self.cv.release()
+
+ def _tryToConnect(self):
+ """ Connect to the broker. Returns True if connection setup completes
+ successfully, otherwise returns False and sets self.error/self.conn_exc
+ with error info. Does not raise exceptions.
+ """
+ self.error = None
+ self.conn_exc = None
+ try:
+ try:
+ self.cv.acquire()
+ self.agents = {}
+ finally:
+ self.cv.release()
+
+ self.topicBound = False
+ self.syncInFlight = False
+ self.syncRequest = 0
+ self.syncResult = None
+ self.reqsOutstanding = 1
+
+ try:
+ if self.amqpSession:
+ self.amqpSession.close()
+ except:
+ pass
+ self.amqpSession = None
+
+ try:
+ if self.conn:
+ self.conn.close()
+ except:
+ pass
+ self.conn = None
+
+ sock = connect(self.host, self.port)
+ sock.settimeout(5)
+ oldTimeout = sock.gettimeout()
+ sock.settimeout(self.connTimeout)
+ if self.ssl:
+ connSock = ssl(sock)
+ else:
+ connSock = sock
+ self.conn = Connection(connSock, username=self.authUser, password=self.authPass,
+ mechanism = self.mechanisms, host=self.host, service="qpidd")
+ def aborted():
+ raise Timeout("Waiting for connection to be established with broker")
+ oldAborted = self.conn.aborted
+ self.conn.aborted = aborted
+ self.conn.start()
+ sock.settimeout(oldTimeout)
+ self.conn.aborted = oldAborted
+ uid = self.conn.user_id
+ if uid.__class__ == tuple and len(uid) == 2:
+ self.saslUser = uid[1]
+ else:
+ self.saslUser = self.authUser
+
+ # prevent topic queues from filling up (and causing the agents to
+ # disconnect) by discarding the oldest queued messages when full.
+ topic_queue_options = {"qpid.policy_type":"ring"}
+
+ self.replyName = "reply-%s" % self.amqpSessionId
+ self.amqpSession = self.conn.session(self.amqpSessionId)
+ self.amqpSession.timeout = self.SYNC_TIME
+ self.amqpSession.auto_sync = True
+ self.amqpSession.queue_declare(queue=self.replyName, exclusive=True, auto_delete=True)
+ self.amqpSession.exchange_bind(exchange="amq.direct",
+ queue=self.replyName, binding_key=self.replyName)
+ self.amqpSession.message_subscribe(queue=self.replyName, destination="rdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("rdest").listen(self._v1Cb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="rdest", flow_mode=self.amqpSession.flow_mode.window)
+ self.amqpSession.message_flow(destination="rdest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="rdest", unit=self.amqpSession.credit_unit.message, value=200)
+
+ self.topicName = "topic-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.topicName, exclusive=True,
+ auto_delete=True,
+ arguments=topic_queue_options)
+ self.amqpSession.message_subscribe(queue=self.topicName, destination="tdest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("tdest").listen(self._v1Cb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="tdest", flow_mode=self.amqpSession.flow_mode.window)
+ self.amqpSession.message_flow(destination="tdest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="tdest", unit=self.amqpSession.credit_unit.message, value=200)
+
+ ##
+ ## Check to see if the broker has QMFv2 exchanges configured
+ ##
+ direct_result = self.amqpSession.exchange_query("qmf.default.direct")
+ topic_result = self.amqpSession.exchange_query("qmf.default.topic")
+ self.brokerSupportsV2 = not (direct_result.not_found or topic_result.not_found)
+
+ try:
+ self.cv.acquire()
+ self.agents = {}
+ self.brokerAgent = Agent(self, 0, "BrokerAgent", isV2=self.brokerSupportsV2)
+ self.agents['0'] = self.brokerAgent
+ finally:
+ self.cv.release()
+
+ ##
+ ## Set up connectivity for QMFv2
+ ##
+ if self.brokerSupportsV2:
+ # set up 3 queues:
+ # 1 direct queue - for responses destined to this console.
+ # 2 topic queues - one for heartbeats (hb), one for unsolicited data
+ # and event indications (ui).
+ self.v2_direct_queue = "qmfc-v2-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.v2_direct_queue, exclusive=True, auto_delete=True)
+ self.v2_topic_queue_ui = "qmfc-v2-ui-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.v2_topic_queue_ui,
+ exclusive=True, auto_delete=True,
+ arguments=topic_queue_options)
+ self.v2_topic_queue_hb = "qmfc-v2-hb-%s" % self.amqpSessionId
+ self.amqpSession.queue_declare(queue=self.v2_topic_queue_hb,
+ exclusive=True, auto_delete=True,
+ arguments=topic_queue_options)
+
+ self.amqpSession.exchange_bind(exchange="qmf.default.direct",
+ queue=self.v2_direct_queue, binding_key=self.v2_direct_queue)
+ ## Other bindings here...
+
+ self.amqpSession.message_subscribe(queue=self.v2_direct_queue, destination="v2dest",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("v2dest").listen(self._v2Cb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="v2dest", flow_mode=self.amqpSession.flow_mode.window)
+ self.amqpSession.message_flow(destination="v2dest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="v2dest", unit=self.amqpSession.credit_unit.message, value=50)
+
+ self.amqpSession.message_subscribe(queue=self.v2_topic_queue_ui, destination="v2TopicUI",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("v2TopicUI").listen(self._v2Cb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="v2TopicUI", flow_mode=self.amqpSession.flow_mode.window)
+ self.amqpSession.message_flow(destination="v2TopicUI", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="v2TopicUI", unit=self.amqpSession.credit_unit.message, value=25)
+
+
+ self.amqpSession.message_subscribe(queue=self.v2_topic_queue_hb, destination="v2TopicHB",
+ accept_mode=self.amqpSession.accept_mode.none,
+ acquire_mode=self.amqpSession.acquire_mode.pre_acquired)
+ self.amqpSession.incoming("v2TopicHB").listen(self._v2Cb, self._exceptionCb)
+ self.amqpSession.message_set_flow_mode(destination="v2TopicHB", flow_mode=self.amqpSession.flow_mode.window)
+ self.amqpSession.message_flow(destination="v2TopicHB", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL)
+ self.amqpSession.message_flow(destination="v2TopicHB", unit=self.amqpSession.credit_unit.message, value=100)
+
+ codec = Codec()
+ self._setHeader(codec, 'B')
+ msg = self._message(codec.encoded)
+ self._send(msg)
+
+ return True # connection complete
+
+ except Exception, e:
+ self.error = "Exception during connection setup: %s - %s" % (e.__class__.__name__, e)
+ self.conn_exc = e
+ if self.session.console:
+ self.session.console.brokerConnectionFailed(self)
+ return False # connection failed
+
def _updateAgent(self, obj):
    """
    Just received an object of class "org.apache.qpid.broker:agent", which
    represents a V1 agent. Add or update the list of agent proxies.
    """
    # Agents are keyed by their (stringified) agent bank.
    bankKey = str(obj.agentBank)
    agent = None
    if obj._deleteTime == 0:
        # _deleteTime == 0 => the agent object is live: add a proxy if new.
        try:
            self.cv.acquire()
            if bankKey not in self.agents:
                # add new agent only if label is not filtered
                if len(self.session.agent_filter) == 0 or obj.label in self.session.agent_filter:
                    agent = Agent(self, obj.agentBank, obj.label)
                    self.agents[bankKey] = agent
        finally:
            self.cv.release()
        # Invoke the console callback outside the lock to avoid deadlock.
        if agent and self.session.console:
            self.session._newAgentCallback(agent)
    else:
        # Non-zero _deleteTime => the agent was deleted: drop its proxy.
        try:
            self.cv.acquire()
            agent = self.agents.pop(bankKey, None)
            if agent:
                agent.close()
        finally:
            self.cv.release()
        if agent and self.session.console:
            self.session._delAgentCallback(agent)
+
def _addAgent(self, name, agent):
    """ Register an agent proxy under 'name'. The lock is held only for the
    map update; the console notification runs outside it. """
    try:
        self.cv.acquire()
        self.agents[name] = agent
    finally:
        self.cv.release()
    if self.session.console:
        self.session._newAgentCallback(agent)
+
def _ageAgents(self):
    """ Remove agent proxies whose heartbeats have gone stale (per
    Agent.isOld()). Throttled to at most one pass per
    session.agent_heartbeat_min seconds; deletion callbacks fire after the
    lock is released. """
    if (time() - self.last_age_check) < self.session.agent_heartbeat_min:
        # don't age if it's too soon
        return
    self.cv.acquire()
    try:
        to_delete = []
        to_notify = []
        for key in self.agents:
            if self.agents[key].isOld():
                to_delete.append(key)
        for key in to_delete:
            agent = self.agents.pop(key)
            agent.close()
            to_notify.append(agent)
        self.last_age_check = time()
    finally:
        self.cv.release()
    if self.session.console:
        for agent in to_notify:
            self.session._delAgentCallback(agent)
+
def _v2SendAgentLocate(self, predicate=[]):
    """
    Broadcast an agent-locate request to cause all agents in the domain to tell us who they are.
    """
    # NOTE(review): mutable default argument; safe here because 'predicate'
    # is only read (encoded into the message), never mutated.
    # @todo: send locate only to those agents in agent_filter?
    dp = self.amqpSession.delivery_properties()
    dp.routing_key = "console.request.agent_locate"
    mp = self.amqpSession.message_properties()
    mp.content_type = "amqp/list"
    if self.saslUser:
        mp.user_id = self.saslUser
    mp.app_id = "qmf2"
    # Responses come back to our private V2 direct queue.
    mp.reply_to = self.amqpSession.reply_to("qmf.default.direct", self.v2_direct_queue)
    mp.application_headers = {'qmf.opcode':'_agent_locate_request'}
    sendCodec = Codec()
    sendCodec.write_list(predicate)
    msg = Message(dp, mp, sendCodec.encoded)
    self._send(msg, "qmf.default.topic")
+
def _setHeader(self, codec, opcode, seq=0):
    """ Compose the header of a management message. """
    # 'A','M','2' is the QMFv1 frame magic; then the one-byte opcode and a
    # 32-bit sequence number.
    codec.write_uint8(ord('A'))
    codec.write_uint8(ord('M'))
    codec.write_uint8(ord('2'))
    codec.write_uint8(ord(opcode))
    codec.write_uint32(seq)
+
def _checkHeader(self, codec):
    """ Check the header of a management message and extract the opcode and class.

    Returns (opcode, seq), or (None, None) if the magic 'AM2' prefix is
    absent or the buffer is exhausted (the bare except catches codec
    underflow at end-of-message).
    """
    try:
        octet = chr(codec.read_uint8())
        if octet != 'A':
            return None, None
        octet = chr(codec.read_uint8())
        if octet != 'M':
            return None, None
        octet = chr(codec.read_uint8())
        if octet != '2':
            return None, None
        opcode = chr(codec.read_uint8())
        seq = codec.read_uint32()
        return opcode, seq
    except:
        return None, None
+
def _message (self, body, routing_key="broker", ttl=None):
    """ Build a QMFv1 Message with the given body, routing key, and optional
    TTL; replies are directed to our amq.direct reply queue. """
    dp = self.amqpSession.delivery_properties()
    dp.routing_key = routing_key
    if ttl:
        dp.ttl = ttl
    mp = self.amqpSession.message_properties()
    mp.content_type = "x-application/qmf"
    if self.saslUser:
        mp.user_id = self.saslUser
    mp.reply_to = self.amqpSession.reply_to("amq.direct", self.replyName)
    return Message(dp, mp, body)
+
def _send(self, msg, dest="qpid.management"):
    """ Transfer 'msg' to exchange 'dest' (default: the QMFv1 management exchange). """
    self.amqpSession.message_transfer(destination=dest, message=msg)
+
def _disconnect(self, err_info=None):
    """ Called when the remote broker has disconnected. Re-initializes all
    state associated with the broker.

    err_info, if given, is recorded as self.error. Agent-deletion callbacks
    are delivered after the lock is dropped.
    """
    # notify any waiters, and callback
    self.cv.acquire()
    try:
        if err_info is not None:
            self.error = err_info
        # Detach the agent map under the lock; close proxies in place.
        _agents = self.agents
        self.agents = {}
        for agent in _agents.itervalues():
            agent.close()
        self.syncInFlight = False
        self.reqsOutstanding = 0
        self.cv.notifyAll()
    finally:
        self.cv.release()

    if self.session.console:
        for agent in _agents.itervalues():
            self.session._delAgentCallback(agent)
+
def _shutdown(self, _timeout=10):
    """ Disconnect from a broker, and release its resources. Errors are
    ignored.

    _timeout bounds the join() on the broker thread.
    """
    if self.isAlive():
        # kick the thread
        self.canceled = True
        self.rcv_queue.put(Broker._q_item(Broker._q_item.type_wakeup, None))
        self.join(_timeout)

    # abort any pending transactions and delete agents
    self._disconnect("broker shutdown")

    # Best-effort teardown: the session/connection may already be dead.
    try:
        if self.amqpSession:
            self.amqpSession.close();
    except:
        pass
    self.amqpSession = None
    try:
        if self.conn:
            self.conn.close()
    except:
        pass
    self.conn = None
    self.connected = False
+
def _waitForStable(self):
    """ Block until all outstanding broker requests have completed, or raise
    RuntimeError after roughly SYNC_TIME seconds.

    NOTE(review): the elapsed-time check runs only after each cv.wait()
    wakeup, so the effective timeout can be up to 2 * SYNC_TIME — confirm
    this is intended.
    """
    try:
        self.cv.acquire()
        if not self.connected:
            return
        if self.reqsOutstanding == 0:
            return
        self.syncInFlight = True
        starttime = time()
        while self.reqsOutstanding != 0:
            self.cv.wait(self.SYNC_TIME)
            if time() - starttime > self.SYNC_TIME:
                raise RuntimeError("Timed out waiting for broker to synchronize")
    finally:
        self.cv.release()
+
def _incOutstanding(self):
    """ Atomically increment the count of requests awaiting completion. """
    try:
        self.cv.acquire()
        self.reqsOutstanding += 1
    finally:
        self.cv.release()
+
def _decOutstanding(self):
    """ Decrement the outstanding-request count. When it first reaches zero,
    perform the one-time topic bindings (V1, and V2 if supported) and kick
    off a V2 agent-locate; also wake any synchronous waiter. """
    try:
        self.cv.acquire()
        self.reqsOutstanding -= 1
        if self.reqsOutstanding == 0 and not self.topicBound:
            self.topicBound = True
            for key in self.session.v1BindingKeyList:
                self.amqpSession.exchange_bind(exchange="qpid.management",
                                               queue=self.topicName, binding_key=key)
            if self.brokerSupportsV2:
                # do not drop heartbeat indications when under load from data
                # or event indications. Put heartbeats on their own dedicated
                # queue.
                #
                for key in self.session.v2BindingKeyList:
                    if key.startswith("agent.ind.heartbeat"):
                        self.amqpSession.exchange_bind(exchange="qmf.default.topic",
                                                       queue=self.v2_topic_queue_hb,
                                                       binding_key=key)
                    else:
                        self.amqpSession.exchange_bind(exchange="qmf.default.topic",
                                                       queue=self.v2_topic_queue_ui,
                                                       binding_key=key)
                # solicit an agent locate now, after we bind to agent.ind.data,
                # because the agent locate will cause the agent to publish a
                # data indication - and now we're able to receive it!
                self._v2SendAgentLocate()

        if self.reqsOutstanding == 0 and self.syncInFlight:
            self.syncInFlight = False
            self.cv.notify()
    finally:
        self.cv.release()
+
def _v1Cb(self, msg):
    """ Callback from session receive thread for V1 messages

    Only enqueues; actual processing happens on the Broker thread
    (see _v1Dispatch).
    """
    self.rcv_queue.put(Broker._q_item(Broker._q_item.type_v1msg, msg))
+
+ def _v1Dispatch(self, msg):
+ try:
+ self._v1DispatchProtected(msg)
+ except Exception, e:
+ print "EXCEPTION in Broker._v1Cb:", e
+ import traceback
+ traceback.print_exc()
+
def _v1DispatchProtected(self, msg):
    """
    This is the general message handler for messages received via the QMFv1 exchanges.

    Identifies the sending agent (from the 'qmf.agent' header, the routing
    key, or the sequence->agent map), then iterates over every QMF frame in
    the message body and dispatches by opcode. The message is always
    acknowledged, even on error.
    """
    try:
        agent = None
        agent_addr = None
        mp = msg.get("message_properties")
        ah = mp.application_headers
        if ah and 'qmf.agent' in ah:
            agent_addr = ah['qmf.agent']

        if not agent_addr:
            #
            # See if we can determine the agent identity from the routing key
            #
            dp = msg.get("delivery_properties")
            rkey = None
            if dp and dp.routing_key:
                rkey = dp.routing_key
                items = rkey.split('.')
                if len(items) >= 4:
                    if items[0] == 'console' and items[3].isdigit():
                        agent_addr = str(items[3]) # The QMFv1 Agent Bank
        if agent_addr != None and agent_addr in self.agents:
            agent = self.agents[agent_addr]

        codec = Codec(msg.body)
        alreadyTried = None
        # A single AMQP message may carry several QMF frames back-to-back.
        while True:
            opcode, seq = self._checkHeader(codec)

            # One-shot fallback: map the sequence number back to the agent
            # that issued the request.
            if not agent and not alreadyTried:
                alreadyTried = True
                try:
                    self.cv.acquire()
                    if seq in self.seqToAgentMap:
                        agent = self.seqToAgentMap[seq]
                finally:
                    self.cv.release()

            if opcode == None: break
            if opcode == 'b': self.session._handleBrokerResp (self, codec, seq)
            elif opcode == 'p': self.session._handlePackageInd (self, codec, seq)
            elif opcode == 'q': self.session._handleClassInd (self, codec, seq)
            elif opcode == 's': self.session._handleSchemaResp (self, codec, seq, agent_addr)
            elif opcode == 'h': self.session._handleHeartbeatInd (self, codec, seq, msg)
            elif opcode == 'z': self.session._handleCommandComplete (self, codec, seq, agent)
            elif agent:
                agent._handleQmfV1Message(opcode, seq, mp, ah, codec)
                agent.touch() # mark agent as being alive

    finally: # always ack the message!
        try:
            # ignore failures as the session may be shutting down...
            self.amqpSession.receiver._completed.add(msg.id)
            self.amqpSession.channel.session_completed(self.amqpSession.receiver._completed)
        except:
            pass
+
+
def _v2Cb(self, msg):
    """ Callback from session receive thread for V2 messages

    Only enqueues; actual processing happens on the Broker thread
    (see _v2Dispatch).
    """
    self.rcv_queue.put(Broker._q_item(Broker._q_item.type_v2msg, msg))
+
+ def _v2Dispatch(self, msg):
+ try:
+ self._v2DispatchProtected(msg)
+ except Exception, e:
+ print "EXCEPTION in Broker._v2Cb:", e
+ import traceback
+ traceback.print_exc()
+
def _v2DispatchProtected(self, msg):
    """
    This is the general message handler for messages received via QMFv2 exchanges.

    Decodes the body per its content-type (amqp/list or amqp/map), handles
    broker-scope opcodes (heartbeats, agent-locate responses) directly, and
    forwards everything else to the sending agent's proxy. The message is
    always acknowledged, even on error.
    """
    try:
        mp = msg.get("message_properties")
        ah = mp["application_headers"]
        codec = Codec(msg.body)

        if 'qmf.opcode' in ah:
            opcode = ah['qmf.opcode']
            if mp.content_type == "amqp/list":
                try:
                    content = codec.read_list()
                    if not content:
                        content = []
                except:
                    # malformed list - ignore
                    content = None
            elif mp.content_type == "amqp/map":
                try:
                    content = codec.read_map()
                    if not content:
                        content = {}
                except:
                    # malformed map - ignore
                    content = None
            else:
                content = None

            if content != None:
                ##
                ## Directly handle agent heartbeats and agent locate responses as these are broker-scope (they are
                ## used to maintain the broker's list of agent proxies.
                ##
                if opcode == '_agent_heartbeat_indication': self.session._v2HandleHeartbeatInd(self, mp, ah, content)
                elif opcode == '_agent_locate_response': self.session._v2HandleAgentLocateRsp(self, mp, ah, content)
                else:
                    ##
                    ## All other opcodes are agent-scope and are forwarded to the agent proxy representing the sender
                    ## of the message.
                    ##
                    # 'broker' is an alias for agent bank 0 (the broker's own agent).
                    agent_addr = ah['qmf.agent']
                    if agent_addr == 'broker':
                        agent_addr = '0'
                    if agent_addr in self.agents:
                        agent = self.agents[agent_addr]
                        agent._handleQmfV2Message(opcode, mp, ah, content)
                        agent.touch()

    finally: # always ack the message!
        try:
            # ignore failures as the session may be shutting down...
            self.amqpSession.receiver._completed.add(msg.id)
            self.amqpSession.channel.session_completed(self.amqpSession.receiver._completed)
        except:
            pass
+
def _exceptionCb(self, data):
    """ Exception notification callback from session receive thread.

    Marks the connection down, records the error, and wakes the Broker
    thread so it notices the failure.
    """
    self.cv.acquire()
    try:
        self.connected = False
        self.error = "exception received from messaging layer: %s" % str(data)
    finally:
        self.cv.release()
    self.rcv_queue.put(Broker._q_item(Broker._q_item.type_wakeup, None))
+
def run(self):
    """ Main body of the running thread.

    Phase 1: establish the initial connection (with exponential backoff if
    the session manages connections). Phase 2: loop draining the receive
    queue, dispatching V1/V2 messages, handling disconnect/reconnect, and
    aging agent proxies between bursts.
    """

    # First, attempt a connection. In the unmanaged case,
    # failure to connect needs to cause the Broker()
    # constructor to raise an exception.
    delay = self.DELAY_MIN
    while not self.canceled:
        if self._tryToConnect(): # connection up
            break
        # unmanaged connection - fail & wake up constructor
        if not self.session.manageConnections:
            self.ready.release()
            return
        # managed connection - try again
        count = 0
        while not self.canceled and count < delay:
            sleep(1)
            count += 1
        if delay < self.DELAY_MAX:
            delay *= self.DELAY_FACTOR

    if self.canceled:
        self.ready.release()
        return

    # connection successful!
    self.cv.acquire()
    try:
        self.connected = True
    finally:
        self.cv.release()

    self.session._handleBrokerConnect(self)
    self.ready.release()

    while not self.canceled:

        # Block for at most agent_heartbeat_min so agent aging still runs
        # when traffic is idle.
        try:
            item = self.rcv_queue.get(timeout=self.session.agent_heartbeat_min)
        except Empty:
            item = None

        while not self.canceled and item is not None:

            if not self.connected:
                # connection failure
                while item:
                    # drain the queue
                    try:
                        item = self.rcv_queue.get(block=False)
                    except Empty:
                        item = None
                        break

                self._disconnect() # clean up any pending agents
                self.session._handleError(self.error)
                self.session._handleBrokerDisconnect(self)

                if not self.session.manageConnections:
                    return # do not attempt recovery

                # retry connection setup
                delay = self.DELAY_MIN
                while not self.canceled:
                    if self._tryToConnect():
                        break
                    # managed connection - try again
                    count = 0
                    while not self.canceled and count < delay:
                        sleep(1)
                        count += 1
                    if delay < self.DELAY_MAX:
                        delay *= self.DELAY_FACTOR

                if self.canceled:
                    return

                # connection successful!
                self.cv.acquire()
                try:
                    self.connected = True
                finally:
                    self.cv.release()

                self.session._handleBrokerConnect(self)

            elif item.typecode == Broker._q_item.type_v1msg:
                self._v1Dispatch(item.data)
            elif item.typecode == Broker._q_item.type_v2msg:
                self._v2Dispatch(item.data)

            # Keep draining without blocking until the queue is empty.
            try:
                item = self.rcv_queue.get(block=False)
            except Empty:
                item = None

        # queue drained, age the agents...
        if not self.canceled:
            self._ageAgents()
+
+#===================================================================================================
+# Agent
+#===================================================================================================
class Agent:
    """
    This class represents a proxy for a remote agent being managed
    """
    def __init__(self, broker, agentBank, label, isV2=False, interval=0):
        """ broker: owning Broker; agentBank: the agent's bank id (stored as
        a string); label: human-readable name; isV2: True for QMFv2 agents;
        interval: advertised heartbeat interval (0 disables aging). """
        self.broker = broker
        self.session = broker.session
        self.schemaCache = self.session.schemaCache
        self.brokerBank = broker.getBrokerBank()
        self.agentBank = str(agentBank)
        self.label = label
        self.isV2 = isV2
        self.heartbeatInterval = 0
        if interval:
            # Clamp to the session's minimum so aging isn't over-aggressive.
            if interval < self.session.agent_heartbeat_min:
                self.heartbeatInterval = self.session.agent_heartbeat_min
            else:
                self.heartbeatInterval = interval
        self.lock = Lock()
        self.seqMgr = self.session.seqMgr
        # sequence-number -> RequestContext for in-flight requests
        self.contextMap = {}
        # context used for data/events not tied to any request
        self.unsolicitedContext = RequestContext(self, self)
        self.lastSeenTime = time()
        self.closed = None
        self.epoch = 0
        self.schema_timestamp = None


    def _checkClosed(self):
        """ Raise if this proxy has been closed (agent disconnected). """
        if self.closed:
            raise Exception("Agent is disconnected")


    def __call__(self, **kwargs):
        """
        This is the handler for unsolicited stuff received from the agent

        Dispatches on which keyword was supplied: qmf_object /
        qmf_object_stats / qmf_event / qmf_schema_id. Console callbacks are
        suppressed when the corresponding session filter excludes the
        object's or event's class.
        """
        if 'qmf_object' in kwargs:
            if self.session.console:
                obj = kwargs['qmf_object']
                if len(self.session.class_filter) == 0:
                    self.session.console.objectProps(self.broker, obj)
                elif obj.getClassKey():
                    # slow path: check classKey against event_filter
                    pname = obj.getClassKey().getPackageName()
                    cname = obj.getClassKey().getClassName()
                    if ((pname, cname) in self.session.class_filter
                            or (pname, None) in self.session.class_filter):
                        self.session.console.objectProps(self.broker, obj)
        elif 'qmf_object_stats' in kwargs:
            if self.session.console:
                obj = kwargs['qmf_object_stats']
                if len(self.session.class_filter) == 0:
                    self.session.console.objectStats(self.broker, obj)
                elif obj.getClassKey():
                    # slow path: check classKey against event_filter
                    pname = obj.getClassKey().getPackageName()
                    cname = obj.getClassKey().getClassName()
                    if ((pname, cname) in self.session.class_filter
                            or (pname, None) in self.session.class_filter):
                        self.session.console.objectStats(self.broker, obj)
        elif 'qmf_event' in kwargs:
            if self.session.console:
                event = kwargs['qmf_event']
                if len(self.session.event_filter) == 0:
                    self.session.console.event(self.broker, event)
                elif event.classKey:
                    # slow path: check classKey against event_filter
                    pname = event.classKey.getPackageName()
                    ename = event.classKey.getClassName()
                    if ((pname, ename) in self.session.event_filter
                            or (pname, None) in self.session.event_filter):
                        self.session.console.event(self.broker, event)
        elif 'qmf_schema_id' in kwargs:
            ckey = kwargs['qmf_schema_id']
            new_pkg, new_cls = self.session.schemaCache.declareClass(ckey)
            if self.session.console:
                if new_pkg:
                    self.session._newPackageCallback(ckey.getPackageName())
                if new_cls:
                    # translate V2's string based type value to legacy
                    # integer value for backward compatibility
                    cls_type = ckey.getType()
                    if str(cls_type) == ckey.TYPE_DATA:
                        cls_type = 1
                    elif str(cls_type) == ckey.TYPE_EVENT:
                        cls_type = 2
                    self.session._newClassCallback(cls_type, ckey)

    def touch(self):
        """ Record agent liveness (only meaningful when heartbeats are expected). """
        if self.heartbeatInterval:
            self.lastSeenTime = time()


    def setEpoch(self, epoch):
        # Epoch changes indicate an agent restart (see epochMismatch).
        self.epoch = epoch

    def update_schema_timestamp(self, timestamp):
        """ Check the latest schema timestamp from the agent V2 heartbeat. Issue a
        query for all packages & classes should the timestamp change.
        """
        self.lock.acquire()
        try:
            if self.schema_timestamp == timestamp:
                return
            self.schema_timestamp = timestamp

            context = RequestContext(self, self)
            sequence = self.seqMgr._reserve(context)

            self.contextMap[sequence] = context
            context.setSequence(sequence)

        finally:
            self.lock.release()

        # send the query outside the lock
        self._v2SendSchemaIdQuery(sequence, {})


    def epochMismatch(self, epoch):
        """ True if both epochs are known and differ (agent restarted); None otherwise. """
        if epoch == 0 or self.epoch == 0:
            return None
        if epoch == self.epoch:
            return None
        return True


    def isOld(self):
        """ True if this agent has missed agent_heartbeat_miss heartbeat
        intervals; None when aging is disabled or the agent is current. """
        if self.heartbeatInterval == 0:
            return None
        if time() - self.lastSeenTime > (self.session.agent_heartbeat_miss * self.heartbeatInterval):
            return True
        return None


    def close(self):
        """ Mark the proxy closed and cancel every in-flight request context
        (contexts are copied under the lock, cancelled outside it). """
        self.closed = True
        copy = {}
        try:
            self.lock.acquire()
            for seq in self.contextMap:
                copy[seq] = self.contextMap[seq]
        finally:
            self.lock.release()

        for seq in copy:
            context = copy[seq]
            context.cancel("Agent disconnected")
            self.seqMgr._release(seq)


    def __repr__(self):
        if self.isV2:
            ver = "v2"
        else:
            ver = "v1"
        return "Agent(%s) at bank %d.%s (%s)" % (ver, self.brokerBank, self.agentBank, self.label)


    def getBroker(self):
        """ Return the Broker this agent proxy belongs to. """
        return self.broker


    def getBrokerBank(self):
        """ Return the broker bank number. """
        return self.brokerBank


    def getAgentBank(self):
        """ Return the agent bank (as a string). """
        return self.agentBank


    def getV2RoutingKey(self):
        """ Routing key used to address this agent over QMFv2; bank 0 (the
        broker's own agent) is addressed as 'broker'. """
        if self.agentBank == '0':
            return 'broker'
        return self.agentBank


    def getObjects(self, notifiable=None, **kwargs):
        """ Get a list of objects from QMF agents.
        All arguments are passed by name(keyword).

        If 'notifiable' is None (default), this call will block until completion or timeout.
        If supplied, notifiable is assumed to be a callable object that will be called when the
        list of queried objects arrives. The single argument to the call shall be a list of
        the returned objects.

        The class for queried objects may be specified in one of the following ways:

        _schema = <schema> - supply a schema object returned from getSchema.
        _key = <key> - supply a classKey from the list returned by getClasses.
        _class = <name> - supply a class name as a string. If the class name exists
        in multiple packages, a _package argument may also be supplied.
        _objectId = <id> - get the object referenced by the object-id

        The default timeout for this synchronous operation is 60 seconds. To change the timeout,
        use the following argument:

        _timeout = <time in seconds>

        If additional arguments are supplied, they are used as property selectors. For example,
        if the argument name="test" is supplied, only objects whose "name" property is "test"
        will be returned in the result.
        """
        self._checkClosed()
        if notifiable:
            if not callable(notifiable):
                raise Exception("notifiable object must be callable")

        #
        # Isolate the selectors from the kwargs
        #
        selectors = {}
        for key in kwargs:
            value = kwargs[key]
            if key[0] != '_':
                selectors[key] = value

        #
        # Allocate a context to track this asynchronous request.
        #
        context = RequestContext(self, notifiable, selectors)
        sequence = self.seqMgr._reserve(context)
        try:
            self.lock.acquire()
            self.contextMap[sequence] = context
            context.setSequence(sequence)
        finally:
            self.lock.release()

        #
        # Compose and send the query message to the agent using the appropriate protocol for the
        # agent's QMF version.
        #
        if self.isV2:
            self._v2SendGetQuery(sequence, kwargs)
        else:
            self.broker._setSequence(sequence, self)
            self._v1SendGetQuery(sequence, kwargs)

        #
        # If this is a synchronous call, block and wait for completion.
        #
        if not notifiable:
            timeout = 60
            if '_timeout' in kwargs:
                timeout = kwargs['_timeout']
            context.waitForSignal(timeout)
            if context.exception:
                raise Exception(context.exception)
            result = context.queryResults
            return result


    def _clearContext(self, sequence):
        """ Remove a completed request context from the in-flight map. """
        try:
            self.lock.acquire()
            try:
                self.contextMap.pop(sequence)
            except KeyError:
                pass # @todo - shouldn't happen, log a warning.
        finally:
            self.lock.release()


    def _schemaInfoFromV2Agent(self):
        """
        We have just received new schema information from this agent. Check to see if there's
        more work that can now be done.
        """
        try:
            self.lock.acquire()
            copy_of_map = {}
            for item in self.contextMap:
                copy_of_map[item] = self.contextMap[item]
        finally:
            self.lock.release()

        # Reprocess outside the lock.
        self.unsolicitedContext.reprocess()
        for context in copy_of_map:
            copy_of_map[context].reprocess()


    def _handleV1Completion(self, sequence, code, text):
        """
        Called if one of this agent's V1 commands completed

        A non-zero code records an error on the context before signalling it.
        """
        context = None
        try:
            self.lock.acquire()
            if sequence in self.contextMap:
                context = self.contextMap[sequence]
        finally:
            self.lock.release()

        if context:
            if code != 0:
                ex = "Error %d: %s" % (code, text)
                context.setException(ex)
            context.signal()
        self.broker._clearSequence(sequence)


    def _v1HandleMethodResp(self, codec, seq):
        """
        Handle a QMFv1 method response

        Decodes status code/text and (on success) the method's output
        arguments; delivers the result either to the synchronous waiter on
        the broker's condition variable or to the console callback.
        """
        code = codec.read_uint32()
        text = codec.read_str16()
        outArgs = {}
        self.broker._clearSequence(seq)
        pair = self.seqMgr._release(seq)
        if pair == None:
            return
        method, synchronous = pair
        if code == 0:
            for arg in method.arguments:
                # direction contains "O" => output argument
                if arg.dir.find("O") != -1:
                    outArgs[arg.name] = self.session._decodeValue(codec, arg.type, self.broker)
        result = MethodResult(code, text, outArgs)
        if synchronous:
            try:
                self.broker.cv.acquire()
                self.broker.syncResult = result
                self.broker.syncInFlight = False
                self.broker.cv.notify()
            finally:
                self.broker.cv.release()
        else:
            if self.session.console:
                self.session.console.methodResponse(self.broker, seq, result)


    def _v1HandleEventInd(self, codec, seq):
        """
        Handle a QMFv1 event indication
        """
        event = Event(self, codec)
        self.unsolicitedContext.doEvent(event)


    def _v1HandleContentInd(self, codec, sequence, prop=False, stat=False):
        """
        Handle a QMFv1 content indication

        Drops the object silently if its schema is not yet cached. Broker
        'agent' property objects additionally update the agent proxy list.
        """
        classKey = ClassKey(codec)
        schema = self.schemaCache.getSchema(classKey)
        if not schema:
            return

        obj = Object(self, schema, codec, prop, stat)
        if classKey.getPackageName() == "org.apache.qpid.broker" and classKey.getClassName() == "agent" and prop:
            self.broker._updateAgent(obj)

        # Route to the requesting context if one exists, else treat as unsolicited.
        context = self.unsolicitedContext
        try:
            self.lock.acquire()
            if sequence in self.contextMap:
                context = self.contextMap[sequence]
        finally:
            self.lock.release()

        context.addV1QueryResult(obj, prop, stat)


    def _v2HandleDataInd(self, mp, ah, content):
        """
        Handle a QMFv2 data indication from the agent. Note: called from context
        of the Broker thread.

        'content' must be a list; the 'qmf.content' header selects handling
        as data objects, events, or schema-id updates.
        """
        if content.__class__ != list:
            return

        if mp.correlation_id:
            try:
                self.lock.acquire()
                sequence = int(mp.correlation_id)
                if sequence not in self.contextMap:
                    return
                context = self.contextMap[sequence]
            finally:
                self.lock.release()
        else:
            context = self.unsolicitedContext

        kind = "_data"
        if "qmf.content" in ah:
            kind = ah["qmf.content"]
        if kind == "_data":
            for omap in content:
                context.addV2QueryResult(omap)
            context.processV2Data()
            # absence of 'partial' marks the final message of the response
            if 'partial' not in ah:
                context.signal()

        elif kind == "_event":
            for omap in content:
                event = Event(self, v2Map=omap)
                if event.classKey is None or event.schema:
                    # schema optional or present
                    context.doEvent(event)
                else:
                    # schema not optional and not present
                    if context.addPendingEvent(event):
                        self._v2SendSchemaRequest(event.classKey)

        elif kind == "_schema_id":
            for sid in content:
                try:
                    ckey = ClassKey(sid)
                except:
                    # @todo: log error
                    ckey = None
                if ckey is not None:
                    # @todo: for now, the application cannot directly send a query for
                    # _schema_id. This request _must_ have been initiated by the framework
                    # in order to update the schema cache.
                    context.notifiable(qmf_schema_id=ckey)


    def _v2HandleMethodResp(self, mp, ah, content):
        """
        Handle a QMFv2 method response from the agent

        NOTE(review): the lock guards only the correlation_id parse, and
        content['_arguments'] raises KeyError if the agent omits that key —
        confirm whether the protocol guarantees its presence.
        """
        context = None
        sequence = None
        if mp.correlation_id:
            try:
                self.lock.acquire()
                seq = int(mp.correlation_id)
            finally:
                self.lock.release()
        else:
            return

        pair = self.seqMgr._release(seq)
        if pair == None:
            return
        method, synchronous = pair

        result = MethodResult(0, 'OK', content['_arguments'])
        if synchronous:
            try:
                self.broker.cv.acquire()
                self.broker.syncResult = result
                self.broker.syncInFlight = False
                self.broker.cv.notify()
            finally:
                self.broker.cv.release()
        else:
            if self.session.console:
                self.session.console.methodResponse(self.broker, seq, result)

    def _v2HandleException(self, mp, ah, content):
        """
        Handle a QMFv2 exception

        The released sequence entry may be either a RequestContext (a query
        to cancel) or a (method, synchronous) pair (a failed method call).
        """
        context = None
        if mp.correlation_id:
            try:
                self.lock.acquire()
                seq = int(mp.correlation_id)
            finally:
                self.lock.release()
        else:
            return

        values = {}
        if '_values' in content:
            values = content['_values']

        # default error status if the exception carries no detail
        code = 7
        text = "error"
        if 'error_code' in values:
            code = values['error_code']
        if 'error_text' in values:
            text = values['error_text']

        pair = self.seqMgr._release(seq)
        if pair == None:
            return

        if pair.__class__ == RequestContext:
            pair.cancel(text)
            return

        method, synchronous = pair

        result = MethodResult(code, text, {})
        if synchronous:
            try:
                self.broker.cv.acquire()
                self.broker.syncResult = result
                self.broker.syncInFlight = False
                self.broker.cv.notify()
            finally:
                self.broker.cv.release()
        else:
            if self.session.console:
                self.session.console.methodResponse(self.broker, seq, result)


    def _v1SendGetQuery(self, sequence, kwargs):
        """
        Send a get query to a QMFv1 agent.

        Accepts _class/_package, _key, or _objectId to identify the target.
        """
        #
        # Build the query map
        #
        query = {}
        if '_class' in kwargs:
            query['_class'] = kwargs['_class']
            if '_package' in kwargs:
                query['_package'] = kwargs['_package']
        elif '_key' in kwargs:
            key = kwargs['_key']
            query['_class'] = key.getClassName()
            query['_package'] = key.getPackageName()
        elif '_objectId' in kwargs:
            query['_objectid'] = kwargs['_objectId'].__repr__()

        #
        # Construct and transmit the message
        #
        sendCodec = Codec()
        self.broker._setHeader(sendCodec, 'G', sequence)
        sendCodec.write_map(query)
        smsg = self.broker._message(sendCodec.encoded, "agent.%d.%s" % (self.brokerBank, self.agentBank))
        self.broker._send(smsg)


    def _v2SendQuery(self, query, sequence):
        """
        Given a query map, construct and send a V2 Query message.
        """
        dp = self.broker.amqpSession.delivery_properties()
        dp.routing_key = self.getV2RoutingKey()
        mp = self.broker.amqpSession.message_properties()
        mp.content_type = "amqp/map"
        if self.broker.saslUser:
            mp.user_id = self.broker.saslUser
        mp.correlation_id = str(sequence)
        mp.app_id = "qmf2"
        mp.reply_to = self.broker.amqpSession.reply_to("qmf.default.direct", self.broker.v2_direct_queue)
        mp.application_headers = {'qmf.opcode':'_query_request'}
        sendCodec = Codec()
        sendCodec.write_map(query)
        msg = Message(dp, mp, sendCodec.encoded)
        self.broker._send(msg, "qmf.default.direct")


    def _v2SendGetQuery(self, sequence, kwargs):
        """
        Send a get query to a QMFv2 agent.

        Accepts _class/_package, _key, or _objectId to identify the target.
        """
        #
        # Build the query map
        #
        query = {'_what': 'OBJECT'}
        if '_class' in kwargs:
            schemaMap = {'_class_name': kwargs['_class']}
            if '_package' in kwargs:
                schemaMap['_package_name'] = kwargs['_package']
            query['_schema_id'] = schemaMap
        elif '_key' in kwargs:
            query['_schema_id'] = kwargs['_key'].asMap()
        elif '_objectId' in kwargs:
            query['_object_id'] = kwargs['_objectId'].asMap()

        self._v2SendQuery(query, sequence)


    def _v2SendSchemaIdQuery(self, sequence, kwargs):
        """
        Send a query for all schema ids to a QMFv2 agent.
        """
        #
        # Build the query map
        #
        query = {'_what': 'SCHEMA_ID'}
        # @todo - predicate support. For now, return all known schema ids.

        self._v2SendQuery(query, sequence)


    def _v2SendSchemaRequest(self, schemaId):
        """
        Send a query to an agent to request details on a particular schema class.
        IMPORTANT: This function currently sends a QMFv1 schema-request to the address of
        the agent. The agent will send its response to amq.direct/<our-key>.
        Eventually, this will be converted to a proper QMFv2 schema query.
        """
        sendCodec = Codec()
        seq = self.seqMgr._reserve(None)
        self.broker._setHeader(sendCodec, 'S', seq)
        schemaId.encode(sendCodec)
        smsg = self.broker._message(sendCodec.encoded, self.agentBank)
        self.broker._send(smsg, "qmf.default.direct")


    def _handleQmfV1Message(self, opcode, seq, mp, ah, codec):
        """
        Process QMFv1 messages arriving from an agent. Note well: this method is
        called from the context of the Broker thread.

        Opcodes: m=method response, e=event, c=properties, i=statistics,
        g=properties+statistics.
        """
        if opcode == 'm': self._v1HandleMethodResp(codec, seq)
        elif opcode == 'e': self._v1HandleEventInd(codec, seq)
        elif opcode == 'c': self._v1HandleContentInd(codec, seq, prop=True)
        elif opcode == 'i': self._v1HandleContentInd(codec, seq, stat=True)
        elif opcode == 'g': self._v1HandleContentInd(codec, seq, prop=True, stat=True)


    def _handleQmfV2Message(self, opcode, mp, ah, content):
        """
        Process QMFv2 messages arriving from an agent. Note well: this method is
        called from the context of the Broker thread.
        """
        if opcode == '_data_indication': self._v2HandleDataInd(mp, ah, content)
        elif opcode == '_query_response': self._v2HandleDataInd(mp, ah, content)
        elif opcode == '_method_response': self._v2HandleMethodResp(mp, ah, content)
        elif opcode == '_exception': self._v2HandleException(mp, ah, content)
+
+
+#===================================================================================================
+# RequestContext
+#===================================================================================================
class RequestContext(object):
    """
    This class tracks an asynchronous request sent to an agent.

    Results arriving from the agent (on the Broker thread) are either handed
    directly to the caller's 'notifiable' callback or accumulated in
    queryResults for a blocked synchronous caller.  self.cv guards all state
    shared between the Broker thread and the requesting thread.
    TODO: Add logic for client-side selection and filtering deleted objects from get-queries
    """
    def __init__(self, agent, notifiable, selectors=None):
        # Fix(review): default changed from a shared mutable '{}' to None;
        # observable behavior is unchanged (selectors is only ever read).
        self.sequence = None
        self.agent = agent
        self.schemaCache = self.agent.schemaCache
        self.notifiable = notifiable
        self.selectors = selectors if selectors is not None else {}
        self.startTime = time()
        self.rawQueryResults = []
        self.queryResults = []
        # classKey => list of (event, arrival-time) tuples awaiting a schema
        self.pendingEvents = {}
        self.exception = None
        self.waitingForSchema = None
        self.pendingSignal = None
        self.cv = Condition()
        # Synchronous callers (no notifiable) block until signal()/cancel().
        self.blocked = notifiable is None


    def setSequence(self, sequence):
        self.sequence = sequence


    def addV1QueryResult(self, data, has_props, has_stats):
        """
        Deliver or queue a V1 object, dropping it if it fails the selectors.
        """
        values = {}
        if has_props:
            for prop, val in data.getProperties():
                values[prop.name] = val
        if has_stats:
            for stat, val in data.getStatistics():
                values[stat.name] = val
        for key in values:
            val = values[key]
            if key in self.selectors and val != self.selectors[key]:
                return

        if self.notifiable:
            if has_props:
                self.notifiable(qmf_object=data)
            if has_stats:
                self.notifiable(qmf_object_stats=data)
        else:
            self.queryResults.append(data)


    def addV2QueryResult(self, data):
        """
        Queue a raw V2 data map, dropping it if it fails the selectors.
        """
        values = data['_values']
        for key in values:
            val = values[key]
            if key in self.selectors:
                sel_val = self.selectors[key]
                if sel_val.__class__ == ObjectId:
                    # promote the raw value so it compares against an ObjectId
                    val = ObjectId(val, agentName=self.agent.getAgentBank())
                if val != sel_val:
                    return
        self.rawQueryResults.append(data)

    def addPendingEvent(self, event):
        """ Stores a received event that is pending a schema. Returns True if this
        event is the first instance of a given schema identifier.
        """
        self.cv.acquire()
        try:
            if event.classKey in self.pendingEvents:
                self.pendingEvents[event.classKey].append((event, time()))
                return False
            self.pendingEvents[event.classKey] = [(event, time())]
            return True
        finally:
            self.cv.release()

    def processPendingEvents(self):
        """ Walk the pending events looking for schemas that are now
        available. Remove any events that now have schema, and process them.
        """
        keysToDelete = []
        events = []
        self.cv.acquire()
        try:
            for key in self.pendingEvents.iterkeys():
                schema = self.schemaCache.getSchema(key)
                if schema:
                    keysToDelete.append(key)
                    for item in self.pendingEvents[key]:
                        # Fix(review): item is an (event-obj, arrival-time)
                        # tuple (the old comment had the order reversed).
                        # hack: I have no idea what a valid lifetime for an
                        # event should be. 60 seconds???
                        if (time() - item[1]) < 60:
                            item[0].schema = schema
                            events.append(item[0])
            for key in keysToDelete:
                self.pendingEvents.pop(key)
        finally:
            self.cv.release()
        # Deliver outside the lock to avoid re-entrancy into this context.
        for event in events:
            self.doEvent(event)

    def doEvent(self, data):
        if self.notifiable:
            self.notifiable(qmf_event=data)


    def setException(self, ex):
        self.exception = ex


    def getAge(self):
        """Seconds elapsed since this request was created."""
        return time() - self.startTime


    def cancel(self, exception):
        """Abort the request, waking any blocked caller with 'exception'."""
        self.setException(exception)
        try:
            self.cv.acquire()
            self.blocked = None
            self.waitingForSchema = None
            self.cv.notify()
        finally:
            self.cv.release()
        self._complete()


    def waitForSignal(self, timeout):
        """Block the calling thread until signal()/cancel(), or 'timeout' secs."""
        try:
            self.cv.acquire()
            while self.blocked:
                if (time() - self.startTime) > timeout:
                    self.exception = "Request timed out after %d seconds" % timeout
                    return
                self.cv.wait(1)
        finally:
            self.cv.release()


    def signal(self):
        """Mark the request complete, unless a schema fetch is still pending."""
        try:
            self.cv.acquire()
            if self.waitingForSchema:
                # remember the completion; processV2Data() replays it once the
                # awaited schema arrives
                self.pendingSignal = True
                return
            else:
                self.blocked = None
                self.cv.notify()
        finally:
            self.cv.release()
        self._complete()


    def _complete(self):
        # Report the terminal status to the callback (error wins over success)
        # and release our sequence number.
        if self.notifiable:
            if self.exception:
                self.notifiable(qmf_exception=self.exception)
            else:
                self.notifiable(qmf_complete=True)

        if self.sequence:
            self.agent._clearContext(self.sequence)


    def processV2Data(self):
        """
        Attempt to make progress on the entries in the raw_query_results queue. If an entry has a schema
        that is in our schema cache, process it. Otherwise, send a request for the schema information
        to the agent that manages the object.
        """
        schemaId = None
        queryResults = []
        try:
            self.cv.acquire()
            if self.waitingForSchema:
                return
            while (not self.waitingForSchema) and len(self.rawQueryResults) > 0:
                head = self.rawQueryResults[0]
                schemaId = self._getSchemaIdforV2ObjectLH(head)
                schema = self.schemaCache.getSchema(schemaId)
                if schema:
                    obj = Object(self.agent, schema, v2Map=head, agentName=self.agent.agentBank)
                    queryResults.append(obj)
                    self.rawQueryResults.pop(0)
                else:
                    # stop: we must fetch this schema before continuing
                    self.waitingForSchema = True
        finally:
            self.cv.release()

        if self.waitingForSchema:
            self.agent._v2SendSchemaRequest(schemaId)

        for result in queryResults:
            key = result.getClassKey()
            if key.getPackageName() == "org.apache.qpid.broker" and key.getClassName() == "agent":
                # keep the broker's agent list in sync with query results
                self.agent.broker._updateAgent(result)
            if self.notifiable:
                self.notifiable(qmf_object=result)
            else:
                self.queryResults.append(result)

        complete = None
        try:
            self.cv.acquire()
            if not self.waitingForSchema and self.pendingSignal:
                # a completion arrived while we awaited a schema - replay it
                self.blocked = None
                self.cv.notify()
                complete = True
        finally:
            self.cv.release()

        if complete:
            self._complete()


    def reprocess(self):
        """
        New schema information has been added to the schema-cache. Clear our 'waiting' status
        and see if we can make more progress on any pending inbound events/objects.
        """
        try:
            self.cv.acquire()
            self.waitingForSchema = None
        finally:
            self.cv.release()
        self.processV2Data()
        self.processPendingEvents()

    def _getSchemaIdforV2ObjectLH(self, data):
        """
        Given a data map, extract the schema-identifier.  'LH' = caller must
        hold self.cv.
        """
        if data.__class__ != dict:
            return None
        if '_schema_id' in data:
            return ClassKey(data['_schema_id'])
        return None
+
+
+#===================================================================================================
+# Event
+#===================================================================================================
class Event:
    """
    A QMF event received from an agent, constructed either from a QMFv2 map
    ('v2Map') or from a QMFv1 codec stream ('codec').  If the event's schema
    is not yet in the session's schema cache, self.schema is left None and
    (in the V1 case) the arguments remain undecoded.
    """
    def __init__(self, agent, codec=None, v2Map=None):
        self.agent = agent
        self.session = agent.session
        self.broker = agent.broker

        if isinstance(v2Map,dict):
            self.classKey = None
            self.schema = None
            try:
                self.arguments = v2Map["_values"]
                # timestamp is nanoseconds-since-epoch (see __repr__)
                self.timestamp = long(v2Map["_timestamp"])
                self.severity = v2Map["_severity"]
                if "_schema_id" in v2Map:
                    self.classKey = ClassKey(v2Map["_schema_id"])
                    self.classKey._setType(ClassKey.TYPE_EVENT)
            except:
                # any missing/malformed field is reported as one error
                raise Exception("Invalid event object: %s " % str(v2Map))
            if self.classKey is not None:
                self.schema = self.session.schemaCache.getSchema(self.classKey)

        elif codec is not None:
            self.classKey = ClassKey(codec)
            self.classKey._setType(ClassKey.TYPE_EVENT)
            self.timestamp = codec.read_int64()
            self.severity = codec.read_uint8()
            self.arguments = {}
            self.schema = self.session.schemaCache.getSchema(self.classKey)
            if not self.schema:
                # without the schema we cannot decode the argument list
                return
            for arg in self.schema.arguments:
                self.arguments[arg.name] = self.session._decodeValue(codec, arg.type,
                                                                     self.broker)
        else:
            raise Exception("No constructor for event object.")


    def __repr__(self):
        if self.schema == None:
            return "<uninterpretable>"
        # timestamp is in nanoseconds; convert to seconds for strftime
        out = strftime("%c", gmtime(self.timestamp / 1000000000))
        out += " " + self._sevName() + " " + self.classKey.getPackageName() + ":" + self.classKey.getClassName()
        out += " broker=" + self.broker.getUrl()
        for arg in self.schema.arguments:
            disp = self.session._displayValue(self.arguments[arg.name], arg.type).encode("utf8")
            if " " in disp:
                disp = "\"" + disp + "\""
            out += " " + arg.name + "=" + disp
        return out

    def _sevName(self):
        # Map the syslog-style numeric severity (0..7) to a fixed-width label.
        if self.severity == 0 : return "EMER "
        if self.severity == 1 : return "ALERT"
        if self.severity == 2 : return "CRIT "
        if self.severity == 3 : return "ERROR"
        if self.severity == 4 : return "WARN "
        if self.severity == 5 : return "NOTIC"
        if self.severity == 6 : return "INFO "
        if self.severity == 7 : return "DEBUG"
        return "INV-%d" % self.severity

    def getClassKey(self):
        return self.classKey

    def getArguments(self):
        return self.arguments

    def getTimestamp(self):
        return self.timestamp

    def getSchema(self):
        return self.schema
+
+
+#===================================================================================================
+# SequenceManager
+#===================================================================================================
class SequenceManager:
    """ Manage sequence numbers for asynchronous method calls """
    def __init__(self):
        self.lock = Lock()
        # pseudo-randomize the start so sequences differ across restarts
        self.sequence = long(time())
        # seq-number => caller-supplied opaque context
        self.pending = {}

    def _reserve(self, data):
        """ Reserve a unique sequence number """
        try:
            self.lock.acquire()
            result = self.sequence
            self.sequence = self.sequence + 1
            self.pending[result] = data
        finally:
            self.lock.release()
        return result

    def _release(self, seq):
        """ Release a reserved sequence number """
        # Returns the context stored at _reserve() time, or None if the
        # sequence is unknown or already released.
        data = None
        try:
            self.lock.acquire()
            if seq in self.pending:
                data = self.pending[seq]
                del self.pending[seq]
        finally:
            self.lock.release()
        return data
+
+
+#===================================================================================================
+# DebugConsole
+#===================================================================================================
class DebugConsole(Console):
    """
    Console implementation that simply prints every callback it receives;
    useful for tracing session activity during development.
    """
    def brokerConnected(self, broker):
        print "brokerConnected:", broker

    def brokerConnectionFailed(self, broker):
        print "brokerConnectionFailed:", broker

    def brokerDisconnected(self, broker):
        print "brokerDisconnected:", broker

    def newPackage(self, name):
        print "newPackage:", name

    def newClass(self, kind, classKey):
        print "newClass:", kind, classKey

    def newAgent(self, agent):
        print "newAgent:", agent

    def delAgent(self, agent):
        print "delAgent:", agent

    def objectProps(self, broker, record):
        print "objectProps:", record

    def objectStats(self, broker, record):
        print "objectStats:", record

    def event(self, broker, event):
        print "event:", event

    def heartbeat(self, agent, timestamp):
        print "heartbeat:", agent

    def brokerInfo(self, broker):
        print "brokerInfo:", broker
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/__init__.py b/qpid/extras/qmf/src/py/qmf2-prototype/__init__.py
new file mode 100644
index 0000000000..31d5a2ef58
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/__init__.py
@@ -0,0 +1,18 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/agent.py b/qpid/extras/qmf/src/py/qmf2-prototype/agent.py
new file mode 100644
index 0000000000..4ec00bd288
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/agent.py
@@ -0,0 +1,1380 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys
+import datetime
+import time
+import Queue
+from logging import getLogger
+from threading import Thread, RLock, currentThread, Event
+from qpid.messaging import Connection, Message, Empty, SendError
+from uuid import uuid4
+from common import (OpCode, QmfQuery, ContentType, SchemaObjectClass,
+ QmfData, QmfAddress, SchemaClass, SchemaClassId, WorkItem,
+ SchemaMethod, timedelta_to_secs, QMF_APP_ID)
+
# global flag that indicates which thread (if any) is
# running the agent notifier callback
_callback_thread=None

log = getLogger("qmf")          # general QMF log channel
trace = getLogger("qmf.agent")  # verbose agent-level tracing
+
+
+ ##==============================================================================
+ ## METHOD CALL
+ ##==============================================================================
+
+class _MethodCallHandle(object):
+ """
+ Private class used to hold context when handing off a method call to the
+ application. Given to the app in a WorkItem, provided to the agent when
+ method_response() is invoked.
+ """
+ def __init__(self, correlation_id, reply_to, meth_name, _oid=None,
+ _schema_id=None):
+ self.correlation_id = correlation_id
+ self.reply_to = reply_to
+ self.meth_name = meth_name
+ self.oid = _oid
+ self.schema_id = _schema_id
+
class MethodCallParams(object):
    """
    Read-only container describing an inbound method invocation: the method
    name, the target object/schema, the input arguments, and the caller's
    authenticated user id (any of which may be None except the name).
    """
    def __init__(self, name, _oid=None, _schema_id=None, _in_args=None,
                 _user_id=None):
        self._meth_name = name
        self._oid = _oid
        self._schema_id = _schema_id
        self._in_args = _in_args
        self._user_id = _user_id

    def get_name(self):
        """Name of the method being invoked."""
        return self._meth_name

    def get_object_id(self):
        """Object id of the target object, or None."""
        return self._oid

    def get_schema_id(self):
        """Schema id of the target object, or None."""
        return self._schema_id

    def get_args(self):
        """Map of input arguments, or None."""
        return self._in_args

    def get_user_id(self):
        """Authenticated user id of the caller, or None."""
        return self._user_id
+
+
+ ##==============================================================================
+ ## SUBSCRIPTIONS
+ ##==============================================================================
+
+
+class _SubscriptionState(object):
+ """
+ An internally-managed subscription.
+ """
+ def __init__(self, reply_to, cid, query, interval, duration):
+ self.reply_to = reply_to
+ self.correlation_id = cid
+ self.query = query
+ self.interval = interval
+ self.duration = duration
+ now = datetime.datetime.utcnow()
+ self.next_update = now # do an immediate update
+ self.expiration = now + datetime.timedelta(seconds=duration)
+ self.last_update = None
+ self.id = 0
+
+ def resubscribe(self, now, _duration=None):
+ if _duration is not None:
+ self.duration = _duration
+ self.expiration = now + datetime.timedelta(seconds=self.duration)
+
+ def published(self, now):
+ self.next_update = now + datetime.timedelta(seconds=self.interval)
+ self.last_update = now
+
+
+ ##==============================================================================
+ ## AGENT
+ ##==============================================================================
+
+class Agent(Thread):
+ def __init__(self, name, _domain=None, _notifier=None, **options):
+ Thread.__init__(self)
+ self._running = False
+ self._ready = Event()
+
+ self.name = str(name)
+ self._domain = _domain
+ self._address = QmfAddress.direct(self.name, self._domain)
+ self._notifier = _notifier
+
+ # configurable parameters
+ #
+ self._heartbeat_interval = options.get("heartbeat_interval", 30)
+ self._capacity = options.get("capacity", 10)
+ self._default_duration = options.get("default_duration", 300)
+ self._max_duration = options.get("max_duration", 3600)
+ self._min_duration = options.get("min_duration", 10)
+ self._default_interval = options.get("default_interval", 30)
+ self._min_interval = options.get("min_interval", 5)
+
+ # @todo: currently, max # of objects in a single reply message, would
+ # be better if it were max bytesize of per-msg content...
+ self._max_msg_size = options.get("max_msg_size", 0)
+
+ self._conn = None
+ self._session = None
+ self._direct_receiver = None
+ self._topic_receiver = None
+ self._direct_sender = None
+ self._topic_sender = None
+
+ self._lock = RLock()
+ self._packages = {}
+ self._schema_timestamp = long(0)
+ self._schema = {}
+ # _described_data holds QmfData objects that are associated with schema
+ # it is index by schema_id, object_id
+ self._described_data = {}
+ # _undescribed_data holds unstructured QmfData objects - these objects
+ # have no schema. it is indexed by object_id only.
+ self._undescribed_data = {}
+ self._work_q = Queue.Queue()
+ self._work_q_put = False
+ # subscriptions
+ self._subscription_id = long(time.time())
+ self._subscriptions = {}
+ self._next_subscribe_event = None
+
+ # prevents multiple _wake_thread() calls
+ self._noop_pending = False
+
+
    def destroy(self, timeout=None):
        """
        Must be called before the Agent is deleted.
        Frees up all resources and shuts down all background threads.

        @type timeout: float
        @param timeout: maximum time in seconds to wait for all background threads to terminate. Default: forever.
        """
        trace.debug("Destroying Agent %s" % self.name)
        # remove_connection() also joins the management thread
        if self._conn:
            self.remove_connection(timeout)
        trace.debug("Agent Destroyed")
+
+
    def get_name(self):
        """Return the agent's name (its identity on the bus)."""
        return self.name
+
+ def set_connection(self, conn):
+ self._conn = conn
+ self._session = self._conn.session()
+
+ # for messages directly addressed to me
+ self._direct_receiver = self._session.receiver(str(self._address) +
+ ";{create:always,"
+ " node:"
+ " {type:topic,"
+ " x-declare:"
+ " {type:direct}}}",
+ capacity=self._capacity)
+ trace.debug("my direct addr=%s" % self._direct_receiver.source)
+
+ # for sending directly addressed messages.
+ self._direct_sender = self._session.sender(str(self._address.get_node()) +
+ ";{create:always,"
+ " node:"
+ " {type:topic,"
+ " x-declare:"
+ " {type:direct}}}")
+ trace.debug("my default direct send addr=%s" % self._direct_sender.target)
+
+ # for receiving "broadcast" messages from consoles
+ default_addr = QmfAddress.topic(QmfAddress.SUBJECT_CONSOLE_IND + ".#",
+ self._domain)
+ self._topic_receiver = self._session.receiver(str(default_addr) +
+ ";{create:always,"
+ " node:"
+ " {type:topic}}",
+ capacity=self._capacity)
+ trace.debug("console.ind addr=%s" % self._topic_receiver.source)
+
+ # for sending to topic subscribers
+ ind_addr = QmfAddress.topic(QmfAddress.SUBJECT_AGENT_IND,
+ self._domain)
+ self._topic_sender = self._session.sender(str(ind_addr) +
+ ";{create:always,"
+ " node:"
+ " {type:topic}}")
+ trace.debug("agent.ind addr=%s" % self._topic_sender.target)
+
+ self._running = True
+ self.start()
+ self._ready.wait(10)
+ if not self._ready.isSet():
+ raise Exception("Agent managment thread failed to start.")
+
    def remove_connection(self, timeout=None):
        """
        Detach the agent from the messaging cloud: stop the management thread,
        then close all senders/receivers and the session.

        @param timeout: max seconds to wait for the thread to exit (None =
               forever); a hung thread is logged and teardown proceeds anyway.
        """
        # tell connection thread to shutdown
        self._running = False
        if self.isAlive():
            # kick my thread to wake it up
            self._wake_thread()
            trace.debug("waiting for agent receiver thread to exit")
            self.join(timeout)
            if self.isAlive():
                log.error( "Agent thread '%s' is hung..." % self.name)
        self._direct_receiver.close()
        self._direct_receiver = None
        self._direct_sender.close()
        self._direct_sender = None
        self._topic_receiver.close()
        self._topic_receiver = None
        self._topic_sender.close()
        self._topic_sender = None
        self._session.close()
        self._session = None
        self._conn = None
        trace.debug("agent connection removal complete")
+
    def register_object_class(self, schema):
        """
        Register an instance of a SchemaClass with this agent
        """
        # @todo: need to update subscriptions
        # @todo: need to mark schema as "non-const"
        if not isinstance(schema, SchemaClass):
            raise TypeError("SchemaClass instance expected")

        classId = schema.get_class_id()
        pname = classId.get_package_name()
        cname = classId.get_class_name()
        hstr = classId.get_hash_string()
        if not hstr:
            raise Exception("Schema hash is not set.")

        self._lock.acquire()
        try:
            # record the class under its package, then index the schema itself
            if pname not in self._packages:
                self._packages[pname] = [cname]
            else:
                if cname not in self._packages[pname]:
                    self._packages[pname].append(cname)
            self._schema[classId] = schema
            # bump the timestamp (msec) so consoles notice the schema change
            self._schema_timestamp = long(time.time() * 1000)
        finally:
            self._lock.release()
+
    def register_event_class(self, schema):
        """Register an event schema; events share the object-schema registry."""
        return self.register_object_class(schema)
+
    def raise_event(self, qmfEvent):
        """
        Publish a QMF event to all topic subscribers.  The event's severity
        and this agent's name are encoded into the message subject.

        @raises Exception: if the agent has no active connection.
        """
        if not self._topic_sender:
            raise Exception("No connection available")

        # @todo: should we validate against the schema?
        msg = Message(id=QMF_APP_ID,
                      subject=QmfAddress.SUBJECT_AGENT_EVENT + "." +
                      qmfEvent.get_severity() + "." + self.name,
                      properties={"method":"indication",
                                  "qmf.opcode":OpCode.data_ind,
                                  "qmf.content": ContentType.event,
                                  "qmf.agent":self.name},
                      content=[qmfEvent.map_encode()])
        # TRACE
        # log.error("!!! Agent %s sending Event (%s)" %
        #           (self.name, str(msg)))
        self._topic_sender.send(msg)
+
    def add_object(self, data):
        """
        Register an instance of a QmfAgentData object.
        """
        # @todo: need to mark schema as "non-const"
        if not isinstance(data, QmfAgentData):
            raise TypeError("QmfAgentData instance expected")

        oid = data.get_object_id()
        if not oid:
            raise TypeError("No identifier assigned to QmfAgentData!")

        sid = data.get_schema_class_id()

        self._lock.acquire()
        try:
            # objects with a schema are indexed by (schema-id, object-id);
            # schema-less objects by object-id only
            if sid:
                if sid not in self._described_data:
                    self._described_data[sid] = {oid: data}
                else:
                    self._described_data[sid][oid] = data
            else:
                self._undescribed_data[oid] = data

            # does the new object match any subscriptions?
            # (note: the loop variable 'sid' shadows the schema id above;
            # harmless since the schema id is not used past this point)
            now = datetime.datetime.utcnow()
            for sid,sub in self._subscriptions.iteritems():
                if sub.query.evaluate(data):
                    # matched. Mark the subscription as needing to be
                    # serviced. The _publish() method will notice the new
                    # object and will publish it next time it runs.
                    sub.next_update = now
                    self._next_subscribe_event = None
                    # @todo: should we immediately publish?

        finally:
            self._lock.release()
+
+ def get_object(self, oid, schema_id):
+ data = None
+ self._lock.acquire()
+ try:
+ if schema_id:
+ data = self._described_data.get(schema_id)
+ if data:
+ data = data.get(oid)
+ else:
+ data = self._undescribed_data.get(oid)
+ finally:
+ self._lock.release()
+ return data
+
+
    def method_response(self, handle, _out_args=None, _error=None):
        """
        Send the reply to a method call previously handed to the application.

        @param handle: the _MethodCallHandle delivered with the WorkItem.
        @param _out_args: optional map of output arguments (copied).
        @param _error: optional QmfData describing the failure.
        @raises TypeError: on a bad handle or non-QmfData error.
        """
        if not isinstance(handle, _MethodCallHandle):
            raise TypeError("Invalid handle passed to method_response!")

        _map = {SchemaMethod.KEY_NAME:handle.meth_name}
        if handle.oid is not None:
            _map[QmfData.KEY_OBJECT_ID] = handle.oid
        if handle.schema_id is not None:
            _map[QmfData.KEY_SCHEMA_ID] = handle.schema_id.map_encode()
        if _out_args is not None:
            _map[SchemaMethod.KEY_ARGUMENTS] = _out_args.copy()
        if _error is not None:
            if not isinstance(_error, QmfData):
                raise TypeError("Invalid type for error - must be QmfData")
            _map[SchemaMethod.KEY_ERROR] = _error.map_encode()

        msg = Message(id=QMF_APP_ID,
                      properties={"method":"response",
                                  "qmf.opcode":OpCode.method_rsp},
                      content=_map)
        # route the reply back using the caller's correlation id
        msg.correlation_id = handle.correlation_id

        self._send_reply(msg, handle.reply_to)
+
    def get_workitem_count(self):
        """
        Returns the count of pending WorkItems that can be retrieved.
        """
        return self._work_q.qsize()
+
    def get_next_workitem(self, timeout=None):
        """
        Obtains the next pending work item, or None if none available.

        @param timeout: seconds to block waiting; None blocks indefinitely.
        """
        try:
            wi = self._work_q.get(True, timeout)
        except Queue.Empty:
            return None
        return wi
+
    def release_workitem(self, wi):
        """
        Releases a WorkItem instance obtained by getNextWorkItem(). Called when
        the application has finished processing the WorkItem.
        """
        # currently a no-op; kept for API symmetry / future resource tracking
        pass
+
+
    def run(self):
        """
        Agent management thread main loop: drain inbound console messages,
        emit periodic heartbeats, service/expire subscriptions, notify the
        application of pending WorkItems, then sleep until the next deadline
        or until a message arrives.
        """
        global _callback_thread
        next_heartbeat = datetime.datetime.utcnow()
        batch_limit = 10 # a guess

        # signal set_connection() that the thread is up
        self._ready.set()

        while self._running:

            #
            # Process inbound messages
            #
            trace.debug("%s processing inbound messages..." % self.name)
            for i in range(batch_limit):
                try:
                    msg = self._topic_receiver.fetch(timeout=0)
                except Empty:
                    break
                # TRACE
                # log.error("!!! Agent %s: msg on %s [%s]" %
                #           (self.name, self._topic_receiver.source, msg))
                self._dispatch(msg, _direct=False)

            for i in range(batch_limit):
                try:
                    msg = self._direct_receiver.fetch(timeout=0)
                except Empty:
                    break
                # TRACE
                # log.error("!!! Agent %s: msg on %s [%s]" %
                #           (self.name, self._direct_receiver.source, msg))
                self._dispatch(msg, _direct=True)

            #
            # Send Heartbeat Notification
            #
            now = datetime.datetime.utcnow()
            if now >= next_heartbeat:
                trace.debug("%s sending heartbeat..." % self.name)
                ind = Message(id=QMF_APP_ID,
                              subject=QmfAddress.SUBJECT_AGENT_HEARTBEAT,
                              properties={"method":"indication",
                                          "qmf.opcode":OpCode.agent_heartbeat_ind,
                                          "qmf.agent":self.name},
                              content=self._makeAgentInfoBody())
                # TRACE
                #log.error("!!! Agent %s sending Heartbeat (%s)" %
                #          (self.name, str(ind)))
                self._topic_sender.send(ind)
                trace.debug("Agent Indication Sent")
                next_heartbeat = now + datetime.timedelta(seconds = self._heartbeat_interval)

            #
            # Monitor Subscriptions
            #
            self._lock.acquire()
            try:
                now = datetime.datetime.utcnow()
                if (self._next_subscribe_event is None or
                    now >= self._next_subscribe_event):
                    trace.debug("%s polling subscriptions..." % self.name)
                    # start from the worst case and tighten as we scan
                    self._next_subscribe_event = now + datetime.timedelta(seconds=
                                                                          self._max_duration)
                    dead_ss = {}
                    for sid,ss in self._subscriptions.iteritems():
                        if now >= ss.expiration:
                            dead_ss[sid] = ss
                            continue
                        if now >= ss.next_update:
                            self._publish(ss)
                        next_timeout = min(ss.expiration, ss.next_update)
                        if next_timeout < self._next_subscribe_event:
                            self._next_subscribe_event = next_timeout

                    # expired subscriptions are removed outside the scan loop
                    for sid,ss in dead_ss.iteritems():
                        del self._subscriptions[sid]
                        self._unpublish(ss)
            finally:
                self._lock.release()

            #
            # notify application of pending WorkItems
            #
            if self._work_q_put and self._notifier:
                trace.debug("%s notifying application..." % self.name)
                # new stuff on work queue, kick the the application...
                self._work_q_put = False
                _callback_thread = currentThread()
                trace.debug("Calling agent notifier.indication")
                self._notifier.indication()
                _callback_thread = None

            #
            # Sleep until messages arrive or something times out
            #
            now = datetime.datetime.utcnow()
            next_timeout = next_heartbeat
            self._lock.acquire()
            try:
                # the mailbox expire flag may be cleared by the
                # app thread(s) in order to force an immediate publish
                if self._next_subscribe_event is None:
                    next_timeout = now
                elif self._next_subscribe_event < next_timeout:
                    next_timeout = self._next_subscribe_event
            finally:
                self._lock.release()

            timeout = timedelta_to_secs(next_timeout - now)

            if self._running and timeout > 0.0:
                trace.debug("%s sleeping %s seconds..." % (self.name,
                                                           timeout))
                try:
                    self._session.next_receiver(timeout=timeout)
                except Empty:
                    pass


        trace.debug("Shutting down Agent %s thread" % self.name)
+
+ #
+ # Private:
+ #
+
    def _makeAgentInfoBody(self):
        """
        Create an agent indication message body identifying this agent
        """
        # the schema timestamp lets consoles detect schema changes cheaply
        return QmfData.create({"_name": self.get_name(),
                               "_schema_timestamp": self._schema_timestamp}).map_encode()
+
+ def _send_reply(self, msg, reply_to):
+ """
+ Send a reply message to the given reply_to address
+ """
+ if not isinstance(reply_to, QmfAddress):
+ try:
+ reply_to = QmfAddress.from_string(str(reply_to))
+ except ValueError:
+ log.error("Invalid reply-to address '%s'" % reply_to)
+
+ msg.subject = reply_to.get_subject()
+
+ try:
+ if reply_to.is_direct():
+ # TRACE
+ #log.error("!!! Agent %s direct REPLY-To:%s (%s)" %
+ # (self.name, str(reply_to), str(msg)))
+ self._direct_sender.send(msg)
+ else:
+ # TRACE
+ # log.error("!!! Agent %s topic REPLY-To:%s (%s)" %
+ # (self.name, str(reply_to), str(msg)))
+ self._topic_sender.send(msg)
+ trace.debug("reply msg sent to [%s]" % str(reply_to))
+ except SendError, e:
+ log.error("Failed to send reply msg '%s' (%s)" % (msg, str(e)))
+
    def _send_query_response(self, content_type, cid, reply_to, objects):
        """
        Send a response to a query, breaking the result into multiple
        messages based on the agent's _max_msg_size config parameter
        """

        # NOTE: _max_msg_size is a count of objects per message, not bytes
        # (see the @todo in __init__)
        total = len(objects)
        if self._max_msg_size:
            max_count = self._max_msg_size
        else:
            max_count = total

        start = 0
        end = min(total, max_count)
        # send partial response if too many objects present
        # (the "partial" property tells the console more messages follow)
        while end < total:
            m = Message(id=QMF_APP_ID,
                        properties={"method":"response",
                                    "partial":None,
                                    "qmf.opcode":OpCode.data_ind,
                                    "qmf.content":content_type,
                                    "qmf.agent":self.name},
                        correlation_id = cid,
                        content=objects[start:end])
            self._send_reply(m, reply_to)
            start = end
            end = min(total, end + max_count)

        # final (or only) message carries no "partial" marker
        m = Message(id=QMF_APP_ID,
                    properties={"method":"response",
                                "qmf.opcode":OpCode.data_ind,
                                "qmf.content":content_type,
                                "qmf.agent":self.name},
                    correlation_id = cid,
                    content=objects[start:end])
        self._send_reply(m, reply_to)
+
    def _dispatch(self, msg, _direct=False):
        """
        Process a message from a console.

        @param _direct: True if msg directly addressed to this agent.
        """
        trace.debug( "Message received from Console! [%s]" % msg )

        opcode = msg.properties.get("qmf.opcode")
        if not opcode:
            log.warning("Ignoring unrecognized message '%s'" % msg)
            return
        version = 2  # @todo: fix me
        cmap = {}; props={}
        if msg.content_type == "amqp/map":
            cmap = msg.content
        if msg.properties:
            props = msg.properties

        # route by QMF opcode; unknown opcodes are logged and dropped
        if opcode == OpCode.agent_locate_req:
            self._handleAgentLocateMsg( msg, cmap, props, version, _direct )
        elif opcode == OpCode.query_req:
            self._handleQueryMsg( msg, cmap, props, version, _direct )
        elif opcode == OpCode.method_req:
            self._handleMethodReqMsg(msg, cmap, props, version, _direct)
        elif opcode == OpCode.subscribe_req:
            self._handleSubscribeReqMsg(msg, cmap, props, version, _direct)
        elif opcode == OpCode.subscribe_refresh_ind:
            self._handleResubscribeReqMsg(msg, cmap, props, version, _direct)
        elif opcode == OpCode.subscribe_cancel_ind:
            self._handleUnsubscribeReqMsg(msg, cmap, props, version, _direct)
        elif opcode == OpCode.noop:
            # a no-op is how _wake_thread() nudges this loop awake
            self._noop_pending = False
            trace.debug("No-op msg received.")
        else:
            log.warning("Ignoring message with unrecognized 'opcode' value: '%s'"
                        % opcode)
+
+    def _handleAgentLocateMsg( self, msg, cmap, props, version, direct ):
+        """
+        Process a received agent-locate message.
+
+        Replies with this agent's info if the locate is a wildcard, is
+        addressed to this agent by name, or carries a predicate that
+        matches this agent's name.
+        """
+        trace.debug("_handleAgentLocateMsg")
+
+        reply = False
+        if props.get("method") == "request":
+            # if the message is addressed to me or wildcard, process it
+            if (msg.subject == "console.ind" or
+                msg.subject == "console.ind.locate" or
+                msg.subject == "console.ind.locate." + self.name):
+                pred = msg.content
+                if not pred:
+                    # empty predicate == wildcard locate: always reply
+                    reply = True
+                elif isinstance(pred, type([])):
+                    # fake a QmfData containing my identifier for the query compare
+                    query = QmfQuery.create_predicate(QmfQuery.TARGET_AGENT, pred)
+                    tmpData = QmfData.create({QmfQuery.KEY_AGENT_NAME:
+                                              self.get_name()},
+                                             _object_id="my-name")
+                    reply = query.evaluate(tmpData)
+
+        if reply:
+            m = Message(id=QMF_APP_ID,
+                        properties={"method":"response",
+                                    "qmf.opcode":OpCode.agent_locate_rsp},
+                        content=self._makeAgentInfoBody())
+            m.correlation_id = msg.correlation_id
+            self._send_reply(m, msg.reply_to)
+        else:
+            trace.debug("agent-locate msg not mine - no reply sent")
+
+
+    def _handleQueryMsg(self, msg, cmap, props, version, _direct ):
+        """
+        Handle received query message.
+
+        Decodes the query from the message content map and dispatches on
+        the query target (packages, schema, agent, object).  Invalid or
+        unrecognized queries are logged and dropped without a reply.
+        """
+        trace.debug("_handleQueryMsg")
+
+        if "method" in props and props["method"] == "request":
+            if cmap:
+                try:
+                    query = QmfQuery.from_map(cmap)
+                except TypeError:
+                    log.error("Invalid Query format: '%s'" % str(cmap))
+                    return
+                target = query.get_target()
+                if target == QmfQuery.TARGET_PACKAGES:
+                    self._queryPackagesReply( msg, query )
+                elif target == QmfQuery.TARGET_SCHEMA_ID:
+                    # id-only variants return identifiers, not full bodies
+                    self._querySchemaReply( msg, query, _idOnly=True )
+                elif target == QmfQuery.TARGET_SCHEMA:
+                    self._querySchemaReply( msg, query)
+                elif target == QmfQuery.TARGET_AGENT:
+                    log.warning("!!! @todo: Query TARGET=AGENT TBD !!!")
+                elif target == QmfQuery.TARGET_OBJECT_ID:
+                    self._queryDataReply(msg, query, _idOnly=True)
+                elif target == QmfQuery.TARGET_OBJECT:
+                    self._queryDataReply(msg, query)
+                else:
+                    log.warning("Unrecognized query target: '%s'" % str(target))
+
+
+
+    def _handleMethodReqMsg(self, msg, cmap, props, version, _direct):
+        """
+        Process received Method Request.
+
+        Extracts the method name, arguments and (optional) target object
+        id / schema id from the content map, then queues a METHOD_CALL
+        WorkItem for the application to execute.  The reply address and
+        correlation id are captured in a _MethodCallHandle so the app
+        can route its response back to the caller.
+        """
+        if "method" in props and props["method"] == "request":
+            mname = cmap.get(SchemaMethod.KEY_NAME)
+            if not mname:
+                log.warning("Invalid method call from '%s': no name"
+                            % msg.reply_to)
+                return
+
+            in_args = cmap.get(SchemaMethod.KEY_ARGUMENTS)
+            oid = cmap.get(QmfData.KEY_OBJECT_ID)
+            schema_id = cmap.get(QmfData.KEY_SCHEMA_ID)
+            if schema_id:
+                schema_id = SchemaClassId.from_map(schema_id)
+            handle = _MethodCallHandle(msg.correlation_id,
+                                       msg.reply_to,
+                                       mname,
+                                       oid, schema_id)
+            param = MethodCallParams( mname, oid, schema_id, in_args,
+                                      msg.user_id)
+
+            # @todo: validate the method against the schema:
+            # if self._schema:
+            #     # validate
+            #     _in_args = _in_args.copy()
+            #     ms = self._schema.get_method(name)
+            #     if ms is None:
+            #         raise ValueError("Method '%s' is undefined." % name)
+
+            #     for aname,prop in ms.get_arguments().iteritems():
+            #         if aname not in _in_args:
+            #             if prop.get_default():
+            #                 _in_args[aname] = prop.get_default()
+            #             elif not prop.is_optional():
+            #                 raise ValueError("Method '%s' requires argument '%s'"
+            #                                  % (name, aname))
+            #     for aname in _in_args.iterkeys():
+            #         prop = ms.get_argument(aname)
+            #         if prop is None:
+            #             raise ValueError("Method '%s' does not define argument"
+            #                              " '%s'" % (name, aname))
+            #         if "I" not in prop.get_direction():
+            #             raise ValueError("Method '%s' argument '%s' is not an"
+            #                              " input." % (name, aname))
+
+            #     # @todo check if value is correct (type, range, etc)
+
+            self._work_q.put(WorkItem(WorkItem.METHOD_CALL, handle, param))
+            # flag so the notifier callback fires for the application
+            self._work_q_put = True
+
+    def _handleSubscribeReqMsg(self, msg, cmap, props, version, _direct):
+        """
+        Process received Subscription Request.
+
+        Validates the requested query (only whole-object, non-predicate
+        queries are supported), clamps the requested interval/duration
+        to the agent's configured min/max, records a _SubscriptionState
+        under a new subscription id, and replies with the granted
+        parameters.  On an unsupported query an error response is sent
+        instead.
+        """
+        if "method" in props and props["method"] == "request":
+            query_map = cmap.get("_query")
+            interval = cmap.get("_interval")
+            duration = cmap.get("_duration")
+
+            try:
+                query = QmfQuery.from_map(query_map)
+            except TypeError:
+                log.warning("Invalid query for subscription: %s" %
+                            str(query_map))
+                return
+
+            if isinstance(self, AgentExternal):
+                # param = SubscriptionParams(_ConsoleHandle(console_handle,
+                #                                           msg.reply_to),
+                #                            query,
+                #                            interval,
+                #                            duration,
+                #                            msg.user_id)
+                # self._work_q.put(WorkItem(WorkItem.SUBSCRIBE_REQUEST,
+                #                           msg.correlation_id, param))
+                # self._work_q_put = True
+                log.error("External Subscription TBD")
+                return
+
+            # validate the query - only specific objects, or
+            # objects wildcard, are currently supported.
+            if (query.get_target() != QmfQuery.TARGET_OBJECT or
+                (query.get_selector() == QmfQuery.PREDICATE and
+                 query.get_predicate())):
+                log.error("Subscriptions only support (wildcard) Object"
+                          " Queries.")
+                err = QmfData.create(
+                    {"reason": "Unsupported Query type for subscription.",
+                     "query": str(query.map_encode())})
+                m = Message(id=QMF_APP_ID,
+                            properties={"method":"response",
+                                        "qmf.opcode":OpCode.subscribe_rsp},
+                            correlation_id = msg.correlation_id,
+                            content={"_error": err.map_encode()})
+                self._send_reply(m, msg.reply_to)
+                return
+
+            # clamp the requested duration to [min, max]; fall back to
+            # the default on a missing or unparsable value
+            if duration is None:
+                duration = self._default_duration
+            else:
+                try:
+                    duration = float(duration)
+                    if duration > self._max_duration:
+                        duration = self._max_duration
+                    elif duration < self._min_duration:
+                        duration = self._min_duration
+                except:
+                    log.warning("Bad duration value: %s" % str(msg))
+                    duration = self._default_duration
+
+            # clamp the requested publish interval to >= min
+            if interval is None:
+                interval = self._default_interval
+            else:
+                try:
+                    interval = float(interval)
+                    if interval < self._min_interval:
+                        interval = self._min_interval
+                except:
+                    log.warning("Bad interval value: %s" % str(msg))
+                    interval = self._default_interval
+
+            ss = _SubscriptionState(msg.reply_to,
+                                    msg.correlation_id,
+                                    query,
+                                    interval,
+                                    duration)
+            self._lock.acquire()
+            try:
+                sid = self._subscription_id
+                self._subscription_id += 1
+                ss.id = sid
+                self._subscriptions[sid] = ss
+                # force the management thread to recompute its next
+                # subscription wakeup time
+                self._next_subscribe_event = None
+            finally:
+                self._lock.release()
+
+            # reply with the granted (possibly clamped) parameters
+            sr_map = {"_subscription_id": sid,
+                      "_interval": interval,
+                      "_duration": duration}
+            m = Message(id=QMF_APP_ID,
+                        properties={"method":"response",
+                                    "qmf.opcode":OpCode.subscribe_rsp},
+                        correlation_id = msg.correlation_id,
+                        content=sr_map)
+            self._send_reply(m, msg.reply_to)
+
+
+
+    def _handleResubscribeReqMsg(self, msg, cmap, props, version, _direct):
+        """
+        Process received Renew Subscription Request.
+
+        Extends the lifetime of an existing subscription (optionally
+        with a new, clamped duration) and replies with the effective
+        interval/duration.  Unknown subscription ids are logged and
+        silently dropped (no reply).
+        """
+        if props.get("method") == "request":
+            sid = cmap.get("_subscription_id")
+            if not sid:
+                log.error("Invalid subscription refresh msg: %s" %
+                          str(msg))
+                return
+
+            self._lock.acquire()
+            try:
+                ss = self._subscriptions.get(sid)
+                if not ss:
+                    log.error("Ignoring unknown subscription: %s" %
+                              str(sid))
+                    return
+                duration = cmap.get("_duration")
+                if duration is not None:
+                    # clamp the new duration to [min, max]
+                    try:
+                        duration = float(duration)
+                        if duration > self._max_duration:
+                            duration = self._max_duration
+                        elif duration < self._min_duration:
+                            duration = self._min_duration
+                    except:
+                        log.error("Bad duration value: %s" % str(msg))
+                        duration = None  # use existing duration
+
+                ss.resubscribe(datetime.datetime.utcnow(), duration)
+
+                # capture the effective values while still holding the
+                # lock; the reply is sent after release
+                new_duration = ss.duration
+                new_interval = ss.interval
+
+            finally:
+                self._lock.release()
+
+            sr_map = {"_subscription_id": sid,
+                      "_interval": new_interval,
+                      "_duration": new_duration}
+            m = Message(id=QMF_APP_ID,
+                        properties={"method":"response",
+                                    "qmf.opcode":OpCode.subscribe_rsp},
+                        correlation_id = msg.correlation_id,
+                        content=sr_map)
+            self._send_reply(m, msg.reply_to)
+
+
+ def _handleUnsubscribeReqMsg(self, msg, cmap, props, version, _direct):
+ """
+ Process received Cancel Subscription Request
+ """
+ if props.get("method") == "request":
+ sid = cmap.get("_subscription_id")
+ if not sid:
+ log.warning("No subscription id supplied: %s" % msg)
+ return
+
+ self._lock.acquire()
+ try:
+ if sid in self._subscriptions:
+ dead_sub = self._subscriptions[sid]
+ del self._subscriptions[sid]
+ finally:
+ self._lock.release()
+
+ self._unpublish(dead_sub)
+
+
+    def _queryPackagesReply(self, msg, query):
+        """
+        Run a query against the list of known packages and send the
+        matching package names back to the requesting console.
+
+        NOTE(review): the reply is sent while holding self._lock -
+        relies on the lock being recursive / _send_query_response not
+        re-acquiring it; confirm before restructuring.
+        """
+        pnames = []
+        self._lock.acquire()
+        try:
+            for name in self._packages.iterkeys():
+                # wrap each package name in a throwaway QmfData so the
+                # generic query evaluator can be reused
+                qmfData = QmfData.create({SchemaClassId.KEY_PACKAGE:name},
+                                         _object_id="_package")
+                if query.evaluate(qmfData):
+                    pnames.append(name)
+
+            self._send_query_response(ContentType.schema_package,
+                                      msg.correlation_id,
+                                      msg.reply_to,
+                                      pnames)
+        finally:
+            self._lock.release()
+
+
+    def _querySchemaReply( self, msg, query, _idOnly=False ):
+        """
+        Run a query against the registered schema and reply with the
+        matching schema (or schema ids when _idOnly is True).
+
+        @param _idOnly: if True, reply with SchemaClassId maps instead
+            of full schema bodies.
+        """
+        schemas = []
+
+        self._lock.acquire()
+        try:
+            # if querying for a specific schema, do a direct lookup
+            if query.get_selector() == QmfQuery.ID:
+                found = self._schema.get(query.get_id())
+                if found:
+                    if _idOnly:
+                        schemas.append(query.get_id().map_encode())
+                    else:
+                        schemas.append(found.map_encode())
+            else: # otherwise, evaluate all schema
+                for sid,val in self._schema.iteritems():
+                    if query.evaluate(val):
+                        if _idOnly:
+                            schemas.append(sid.map_encode())
+                        else:
+                            schemas.append(val.map_encode())
+            # content type tells the console how to decode the reply
+            if _idOnly:
+                msgkey = ContentType.schema_id
+            else:
+                msgkey = ContentType.schema_class
+
+            self._send_query_response(msgkey,
+                                      msg.correlation_id,
+                                      msg.reply_to,
+                                      schemas)
+        finally:
+            self._lock.release()
+
+
+    def _queryDataReply( self, msg, query, _idOnly=False ):
+        """
+        Run a query against the managed data objects and reply with the
+        matching objects (or just their object ids when _idOnly is True).
+        """
+        # hold the (recursive) lock for the duration so the Agent
+        # won't send data that is currently being modified by the
+        # app.
+        self._lock.acquire()
+        try:
+            response = []
+            data_objs = self._queryData(query)
+            if _idOnly:
+                for obj in data_objs:
+                    response.append(obj.get_object_id())
+            else:
+                for obj in data_objs:
+                    response.append(obj.map_encode())
+
+            # content type tells the console how to decode the reply
+            if _idOnly:
+                msgkey = ContentType.object_id
+            else:
+                msgkey = ContentType.data
+
+            self._send_query_response(msgkey,
+                                      msg.correlation_id,
+                                      msg.reply_to,
+                                      response)
+        finally:
+            self._lock.release()
+
+
+    def _queryData(self, query):
+        """
+        Return a list of QmfData objects that match a given query.
+
+        Supports direct object-id lookup (QmfQuery.ID selector) as well
+        as full evaluation over all managed data.  An optional schema id
+        in the query's target parameters restricts the search; a schema
+        id without a hash string acts as a package/class wildcard.
+        """
+        data_objs = []
+        # extract optional schema_id from target params
+        sid = None
+        t_params = query.get_target_param()
+        if t_params:
+            sid = t_params.get(QmfData.KEY_SCHEMA_ID)
+
+        self._lock.acquire()
+        try:
+            # if querying for a specific object, do a direct lookup
+            if query.get_selector() == QmfQuery.ID:
+                oid = query.get_id()
+                if sid and not sid.get_hash_string():
+                    # wildcard schema_id match, check each schema
+                    for name,db in self._described_data.iteritems():
+                        if (name.get_class_name() == sid.get_class_name()
+                            and name.get_package_name() == sid.get_package_name()):
+                            found = db.get(oid)
+                            if found:
+                                data_objs.append(found)
+                else:
+                    found = None
+                    if sid:
+                        db = self._described_data.get(sid)
+                        if db:
+                            found = db.get(oid)
+                    else:
+                        # no schema id: object lives in the undescribed set
+                        found = self._undescribed_data.get(oid)
+                    if found:
+                        data_objs.append(found)
+
+            else: # otherwise, evaluate all data
+                if sid and not sid.get_hash_string():
+                    # wildcard schema_id match, check each schema
+                    for name,db in self._described_data.iteritems():
+                        if (name.get_class_name() == sid.get_class_name()
+                            and name.get_package_name() == sid.get_package_name()):
+                            for oid,data in db.iteritems():
+                                if query.evaluate(data):
+                                    data_objs.append(data)
+                else:
+                    if sid:
+                        db = self._described_data.get(sid)
+                    else:
+                        db = self._undescribed_data
+
+                    if db:
+                        for oid,data in db.iteritems():
+                            if query.evaluate(data):
+                                data_objs.append(data)
+        finally:
+            self._lock.release()
+
+        return data_objs
+
+    def _publish(self, sub):
+        """ Publish a subscription.
+
+        Sends to the subscriber every matching object that is new to the
+        subscription, or that has been deleted or updated since the
+        subscription's last publish.
+        """
+        response = []
+        now = datetime.datetime.utcnow()
+        objs = self._queryData(sub.query)
+        if objs:
+            for obj in objs:
+                if sub.id not in obj._subscriptions:
+                    # new to subscription - publish it
+                    obj._subscriptions[sub.id] = sub
+                    response.append(obj.map_encode())
+                elif obj._dtime:
+                    # obj._dtime is millisec since utc. Convert to datetime
+                    utcdt = datetime.datetime.utcfromtimestamp(obj._dtime/1000.0)
+                    if utcdt > sub.last_update:
+                        response.append(obj.map_encode())
+                else:
+                    # obj._utime is millisec since utc. Convert to datetime
+                    utcdt = datetime.datetime.utcfromtimestamp(obj._utime/1000.0)
+                    if utcdt > sub.last_update:
+                        response.append(obj.map_encode())
+
+        if response:
+            trace.debug("!!! %s publishing %s!!!" % (self.name, sub.correlation_id))
+            self._send_query_response( ContentType.data,
+                                       sub.correlation_id,
+                                       sub.reply_to,
+                                       response)
+            # record the publish time so the next pass only sends deltas
+            sub.published(now)
+
+ def _unpublish(self, sub):
+ """ This subscription is about to be deleted, remove it from any
+ referencing objects.
+ """
+ objs = self._queryData(sub.query)
+ if objs:
+ for obj in objs:
+ if sub.id in obj._subscriptions:
+ del obj._subscriptions[sub.id]
+
+
+
+    def _wake_thread(self):
+        """
+        Make the agent management thread loop wakeup from its next_receiver
+        sleep.
+
+        Sends a no-op message to the agent's own direct address; the
+        _noop_pending flag suppresses duplicate wakeups until _dispatch
+        consumes the noop and clears it.
+        """
+        self._lock.acquire()
+        try:
+            if not self._noop_pending:
+                trace.debug("Sending noop to wake up [%s]" % self._address)
+                msg = Message(id=QMF_APP_ID,
+                              subject=self.name,
+                              properties={"method":"indication",
+                                          "qmf.opcode":OpCode.noop},
+                              content={})
+                try:
+                    self._direct_sender.send( msg, sync=True )
+                    self._noop_pending = True
+                except SendError, e:
+                    # best-effort: a failed wakeup is logged, not fatal
+                    log.error(str(e))
+        finally:
+            self._lock.release()
+
+
+ ##==============================================================================
+ ## EXTERNAL DATABASE AGENT
+ ##==============================================================================
+
+class AgentExternal(Agent):
+    """
+    An Agent which uses an external management database.
+
+    NOTE(review): placeholder implementation - construction only logs an
+    error; the external-database behavior is not yet implemented.
+    """
+    def __init__(self, name, _domain=None, _notifier=None,
+                 _heartbeat_interval=30, _max_msg_size=0, _capacity=10):
+        super(AgentExternal, self).__init__(name, _domain, _notifier,
+                                            _heartbeat_interval,
+                                            _max_msg_size, _capacity)
+        log.error("AgentExternal TBD")
+
+
+
+ ##==============================================================================
+ ## DATA MODEL
+ ##==============================================================================
+
+
+class QmfAgentData(QmfData):
+ """
+ A managed data object that is owned by an agent.
+ """
+
+ def __init__(self, agent, _values={}, _subtypes={}, _tag=None,
+ _object_id=None, _schema=None):
+ schema_id = None
+ if _schema:
+ schema_id = _schema.get_class_id()
+
+ if _object_id is None:
+ if not isinstance(_schema, SchemaObjectClass):
+ raise Exception("An object_id must be provided if the object"
+ "doesn't have an associated schema.")
+ ids = _schema.get_id_names()
+ if not ids:
+ raise Exception("Object must have an Id or a schema that"
+ " provides an Id")
+ _object_id = u""
+ for key in ids:
+ value = _values.get(key)
+ if value is None:
+ raise Exception("Object must have a value for key"
+ " attribute '%s'" % str(key))
+ try:
+ _object_id += unicode(value)
+ except:
+ raise Exception("Cannot create object_id from key"
+ " value '%s'" % str(value))
+
+ # timestamp in millisec since epoch UTC
+ ctime = long(time.time() * 1000)
+ super(QmfAgentData, self).__init__(_values=_values, _subtypes=_subtypes,
+ _tag=_tag, _ctime=ctime,
+ _utime=ctime, _object_id=_object_id,
+ _schema_id=schema_id, _const=False)
+ self._agent = agent
+ self._validated = False
+ self._modified = True
+ self._subscriptions = {}
+
+ def destroy(self):
+ self._dtime = long(time.time() * 1000)
+ self._touch()
+ # @todo: publish change
+
+ def is_deleted(self):
+ return self._dtime == 0
+
+ def set_value(self, _name, _value, _subType=None):
+ super(QmfAgentData, self).set_value(_name, _value, _subType)
+ self._utime = long(time.time() * 1000)
+ self._touch(_name)
+ # @todo: publish change
+
+ def inc_value(self, name, delta=1):
+ """ add the delta to the property """
+ # @todo: need to take write-lock
+ val = self.get_value(name)
+ try:
+ val += delta
+ except:
+ raise
+ self.set_value(name, val)
+
+ def dec_value(self, name, delta=1):
+ """ subtract the delta from the property """
+ # @todo: need to take write-lock
+ val = self.get_value(name)
+ try:
+ val -= delta
+ except:
+ raise
+ self.set_value(name, val)
+
+ def validate(self):
+ """
+ Compares this object's data against the associated schema. Throws an
+ exception if the data does not conform to the schema.
+ """
+ props = self._schema.get_properties()
+ for name,val in props.iteritems():
+ # @todo validate: type compatible with amqp_type?
+ # @todo validate: primary keys have values
+ if name not in self._values:
+ if val._isOptional:
+ # ok not to be present, put in dummy value
+ # to simplify access
+ self._values[name] = None
+ else:
+ raise Exception("Required property '%s' not present." % name)
+ self._validated = True
+
+ def _touch(self, field=None):
+ """
+ Mark this object as modified. Used to force a publish of this object
+ if on subscription.
+ """
+ now = datetime.datetime.utcnow()
+ publish = False
+ if field:
+ # if the named field is not continuous, mark any subscriptions as
+ # needing to be published.
+ sid = self.get_schema_class_id()
+ if sid:
+ self._agent._lock.acquire()
+ try:
+ schema = self._agent._schema.get(sid)
+ if schema:
+ prop = schema.get_property(field)
+ if prop and not prop.is_continuous():
+ for sid,sub in self._subscriptions.iteritems():
+ sub.next_update = now
+ publish = True
+ if publish:
+ self._agent._next_subscribe_event = None
+ self._agent._wake_thread()
+ finally:
+ self._agent._lock.release()
+
+
+
+################################################################################
+################################################################################
+################################################################################
+################################################################################
+
+if __name__ == '__main__':
+    # static test cases - no message passing, just exercise API
+    import logging
+    from common import (AgentName, SchemaProperty, qmfTypes, SchemaEventClass)
+
+    logging.getLogger().setLevel(logging.INFO)
+
+    logging.info( "Create an Agent" )
+    _agent_name = AgentName("redhat.com", "agent", "tross")
+    _agent = Agent(str(_agent_name))
+
+    logging.info( "Get agent name: '%s'" % _agent.get_name())
+
+    logging.info( "Create SchemaObjectClass" )
+
+    # schema with two key (index) properties that together form the
+    # object id of each instance
+    _schema = SchemaObjectClass(SchemaClassId("MyPackage", "MyClass"),
+                                _desc="A test data schema",
+                                _object_id_names=["index1", "index2"])
+    # add properties
+    _schema.add_property("index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+    _schema.add_property("index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+    # these two properties are statistics
+    _schema.add_property("query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+    _schema.add_property("method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+    # These two properties can be set via the method call
+    _schema.add_property("set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+    _schema.add_property("set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+    # add method
+    _meth = SchemaMethod(_desc="Method to set string and int in object." )
+    _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+    _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+    _schema.add_method( "set_meth", _meth )
+
+    # Add schema to Agent
+
+    print("Schema Map='%s'" % str(_schema.map_encode()))
+
+    _agent.register_object_class(_schema)
+
+    # instantiate managed data objects matching the schema
+
+    logging.info( "Create QmfAgentData" )
+
+    # object id is derived from the index1/index2 key values
+    _obj = QmfAgentData( _agent, _schema=_schema )
+    _obj.set_value("index1", 100)
+    _obj.set_value("index2", "a name" )
+    _obj.set_value("set_string", "UNSET")
+    _obj.set_value("set_int", 0)
+    _obj.set_value("query_count", 0)
+    _obj.set_value("method_call_count", 0)
+
+    print("Obj1 Map='%s'" % str(_obj.map_encode()))
+
+    _agent.add_object( _obj )
+
+    # second instance: values supplied up-front via _values
+    _obj = QmfAgentData( _agent,
+                         _values={"index1":99,
+                                  "index2": "another name",
+                                  "set_string": "UNSET",
+                                  "set_int": 0,
+                                  "query_count": 0,
+                                  "method_call_count": 0},
+                         _schema=_schema)
+
+    print("Obj2 Map='%s'" % str(_obj.map_encode()))
+
+    _agent.add_object(_obj)
+
+    ##############
+
+
+
+    logging.info( "Create SchemaEventClass" )
+
+    _event = SchemaEventClass(SchemaClassId("MyPackage", "MyEvent",
+                                            stype=SchemaClassId.TYPE_EVENT),
+                              _desc="A test data schema",
+                              _props={"edata_1": SchemaProperty(qmfTypes.TYPE_UINT32)})
+    _event.add_property("edata_2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+    print("Event Map='%s'" % str(_event.map_encode()))
+
+    _agent.register_event_class(_event)
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/common.py b/qpid/extras/qmf/src/py/qmf2-prototype/common.py
new file mode 100644
index 0000000000..2e5367f54f
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/common.py
@@ -0,0 +1,1738 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import time
+from logging import getLogger
+from threading import Lock
+from threading import Condition
+try:
+ import hashlib
+ _md5Obj = hashlib.md5
+except ImportError:
+ import md5
+ _md5Obj = md5.new
+
+log = getLogger("qmf")
+log_query = getLogger("qmf.query")
+
+
+##
+## Constants
+##
+
+QMF_APP_ID="qmf2"
+
+
+class ContentType(object):
+    """ Values for the 'qmf.content' message header
+    """
+    schema_package = "_schema_package"   # list of package names
+    schema_id = "_schema_id"             # list of schema identifiers
+    schema_class = "_schema_class"       # list of full schema bodies
+    object_id = "_object_id"             # list of object identifiers
+    data = "_data"                       # list of data object maps
+    event = "_event"                     # event body
+
+
+class OpCode(object):
+    """ Values for the 'qmf.opcode' message header.
+    """
+    # used internally to wake an agent/console thread (no payload)
+    noop = "_noop"
+
+    # codes sent by a console and processed by the agent
+    agent_locate_req = "_agent_locate_request"
+    subscribe_req = "_subscribe_request"
+    subscribe_cancel_ind = "_subscribe_cancel_indication"
+    subscribe_refresh_ind = "_subscribe_refresh_indication"
+    query_req = "_query_request"
+    method_req = "_method_request"
+
+
+    # codes sent by the agent to a console
+    agent_locate_rsp = "_agent_locate_response"
+    agent_heartbeat_ind = "_agent_heartbeat_indication"
+    query_rsp = "_query_response"
+    subscribe_rsp = "_subscribe_response"
+    data_ind = "_data_indication"
+    method_rsp = "_method_response"
+
+
+
+def timedelta_to_secs(td):
+ """
+ Convert a time delta to a time interval in seconds (float)
+ """
+ return td.days * 86400 + td.seconds + td.microseconds/1000000.0
+
+
+##==============================================================================
+## Async Event Model
+##==============================================================================
+
+
+class Notifier(object):
+    """
+    Virtual base class that defines a call back which alerts the application that
+    a QMF Console notification is pending.
+    """
+    def indication(self):
+        """
+        Called when one or more items are ready for the application to process.
+        This method may be called by an internal QMF library thread. Its purpose is to
+        indicate that the application should process pending work items.
+        """
+        # subclasses must supply the notification behavior
+        raise Exception("The indication method must be overridden by the application!")
+
+
+
+class WorkItem(object):
+    """
+    Describes an event that has arrived for the application to process. The
+    Notifier is invoked when one or more of these WorkItems become available
+    for processing.
+    """
+    # Enumeration of the types of WorkItems produced on the Console
+    AGENT_ADDED=1
+    AGENT_DELETED=2
+    NEW_PACKAGE=3
+    NEW_CLASS=4
+    OBJECT_UPDATE=5
+    EVENT_RECEIVED=7
+    AGENT_HEARTBEAT=8
+    QUERY_COMPLETE=9
+    METHOD_RESPONSE=10
+    SUBSCRIBE_RESPONSE=11
+    SUBSCRIBE_INDICATION=12
+    RESUBSCRIBE_RESPONSE=13
+    # Enumeration of the types of WorkItems produced on the Agent
+    METHOD_CALL=1000
+    QUERY=1001
+    SUBSCRIBE_REQUEST=1002
+    RESUBSCRIBE_REQUEST=1003
+    UNSUBSCRIBE_REQUEST=1004
+
+    def __init__(self, kind, handle, _params=None):
+        """
+        Used by the Console to create a work item.
+
+        @type kind: int
+        @param kind: work item type (one of the enumerations above)
+        @param handle: opaque context used to correlate the item with
+            its originating request.
+        @param _params: optional payload for the work item.
+        """
+        self._kind = kind
+        self._handle = handle
+        self._params = _params
+
+    def get_type(self):
+        # work item type (one of the class-level enumerations)
+        return self._kind
+
+    def get_handle(self):
+        # opaque correlation handle supplied at creation
+        return self._handle
+
+    def get_params(self):
+        # optional payload, may be None
+        return self._params
+
+
+
+##==============================================================================
+## Addressing
+##==============================================================================
+
+class QmfAddress(object):
+    """
+    Address format: "qmf.<domain>.[topic|direct]/<subject>"
+    TBD
+    """
+
+    TYPE_DIRECT = "direct"
+    TYPE_TOPIC = "topic"
+
+    ADDRESS_FMT = "qmf.%s.%s/%s"
+    DEFAULT_DOMAIN = "default"
+
+    # Directly-addressed messages:
+    # agent's direct address: "qmf.<domain>.direct/<agent-name>
+    # console's direct address: "qmf.<domain>.direct/<console-name>
+
+    # Well-known Topic Addresses:
+    # "qmf.<domain>.topic/<subject>
+    # Where <subject> has the following format:
+    #   "console.ind#" - indications sent from consoles
+    #   "agent.ind#" - indications sent from agents
+    #
+    # The following "well known" subjects are defined:
+    #
+    # console.ind.locate[.<agent-name>] - agent discovery request
+    # agent.ind.heartbeat[.<agent-name>"] - agent heartbeats
+    # agent.ind.event[.<severity>.<agent-name>] - events
+    # agent.ind.schema[TBD] - schema updates
+    #
+    SUBJECT_AGENT_IND="agent.ind"
+    SUBJECT_AGENT_HEARTBEAT = "agent.ind.heartbeat"
+    SUBJECT_AGENT_EVENT="agent.ind.event"
+    SUBJECT_AGENT_SCHEMA="agent.ind.schema"
+
+    SUBJECT_CONSOLE_IND="console.ind"
+    SUBJECT_CONSOLE_LOCATE_AGENT="console.ind.locate"
+
+
+
+    def __init__(self, subject, domain, type_):
+        # '/' and '.' are structural separators in the address format,
+        # so the domain may not contain them
+        if '/' in domain or '.' in domain:
+            raise Exception("domain string must not contain '/' or '.'"
+                            " characters.")
+
+        self._subject = subject
+        self._domain = domain
+        self._type = type_
+
+    def _direct(cls, subject, _domain=None):
+        # factory: build a direct (point-to-point) address
+        if _domain is None:
+            _domain = QmfAddress.DEFAULT_DOMAIN
+        return cls(subject, _domain, type_=QmfAddress.TYPE_DIRECT)
+    direct = classmethod(_direct)
+
+    def _topic(cls, subject, _domain=None):
+        # factory: build a topic (pub/sub) address
+        if _domain is None:
+            _domain = QmfAddress.DEFAULT_DOMAIN
+        return cls(subject, _domain, type_=QmfAddress.TYPE_TOPIC)
+    topic = classmethod(_topic)
+
+    def __from_string(cls, address):
+        # parse "qmf.<domain>.<type>/<subject>" back into an instance
+        node,subject = address.split('/',1)
+        qmf,domain,type_ = node.split('.',2)
+
+        if qmf != "qmf" or (type_ != QmfAddress.TYPE_DIRECT and
+                            type_ != QmfAddress.TYPE_TOPIC):
+            raise ValueError("invalid QmfAddress format: %s" % address)
+
+        return cls(subject, domain, type_)
+    from_string = classmethod(__from_string)
+
+    def get_address(self):
+        """
+        Return the QMF address as a string, suitable for use with the AMQP
+        messaging API.
+        """
+        return str(self)
+
+    def get_node(self):
+        """
+        Return the 'node' portion of the address.
+        """
+        return self.get_address().split('/',1)[0]
+
+    def get_subject(self):
+        """
+        Return the 'subject' portion of the address.
+        """
+        return self.get_address().split('/',1)[1]
+
+    def get_domain(self):
+        # the domain component supplied at construction
+        return self._domain
+
+    def is_direct(self):
+        # True for point-to-point addresses, False for topic addresses
+        return self._type == self.TYPE_DIRECT
+
+    def __repr__(self):
+        return QmfAddress.ADDRESS_FMT % (self._domain, self._type, self._subject)
+
+
+
+class AgentName(object):
+ """
+ Uniquely identifies a management agent within the management domain.
+ """
+ _separator = ":"
+
+ def __init__(self, vendor, product, name, _str=None):
+ """
+ Note: this object must be immutable, as it is used to index into a dictionary
+ """
+ if _str is not None:
+ # construct from string representation
+ if _str.count(AgentName._separator) < 2:
+ raise TypeError("AgentName string format must be 'vendor.product.name'")
+ self._vendor, self._product, self._name = _str.split(AgentName._separator)
+ else:
+ self._vendor = vendor
+ self._product = product
+ self._name = name
+
+
+ def _from_str(cls, str_):
+ return cls(None, None, None, str_=str_)
+ from_str = classmethod(_from_str)
+
+ def vendor(self):
+ return self._vendor
+
+ def product(self):
+ return self._product
+
+ def name(self):
+ return self._name
+
+ def __cmp__(self, other):
+ if not isinstance(other, AgentName) :
+ raise TypeError("Invalid types for compare")
+ # return 1
+ me = str(self)
+ them = str(other)
+
+ if me < them:
+ return -1
+ if me > them:
+ return 1
+ return 0
+
+ def __hash__(self):
+ return (self._vendor, self._product, self._name).__hash__()
+
+ def __repr__(self):
+ return self._vendor + AgentName._separator + \
+ self._product + AgentName._separator + \
+ self._name
+
+
+
+##==============================================================================
+## DATA MODEL
+##==============================================================================
+
+
+class _mapEncoder(object):
+ """
+ virtual base class for all objects that support being converted to a map
+ """
+
+ def map_encode(self):
+ raise Exception("The map_encode method my be overridden.")
+
+
+class QmfData(_mapEncoder):
+ """
+ Base class representing management data.
+
+ Map format:
+ map["_values"] = map of unordered "name"=<value> pairs (optional)
+ map["_subtype"] = map of unordered "name"="subtype string" pairs (optional)
+ map["_tag"] = application-specific tag for this instance (optional)
+ """
+ KEY_VALUES = "_values"
+ KEY_SUBTYPES = "_subtypes"
+ KEY_TAG="_tag"
+ KEY_OBJECT_ID = "_object_id"
+ KEY_SCHEMA_ID = "_schema_id"
+ KEY_UPDATE_TS = "_update_ts"
+ KEY_CREATE_TS = "_create_ts"
+ KEY_DELETE_TS = "_delete_ts"
+
+ def __init__(self,
+ _values={}, _subtypes={}, _tag=None,
+ _object_id=None, _schema_id=None,
+ _ctime = 0, _utime = 0, _dtime = 0,
+ _map=None, _const=False):
+ """
+ @type _values: dict
+ @param _values: dictionary of initial name=value pairs for object's
+ named data.
+ @type _subtypes: dict
+ @param _subtype: dictionary of subtype strings for each of the object's
+ named data.
+ @type _desc: string
+ @param _desc: Human-readable description of this data object.
+ @type _const: boolean
+ @param _const: if true, this object cannot be modified
+ """
+ if _map is not None:
+ # construct from map
+ _tag = _map.get(self.KEY_TAG, _tag)
+ _values = _map.get(self.KEY_VALUES, _values)
+ _subtypes = _map.get(self.KEY_SUBTYPES, _subtypes)
+ _object_id = _map.get(self.KEY_OBJECT_ID, _object_id)
+ sid = _map.get(self.KEY_SCHEMA_ID)
+ if sid:
+ _schema_id = SchemaClassId.from_map(sid)
+ _ctime = long(_map.get(self.KEY_CREATE_TS, _ctime))
+ _utime = long(_map.get(self.KEY_UPDATE_TS, _utime))
+ _dtime = long(_map.get(self.KEY_DELETE_TS, _dtime))
+
+ self._values = _values.copy()
+ self._subtypes = _subtypes.copy()
+ self._tag = _tag
+ self._ctime = _ctime
+ self._utime = _utime
+ self._dtime = _dtime
+ self._const = _const
+ self._schema_id = _schema_id
+ self._object_id = str(_object_id)
+
+
+    def __create(cls, values, _subtypes={}, _tag=None, _object_id=None,
+                 _schema_id=None, _const=False):
+        # factory: build a new instance stamped with the current time
+        # timestamp in millisec since epoch UTC
+        ctime = long(time.time() * 1000)
+        return cls(_values=values, _subtypes=_subtypes, _tag=_tag,
+                   _ctime=ctime, _utime=ctime,
+                   _object_id=_object_id, _schema_id=_schema_id, _const=_const)
+    create = classmethod(__create)
+
+    def __from_map(cls, map_, _const=False):
+        # factory: reconstruct an instance from its map representation
+        return cls(_map=map_, _const=_const)
+    from_map = classmethod(__from_map)
+
+    def is_managed(self):
+        # managed objects carry an object identifier
+        return self._object_id is not None
+
+    def is_described(self):
+        # described objects carry a schema identifier
+        return self._schema_id is not None
+
+    def get_tag(self):
+        # application-specific tag, may be None
+        return self._tag
+
+    def get_value(self, name):
+        """
+        Return the value associated with 'name'.
+
+        Meta-property names (schema package/class/type/hash, object id,
+        tag and the create/update/delete timestamps) are resolved first;
+        all other names are looked up in the object's value map.
+
+        Will throw an AttributeError exception if the named value does not exist.
+        """
+        # meta-properties first:
+        if name == SchemaClassId.KEY_PACKAGE:
+            if self._schema_id:
+                return self._schema_id.get_package_name()
+            return None
+        if name == SchemaClassId.KEY_CLASS:
+            if self._schema_id:
+                return self._schema_id.get_class_name()
+            return None
+        if name == SchemaClassId.KEY_TYPE:
+            if self._schema_id:
+                return self._schema_id.get_type()
+            return None
+        if name == SchemaClassId.KEY_HASH:
+            if self._schema_id:
+                return self._schema_id.get_hash_string()
+            return None
+        if name == self.KEY_SCHEMA_ID:
+            return self._schema_id
+        if name == self.KEY_OBJECT_ID:
+            return self._object_id
+        if name == self.KEY_TAG:
+            return self._tag
+        if name == self.KEY_UPDATE_TS:
+            return self._utime
+        if name == self.KEY_CREATE_TS:
+            return self._ctime
+        if name == self.KEY_DELETE_TS:
+            return self._dtime
+
+        try:
+            return self._values[name]
+        except KeyError:
+            raise AttributeError("no value named '%s' in this object" % name)
+
+ def has_value(self, name):
+
+ if name in [SchemaClassId.KEY_PACKAGE, SchemaClassId.KEY_CLASS,
+ SchemaClassId.KEY_TYPE, SchemaClassId.KEY_HASH,
+ self.KEY_SCHEMA_ID]:
+ return self._schema_id is not None
+ if name in [self.KEY_UPDATE_TS, self.KEY_CREATE_TS,
+ self.KEY_DELETE_TS]:
+ return True
+ if name == self.KEY_OBJECT_ID:
+ return self._object_id is not None
+ if name == self.KEY_TAG:
+ return self._tag is not None
+
+ return name in self._values
+
+ def set_value(self, _name, _value, _subType=None):
+ if self._const:
+ raise Exception("cannot modify constant data object")
+ self._values[_name] = _value
+ if _subType:
+ self._subtypes[_name] = _subType
+ return _value
+
+ def get_subtype(self, _name):
+ return self._subtypes.get(_name)
+
+ def get_schema_class_id(self):
+ """
+ @rtype: class SchemaClassId
+ @returns: the identifier of the Schema that describes the structure of the data.
+ """
+ return self._schema_id
+
+ def get_object_id(self):
+ """
+ Get the instance's identification string.
+ @rtype: str
+        @returns: the identification string, or None if no id has been assigned.
+ """
+ return self._object_id
+
+ def map_encode(self):
+ _map = {}
+ if self._tag:
+ _map[self.KEY_TAG] = self._tag
+
+ # data in the _values map may require recursive map_encode()
+ vmap = {}
+ for name,val in self._values.iteritems():
+ if isinstance(val, _mapEncoder):
+ vmap[name] = val.map_encode()
+ else:
+ # otherwise, just toss in the native type...
+ vmap[name] = val
+
+ _map[self.KEY_VALUES] = vmap
+ # subtypes are never complex, so safe to just copy
+ _map[self.KEY_SUBTYPES] = self._subtypes.copy()
+ if self._object_id:
+ _map[self.KEY_OBJECT_ID] = self._object_id
+ if self._schema_id:
+ _map[self.KEY_SCHEMA_ID] = self._schema_id.map_encode()
+ return _map
+
+ def __repr__(self):
+ return "QmfData=<<" + str(self.map_encode()) + ">>"
+
+
+ def __setattr__(self, _name, _value):
+ # ignore private data members
+ if _name[0] == '_':
+ return super(QmfData, self).__setattr__(_name, _value)
+ if _name in self._values:
+ return self.set_value(_name, _value)
+ return super(QmfData, self).__setattr__(_name, _value)
+
+ def __getattr__(self, _name):
+ if _name != "_values" and _name in self._values:
+ return self._values[_name]
+ raise AttributeError("no value named '%s' in this object" % _name)
+
+ def __getitem__(self, _name):
+ return self.__getattr__(_name)
+
+ def __setitem__(self, _name, _value):
+ return self.__setattr__(_name, _value)
+
+
+
+class QmfEvent(QmfData):
+    """
+    A QMF Event is a type of described data that is not managed. Events are
+    notifications that are sent by Agents. An event notifies a Console of a
+    change in some aspect of the system under management.
+    """
+    KEY_TIMESTAMP = "_timestamp"
+    KEY_SEVERITY = "_severity"
+
+    # severity levels, ordered most- to least-severe (syslog-style names)
+    SEV_EMERG = "emerg"
+    SEV_ALERT = "alert"
+    SEV_CRIT = "crit"
+    SEV_ERR = "err"
+    SEV_WARNING = "warning"
+    SEV_NOTICE = "notice"
+    SEV_INFO = "info"
+    SEV_DEBUG = "debug"
+
+    def __init__(self, _timestamp=None, _sev=SEV_NOTICE, _values={},
+                 _subtypes={}, _tag=None,
+                 _map=None,
+                 _schema_id=None, _const=True):
+        """
+        @type _map: dict
+        @param _map: if not None, construct instance from map representation.
+        @type _timestamp: int
+        @param _timestamp: moment in time when event occurred, expressed
+               as milliseconds since Midnight, Jan 1, 1970 UTC.
+        @type _sev: str
+        @param _sev: the severity of the event, one of the SEV_* constants.
+        @type _schema_id: class SchemaClassId (event)
+        @param _schema_id: identifies the schema describing this event.
+        @type _const: boolean
+        @param _const: if true, this event cannot be modified.
+        """
+
+        if _map is not None:
+            # construct from map
+            super(QmfEvent, self).__init__(_map=_map, _const=_const,
+                                           _object_id="_event")
+            _timestamp = _map.get(self.KEY_TIMESTAMP, _timestamp)
+            _sev = _map.get(self.KEY_SEVERITY, _sev)
+        else:
+            super(QmfEvent, self).__init__(_object_id="_event",
+                                           _values=_values,
+                                           _subtypes=_subtypes, _tag=_tag,
+                                           _schema_id=_schema_id,
+                                           _const=_const)
+        if _timestamp is None:
+            raise TypeError("QmfEvent: a valid timestamp is required.")
+
+        try:
+            self._timestamp = long(_timestamp)
+        except:
+            raise TypeError("QmfEvent: a numeric timestamp is required.")
+
+        self._severity = _sev
+
+    def _create(cls, timestamp, severity, values,
+                _subtypes={}, _tag=None, _schema_id=None, _const=False):
+        # factory: build an event directly from its attributes
+        return cls(_timestamp=timestamp, _sev=severity, _values=values,
+                   _subtypes=_subtypes, _tag=_tag, _schema_id=_schema_id, _const=_const)
+    create = classmethod(_create)
+
+    def _from_map(cls, map_, _const=False):
+        # factory: build an event from its map encoding
+        return cls(_map=map_, _const=_const)
+    from_map = classmethod(_from_map)
+
+    def get_timestamp(self):
+        # milliseconds since the epoch, UTC
+        return self._timestamp
+
+    def get_severity(self):
+        return self._severity
+
+    def map_encode(self):
+        # extend QmfData's map encoding with the event-only fields
+        _map = super(QmfEvent, self).map_encode()
+        _map[self.KEY_TIMESTAMP] = self._timestamp
+        _map[self.KEY_SEVERITY] = self._severity
+        return _map
+
+
+
+##==============================================================================
+## QUERY
+##==============================================================================
+
+
+
+class QmfQuery(_mapEncoder):
+
+ KEY_TARGET="what"
+ KEY_PREDICATE="where"
+ KEY_ID="id"
+
+ ### Query Types
+ ID=1
+ PREDICATE=2
+
+ #### Query Targets ####
+ TARGET_PACKAGES="schema_package"
+ # (returns just package names)
+ # allowed predicate key(s):
+ #
+ # SchemaClassId.KEY_PACKAGE
+
+ TARGET_SCHEMA_ID="schema_id"
+ TARGET_SCHEMA="schema"
+ # allowed id: value:
+ # SchemaClassId
+ #
+ # allowed predicate key(s):
+ # SchemaClassId.KEY_PACKAGE
+ # SchemaClassId.KEY_CLASS
+ # SchemaClassId.KEY_TYPE
+ # SchemaClassId.KEY_HASH
+ # SchemaClass.KEY_SCHEMA_ID
+ # name of property (exist test only)
+ # name of method (exist test only)
+
+ TARGET_AGENT="agent"
+ # allowed id: value:
+ # string name of agent
+ # allowed predicate keys(s):
+ #
+ KEY_AGENT_NAME="_name"
+
+ TARGET_OBJECT_ID="object_id"
+ TARGET_OBJECT="object"
+ # If object is described by a schema, the value of the target map must
+ # include a "_schema_id": {map encoded schema id} value.
+ #
+ # allowed id: value:
+ # object_id string
+ #
+ # allowed predicate keys(s):
+ #
+ # QmfData.KEY_OBJECT_ID
+ # QmfData.KEY_UPDATE_TS
+ # QmfData.KEY_CREATE_TS
+ # QmfData.KEY_DELETE_TS
+ # <name of data value>
+
+ # supported predicate operators
+
+ # evaluation operators
+ QUOTE="quote"
+ UNQUOTE="unquote"
+ # boolean operators
+ EQ="eq"
+ NE="ne"
+ LT="lt"
+ LE="le"
+ GT="gt"
+ GE="ge"
+ RE_MATCH="re_match"
+ EXISTS="exists"
+ TRUE="true"
+ FALSE="false"
+ # logic operators
+ AND="and"
+ OR="or"
+ NOT="not"
+
+ _valid_targets = [TARGET_PACKAGES, TARGET_OBJECT_ID, TARGET_SCHEMA, TARGET_SCHEMA_ID,
+ TARGET_OBJECT, TARGET_AGENT]
+ _valid_bool_ops = [EQ, NE, LT, GT, LE, GE, EXISTS, RE_MATCH, TRUE, FALSE]
+ _valid_logic_ops = [AND, OR, NOT]
+ _valid_eval_ops = [QUOTE, UNQUOTE]
+
+ def __init__(self, _target=None, _target_params=None, _predicate=None,
+ _id=None, _map=None):
+ """
+ """
+ if _map is not None:
+ target_map = _map.get(self.KEY_TARGET)
+ if not target_map:
+ raise TypeError("QmfQuery requires a target map")
+
+ _target = None
+ for key in target_map.iterkeys():
+ if key in self._valid_targets:
+ _target = key
+ break
+ if _target is None:
+ raise TypeError("Invalid QmfQuery target: '%s'" %
+ str(target_map))
+
+ # convert target params from map format
+ _target_params = target_map.get(_target)
+ if _target_params:
+ if not isinstance(_target_params, type({})):
+ raise TypeError("target params must be a map: '%s'" %
+ str(_target_params))
+ t_params = {}
+ for name,value in _target_params.iteritems():
+ if name == QmfData.KEY_SCHEMA_ID:
+ t_params[name] = SchemaClassId.from_map(value)
+ else:
+ t_params[name] = value
+ _target_params = t_params
+
+ _id = _map.get(self.KEY_ID)
+ if _id is not None:
+ # Convert identifier to native type if necessary
+ if _target == self.TARGET_SCHEMA:
+ _id = SchemaClassId.from_map(_id)
+ else:
+ _predicate = _map.get(self.KEY_PREDICATE, _predicate)
+
+ self._target = _target
+ if not self._target:
+ raise TypeError("QmfQuery requires a target value")
+ self._target_params = _target_params
+ self._predicate = _predicate
+ self._id = _id
+
+ # constructors
+ def _create_wildcard(cls, target, _target_params=None):
+ return cls(_target=target, _target_params=_target_params)
+ create_wildcard = classmethod(_create_wildcard)
+
+ def _create_wildcard_object_id(cls, schema_id):
+ """
+ Create a wildcard to match all object_ids for a given schema.
+ """
+ if not isinstance(schema_id, SchemaClassId):
+ raise TypeError("class SchemaClassId expected")
+ params = {QmfData.KEY_SCHEMA_ID: schema_id}
+ return cls(_target=QmfQuery.TARGET_OBJECT_ID,
+ _target_params=params)
+ create_wildcard_object_id = classmethod(_create_wildcard_object_id)
+
+ def _create_wildcard_object(cls, schema_id):
+ """
+ Create a wildcard to match all objects for a given schema.
+ """
+ if not isinstance(schema_id, SchemaClassId):
+ raise TypeError("class SchemaClassId expected")
+ params = {QmfData.KEY_SCHEMA_ID: schema_id}
+ return cls(_target=QmfQuery.TARGET_OBJECT,
+ _target_params=params)
+ create_wildcard_object = classmethod(_create_wildcard_object)
+
+ def _create_predicate(cls, target, predicate, _target_params=None):
+ return cls(_target=target, _target_params=_target_params,
+ _predicate=predicate)
+ create_predicate = classmethod(_create_predicate)
+
+ def _create_id(cls, target, ident, _target_params=None):
+ return cls(_target=target, _target_params=_target_params, _id=ident)
+ create_id = classmethod(_create_id)
+
+ def _create_id_object(cls, object_id, _schema_id=None):
+ """
+ Create a ID Query for an object (schema optional).
+ """
+ if _schema_id is not None:
+ if not isinstance(_schema_id, SchemaClassId):
+ raise TypeError("class SchemaClassId expected")
+ params = {QmfData.KEY_SCHEMA_ID: _schema_id}
+ else:
+ params = None
+ return cls(_target=QmfQuery.TARGET_OBJECT,
+ _id=object_id,
+ _target_params=params)
+ create_id_object = classmethod(_create_id_object)
+
+ def _create_id_object_id(cls, object_id, _schema_id=None):
+ """
+ Create a ID Query for object_ids (schema optional).
+ """
+ if _schema_id is not None:
+ if not isinstance(_schema_id, SchemaClassId):
+ raise TypeError("class SchemaClassId expected")
+ params = {QmfData.KEY_SCHEMA_ID: _schema_id}
+ else:
+ params = None
+ return cls(_target=QmfQuery.TARGET_OBJECT_ID,
+ _id=object_id,
+ _target_params=params)
+ create_id_object_id = classmethod(_create_id_object_id)
+
+ def _from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(_from_map)
+ # end constructors
+
+ def get_target(self):
+ return self._target
+
+ def get_target_param(self):
+ return self._target_params
+
+ def get_selector(self):
+ if self._id:
+ return QmfQuery.ID
+ else:
+ return QmfQuery.PREDICATE
+
+ def get_id(self):
+ return self._id
+
+ def get_predicate(self):
+ """
+ """
+ return self._predicate
+
+ def evaluate(self, qmfData):
+ """
+ """
+ if self._id:
+ if self._target == self.TARGET_SCHEMA:
+ return (qmfData.has_value(qmfData.KEY_SCHEMA_ID) and
+ qmfData.get_value(qmfData.KEY_SCHEMA_ID) == self._id)
+ elif self._target == self.TARGET_OBJECT:
+ return (qmfData.has_value(qmfData.KEY_OBJECT_ID) and
+ qmfData.get_value(qmfData.KEY_OBJECT_ID) == self._id)
+ elif self._target == self.TARGET_AGENT:
+ return (qmfData.has_value(self.KEY_AGENT_NAME) and
+ qmfData.get_value(self.KEY_AGENT_NAME) == self._id)
+
+ raise Exception("Unsupported query target '%s'" % str(self._target))
+
+ if self._predicate:
+ return self._eval_pred(self._predicate, qmfData)
+ # no predicate and no id - always match
+ return True
+
+ def map_encode(self):
+ t_params = {}
+ if self._target_params:
+ for name,value in self._target_params.iteritems():
+ if isinstance(value, _mapEncoder):
+ t_params[name] = value.map_encode()
+ else:
+ t_params[name] = value
+ if t_params:
+ _map = {self.KEY_TARGET: {self._target: t_params}}
+ else:
+ _map = {self.KEY_TARGET: {self._target: None}}
+
+ if self._id is not None:
+ if isinstance(self._id, _mapEncoder):
+ _map[self.KEY_ID] = self._id.map_encode()
+ else:
+ _map[self.KEY_ID] = self._id
+ elif self._predicate is not None:
+ _map[self.KEY_PREDICATE] = self._predicate
+ return _map
+
+ def _eval_pred(self, pred, qmfData):
+ """
+ Evaluate the predicate expression against a QmfData object.
+ """
+ if not isinstance(qmfData, QmfData):
+ raise TypeError("Query expects to evaluate QmfData types.")
+
+ if not isinstance(pred, type([])):
+ log.warning("Invalid type for predicate expression: '%s'" % str(pred))
+ return False
+
+ # empty predicate - match all???
+ if len(pred) == 0:
+ return True
+
+ oper = pred[0]
+ if oper == QmfQuery.TRUE:
+ log_query.debug("query evaluate TRUE")
+ return True
+
+ if oper == QmfQuery.FALSE:
+ log_query.debug("query evaluate FALSE")
+ return False
+
+ if oper == QmfQuery.AND:
+ log_query.debug("query evaluate AND: '%s'" % str(pred))
+ for exp in pred[1:]:
+ if not self._eval_pred(exp, qmfData):
+ log_query.debug("---> False")
+ return False
+ log_query.debug("---> True")
+ return True
+
+ if oper == QmfQuery.OR:
+ log_query.debug("query evaluate OR: [%s]" % str(pred))
+ for exp in pred[1:]:
+ if self._eval_pred(exp, qmfData):
+ log_query.debug("---> True")
+ return True
+ log_query.debug("---> False")
+ return False
+
+ if oper == QmfQuery.NOT:
+ log_query.debug("query evaluate NOT: [%s]" % str(pred))
+ for exp in pred[1:]:
+ if self._eval_pred(exp, qmfData):
+ log_query.debug("---> False")
+ return False
+ log_query.debug("---> True")
+ return True
+
+ if oper == QmfQuery.EXISTS:
+ if len(pred) != 2:
+ log.warning("Malformed query: 'exists' operator"
+ " - bad arguments '%s'" % str(pred))
+ return False
+ ### Q: Should we assume "quote", or should it be explicit?
+ ### "foo" or ["quote" "foo"]
+ ### my guess is "explicit"
+ log_query.debug("query evaluate EXISTS: [%s]" % str(pred))
+ try:
+ arg = self._fetch_pred_arg(pred[1], qmfData)
+ except AttributeError:
+ log.warning("query parameter not found: '%s'" % str(pred))
+ return False
+ v = qmfData.has_value(arg)
+ log_query.debug("---> %s" % str(v))
+ return v
+
+ # binary operators
+ if oper in [QmfQuery.EQ, QmfQuery.NE, QmfQuery.LT,
+ QmfQuery.LE, QmfQuery.GT, QmfQuery.GE,
+ QmfQuery.RE_MATCH]:
+ if len(pred) != 3:
+ log.warning("Malformed query: '%s' operator"
+ " - requires 2 arguments '%s'" %
+ (oper, str(pred)))
+ return False
+ # @todo: support regular expression match
+ log_query.debug("query evaluate binary op: [%s]" % str(pred))
+ try:
+ arg1 = self._fetch_pred_arg(pred[1], qmfData)
+ arg2 = self._fetch_pred_arg(pred[2], qmfData)
+ except AttributeError:
+ log.warning("query parameter not found: '%s'" % str(pred))
+ return False
+ log_query.debug("query evaluate %s: %s, %s" % (oper, str(arg1), str(arg2)))
+ v = False
+ try:
+ if oper == QmfQuery.EQ: v = arg1 == arg2
+ elif oper == QmfQuery.NE: v = arg1 != arg2
+ elif oper == QmfQuery.LT: v = arg1 < arg2
+ elif oper == QmfQuery.LE: v = arg1 <= arg2
+ elif oper == QmfQuery.GT: v = arg1 > arg2
+ elif oper == QmfQuery.GE: v = arg1 >= arg2
+ except TypeError:
+ log.warning("query comparison failed: '%s'" % str(pred))
+ log_query.debug("---> %s" % str(v))
+ return v
+
+ log.warning("Unrecognized query operator: [%s]" % str(pred[0]))
+ return False
+
+    def _fetch_pred_arg(self, arg, qmfData):
+        """
+        Determine the value of a predicate argument by evaluating quoted
+        arguments.
+
+        A plain string is treated as the name of a value to look up in
+        qmfData (get_value may raise AttributeError if it does not exist).
+        A two-element list [QUOTE, x] yields the literal x, while
+        [UNQUOTE, name] yields the named value from qmfData.  Any other
+        argument is returned unchanged.
+        """
+        if isinstance(arg, basestring):
+            return qmfData.get_value(arg)
+        if isinstance(arg, type([])) and len(arg) == 2:
+            if arg[0] == QmfQuery.QUOTE:
+                return arg[1]
+            if arg[0] == QmfQuery.UNQUOTE:
+                return qmfData.get_value(arg[1])
+        return arg
+
+ def __repr__(self):
+ return "QmfQuery=<<" + str(self.map_encode()) + ">>"
+
+
+
+
+
+##==============================================================================
+## SCHEMA
+##==============================================================================
+
+
+# Argument typecodes, access, and direction qualifiers
+
+class qmfTypes(object):
+ TYPE_UINT8 = 1
+ TYPE_UINT16 = 2
+ TYPE_UINT32 = 3
+ TYPE_UINT64 = 4
+
+ TYPE_SSTR = 6
+ TYPE_LSTR = 7
+
+ TYPE_ABSTIME = 8
+ TYPE_DELTATIME = 9
+
+ TYPE_REF = 10
+
+ TYPE_BOOL = 11
+
+ TYPE_FLOAT = 12
+ TYPE_DOUBLE = 13
+
+ TYPE_UUID = 14
+
+ TYPE_MAP = 15
+
+ TYPE_INT8 = 16
+ TYPE_INT16 = 17
+ TYPE_INT32 = 18
+ TYPE_INT64 = 19
+
+ TYPE_OBJECT = 20
+
+ TYPE_LIST = 21
+
+ TYPE_ARRAY = 22
+
+# New subtypes:
+# integer (for time, duration, signed/unsigned)
+# double (float)
+# bool
+# string
+# map (ref, qmfdata)
+# list
+# uuid
+
+
+class qmfAccess(object):
+ READ_CREATE = 1
+ READ_WRITE = 2
+ READ_ONLY = 3
+
+
+class qmfDirection(object):
+ DIR_IN = 1
+ DIR_OUT = 2
+ DIR_IN_OUT = 3
+
+
+
+def _to_bool( param ):
+ """
+ Helper routine to convert human-readable representations of
+ boolean values to python bool types.
+ """
+ _false_strings = ["off", "no", "false", "0", "none"]
+ _true_strings = ["on", "yes", "true", "1"]
+ if type(param) == str:
+ lparam = param.lower()
+ if lparam in _false_strings:
+ return False
+ if lparam in _true_strings:
+ return True
+ raise TypeError("unrecognized boolean string: '%s'" % param )
+ else:
+ return bool(param)
+
+
+
+class SchemaClassId(_mapEncoder):
+ """
+ Unique identifier for an instance of a SchemaClass.
+
+ Map format:
+    map["_package_name"] = str, name of associated package
+    map["_class_name"] = str, name of associated class
+    map["_type"] = str, "_data"|"_event", default: "_data"
+    optional:
+    map["_hash_str"] = str, hash value in standard format or None
+               if hash is unknown.
+ """
+ KEY_PACKAGE="_package_name"
+ KEY_CLASS="_class_name"
+ KEY_TYPE="_type"
+ KEY_HASH="_hash_str"
+
+ TYPE_DATA = "_data"
+ TYPE_EVENT = "_event"
+
+ _valid_types=[TYPE_DATA, TYPE_EVENT]
+ _schemaHashStrFormat = "%08x-%08x-%08x-%08x"
+ _schemaHashStrDefault = "00000000-00000000-00000000-00000000"
+
+ def __init__(self, pname=None, cname=None, stype=TYPE_DATA, hstr=None,
+ _map=None):
+ """
+ @type pname: str
+ @param pname: the name of the class's package
+ @type cname: str
+ @param cname: name of the class
+ @type stype: str
+ @param stype: schema type [data | event]
+ @type hstr: str
+ @param hstr: the hash value in '%08x-%08x-%08x-%08x' format
+ """
+ if _map is not None:
+ # construct from map
+ pname = _map.get(self.KEY_PACKAGE, pname)
+ cname = _map.get(self.KEY_CLASS, cname)
+ stype = _map.get(self.KEY_TYPE, stype)
+ hstr = _map.get(self.KEY_HASH, hstr)
+
+ self._pname = pname
+ self._cname = cname
+ if stype not in SchemaClassId._valid_types:
+ raise TypeError("Invalid SchemaClassId type: '%s'" % stype)
+ self._type = stype
+ self._hstr = hstr
+ if self._hstr:
+ try:
+ # sanity check the format of the hash string
+ hexValues = hstr.split("-")
+ h0 = int(hexValues[0], 16)
+ h1 = int(hexValues[1], 16)
+ h2 = int(hexValues[2], 16)
+ h3 = int(hexValues[3], 16)
+ except:
+ raise Exception("Invalid SchemaClassId format: bad hash string: '%s':"
+ % hstr)
+ # constructor
+ def _create(cls, pname, cname, stype=TYPE_DATA, hstr=None):
+ return cls(pname=pname, cname=cname, stype=stype, hstr=hstr)
+ create = classmethod(_create)
+
+ # map constructor
+ def _from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(_from_map)
+
+ def get_package_name(self):
+ """
+ Access the package name in the SchemaClassId.
+
+ @rtype: str
+ """
+ return self._pname
+
+
+ def get_class_name(self):
+ """
+ Access the class name in the SchemaClassId
+
+ @rtype: str
+ """
+ return self._cname
+
+
+ def get_hash_string(self):
+ """
+ Access the schema's hash as a string value
+
+ @rtype: str
+ """
+ return self._hstr
+
+
+ def get_type(self):
+ """
+ Returns the type code associated with this Schema
+
+ @rtype: str
+ """
+ return self._type
+
+ def map_encode(self):
+ _map = {}
+ _map[self.KEY_PACKAGE] = self._pname
+ _map[self.KEY_CLASS] = self._cname
+ _map[self.KEY_TYPE] = self._type
+ if self._hstr: _map[self.KEY_HASH] = self._hstr
+ return _map
+
+ def __repr__(self):
+ hstr = self.get_hash_string()
+ if not hstr:
+ hstr = SchemaClassId._schemaHashStrDefault
+ return self._pname + ":" + self._cname + ":" + self._type + "(" + hstr + ")"
+
+
+ def __cmp__(self, other):
+ if isinstance(other, dict):
+ other = SchemaClassId.from_map(other)
+ if not isinstance(other, SchemaClassId):
+ raise TypeError("Invalid types for compare")
+ # return 1
+ me = str(self)
+ them = str(other)
+ if me < them:
+ return -1
+ if me > them:
+ return 1
+ return 0
+
+
+ def __hash__(self):
+ return (self._pname, self._cname, self._hstr).__hash__()
+
+
+
+class SchemaProperty(_mapEncoder):
+ """
+ Describes the structure of a Property data object.
+ Map format:
+ map["amqp_type"] = int, AMQP type code indicating property's data type
+
+ optional:
+ map["access"] = str, access allowed to this property, default "RO"
+ map["index"] = bool, True if this property is an index value, default False
+ map["optional"] = bool, True if this property is optional, default False
+ map["unit"] = str, describes units used
+ map["min"] = int, minimum allowed value
+    map["max"] = int, maximum allowed value
+ map["maxlen"] = int, if string type, this is the maximum length in bytes
+ required to represent the longest instance of this string.
+ map["desc"] = str, human-readable description of this argument
+ map["reference"] = str, ???
+ map["parent_ref"] = bool, true if this property references an object in
+ which this object is in a child-parent relationship. Default False
+ map["continuous"] = bool, true if the value potentially changes too fast to
+ be directly monitorable. Example: fast changing statistic or random
+ number. Subscriptions to objects containing continuous data will publish
+ only on an interval basis, rather than every time the data changes. Default
+ False.
+ """
+ __hash__ = None
+ _access_strings = ["RO","RW","RC"]
+ _dir_strings = ["I", "O", "IO"]
+ def __init__(self, _type_code=None, _map=None, kwargs={}):
+ if _map is not None:
+ # construct from map
+ _type_code = _map.get("amqp_type", _type_code)
+ kwargs = _map
+ if not _type_code:
+ raise TypeError("SchemaProperty: amqp_type is a mandatory"
+ " parameter")
+
+ self._type = _type_code
+ self._access = "RO"
+ self._isIndex = False
+ self._isOptional = False
+ self._unit = None
+ self._min = None
+ self._max = None
+ self._maxlen = None
+ self._desc = None
+ self._reference = None
+ self._isParentRef = False
+ self._dir = None
+ self._default = None
+ self._is_continuous = False
+
+ for key, value in kwargs.items():
+ if key == "access":
+ value = str(value).upper()
+ if value not in self._access_strings:
+ raise TypeError("invalid value for access parameter: '%s':" % value )
+ self._access = value
+ elif key == "index" : self._isIndex = _to_bool(value)
+ elif key == "optional": self._isOptional = _to_bool(value)
+ elif key == "unit" : self._unit = value
+ elif key == "min" : self._min = value
+ elif key == "max" : self._max = value
+ elif key == "maxlen" : self._maxlen = value
+ elif key == "desc" : self._desc = value
+ elif key == "reference" : self._reference = value
+ elif key == "parent_ref" : self._isParentRef = _to_bool(value)
+ elif key == "parent_ref" : self._isParentRef = _to_bool(value)
+ elif key == "continuous" : self._is_continuous = _to_bool(value)
+ elif key == "dir":
+ value = str(value).upper()
+ if value not in self._dir_strings:
+ raise TypeError("invalid value for direction parameter: '%s'" % value)
+ self._dir = value
+ elif key == "default" : self._default = value
+
+ # constructor
+ def _create(cls, type_code, **kwargs):
+ return cls(_type_code=type_code, kwargs=kwargs)
+ create = classmethod(_create)
+
+ # map constructor
+ def _from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(_from_map)
+
+ def get_type(self): return self._type
+
+ def get_access(self): return self._access
+
+ def is_optional(self): return self._isOptional
+
+ def is_index(self): return self._isIndex
+
+ def get_unit(self): return self._unit
+
+ def get_min(self): return self._min
+
+ def get_max(self): return self._max
+
+ def get_max_len(self): return self._maxlen
+
+ def get_desc(self): return self._desc
+
+ def get_reference(self): return self._reference
+
+ def is_parent_ref(self): return self._isParentRef
+
+ def get_direction(self): return self._dir
+
+ def get_default(self): return self._default
+
+ def is_continuous(self): return self._is_continuous
+
+ def map_encode(self):
+ """
+ Return the map encoding of this schema.
+ """
+ _map = {}
+ _map["amqp_type"] = self._type
+ _map["access"] = self._access
+ _map["index"] = self._isIndex
+ _map["optional"] = self._isOptional
+ if self._unit: _map["unit"] = self._unit
+ if self._min: _map["min"] = self._min
+ if self._max: _map["max"] = self._max
+ if self._maxlen: _map["maxlen"] = self._maxlen
+ if self._desc: _map["desc"] = self._desc
+ if self._reference: _map["reference"] = self._reference
+ _map["parent_ref"] = self._isParentRef
+ if self._dir: _map["dir"] = self._dir
+ if self._default: _map["default"] = self._default
+ if self._is_continuous: _map["continuous"] = self._is_continuous
+ return _map
+
+ def __repr__(self):
+ return "SchemaProperty=<<" + str(self.map_encode()) + ">>"
+
+ def _update_hash(self, hasher):
+ """
+ Update the given hash object with a hash computed over this schema.
+ """
+ hasher.update(str(self._type))
+ hasher.update(str(self._isIndex))
+ hasher.update(str(self._isOptional))
+ hasher.update(str(self._is_continuous))
+ if self._access: hasher.update(self._access)
+ if self._unit: hasher.update(self._unit)
+ if self._desc: hasher.update(self._desc)
+ if self._dir: hasher.update(self._dir)
+ if self._default: hasher.update(self._default)
+
+
+class SchemaMethod(_mapEncoder):
+ """
+ The SchemaMethod class describes the method's structure, and contains a
+ SchemaProperty class for each argument declared by the method.
+
+ Map format:
+    map["_arguments"] = map of "name"=<SchemaProperty> pairs.
+    map["_desc"] = str, description of the method
+ """
+ KEY_NAME="_name"
+ KEY_ARGUMENTS="_arguments"
+ KEY_DESC="_desc"
+ KEY_ERROR="_error"
+ def __init__(self, _args={}, _desc=None, _map=None):
+ """
+ Construct a SchemaMethod.
+
+        @type _args: map of "name"=<SchemaProperty> objects
+        @param _args: describes the arguments accepted by the method
+ @type _desc: str
+ @param _desc: Human-readable description of the schema
+ """
+ if _map is not None:
+ _desc = _map.get(self.KEY_DESC)
+ margs = _map.get(self.KEY_ARGUMENTS)
+ if margs:
+ # margs are in map format - covert to SchemaProperty
+ tmp_args = {}
+ for name,val in margs.iteritems():
+ tmp_args[name] = SchemaProperty.from_map(val)
+ _args=tmp_args
+
+ self._arguments = _args.copy()
+ self._desc = _desc
+
+ # map constructor
+ def _from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(_from_map)
+
+ def get_desc(self): return self._desc
+
+ def get_arg_count(self): return len(self._arguments)
+
+ def get_arguments(self): return self._arguments.copy()
+
+ def get_argument(self, name): return self._arguments.get(name)
+
+ def add_argument(self, name, schema):
+ """
+ Add an argument to the list of arguments passed to this method.
+ Used by an agent for dynamically creating method schema.
+
+ @type name: string
+ @param name: name of new argument
+ @type schema: SchemaProperty
+ @param schema: SchemaProperty to add to this method
+ """
+ if not isinstance(schema, SchemaProperty):
+ raise TypeError("argument must be a SchemaProperty class")
+ # "Input" argument, by default
+ if schema._dir is None:
+ schema._dir = "I"
+ self._arguments[name] = schema
+
+ def map_encode(self):
+ """
+ Return the map encoding of this schema.
+ """
+ _map = {}
+ _args = {}
+ for name,val in self._arguments.iteritems():
+ _args[name] = val.map_encode()
+ _map[self.KEY_ARGUMENTS] = _args
+ if self._desc: _map[self.KEY_DESC] = self._desc
+ return _map
+
+ def __repr__(self):
+ result = "SchemaMethod=<<args=("
+ first = True
+ for name,arg in self._arguments.iteritems():
+ if first:
+ first = False
+ else:
+ result += ", "
+ result += name
+ result += ")>>"
+ return result
+
+ def _update_hash(self, hasher):
+ """
+ Update the given hash object with a hash computed over this schema.
+ """
+ for name,val in self._arguments.iteritems():
+ hasher.update(name)
+ val._update_hash(hasher)
+ if self._desc: hasher.update(self._desc)
+
+
+
+class SchemaClass(QmfData):
+ """
+ Base class for Data and Event Schema classes.
+
+ Map format:
+ map(QmfData), plus:
+ map["_schema_id"] = map representation of a SchemaClassId instance
+    map["_primary_key_names"] = ordered list of primary key names
+ """
+ KEY_PRIMARY_KEY_NAMES="_primary_key_names"
+ KEY_DESC = "_desc"
+
+ SUBTYPE_PROPERTY="qmfProperty"
+ SUBTYPE_METHOD="qmfMethod"
+
+ def __init__(self, _classId=None, _desc=None, _map=None):
+ """
+ Schema Class constructor.
+
+ @type classId: class SchemaClassId
+ @param classId: Identifier for this SchemaClass
+ @type _desc: str
+ @param _desc: Human-readable description of the schema
+ """
+ if _map is not None:
+ super(SchemaClass, self).__init__(_map=_map)
+
+ # decode each value based on its type
+ for name,value in self._values.iteritems():
+ if self._subtypes.get(name) == self.SUBTYPE_METHOD:
+ self._values[name] = SchemaMethod.from_map(value)
+ else:
+ self._values[name] = SchemaProperty.from_map(value)
+ cid = _map.get(self.KEY_SCHEMA_ID)
+ if cid:
+ _classId = SchemaClassId.from_map(cid)
+ self._object_id_names = _map.get(self.KEY_PRIMARY_KEY_NAMES,[])
+ _desc = _map.get(self.KEY_DESC)
+ else:
+ if _classId is None:
+ raise Exception("A class identifier must be supplied.")
+ super(SchemaClass, self).__init__(_object_id=str(_classId))
+ self._object_id_names = []
+
+ self._classId = _classId
+ self._desc = _desc
+
+ def get_class_id(self):
+ if not self._classId.get_hash_string():
+ self.generate_hash()
+ return self._classId
+
+ def get_desc(self): return self._desc
+
+ def generate_hash(self):
+ """
+ generate an md5 hash over the body of the schema,
+ and return a string representation of the hash
+ in format "%08x-%08x-%08x-%08x"
+ """
+ md5Hash = _md5Obj()
+ md5Hash.update(self._classId.get_package_name())
+ md5Hash.update(self._classId.get_class_name())
+ md5Hash.update(self._classId.get_type())
+ for name,x in self._values.iteritems():
+ md5Hash.update(name)
+ x._update_hash( md5Hash )
+ for name,value in self._subtypes.iteritems():
+ md5Hash.update(name)
+ md5Hash.update(value)
+ idx = 0
+ for name in self._object_id_names:
+ md5Hash.update(str(idx) + name)
+ idx += 1
+ hstr = md5Hash.hexdigest()[0:8] + "-" +\
+ md5Hash.hexdigest()[8:16] + "-" +\
+ md5Hash.hexdigest()[16:24] + "-" +\
+ md5Hash.hexdigest()[24:32]
+ # update classId with new hash value
+ self._classId._hstr = hstr
+ return hstr
+
+
+ def get_property_count(self):
+ count = 0
+ for value in self._subtypes.itervalues():
+ if value == self.SUBTYPE_PROPERTY:
+ count += 1
+ return count
+
+ def get_properties(self):
+ props = {}
+ for name,value in self._subtypes.iteritems():
+ if value == self.SUBTYPE_PROPERTY:
+ props[name] = self._values.get(name)
+ return props
+
+    def get_property(self, name):
+        """Return the named SchemaProperty, or None if 'name' is absent
+        or not subtyped as a property."""
+        if self._subtypes.get(name) == self.SUBTYPE_PROPERTY:
+            return self._values.get(name)
+        return None
+
+    def add_property(self, name, prop):
+        """Add a SchemaProperty to the schema.  Invalidates the cached
+        schema hash so it is recomputed on the next get_class_id()."""
+        self.set_value(name, prop, self.SUBTYPE_PROPERTY)
+        # need to re-generate schema hash
+        self._classId._hstr = None
+
+    def get_value(self, name):
+        """Return the value for 'name'.  The schema's identity fields
+        (package, class, type, hash, schema id, primary key names) are
+        exposed as meta-properties before falling back to the map."""
+        # check for meta-properties first
+        if name == SchemaClassId.KEY_PACKAGE:
+            return self._classId.get_package_name()
+        if name == SchemaClassId.KEY_CLASS:
+            return self._classId.get_class_name()
+        if name == SchemaClassId.KEY_TYPE:
+            return self._classId.get_type()
+        if name == SchemaClassId.KEY_HASH:
+            # via get_class_id() so the hash is generated if necessary
+            return self.get_class_id().get_hash_string()
+        if name == self.KEY_SCHEMA_ID:
+            return self.get_class_id()
+        if name == self.KEY_PRIMARY_KEY_NAMES:
+            # return a copy so the caller cannot mutate our key list
+            return self._object_id_names[:]
+        return super(SchemaClass, self).get_value(name)
+
+ def has_value(self, name):
+ if name in [SchemaClassId.KEY_PACKAGE, SchemaClassId.KEY_CLASS, SchemaClassId.KEY_TYPE,
+ SchemaClassId.KEY_HASH, self.KEY_SCHEMA_ID, self.KEY_PRIMARY_KEY_NAMES]:
+ return True
+ super(SchemaClass, self).has_value(name)
+
+    def map_encode(self):
+        """
+        Return the map encoding of this schema.
+        """
+        _map = super(SchemaClass,self).map_encode()
+        # schema id is always present; key names and description are
+        # only emitted when non-empty
+        _map[self.KEY_SCHEMA_ID] = self.get_class_id().map_encode()
+        if self._object_id_names:
+            _map[self.KEY_PRIMARY_KEY_NAMES] = self._object_id_names[:]
+        if self._desc:
+            _map[self.KEY_DESC] = self._desc
+        return _map
+
+    def __repr__(self):
+        # a schema is identified by its class id ("package:class:type(hash)")
+        return str(self.get_class_id())
+
+
+
+class SchemaObjectClass(SchemaClass):
+ """
+ A schema class that describes a data object. The data object is composed
+ of zero or more properties and methods. An instance of the SchemaObjectClass
+ can be identified using a key generated by concantenating the values of
+ all properties named in the primary key list.
+
+ Map format:
+ map(SchemaClass)
+ """
+ def __init__(self, _classId=None, _desc=None,
+ _props={}, _methods={}, _object_id_names=[],
+ _map=None):
+ """
+ @type pname: str
+ @param pname: name of package this schema belongs to
+ @type cname: str
+ @param cname: class name for this schema
+ @type desc: str
+ @param desc: Human-readable description of the schema
+ @type _hash: str
+ @param _methods: hash computed on the body of this schema, if known
+ @type _props: map of 'name':<SchemaProperty> objects
+ @param _props: all properties provided by this schema
+ @type _pkey: list of strings
+ @param _pkey: names of each property to be used for constructing the primary key
+ @type _methods: map of 'name':<SchemaMethod> objects
+ @param _methods: all methods provided by this schema
+ """
+ if _map is not None:
+ super(SchemaObjectClass,self).__init__(_map=_map)
+ else:
+ super(SchemaObjectClass, self).__init__(_classId=_classId, _desc=_desc)
+ self._object_id_names = _object_id_names
+ for name,value in _props.iteritems():
+ self.set_value(name, value, self.SUBTYPE_PROPERTY)
+ for name,value in _methods.iteritems():
+ self.set_value(name, value, self.SUBTYPE_METHOD)
+
+ if self._classId.get_type() != SchemaClassId.TYPE_DATA:
+ raise TypeError("Invalid ClassId type for data schema: %s" % self._classId)
+
+ # map constructor
+ def __from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(__from_map)
+
+ def get_id_names(self):
+ return self._object_id_names[:]
+
+ def get_method_count(self):
+ count = 0
+ for value in self._subtypes.itervalues():
+ if value == self.SUBTYPE_METHOD:
+ count += 1
+ return count
+
+ def get_methods(self):
+ meths = {}
+ for name,value in self._subtypes.iteritems():
+ if value == self.SUBTYPE_METHOD:
+ meths[name] = self._values.get(name)
+ return meths
+
+ def get_method(self, name):
+ if self._subtypes.get(name) == self.SUBTYPE_METHOD:
+ return self._values.get(name)
+ return None
+
+ def add_method(self, name, method):
+ self.set_value(name, method, self.SUBTYPE_METHOD)
+ # need to re-generate schema hash
+ self._classId._hstr = None
+
+
+
+
+class SchemaEventClass(SchemaClass):
+ """
+ A schema class that describes an event. The event is composed
+ of zero or more properties.
+
+ Map format:
+ map["schema_id"] = map, SchemaClassId map for this object.
+ map["desc"] = string description of this schema
+ map["properties"] = map of "name":SchemaProperty values.
+ """
+ def __init__(self, _classId=None, _desc=None, _props={},
+ _map=None):
+ if _map is not None:
+ super(SchemaEventClass,self).__init__(_map=_map)
+ else:
+ super(SchemaEventClass, self).__init__(_classId=_classId,
+ _desc=_desc)
+ for name,value in _props.iteritems():
+ self.set_value(name, value, self.SUBTYPE_PROPERTY)
+
+ if self._classId.get_type() != SchemaClassId.TYPE_EVENT:
+ raise TypeError("Invalid ClassId type for event schema: %s" %
+ self._classId)
+
+ # map constructor
+ def __from_map(cls, map_):
+ return cls(_map=map_)
+ from_map = classmethod(__from_map)
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/console.py b/qpid/extras/qmf/src/py/qmf2-prototype/console.py
new file mode 100644
index 0000000000..9227835b3f
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/console.py
@@ -0,0 +1,2626 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import sys
+import os
+import platform
+import time
+import datetime
+import Queue
+from logging import getLogger
+from threading import Thread, Event
+from threading import RLock
+from threading import currentThread
+from threading import Condition
+
+from qpid.messaging import Connection, Message, Empty, SendError
+
+from common import (QMF_APP_ID, OpCode, QmfQuery, Notifier, ContentType,
+ QmfData, QmfAddress, SchemaClass, SchemaClassId,
+ SchemaEventClass, SchemaObjectClass, WorkItem,
+ SchemaMethod, QmfEvent, timedelta_to_secs)
+
+
+# global flag that indicates which thread (if any) is
+# running the console notifier callback
+_callback_thread=None
+
+
+log = getLogger("qmf")
+trace = getLogger("qmf.console")
+
+
+##==============================================================================
+## Console Transaction Management
+##
+## At any given time, a console application may have multiple outstanding
+## message transactions with agents. The following objects allow the console
+## to track these outstanding transactions.
+##==============================================================================
+
+
+class _Mailbox(object):
+    """
+    Virtual base class for all Mailbox-like objects.
+    """
+    def __init__(self, console):
+        # registering with the console assigns this mailbox a unique cid
+        # (via console._add_mailbox) used to correlate reply messages
+        self.console = console
+        self.cid = 0
+        self.console._add_mailbox(self)
+
+    def get_address(self):
+        # the correlation id doubles as the mailbox's address
+        return self.cid
+
+    def deliver(self, data):
+        """
+        Invoked by Console Management thread when a message arrives for
+        this mailbox.
+        """
+        raise Exception("_Mailbox deliver() method must be provided")
+
+    def destroy(self):
+        """
+        Release the mailbox. Once called, the mailbox should no longer be
+        referenced.
+        """
+        self.console._remove_mailbox(self.cid)
+
+
+class _SyncMailbox(_Mailbox):
+    """
+    A simple mailbox that allows a consumer to wait for delivery of data.
+    """
+    def __init__(self, console):
+        """
+        Invoked by application thread.
+        """
+        super(_SyncMailbox, self).__init__(console)
+        # condition variable guards _data; producer is the console
+        # management thread, consumer is the application thread
+        self._cv = Condition()
+        self._data = []
+        self._waiting = False
+
+    def deliver(self, data):
+        """
+        Drop data into the mailbox, waking any waiters if necessary.
+        Invoked by Console Management thread only.
+        """
+        self._cv.acquire()
+        try:
+            self._data.append(data)
+            # if was empty, notify waiters
+            if len(self._data) == 1:
+                self._cv.notify()
+        finally:
+            self._cv.release()
+
+    def fetch(self, timeout=None):
+        """
+        Get one data item from a mailbox, with timeout.
+        Invoked by application thread.
+        Returns None if nothing arrives before the timeout expires.
+        """
+        self._cv.acquire()
+        try:
+            if len(self._data) == 0:
+                self._cv.wait(timeout)
+            # re-check: the wait may have timed out without a delivery
+            if len(self._data):
+                return self._data.pop(0)
+            return None
+        finally:
+            self._cv.release()
+
+
+class _AsyncMailbox(_Mailbox):
+    """
+    A Mailbox for asynchronous delivery, with a timeout value.
+    Subclasses must implement deliver() and expire(); the console
+    management thread calls expire() once expiration_date passes.
+    """
+    def __init__(self, console,
+                 _timeout=None):
+        """
+        Invoked by application thread.
+        @param _timeout: seconds until the mailbox expires; defaults to
+        the console's reply timeout.
+        """
+        super(_AsyncMailbox, self).__init__(console)
+        self.console = console
+
+        if _timeout is None:
+            _timeout = console._reply_timeout
+        self.expiration_date = (datetime.datetime.utcnow() +
+                                datetime.timedelta(seconds=_timeout))
+        console._lock.acquire()
+        try:
+            console._async_mboxes[self.cid] = self
+            # force the management thread to recompute its next wakeup
+            console._next_mbox_expire = None
+        finally:
+            console._lock.release()
+
+        # now that an async mbox has been created, wake the
+        # console mgmt thread so it will know about the mbox expiration
+        # date (and adjust its idle sleep period correctly)
+
+        console._wake_thread()
+
+    def reset_timeout(self, _timeout=None):
+        """ Reset the expiration date for this mailbox.
+        """
+        if _timeout is None:
+            _timeout = self.console._reply_timeout
+        self.console._lock.acquire()
+        try:
+            self.expiration_date = (datetime.datetime.utcnow() +
+                                    datetime.timedelta(seconds=_timeout))
+            self.console._next_mbox_expire = None
+        finally:
+            self.console._lock.release()
+
+        # wake the console mgmt thread so it will learn about the mbox
+        # expiration date (and adjust its idle sleep period correctly)
+
+        self.console._wake_thread()
+
+    def deliver(self, msg):
+        """
+        Process a message addressed to this mailbox (subclass hook).
+        """
+        raise Exception("deliver() method must be provided")
+
+    def expire(self):
+        # subclass hook: invoked when the expiration date passes
+        raise Exception("expire() method must be provided")
+
+
+    def destroy(self):
+        # deregister from the expiration tracking map before releasing
+        # the base mailbox entry
+        self.console._lock.acquire()
+        try:
+            if self.cid in self.console._async_mboxes:
+                del self.console._async_mboxes[self.cid]
+        finally:
+            self.console._lock.release()
+        super(_AsyncMailbox, self).destroy()
+
+
+
+class _QueryMailbox(_AsyncMailbox):
+    """
+    A mailbox used for asynchronous query requests.
+    Accumulates (possibly partial) replies and posts a QUERY_COMPLETE
+    work item when the final reply arrives or the mailbox expires.
+    """
+    def __init__(self, console,
+                 agent_name,
+                 context,
+                 target,
+                 _timeout=None):
+        """
+        Invoked by application thread.
+        @param agent_name: name of the agent being queried
+        @param context: application handle echoed back in the work item
+        @param target: QmfQuery.TARGET_* constant; controls reply decoding
+        """
+        super(_QueryMailbox, self).__init__(console,
+                                            _timeout)
+        self.agent_name = agent_name
+        self.target = target
+        self.context = context
+        self.result = []
+
+    def deliver(self, reply):
+        """
+        Process query response messages delivered to this mailbox.
+        Invoked by Console Management thread only.
+        """
+        trace.debug("Delivering to query mailbox (agent=%s)." % self.agent_name)
+        objects = reply.content
+        if isinstance(objects, type([])):
+            # convert from map to native types if needed
+            if self.target == QmfQuery.TARGET_SCHEMA_ID:
+                for sid_map in objects:
+                    self.result.append(SchemaClassId.from_map(sid_map))
+
+            elif self.target == QmfQuery.TARGET_SCHEMA:
+                for schema_map in objects:
+                    # extract schema id, convert based on schema type
+                    sid_map = schema_map.get(SchemaClass.KEY_SCHEMA_ID)
+                    if sid_map:
+                        sid = SchemaClassId.from_map(sid_map)
+                        if sid:
+                            if sid.get_type() == SchemaClassId.TYPE_DATA:
+                                schema = SchemaObjectClass.from_map(schema_map)
+                            else:
+                                schema = SchemaEventClass.from_map(schema_map)
+                            self.console._add_schema(schema)  # add to schema cache
+                            self.result.append(schema)
+
+            elif self.target == QmfQuery.TARGET_OBJECT:
+                for obj_map in objects:
+                    # @todo: need the agent name - ideally from the
+                    # reply message itself.
+                    agent = self.console.get_agent(self.agent_name)
+                    if agent:
+                        obj = QmfConsoleData(map_=obj_map, agent=agent)
+                        # start fetch of schema if not known
+                        sid = obj.get_schema_class_id()
+                        if sid:
+                            self.console._prefetch_schema(sid, agent)
+                        self.result.append(obj)
+
+
+            else:
+                # no conversion needed.
+                self.result += objects
+
+        # absence of the "partial" property marks the final reply
+        if not "partial" in reply.properties:
+            # log.error("QUERY COMPLETE for %s" % str(self.context))
+            wi = WorkItem(WorkItem.QUERY_COMPLETE, self.context, self.result)
+            self.console._work_q.put(wi)
+            self.console._work_q_put = True
+
+            self.destroy()
+
+
+    def expire(self):
+        trace.debug("Expiring query mailbox (agent=%s)." % self.agent_name)
+        # send along whatever (possibly none) has been received so far
+        wi = WorkItem(WorkItem.QUERY_COMPLETE, self.context, self.result)
+        self.console._work_q.put(wi)
+        self.console._work_q_put = True
+
+        self.destroy()
+
+
+
+class _SchemaPrefetchMailbox(_AsyncMailbox):
+    """
+    Handles responses to schema fetches made by the console.
+    On success the schema is added to the console's cache; no work item
+    is generated for the application.
+    """
+    def __init__(self, console,
+                 schema_id,
+                 _timeout=None):
+        """
+        Invoked by application thread.
+        @param schema_id: SchemaClassId of the schema being fetched
+        """
+        super(_SchemaPrefetchMailbox, self).__init__(console,
+                                                     _timeout)
+        self.schema_id = schema_id
+
+    def deliver(self, reply):
+        """
+        Process schema response messages.
+        """
+        trace.debug("Delivering schema mailbox (id=%s)." % self.schema_id)
+        done = False
+        schemas = reply.content
+        if schemas and isinstance(schemas, type([])):
+            for schema_map in schemas:
+                # extract schema id, convert based on schema type
+                sid_map = schema_map.get(SchemaClass.KEY_SCHEMA_ID)
+                if sid_map:
+                    sid = SchemaClassId.from_map(sid_map)
+                    if sid:
+                        if sid.get_type() == SchemaClassId.TYPE_DATA:
+                            schema = SchemaObjectClass.from_map(schema_map)
+                        else:
+                            schema = SchemaEventClass.from_map(schema_map)
+                        self.console._add_schema(schema)  # add to schema cache
+        self.destroy()
+
+
+    def expire(self):
+        # prefetch is best-effort: on timeout simply drop the mailbox
+        trace.debug("Expiring schema mailbox (id=%s)." % self.schema_id)
+        self.destroy()
+
+
+
+class _MethodMailbox(_AsyncMailbox):
+    """
+    A mailbox used for asynchronous method requests.
+    Converts the reply into a MethodResult and posts a METHOD_RESPONSE
+    work item to the application.
+    """
+    def __init__(self, console,
+                 context,
+                 _timeout=None):
+        """
+        Invoked by application thread.
+        @param context: application handle echoed back in the work item
+        """
+        super(_MethodMailbox, self).__init__(console,
+                                             _timeout)
+        self.context = context
+
+    def deliver(self, reply):
+        """
+        Process method response messages delivered to this mailbox.
+        Invoked by Console Management thread only.
+        """
+        trace.debug("Delivering to method mailbox.")
+        _map = reply.content
+        if not _map or not isinstance(_map, type({})):
+            log.error("Invalid method call reply message")
+            result = None
+        else:
+            # a reply carries either an error map or the output arguments
+            error=_map.get(SchemaMethod.KEY_ERROR)
+            if error:
+                error = QmfData.from_map(error)
+                result = MethodResult(_error=error)
+            else:
+                result = MethodResult(_out_args=_map.get(SchemaMethod.KEY_ARGUMENTS))
+
+        # create workitem
+        wi = WorkItem(WorkItem.METHOD_RESPONSE, self.context, result)
+        self.console._work_q.put(wi)
+        self.console._work_q_put = True
+
+        self.destroy()
+
+
+    def expire(self):
+        """
+        The mailbox expired without receiving a reply.
+        Invoked by the Console Management thread only.
+        """
+        trace.debug("Expiring method mailbox.")
+        # send along an empty response
+        wi = WorkItem(WorkItem.METHOD_RESPONSE, self.context, None)
+        self.console._work_q.put(wi)
+        self.console._work_q_put = True
+
+        self.destroy()
+
+
+
+class _SubscriptionMailbox(_AsyncMailbox):
+    """
+    A Mailbox for a single subscription. Allows only synchronous "subscribe"
+    and "refresh" requests.
+    """
+    def __init__(self, console, context, agent, duration, interval):
+        """
+        Invoked by application thread.
+        @param context: application handle echoed back in work items
+        @param agent: Agent hosting the subscription
+        @param duration: requested subscription lifetime (seconds)
+        @param interval: requested publish interval (seconds)
+        """
+        super(_SubscriptionMailbox, self).__init__(console, duration)
+        # cv/data: synchronous hand-off of SubscribeParams to fetch()
+        self.cv = Condition()
+        self.data = []
+        # result: accumulated data objects from indications
+        self.result = []
+        self.context = context
+        self.duration = duration
+        self.interval = interval
+        self.agent_name = agent.get_name()
+        self.agent_subscription_id = None  # from agent
+
+    def subscribe(self, query):
+        """Send the subscribe request. Returns False if the agent is
+        unknown or the send fails."""
+        agent = self.console.get_agent(self.agent_name)
+        if not agent:
+            log.warning("subscribed failed - unknown agent '%s'" %
+                        self.agent_name)
+            return False
+        try:
+            trace.debug("Sending Subscribe to Agent (%s)" % self.agent_name)
+            agent._send_subscribe_req(query, self.get_address(), self.interval,
+                                      self.duration)
+        except SendError, e:
+            log.error(str(e))
+            return False
+        return True
+
+    def resubscribe(self):
+        """Send a refresh request for the existing agent-side
+        subscription id. Returns False on failure."""
+        agent = self.console.get_agent(self.agent_name)
+        if not agent:
+            log.warning("resubscribed failed - unknown agent '%s'",
+                        self.agent_name)
+            return False
+        try:
+            trace.debug("Sending resubscribe to Agent %s", self.agent_name)
+            agent._send_resubscribe_req(self.get_address(),
+                                        self.agent_subscription_id)
+        except SendError, e:
+            log.error(str(e))
+            return False
+        return True
+
+    def deliver(self, msg):
+        """
+        Handle both subscribe/refresh replies and data indications.
+        Invoked by Console Management thread only.
+        """
+        opcode = msg.properties.get("qmf.opcode")
+        if (opcode == OpCode.subscribe_rsp):
+
+            # subscribe (or refresh) reply: build a SubscribeParams and
+            # hand it to the application thread blocked in fetch()
+            error = msg.content.get("_error")
+            if error:
+                try:
+                    e_map = QmfData.from_map(error)
+                except TypeError:
+                    log.warning("Invalid QmfData map received: '%s'"
+                                % str(error))
+                    e_map = QmfData.create({"error":"Unknown error"})
+                sp = SubscribeParams(None, None, None, e_map)
+            else:
+                # agent may have adjusted the requested interval/duration
+                self.agent_subscription_id = msg.content.get("_subscription_id")
+                self.duration = msg.content.get("_duration", self.duration)
+                self.interval = msg.content.get("_interval", self.interval)
+                self.reset_timeout(self.duration)
+                sp = SubscribeParams(self.get_address(),
+                                     self.interval,
+                                     self.duration,
+                                     None)
+            self.cv.acquire()
+            try:
+                self.data.append(sp)
+                # if was empty, notify waiters
+                if len(self.data) == 1:
+                    self.cv.notify()
+            finally:
+                self.cv.release()
+            return
+
+        # else: data indication
+        agent_name = msg.properties.get("qmf.agent")
+        if not agent_name:
+            log.warning("Ignoring data_ind - no agent name given: %s" %
+                        msg)
+            return
+        agent = self.console.get_agent(agent_name)
+        if not agent:
+            log.warning("Ignoring data_ind - unknown agent '%s'" %
+                        agent_name)
+            return
+
+        objects = msg.content
+        for obj_map in objects:
+            obj = QmfConsoleData(map_=obj_map, agent=agent)
+            # start fetch of schema if not known
+            sid = obj.get_schema_class_id()
+            if sid:
+                self.console._prefetch_schema(sid, agent)
+            self.result.append(obj)
+
+        # absence of "partial" marks the end of this indication batch
+        if not "partial" in msg.properties:
+            wi = WorkItem(WorkItem.SUBSCRIBE_INDICATION, self.context, self.result)
+            self.result = []
+            self.console._work_q.put(wi)
+            self.console._work_q_put = True
+
+    def fetch(self, timeout=None):
+        """
+        Get one data item from a mailbox, with timeout.
+        Invoked by application thread.
+        """
+        self.cv.acquire()
+        try:
+            if len(self.data) == 0:
+                self.cv.wait(timeout)
+            if len(self.data):
+                return self.data.pop(0)
+            return None
+        finally:
+            self.cv.release()
+
+    def expire(self):
+        """ The subscription expired.
+        """
+        self.destroy()
+
+
+
+
+class _AsyncSubscriptionMailbox(_SubscriptionMailbox):
+    """
+    A Mailbox for a single subscription. Allows only asynchronous "subscribe"
+    and "refresh" requests.
+    """
+    def __init__(self, console, context, agent, duration, interval):
+        """
+        Invoked by application thread.
+        """
+        super(_AsyncSubscriptionMailbox, self).__init__(console, context,
+                                                        agent, duration,
+                                                        interval)
+        # True while a subscribe request is awaiting its reply
+        self.subscribe_pending = False
+
+    def subscribe(self, query, reply_timeout):
+        """Send the subscribe request without blocking; the reply is
+        surfaced later as a SUBSCRIBE_RESPONSE work item."""
+        if super(_AsyncSubscriptionMailbox, self).subscribe(query):
+            self.subscribe_pending = True
+            # mailbox must not expire before the reply can arrive
+            self.reset_timeout(reply_timeout)
+            return True
+        return False
+
+    def deliver(self, msg):
+        """
+        Let the base class decode the message, then convert any queued
+        SubscribeParams into a SUBSCRIBE_RESPONSE work item.
+        """
+        super(_AsyncSubscriptionMailbox, self).deliver(msg)
+        sp = self.fetch(0)
+        if sp and self.subscribe_pending:
+            wi = WorkItem(WorkItem.SUBSCRIBE_RESPONSE, self.context, sp)
+            self.console._work_q.put(wi)
+            self.console._work_q_put = True
+
+            self.subscribe_pending = False
+
+            # a failed subscribe means the mailbox has no further use
+            if not sp.succeeded():
+                self.destroy()
+
+
+    def expire(self):
+        """ Either the subscription expired, or a request timedout.
+        """
+        if self.subscribe_pending:
+            # report the timed-out subscribe attempt with a None result
+            wi = WorkItem(WorkItem.SUBSCRIBE_RESPONSE, self.context, None)
+            self.console._work_q.put(wi)
+            self.console._work_q_put = True
+        self.destroy()
+
+
+##==============================================================================
+## DATA MODEL
+##==============================================================================
+
+
+class QmfConsoleData(QmfData):
+    """
+    Console's representation of a managed QmfData instance.
+    Instances are read-only snapshots; use refresh() to update and
+    invoke_method() to act on the remote object.
+    """
+    def __init__(self, map_, agent):
+        super(QmfConsoleData, self).__init__(_map=map_,
+                                             _const=True)
+        self._agent = agent
+
+    def get_timestamps(self):
+        """
+        Returns a list of timestamps describing the lifecycle of
+        the object. All timestamps are represented by the AMQP
+        timestamp type. [0] = time of last update from Agent,
+                        [1] = creation timestamp
+                        [2] = deletion timestamp, or zero if not
+        deleted.
+        """
+        return [self._utime, self._ctime, self._dtime]
+
+    def get_create_time(self):
+        """
+        returns the creation timestamp
+        """
+        return self._ctime
+
+    def get_update_time(self):
+        """
+        returns the update timestamp
+        """
+        return self._utime
+
+    def get_delete_time(self):
+        """
+        returns the deletion timestamp, or zero if not yet deleted.
+        """
+        return self._dtime
+
+    def is_deleted(self):
+        """
+        True if deletion timestamp not zero.
+        """
+        return self._dtime != long(0)
+
+    def refresh(self, _reply_handle=None, _timeout=None):
+        """
+        request that the Agent update the value of this object's
+        contents.  Returns self on success, None on timeout or if the
+        agent no longer reports exactly one matching object.
+        """
+        if _reply_handle is not None:
+            log.error(" ASYNC REFRESH TBD!!!")
+            return None
+
+        assert self._agent
+        assert self._agent._console
+
+        if _timeout is None:
+            _timeout = self._agent._console._reply_timeout
+
+        # create query to agent using this objects ID
+        query = QmfQuery.create_id_object(self.get_object_id(),
+                                          self.get_schema_class_id())
+        obj_list = self._agent._console.do_query(self._agent, query,
+                                                 _timeout=_timeout)
+        if obj_list is None or len(obj_list) != 1:
+            return None
+
+        self._update(obj_list[0])
+        return self
+
+
+    def invoke_method(self, name, _in_args={}, _reply_handle=None,
+                      _timeout=None):
+        """
+        Invoke the named method on this object.
+        @param _in_args: map of input argument name:value pairs
+        @param _reply_handle: if given, reply is delivered asynchronously
+        as a METHOD_RESPONSE work item and True is returned immediately
+        @return: MethodResult (sync), True (async), or None on failure
+        """
+        assert self._agent
+        assert self._agent._console
+
+        oid = self.get_object_id()
+        if oid is None:
+            raise ValueError("Cannot invoke methods on unmanaged objects.")
+
+        if _timeout is None:
+            _timeout = self._agent._console._reply_timeout
+
+        # async requests use a mailbox that posts a work item; sync
+        # requests block on a _SyncMailbox below
+        if _reply_handle is not None:
+            mbox = _MethodMailbox(self._agent._console,
+                                  _reply_handle)
+        else:
+            mbox = _SyncMailbox(self._agent._console)
+        cid = mbox.get_address()
+
+        _map = {self.KEY_OBJECT_ID:str(oid),
+                SchemaMethod.KEY_NAME:name}
+
+        sid = self.get_schema_class_id()
+        if sid:
+            _map[self.KEY_SCHEMA_ID] = sid.map_encode()
+        if _in_args:
+            _map[SchemaMethod.KEY_ARGUMENTS] = _in_args
+
+        trace.debug("Sending method req to Agent (%s)" % time.time())
+        try:
+            self._agent._send_method_req(_map, cid)
+        except SendError, e:
+            log.error(str(e))
+            mbox.destroy()
+            return None
+
+        if _reply_handle is not None:
+            return True
+
+        trace.debug("Waiting for response to method req (%s)" % _timeout)
+        replyMsg = mbox.fetch(_timeout)
+        mbox.destroy()
+
+        if not replyMsg:
+            trace.debug("Agent method req wait timed-out.")
+            return None
+
+        _map = replyMsg.content
+        if not _map or not isinstance(_map, type({})):
+            log.error("Invalid method call reply message")
+            return None
+
+        error=_map.get(SchemaMethod.KEY_ERROR)
+        if error:
+            return MethodResult(_error=QmfData.from_map(error))
+        else:
+            return MethodResult(_out_args=_map.get(SchemaMethod.KEY_ARGUMENTS))
+
+    def _update(self, newer):
+        # replace this object's entire state with the newer snapshot by
+        # re-running the base initializer
+        super(QmfConsoleData,self).__init__(_values=newer._values, _subtypes=newer._subtypes,
+                                            _tag=newer._tag, _object_id=newer._object_id,
+                                            _ctime=newer._ctime, _utime=newer._utime,
+                                            _dtime=newer._dtime,
+                                            _schema_id=newer._schema_id, _const=True)
+
+class QmfLocalData(QmfData):
+    """
+    Console's representation of an unmanaged QmfData instance. There
+    is no remote agent associated with this instance. The Console has
+    full control over this instance.
+    """
+    def __init__(self, values, _subtypes={}, _tag=None, _object_id=None,
+                 _schema=None):
+        # timestamp in millisec since epoch UTC
+        ctime = long(time.time() * 1000)
+        # created and updated now; mutable (_const=False) since the
+        # console owns the instance
+        super(QmfLocalData, self).__init__(_values=values,
+                                           _subtypes=_subtypes, _tag=_tag,
+                                           _object_id=_object_id,
+                                           _schema=_schema, _ctime=ctime,
+                                           _utime=ctime, _const=False)
+
+
+class Agent(object):
+    """
+    A local representation of a remote agent managed by this console.
+    Wraps the agent's direct address and provides the wire-level send
+    helpers used by the console and its mailboxes.
+    """
+    def __init__(self, name, console):
+        """
+        @type name: string
+        @param name: uniquely identifies this agent in the AMQP domain.
+        """
+
+        if not isinstance(console, Console):
+            raise TypeError("parameter must be an instance of class Console")
+
+        self._name = name
+        self._address = QmfAddress.direct(name, console._domain)
+        self._console = console
+        self._sender = None
+        self._packages = {} # map of {package-name:[list of class-names], } for this agent
+        self._subscriptions = [] # list of active standing subscriptions for this agent
+        self._announce_timestamp = None # datetime when last announce received
+        trace.debug( "Created Agent with address: [%s]" % self._address )
+
+
+    def get_name(self):
+        # the unique agent name within the AMQP domain
+        return self._name
+
+    def is_active(self):
+        # active means at least one announce has been received
+        return self._announce_timestamp != None
+
+    def _send_msg(self, msg, correlation_id=None):
+        """
+        Low-level routine to asynchronously send a message to this agent.
+        The reply-to is set to the console's direct address so responses
+        come back to us.
+        """
+        msg.reply_to = str(self._console._address)
+        if correlation_id:
+            msg.correlation_id = str(correlation_id)
+        # TRACE
+        #log.error("!!! Console %s sending to agent %s (%s)" %
+        #          (self._console._name, self._name, str(msg)))
+        self._sender.send(msg)
+        # return handle
+
+    def get_packages(self):
+        """
+        Return a list of the names of all packages known to this agent.
+        """
+        return self._packages.keys()
+
+    def get_classes(self):
+        """
+        Return a dictionary [key:class] of classes known to this agent.
+        """
+        return self._packages.copy()
+
+    def get_objects(self, query, kwargs={}):
+        """
+        Return a list of objects that satisfy the given query.
+
+        @type query: dict, or common.Query
+        @param query: filter for requested objects
+        @type kwargs: dict
+        @param kwargs: ??? used to build match selector and query ???
+        @rtype: list
+        @return: list of matching objects, or None.
+        """
+        pass
+
+    def get_object(self, query, kwargs={}):
+        """
+        Get one object - query is expected to match only one object.
+        ??? Recommended: explicit timeout param, default None ???
+
+        @type query: dict, or common.Query
+        @param query: filter for requested objects
+        @type kwargs: dict
+        @param kwargs: ??? used to build match selector and query ???
+        @rtype: qmfConsole.ObjectProxy
+        @return: one matching object, or none
+        """
+        pass
+
+
+    def create_subscription(self, query):
+        """
+        Factory for creating standing subscriptions based on a given query.
+
+        @type query: common.Query object
+        @param query: determines the list of objects for which this subscription applies
+        @rtype: qmfConsole.Subscription
+        @returns: an object representing the standing subscription.
+        """
+        pass
+
+
+    def invoke_method(self, name, _in_args={}, _reply_handle=None,
+                      _timeout=None):
+        """
+        Invoke the named method on this agent.
+        @param _in_args: map of input argument name:value pairs (copied)
+        @param _reply_handle: if given, reply is delivered asynchronously
+        as a METHOD_RESPONSE work item and True is returned immediately
+        @return: MethodResult (sync), True (async), or None on failure
+        """
+        assert self._console
+
+        if _timeout is None:
+            _timeout = self._console._reply_timeout
+
+        # async requests use a work-item mailbox; sync requests block
+        # on a _SyncMailbox
+        if _reply_handle is not None:
+            mbox = _MethodMailbox(self._console,
+                                  _reply_handle)
+        else:
+            mbox = _SyncMailbox(self._console)
+        cid = mbox.get_address()
+
+        _map = {SchemaMethod.KEY_NAME:name}
+        if _in_args:
+            _map[SchemaMethod.KEY_ARGUMENTS] = _in_args.copy()
+
+        trace.debug("Sending method req to Agent (%s)" % time.time())
+        try:
+            self._send_method_req(_map, cid)
+        except SendError, e:
+            log.error(str(e))
+            mbox.destroy()
+            return None
+
+        if _reply_handle is not None:
+            return True
+
+        trace.debug("Waiting for response to method req (%s)" % _timeout)
+        replyMsg = mbox.fetch(_timeout)
+        mbox.destroy()
+
+        if not replyMsg:
+            trace.debug("Agent method req wait timed-out.")
+            return None
+
+        _map = replyMsg.content
+        if not _map or not isinstance(_map, type({})):
+            log.error("Invalid method call reply message")
+            return None
+
+        return MethodResult(_out_args=_map.get(SchemaMethod.KEY_ARGUMENTS),
+                            _error=_map.get(SchemaMethod.KEY_ERROR))
+
+    def enable_events(self):
+        raise Exception("enable_events tbd")
+
+    def disable_events(self):
+        raise Exception("disable_events tbd")
+
+    def destroy(self):
+        raise Exception("destroy tbd")
+
+    def __repr__(self):
+        return str(self._address)
+
+    def __str__(self):
+        return self.__repr__()
+
+    def _send_query(self, query, correlation_id=None):
+        """
+        Send a query request message to this agent.
+        """
+        msg = Message(id=QMF_APP_ID,
+                      properties={"method":"request",
+                                  "qmf.opcode":OpCode.query_req},
+                      content=query.map_encode())
+        self._send_msg( msg, correlation_id )
+
+
+    def _send_method_req(self, mr_map, correlation_id=None):
+        """
+        Send a method request message (pre-encoded map) to this agent.
+        """
+        msg = Message(id=QMF_APP_ID,
+                      properties={"method":"request",
+                                  "qmf.opcode":OpCode.method_req},
+                      content=mr_map)
+        self._send_msg( msg, correlation_id )
+
+    def _send_subscribe_req(self, query, correlation_id, _interval=None,
+                            _lifetime=None):
+        """
+        Send a subscription request for the given query; interval and
+        duration are included only when supplied.
+        """
+        sr_map = {"_query":query.map_encode()}
+        if _interval is not None:
+            sr_map["_interval"] = _interval
+        if _lifetime is not None:
+            sr_map["_duration"] = _lifetime
+
+        msg = Message(id=QMF_APP_ID,
+                      properties={"method":"request",
+                                  "qmf.opcode":OpCode.subscribe_req},
+                      content=sr_map)
+        self._send_msg(msg, correlation_id)
+
+
+    def _send_resubscribe_req(self, correlation_id,
+                              subscription_id):
+        """
+        Send a refresh indication for an existing subscription.
+        """
+        sr_map = {"_subscription_id":subscription_id}
+
+        msg = Message(id=QMF_APP_ID,
+                      properties={"method":"request",
+                                  "qmf.opcode":OpCode.subscribe_refresh_ind},
+                      content=sr_map)
+        self._send_msg(msg, correlation_id)
+
+
+    def _send_unsubscribe_ind(self, correlation_id, subscription_id):
+        """
+        Send a cancel indication for an existing subscription.
+        """
+        sr_map = {"_subscription_id":subscription_id}
+
+        msg = Message(id=QMF_APP_ID,
+                      properties={"method":"request",
+                                  "qmf.opcode":OpCode.subscribe_cancel_ind},
+                      content=sr_map)
+        self._send_msg(msg, correlation_id)
+
+
+ ##==============================================================================
+ ## METHOD CALL
+ ##==============================================================================
+
+class MethodResult(object):
+    """
+    Holds the outcome of a method invocation: either output arguments
+    (success) or an error value (failure).
+    """
+    def __init__(self, _out_args=None, _error=None):
+        self._error = _error
+        self._out_args = _out_args
+
+    def succeeded(self):
+        # success is defined as the absence of an error value
+        return self._error is None
+
+    def get_exception(self):
+        # the error value supplied by the agent, or None
+        return self._error
+
+    def get_arguments(self):
+        # the full output-argument map, or None
+        return self._out_args
+
+    def get_argument(self, name):
+        """Return the named output argument, or None if absent."""
+        arg = None
+        if self._out_args:
+            arg = self._out_args.get(name)
+        return arg
+
+
+
+ ##==============================================================================
+ ## SUBSCRIPTION
+ ##==============================================================================
+
+class SubscribeParams(object):
+    """ Represents a standing subscription for this console.
+    Carries the subscription id, the agreed publish interval and
+    duration, or an error value if the subscribe failed.
+    """
+    def __init__(self, sid, interval, duration, _error=None):
+        self._sid = sid
+        self._interval = interval
+        self._duration = duration
+        self._error = _error
+
+    def succeeded(self):
+        # success is defined as the absence of an error value
+        return self._error is None
+
+    def get_error(self):
+        # the error value supplied by the agent, or None
+        return self._error
+
+    def get_subscription_id(self):
+        # console-side subscription identifier (mailbox address)
+        return self._sid
+
+    def get_publish_interval(self):
+        # agreed publish interval in seconds (may differ from requested)
+        return self._interval
+
+    def get_duration(self):
+        # agreed subscription lifetime in seconds
+        return self._duration
+
+
+ ##==============================================================================
+ ## CONSOLE
+ ##==============================================================================
+
+
+
+
+
+
+class Console(Thread):
+ """
+ A Console manages communications to a collection of agents on behalf of an application.
+ """
+    def __init__(self, name=None, _domain=None, notifier=None,
+                 reply_timeout = 60,
+                 # agent_timeout = 120,
+                 agent_timeout = 60,
+                 kwargs={}):
+        """
+        @type name: str
+        @param name: identifier for this console.  Must be unique.
+        @type notifier: qmfConsole.Notifier
+        @param notifier: invoked when events arrive for processing.
+        @type reply_timeout: int
+        @param reply_timeout: default seconds to wait for agent replies.
+        @type agent_timeout: int
+        @param agent_timeout: seconds without an announce before an agent
+        is considered expired.
+        @type kwargs: dict
+        @param kwargs: ??? Unused
+        """
+        Thread.__init__(self)
+        self._operational = False
+        self._ready = Event()
+
+        if not name:
+            # default to a host/pid-derived name for uniqueness
+            self._name = "qmfc-%s.%d" % (platform.node(), os.getpid())
+        else:
+            self._name = str(name)
+        self._domain = _domain
+        self._address = QmfAddress.direct(self._name, self._domain)
+        self._notifier = notifier
+        self._lock = RLock()
+        self._conn = None
+        self._session = None
+        # dict of "agent-direct-address":class Agent entries
+        self._agent_map = {}
+        self._direct_recvr = None
+        self._announce_recvr = None
+        self._locate_sender = None
+        self._schema_cache = {}
+        self._pending_schema_req = []
+        self._agent_discovery_filter = None
+        self._reply_timeout = reply_timeout
+        self._agent_timeout = agent_timeout
+        self._subscribe_timeout = 300  # @todo: parameterize
+        self._next_agent_expire = None
+        self._next_mbox_expire = None
+        # for passing WorkItems to the application
+        self._work_q = Queue.Queue()
+        self._work_q_put = False
+        # Correlation ID and mailbox storage
+        self._correlation_id = long(time.time())  # pseudo-randomize
+        self._post_office = {} # indexed by cid
+        self._async_mboxes = {} # indexed by cid, used to expire them
+
+ def destroy(self, timeout=None):
+ """
+ Must be called before the Console is deleted.
+ Frees up all resources and shuts down all background threads.
+
+ @type timeout: float
+ @param timeout: maximum time in seconds to wait for all background threads to terminate. Default: forever.
+ """
+ trace.debug("Destroying Console...")
+ if self._conn:
+ self.remove_connection(self._conn, timeout)
+ trace.debug("Console Destroyed")
+
+ def add_connection(self, conn):
+ """
+ Add a AMQP connection to the console. The console will setup a session over the
+ connection. The console will then broadcast an Agent Locate Indication over
+ the session in order to discover present agents.
+
+ @type conn: qpid.messaging.Connection
+ @param conn: the connection to the AMQP messaging infrastructure.
+ """
+ if self._conn:
+ raise Exception( "Multiple connections per Console not supported." );
+ self._conn = conn
+ self._session = conn.session(name=self._name)
+
+ # for messages directly addressed to me
+ self._direct_recvr = self._session.receiver(str(self._address) +
+ ";{create:always,"
+ " node:"
+ " {type:topic,"
+ " x-declare:"
+ " {type:direct}}}",
+ capacity=1)
+ trace.debug("my direct addr=%s" % self._direct_recvr.source)
+
+ self._direct_sender = self._session.sender(str(self._address.get_node()) +
+ ";{create:always,"
+ " node:"
+ " {type:topic,"
+ " x-declare:"
+ " {type:direct}}}")
+ trace.debug("my direct sender=%s" % self._direct_sender.target)
+
+ # for receiving "broadcast" messages from agents
+ default_addr = QmfAddress.topic(QmfAddress.SUBJECT_AGENT_IND + ".#",
+ self._domain)
+ self._topic_recvr = self._session.receiver(str(default_addr) +
+ ";{create:always,"
+ " node:{type:topic}}",
+ capacity=1)
+ trace.debug("default topic recv addr=%s" % self._topic_recvr.source)
+
+
+ # for sending to topic subscribers
+ topic_addr = QmfAddress.topic(QmfAddress.SUBJECT_CONSOLE_IND, self._domain)
+ self._topic_sender = self._session.sender(str(topic_addr) +
+ ";{create:always,"
+ " node:{type:topic}}")
+ trace.debug("default topic send addr=%s" % self._topic_sender.target)
+
+ #
+ # Now that receivers are created, fire off the receive thread...
+ #
+ self._operational = True
+ self.start()
+ self._ready.wait(10)
+ if not self._ready.isSet():
+ raise Exception("Console managment thread failed to start.")
+
+
+
    def remove_connection(self, conn, timeout=None):
        """
        Remove an AMQP connection from the console.  Un-does the add_connection() operation,
        and releases any agents and sessions associated with the connection.

        @type conn: qpid.messaging.Connection
        @param conn: connection previously added by add_connection()
        @type timeout: float
        @param timeout: seconds to wait for the management thread to exit.
        """
        if self._conn and conn and conn != self._conn:
            # NOTE(review): a mismatched connection is only logged -- teardown
            # of the stored connection proceeds anyway.  Confirm intentional.
            log.error( "Attempt to delete unknown connection: %s" % str(conn))

        # tell connection thread to shutdown
        self._operational = False
        if self.isAlive():
            # kick my thread to wake it up
            self._wake_thread()
            trace.debug("waiting for console receiver thread to exit")
            self.join(timeout)
            if self.isAlive():
                log.error( "Console thread '%s' is hung..." % self.getName() )
        # close links before the session that owns them
        self._direct_recvr.close()
        self._direct_sender.close()
        self._topic_recvr.close()
        self._topic_sender.close()
        self._session.close()
        self._session = None
        self._conn = None
        trace.debug("console connection removal complete")
+
+
+ def get_address(self):
+ """
+ The AMQP address this Console is listening to.
+ """
+ return self._address
+
+
+ def destroy_agent( self, agent ):
+ """
+ Undoes create.
+ """
+ if not isinstance(agent, Agent):
+ raise TypeError("agent must be an instance of class Agent")
+
+ self._lock.acquire()
+ try:
+ if agent._name in self._agent_map:
+ del self._agent_map[agent._name]
+ finally:
+ self._lock.release()
+
    def find_agent(self, name, timeout=None):
        """
        Given the name of a particular agent, return an instance of class Agent
        representing that agent.  Return None if the agent does not exist.

        @param name: agent name to locate.
        @param timeout: seconds to wait for the locate response; defaults to
               the console's reply_timeout.
        """
        # fast path: agent already discovered
        self._lock.acquire()
        try:
            agent = self._agent_map.get(name)
            if agent:
                return agent
        finally:
            self._lock.release()

        # agent not present yet - ping it with an agent_locate

        mbox = _SyncMailbox(self)
        cid = mbox.get_address()

        query = QmfQuery.create_id(QmfQuery.TARGET_AGENT, name)
        msg = Message(id=QMF_APP_ID,
                      subject="console.ind.locate." + name,
                      properties={"method":"request",
                                  "qmf.opcode":OpCode.agent_locate_req},
                      content=query._predicate)
        msg.content_type="amqp/list"
        msg.reply_to = str(self._address)
        msg.correlation_id = str(cid)
        trace.debug("%s Sending Agent Locate (%s)", self._name, str(msg))
        try:
            self._topic_sender.send(msg)
        except SendError, e:
            log.error(str(e))
            mbox.destroy()
            return None

        if timeout is None:
            timeout = self._reply_timeout

        new_agent = None
        trace.debug("Waiting for response to Agent Locate (%s)" % timeout)
        # the management thread processes the locate response and inserts
        # the agent into _agent_map before delivering to this mailbox
        mbox.fetch(timeout)
        mbox.destroy()
        trace.debug("Agent Locate wait ended (%s)" % time.time())
        self._lock.acquire()
        try:
            new_agent = self._agent_map.get(name)
        finally:
            self._lock.release()

        return new_agent
+
+
+ def get_agents(self):
+ """
+ Return the list of known agents.
+ """
+ self._lock.acquire()
+ try:
+ agents = self._agent_map.values()
+ finally:
+ self._lock.release()
+ return agents
+
+
+ def get_agent(self, name):
+ """
+ Return the named agent, else None if not currently available.
+ """
+ self._lock.acquire()
+ try:
+ agent = self._agent_map.get(name)
+ finally:
+ self._lock.release()
+ return agent
+
+
    def do_query(self, agent, query, _reply_handle=None, _timeout=None):
        """
        Send a QmfQuery to an agent and gather the reply.

        When _reply_handle is given the call is asynchronous: this method
        returns True immediately (None on send failure) and the reply is
        delivered later as a work item tagged with _reply_handle.  Otherwise
        it blocks up to _timeout seconds and returns the (possibly empty)
        list of results, or None on send failure.
        """
        target = query.get_target()

        if _reply_handle is not None:
            mbox = _QueryMailbox(self,
                                 agent.get_name(),
                                 _reply_handle,
                                 target,
                                 _timeout)
        else:
            mbox = _SyncMailbox(self)

        cid = mbox.get_address()

        try:
            trace.debug("Sending Query to Agent (%s)" % time.time())
            agent._send_query(query, cid)
        except SendError, e:
            log.error(str(e))
            mbox.destroy()
            return None

        # return now if async reply expected
        if _reply_handle is not None:
            return True

        # note: a _timeout of 0 falls back to the default reply timeout
        if not _timeout:
            _timeout = self._reply_timeout

        trace.debug("Waiting for response to Query (%s)" % _timeout)
        now = datetime.datetime.utcnow()
        expire = now + datetime.timedelta(seconds=_timeout)

        response = []
        # a reply may span several messages (marked by the "partial"
        # property); keep fetching until the final piece or the deadline
        while (expire > now):
            _timeout = timedelta_to_secs(expire - now)
            reply = mbox.fetch(_timeout)
            if not reply:
                trace.debug("Query wait timed-out.")
                break

            objects = reply.content
            if not objects or not isinstance(objects, type([])):
                break

            # convert from map to native types if needed
            if target == QmfQuery.TARGET_SCHEMA_ID:
                for sid_map in objects:
                    response.append(SchemaClassId.from_map(sid_map))

            elif target == QmfQuery.TARGET_SCHEMA:
                for schema_map in objects:
                    # extract schema id, convert based on schema type
                    sid_map = schema_map.get(SchemaClass.KEY_SCHEMA_ID)
                    if sid_map:
                        sid = SchemaClassId.from_map(sid_map)
                        if sid:
                            if sid.get_type() == SchemaClassId.TYPE_DATA:
                                schema = SchemaObjectClass.from_map(schema_map)
                            else:
                                schema = SchemaEventClass.from_map(schema_map)
                            self._add_schema(schema)  # add to schema cache
                            response.append(schema)

            elif target == QmfQuery.TARGET_OBJECT:
                for obj_map in objects:
                    obj = QmfConsoleData(map_=obj_map, agent=agent)
                    # start fetch of schema if not known
                    sid = obj.get_schema_class_id()
                    if sid:
                        self._prefetch_schema(sid, agent)
                    response.append(obj)
            else:
                # no conversion needed.
                response += objects

            if not "partial" in reply.properties:
                # reply not broken up over multiple msgs
                break

            now = datetime.datetime.utcnow()

        mbox.destroy()
        return response
+
+
    def create_subscription(self, agent, query, console_handle,
                            _interval=None, _duration=None,
                            _blocking=True, _timeout=None):
        """
        Establish a standing subscription with an agent.

        Non-blocking mode returns True once the request is sent (False on
        failure); the result arrives later as a work item tagged with
        console_handle.  Blocking mode returns a SubscribeParams (check
        succeeded()) or None on timeout/send failure.
        """
        if not _duration:
            _duration = self._subscribe_timeout

        if _timeout is None:
            _timeout = self._reply_timeout

        if not _blocking:
            mbox = _AsyncSubscriptionMailbox(self, console_handle, agent,
                                             _duration, _interval)
            if not mbox.subscribe(query, _timeout):
                mbox.destroy()
                return False
            return True
        else:
            mbox = _SubscriptionMailbox(self, console_handle, agent, _duration,
                                        _interval)

            if not mbox.subscribe(query):
                mbox.destroy()
                return None

            trace.debug("Waiting for response to subscription (%s)" % _timeout)
            # @todo: what if mbox expires here?
            sp = mbox.fetch(_timeout)

            if not sp:
                trace.debug("Subscription request wait timed-out.")
                mbox.destroy()
                return None

            # a failed subscribe releases the mailbox, but the params are
            # still returned so the caller can inspect get_error()
            if not sp.succeeded():
                mbox.destroy()

            return sp
+
    def refresh_subscription(self, subscription_id,
                             _duration=None,
                             _timeout=None):
        """
        Renew a standing subscription before it expires.

        Returns SubscribeParams (or True for async mailboxes) on success,
        None when the subscription is unknown or the request fails.

        NOTE(review): _duration is accepted but never used below -- confirm
        whether it should be forwarded to resubscribe().
        """
        if _timeout is None:
            _timeout = self._reply_timeout

        mbox = self._get_mailbox(subscription_id)
        if not mbox:
            log.warning("Subscription %s not found." % subscription_id)
            return None

        if isinstance(mbox, _AsyncSubscriptionMailbox):
            # async: the refresh result arrives later as a work item
            return mbox.resubscribe()
        else:
            # synchronous - wait for reply
            if not mbox.resubscribe():
                # @todo ???? mbox.destroy()
                return None

            # wait for reply

            trace.debug("Waiting for response to subscription (%s)" % _timeout)
            sp = mbox.fetch(_timeout)

            if not sp:
                trace.debug("re-subscribe request wait timed-out.")
                # @todo???? mbox.destroy()
                return None

            return sp
+
+
    def cancel_subscription(self, subscription_id):
        """
        Terminate a standing subscription: notify the agent (best effort)
        and release the local mailbox.
        """
        mbox = self._get_mailbox(subscription_id)
        if not mbox:
            # unknown or already-cancelled subscription - nothing to do
            return

        agent = self.get_agent(mbox.agent_name)
        if agent:
            try:
                trace.debug("Sending UnSubscribe to Agent (%s)" % time.time())
                agent._send_unsubscribe_ind(subscription_id,
                                            mbox.agent_subscription_id)
            except SendError, e:
                # best effort: the mailbox is destroyed regardless
                log.error(str(e))

        mbox.destroy()
+
+
    def _wake_thread(self):
        """
        Make the console management thread loop wakeup from its next_receiver
        sleep.
        """
        trace.debug("Sending noop to wake up [%s]" % self._address)
        # a no-op message sent to our own direct address unblocks the
        # session.next_receiver() call in run()
        msg = Message(id=QMF_APP_ID,
                      subject=self._name,
                      properties={"method":"indication",
                                  "qmf.opcode":OpCode.noop},
                      content={})
        try:
            self._direct_sender.send( msg, sync=True )
        except SendError, e:
            log.error(str(e))
+
+
    def run(self):
        """
        Console Management Thread main loop.
        Handles inbound messages, agent discovery, async mailbox timeouts.
        """
        global _callback_thread

        # unblock add_connection(), which waits on this event
        self._ready.set()

        while self._operational:

            # drain every pending broadcast message from agents
            while True:
                try:
                    msg = self._topic_recvr.fetch(timeout=0)
                except Empty:
                    break
                self._dispatch(msg, _direct=False)

            # drain every pending message addressed directly to this console
            while True:
                try:
                    msg = self._direct_recvr.fetch(timeout = 0)
                except Empty:
                    break
                self._dispatch(msg, _direct=True)

            self._expire_agents()   # check for expired agents
            self._expire_mboxes()   # check for expired async mailbox requests

            if self._work_q_put and self._notifier:
                # new stuff on work queue, kick the application...
                self._work_q_put = False
                # record which thread runs the callback so re-entrant
                # calls can detect they are on the management thread
                _callback_thread = currentThread()
                trace.debug("Calling console notifier.indication")
                self._notifier.indication()
                _callback_thread = None

            # wait for a message to arrive, or an agent
            # to expire, or a mailbox request to time out
            now = datetime.datetime.utcnow()
            next_expire = self._next_agent_expire

            self._lock.acquire()
            try:
                # the mailbox expire flag may be cleared by the
                # app thread(s) to force an immediate mailbox scan
                if self._next_mbox_expire is None:
                    next_expire = now
                elif self._next_mbox_expire < next_expire:
                    next_expire = self._next_mbox_expire
            finally:
                self._lock.release()

            timeout = timedelta_to_secs(next_expire - now)

            if self._operational and timeout > 0.0:
                try:
                    trace.debug("waiting for next rcvr (timeout=%s)..." % timeout)
                    self._session.next_receiver(timeout = timeout)
                except Empty:
                    pass

        trace.debug("Shutting down Console thread")
+
    def get_objects(self,
                    _object_id=None,
                    _schema_id=None,
                    _pname=None, _cname=None,
                    _agents=None,
                    _timeout=None):
        """
        Retrieve objects by id or schema.

        By object_id: must specify schema_id or pname & cname if object defined
        by a schema.  Undescribed objects: only object_id needed.

        By schema: must specify schema_id or pname & cname - all instances of
        objects defined by that schema are returned.

        Returns the accumulated object list, or None if nothing matched
        before the overall _timeout budget was spent.
        """
        if _agents is None:
            # use copy of current agent list
            self._lock.acquire()
            try:
                agent_list = self._agent_map.values()
            finally:
                self._lock.release()
        elif isinstance(_agents, Agent):
            agent_list = [_agents]
        else:
            agent_list = _agents
            # @todo validate this list!

        if _timeout is None:
            _timeout = self._reply_timeout

        # @todo: fix when async do_query done - query all agents at once, then
        # wait for replies, instead of per-agent querying....

        obj_list = []
        # single deadline shared by every per-agent query below
        expired = datetime.datetime.utcnow() + datetime.timedelta(seconds=_timeout)
        for agent in agent_list:
            if not agent.is_active():
                continue
            now = datetime.datetime.utcnow()
            if now >= expired:
                break

            if _pname is None:
                # direct lookup by object id or schema id
                if _object_id:
                    query = QmfQuery.create_id_object(_object_id,
                                                      _schema_id)
                else:
                    if _schema_id is not None:
                        t_params = {QmfData.KEY_SCHEMA_ID: _schema_id}
                    else:
                        t_params = None
                    query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
                                                     t_params)
                timeout = timedelta_to_secs(expired - now)
                reply = self.do_query(agent, query, _timeout=timeout)
                if reply:
                    obj_list = obj_list + reply
            else:
                # looking up by package name (and maybe class name), need to
                # find all schema_ids in that package, then lookup object by
                # schema_id
                if _cname is not None:
                    pred = [QmfQuery.AND,
                            [QmfQuery.EQ,
                             SchemaClassId.KEY_PACKAGE,
                             [QmfQuery.QUOTE, _pname]],
                            [QmfQuery.EQ, SchemaClassId.KEY_CLASS,
                             [QmfQuery.QUOTE, _cname]]]
                else:
                    pred = [QmfQuery.EQ,
                            SchemaClassId.KEY_PACKAGE,
                            [QmfQuery.QUOTE, _pname]]
                query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA_ID, pred)
                timeout = timedelta_to_secs(expired - now)
                sid_list = self.do_query(agent, query, _timeout=timeout)
                if sid_list:
                    for sid in sid_list:
                        now = datetime.datetime.utcnow()
                        if now >= expired:
                            break
                        if _object_id is not None:
                            query = QmfQuery.create_id_object(_object_id, sid)
                        else:
                            t_params = {QmfData.KEY_SCHEMA_ID: sid}
                            query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT, t_params)
                        timeout = timedelta_to_secs(expired - now)
                        reply = self.do_query(agent, query, _timeout=timeout)
                        if reply:
                            obj_list = obj_list + reply
        if obj_list:
            return obj_list
        return None
+
+
+
+ # called by run() thread ONLY
+ #
    def _dispatch(self, msg, _direct=True):
        """
        PRIVATE: Process a message received from an Agent.

        Called by the run() thread ONLY.  Routes by the "qmf.opcode"
        property; note that any correlated message (other than agent
        indications) is treated as a reply to an outstanding request
        before data_ind is considered.
        """
        trace.debug( "Message received from Agent! [%s]", msg )

        opcode = msg.properties.get("qmf.opcode")
        if not opcode:
            log.error("Ignoring unrecognized message '%s'", msg)
            return
        version = 2  # @todo: fix me

        cmap = {}; props = {}
        if msg.content_type == "amqp/map":
            cmap = msg.content
        if msg.properties:
            props = msg.properties

        if opcode == OpCode.agent_heartbeat_ind:
            self._handle_agent_ind_msg( msg, cmap, version, _direct )
        elif opcode == OpCode.agent_locate_rsp:
            self._handle_agent_ind_msg( msg, cmap, version, _direct )
        elif msg.correlation_id:
            self._handle_response_msg(msg, cmap, version, _direct)
        elif opcode == OpCode.data_ind:
            self._handle_indication_msg(msg, cmap, version, _direct)
        elif opcode == OpCode.noop:
            trace.debug("No-op msg received.")
        else:
            log.warning("Ignoring message with unrecognized 'opcode' value: '%s'", opcode)
+
+
    def _handle_agent_ind_msg(self, msg, cmap, version, direct):
        """
        Process a received agent-ind message.  This message may be a response to a
        agent-locate, or it can be an unsolicited agent announce.
        """
        trace.debug("%s _handle_agent_ind_msg '%s'", self._name, str(msg))

        try:
            tmp = QmfData.from_map(msg.content)
        except:
            # content did not decode as a QmfData map
            log.warning("%s invalid Agent Indication msg format '%s'",
                        self._name, str(msg))
            return

        try:
            name = tmp.get_value("_name")
        except:
            log.warning("Bad Agent ind msg received: %s", str(msg))
            return

        # a correlation id means some caller (e.g. find_agent()) is
        # blocked on a mailbox waiting for this reply
        correlated = False
        if msg.correlation_id:
            mbox = self._get_mailbox(msg.correlation_id)
            correlated = mbox is not None

        agent = None
        self._lock.acquire()
        try:
            agent = self._agent_map.get(name)
            if agent:
                # agent already known, just update timestamp
                agent._announce_timestamp = datetime.datetime.utcnow()
        finally:
            self._lock.release()

        if not agent:
            # need to create and add a new agent?
            matched = False
            if self._agent_discovery_filter:
                matched = self._agent_discovery_filter.evaluate(tmp)

            if (correlated or matched):
                agent = self._create_agent(name)
                if not agent:
                    return    # failed to add agent
                agent._announce_timestamp = datetime.datetime.utcnow()

                if matched:
                    # unsolicited, but newly discovered
                    trace.debug("AGENT_ADDED for %s (%s)" % (agent, time.time()))
                    wi = WorkItem(WorkItem.AGENT_ADDED, None, {"agent": agent})
                    self._work_q.put(wi)
                    self._work_q_put = True

        if correlated:
            # wake up all waiters
            trace.debug("waking waiters for correlation id %s" % msg.correlation_id)
            mbox.deliver(msg)
+
+ def _handle_response_msg(self, msg, cmap, version, direct):
+ """
+ Process a received data-ind message.
+ """
+ trace.debug("%s _handle_response_msg '%s'", self._name, str(msg))
+
+ mbox = self._get_mailbox(msg.correlation_id)
+ if not mbox:
+ log.warning("%s Response msg received with unknown correlation_id"
+ " msg='%s'", self._name, str(msg))
+ return
+
+ # wake up all waiters
+ trace.debug("waking waiters for correlation id %s" % msg.correlation_id)
+ mbox.deliver(msg)
+
    def _handle_indication_msg(self, msg, cmap, version, _direct):
        """
        Process an unsolicited event indication from a known agent and
        queue an EVENT_RECEIVED work item for the application.
        """
        aname = msg.properties.get("qmf.agent")
        if not aname:
            trace.debug("No agent name field in indication message.")
            return

        content_type = msg.properties.get("qmf.content")
        # event indications carry a list whose first element is the event map
        if (content_type != ContentType.event or
            not isinstance(msg.content, type([]))):
            log.warning("Bad event indication message received: '%s'", msg)
            return

        emap = msg.content[0]
        if not isinstance(emap, type({})):
            trace.debug("Invalid event body in indication message: '%s'", msg)
            return

        agent = None
        self._lock.acquire()
        try:
            agent = self._agent_map.get(aname)
        finally:
            self._lock.release()
        if not agent:
            # events from undiscovered agents are dropped
            trace.debug("Agent '%s' not known." % aname)
            return
        try:
            # @todo: schema???
            event = QmfEvent.from_map(emap)
        except TypeError:
            trace.debug("Invalid QmfEvent map received: %s" % str(emap))
            return

        # @todo: schema?  Need to fetch it, but not from this thread!
        # This thread can not pend on a request.
        trace.debug("Publishing event received from agent %s" % aname)
        wi = WorkItem(WorkItem.EVENT_RECEIVED, None,
                      {"agent":agent,
                       "event":event})
        self._work_q.put(wi)
        self._work_q_put = True
+
+
    def _expire_mboxes(self):
        """
        Check all async mailboxes for outstanding requests that have expired.
        """
        self._lock.acquire()
        try:
            now = datetime.datetime.utcnow()
            if self._next_mbox_expire and now < self._next_mbox_expire:
                # nothing can have expired yet; the early return still
                # releases the lock via the finally clause
                return
            expired_mboxes = []
            self._next_mbox_expire = None
            for mbox in self._async_mboxes.itervalues():
                if now >= mbox.expiration_date:
                    expired_mboxes.append(mbox)
                else:
                    # track the earliest remaining deadline so run() knows
                    # when to scan again
                    if (self._next_mbox_expire is None or
                        mbox.expiration_date < self._next_mbox_expire):
                        self._next_mbox_expire = mbox.expiration_date

            for mbox in expired_mboxes:
                del self._async_mboxes[mbox.cid]
        finally:
            self._lock.release()

        # expire() is invoked after the lock is released
        for mbox in expired_mboxes:
            # note: expire() may deallocate the mbox, so don't touch
            # it further.
            mbox.expire()
+
+
    def _expire_agents(self):
        """
        Check for expired agents and issue notifications when they expire.

        An agent is expired when no heartbeat/announce has been seen for
        _agent_timeout seconds; its timestamp is cleared and an
        AGENT_DELETED work item is queued for the application.
        """
        now = datetime.datetime.utcnow()
        if self._next_agent_expire and now < self._next_agent_expire:
            return
        lifetime_delta = datetime.timedelta(seconds = self._agent_timeout)
        next_expire_delta = lifetime_delta
        self._lock.acquire()
        try:
            trace.debug("!!! expiring agents '%s'" % now)
            for agent in self._agent_map.itervalues():
                if agent._announce_timestamp:
                    agent_deathtime = agent._announce_timestamp + lifetime_delta
                    if agent_deathtime <= now:
                        trace.debug("AGENT_DELETED for %s" % agent)
                        # a cleared timestamp marks the agent as expired
                        agent._announce_timestamp = None
                        wi = WorkItem(WorkItem.AGENT_DELETED, None,
                                      {"agent":agent})
                        # @todo: remove agent from self._agent_map
                        self._work_q.put(wi)
                        self._work_q_put = True
                    else:
                        # remember the soonest upcoming expiration
                        if (agent_deathtime - now) < next_expire_delta:
                            next_expire_delta = agent_deathtime - now

            self._next_agent_expire = now + next_expire_delta
            trace.debug("!!! next expire cycle = '%s'" % self._next_agent_expire)
        finally:
            self._lock.release()
+
+
+
    def _create_agent( self, name ):
        """
        Factory to create/retrieve an agent for this console
        """
        trace.debug("creating agent %s" % name)
        self._lock.acquire()
        try:
            agent = self._agent_map.get(name)
            if agent:
                # already created (possibly by another thread)
                return agent

            agent = Agent(name, self)
            try:
                agent._sender = self._session.sender(str(agent._address) +
                                                     ";{create:always,"
                                                     " node:"
                                                     " {type:topic,"
                                                     " x-declare:"
                                                     " {type:direct}}}")
            except:
                # NOTE(review): bare except hides the failure cause --
                # consider logging the exception details.
                log.warning("Unable to create sender for %s" % name)
                return None
            trace.debug("created agent sender %s" % agent._sender.target)

            self._agent_map[name] = agent
        finally:
            self._lock.release()

        # new agent - query for its schema database for
        # seeding the schema cache (@todo)
        # query = QmfQuery({QmfQuery.TARGET_SCHEMA_ID:None})
        # agent._sendQuery( query )

        return agent
+
+
+
+ def enable_agent_discovery(self, _query=None):
+ """
+ Called to enable the asynchronous Agent Discovery process.
+ Once enabled, AGENT_ADD work items can arrive on the WorkQueue.
+ """
+ # @todo: fix - take predicate only, not entire query!
+ if _query is not None:
+ if (not isinstance(_query, QmfQuery) or
+ _query.get_target() != QmfQuery.TARGET_AGENT):
+ raise TypeError("Type QmfQuery with target == TARGET_AGENT expected")
+ self._agent_discovery_filter = _query
+ else:
+ # create a match-all agent query (no predicate)
+ self._agent_discovery_filter = QmfQuery.create_wildcard(QmfQuery.TARGET_AGENT)
+
+ def disable_agent_discovery(self):
+ """
+ Called to disable the async Agent Discovery process enabled by
+ calling enableAgentDiscovery()
+ """
+ self._agent_discovery_filter = None
+
+
+
+ def get_workitem_count(self):
+ """
+ Returns the count of pending WorkItems that can be retrieved.
+ """
+ return self._work_q.qsize()
+
+
+
+ def get_next_workitem(self, timeout=None):
+ """
+ Returns the next pending work item, or None if none available.
+ @todo: subclass and return an Empty event instead.
+ """
+ try:
+ wi = self._work_q.get(True, timeout)
+ except Queue.Empty:
+ return None
+ return wi
+
+
    def release_workitem(self, wi):
        """
        Return a WorkItem to the Console when it is no longer needed.
        @todo: call Queue.task_done() - only 2.5+

        @type wi: class qmfConsole.WorkItem
        @param wi: work item object to return.
        """
        # currently a no-op (see @todo above)
        pass
+
+ def _add_schema(self, schema):
+ """
+ @todo
+ """
+ if not isinstance(schema, SchemaClass):
+ raise TypeError("SchemaClass type expected")
+
+ self._lock.acquire()
+ try:
+ sid = schema.get_class_id()
+ if not self._schema_cache.has_key(sid):
+ self._schema_cache[sid] = schema
+ if sid in self._pending_schema_req:
+ self._pending_schema_req.remove(sid)
+ finally:
+ self._lock.release()
+
+ def _prefetch_schema(self, schema_id, agent):
+ """
+ Send an async request for the schema identified by schema_id if the
+ schema is not available in the cache.
+ """
+ need_fetch = False
+ self._lock.acquire()
+ try:
+ if ((not self._schema_cache.has_key(schema_id)) and
+ schema_id not in self._pending_schema_req):
+ self._pending_schema_req.append(schema_id)
+ need_fetch = True
+ finally:
+ self._lock.release()
+
+ if need_fetch:
+ mbox = _SchemaPrefetchMailbox(self, schema_id)
+ query = QmfQuery.create_id(QmfQuery.TARGET_SCHEMA, schema_id)
+ trace.debug("Sending Schema Query to Agent (%s)" % time.time())
+ try:
+ agent._send_query(query, mbox.get_address())
+ except SendError, e:
+ log.error(str(e))
+ mbox.destroy()
+ self._lock.acquire()
+ try:
+ self._pending_schema_req.remove(schema_id)
+ finally:
+ self._lock.release()
+
+
    def _fetch_schema(self, schema_id, _agent=None, _timeout=None):
        """
        Find the schema identified by schema_id.  If not in the cache, ask the
        agent for it.

        @param _agent: agent to query when the schema is not cached; when
               None only the local cache is consulted.
        @raise TypeError: when schema_id is not a SchemaClassId.
        """
        if not isinstance(schema_id, SchemaClassId):
            raise TypeError("SchemaClassId type expected")

        self._lock.acquire()
        try:
            schema = self._schema_cache.get(schema_id)
            if schema:
                return schema
        finally:
            self._lock.release()

        if _agent is None:
            return None

        # note: do_query will add the new schema to the cache automatically.
        slist = self.do_query(_agent,
                              QmfQuery.create_id(QmfQuery.TARGET_SCHEMA, schema_id),
                              _timeout=_timeout)
        if slist:
            return slist[0]
        else:
            return None
+
+ def _add_mailbox(self, mbox):
+ """
+ Add a mailbox to the post office, and assign it a unique address.
+ """
+ self._lock.acquire()
+ try:
+ mbox.cid = self._correlation_id
+ self._correlation_id += 1
+ self._post_office[mbox.cid] = mbox
+ finally:
+ self._lock.release()
+
+ def _get_mailbox(self, mid):
+ try:
+ mid = long(mid)
+ except TypeError:
+ log.error("Invalid mailbox id: %s" % str(mid))
+ return None
+
+ self._lock.acquire()
+ try:
+ return self._post_office.get(mid)
+ finally:
+ self._lock.release()
+
+
+ def _remove_mailbox(self, mid):
+ """ Remove a mailbox and its address from the post office """
+ try:
+ mid = long(mid)
+ except TypeError:
+ log.error("Invalid mailbox id: %s" % str(mid))
+ return None
+
+ self._lock.acquire()
+ try:
+ if mid in self._post_office:
+ del self._post_office[mid]
+ finally:
+ self._lock.release()
+
+ def __repr__(self):
+ return str(self._address)
+
+ # def get_packages(self):
+ # plist = []
+ # for i in range(self.impl.packageCount()):
+ # plist.append(self.impl.getPackageName(i))
+ # return plist
+
+
+ # def get_classes(self, package, kind=CLASS_OBJECT):
+ # clist = []
+ # for i in range(self.impl.classCount(package)):
+ # key = self.impl.getClass(package, i)
+ # class_kind = self.impl.getClassKind(key)
+ # if class_kind == kind:
+ # if kind == CLASS_OBJECT:
+ # clist.append(SchemaObjectClass(None, None, {"impl":self.impl.getObjectClass(key)}))
+ # elif kind == CLASS_EVENT:
+ # clist.append(SchemaEventClass(None, None, {"impl":self.impl.getEventClass(key)}))
+ # return clist
+
+
+ # def bind_package(self, package):
+ # return self.impl.bindPackage(package)
+
+
+ # def bind_class(self, kwargs = {}):
+ # if "key" in kwargs:
+ # self.impl.bindClass(kwargs["key"])
+ # elif "package" in kwargs:
+ # package = kwargs["package"]
+ # if "class" in kwargs:
+ # self.impl.bindClass(package, kwargs["class"])
+ # else:
+ # self.impl.bindClass(package)
+ # else:
+ # raise Exception("Argument error: invalid arguments, use 'key' or 'package'[,'class']")
+
+
+ # def get_agents(self, broker=None):
+ # blist = []
+ # if broker:
+ # blist.append(broker)
+ # else:
+ # self._cv.acquire()
+ # try:
+ # # copy while holding lock
+ # blist = self._broker_list[:]
+ # finally:
+ # self._cv.release()
+
+ # agents = []
+ # for b in blist:
+ # for idx in range(b.impl.agentCount()):
+ # agents.append(AgentProxy(b.impl.getAgent(idx), b))
+
+ # return agents
+
+
+ # def get_objects(self, query, kwargs = {}):
+ # timeout = 30
+ # agent = None
+ # temp_args = kwargs.copy()
+ # if type(query) == type({}):
+ # temp_args.update(query)
+
+ # if "_timeout" in temp_args:
+ # timeout = temp_args["_timeout"]
+ # temp_args.pop("_timeout")
+
+ # if "_agent" in temp_args:
+ # agent = temp_args["_agent"]
+ # temp_args.pop("_agent")
+
+ # if type(query) == type({}):
+ # query = Query(temp_args)
+
+ # self._select = {}
+ # for k in temp_args.iterkeys():
+ # if type(k) == str:
+ # self._select[k] = temp_args[k]
+
+ # self._cv.acquire()
+ # try:
+ # self._sync_count = 1
+ # self._sync_result = []
+ # broker = self._broker_list[0]
+ # broker.send_query(query.impl, None, agent)
+ # self._cv.wait(timeout)
+ # if self._sync_count == 1:
+ # raise Exception("Timed out: waiting for query response")
+ # finally:
+ # self._cv.release()
+
+ # return self._sync_result
+
+
+ # def get_object(self, query, kwargs = {}):
+ # '''
+ # Return one and only one object or None.
+ # '''
+ # objs = objects(query, kwargs)
+ # if len(objs) == 1:
+ # return objs[0]
+ # else:
+ # return None
+
+
+ # def first_object(self, query, kwargs = {}):
+ # '''
+ # Return the first of potentially many objects.
+ # '''
+ # objs = objects(query, kwargs)
+ # if objs:
+ # return objs[0]
+ # else:
+ # return None
+
+
+ # # Check the object against select to check for a match
+ # def _select_match(self, object):
+ # schema_props = object.properties()
+ # for key in self._select.iterkeys():
+ # for prop in schema_props:
+ # if key == p[0].name() and self._select[key] != p[1]:
+ # return False
+ # return True
+
+
+ # def _get_result(self, list, context):
+ # '''
+ # Called by Broker proxy to return the result of a query.
+ # '''
+ # self._cv.acquire()
+ # try:
+ # for item in list:
+ # if self._select_match(item):
+ # self._sync_result.append(item)
+ # self._sync_count -= 1
+ # self._cv.notify()
+ # finally:
+ # self._cv.release()
+
+
+ # def start_sync(self, query): pass
+
+
+ # def touch_sync(self, sync): pass
+
+
+ # def end_sync(self, sync): pass
+
+
+
+
+# def start_console_events(self):
+# self._cb_cond.acquire()
+# try:
+# self._cb_cond.notify()
+# finally:
+# self._cb_cond.release()
+
+
+# def _do_console_events(self):
+# '''
+# Called by the Console thread to poll for events. Passes the events
+# onto the ConsoleHandler associated with this Console. Is called
+# periodically, but can also be kicked by Console.start_console_events().
+# '''
+# count = 0
+# valid = self.impl.getEvent(self._event)
+# while valid:
+# count += 1
+# try:
+# if self._event.kind == qmfengine.ConsoleEvent.AGENT_ADDED:
+# trace.debug("Console Event AGENT_ADDED received")
+# if self._handler:
+# self._handler.agent_added(AgentProxy(self._event.agent, None))
+# elif self._event.kind == qmfengine.ConsoleEvent.AGENT_DELETED:
+# trace.debug("Console Event AGENT_DELETED received")
+# if self._handler:
+# self._handler.agent_deleted(AgentProxy(self._event.agent, None))
+# elif self._event.kind == qmfengine.ConsoleEvent.NEW_PACKAGE:
+# trace.debug("Console Event NEW_PACKAGE received")
+# if self._handler:
+# self._handler.new_package(self._event.name)
+# elif self._event.kind == qmfengine.ConsoleEvent.NEW_CLASS:
+# trace.debug("Console Event NEW_CLASS received")
+# if self._handler:
+# self._handler.new_class(SchemaClassKey(self._event.classKey))
+# elif self._event.kind == qmfengine.ConsoleEvent.OBJECT_UPDATE:
+# trace.debug("Console Event OBJECT_UPDATE received")
+# if self._handler:
+# self._handler.object_update(ConsoleObject(None, {"impl":self._event.object}),
+# self._event.hasProps, self._event.hasStats)
+# elif self._event.kind == qmfengine.ConsoleEvent.EVENT_RECEIVED:
+# trace.debug("Console Event EVENT_RECEIVED received")
+# elif self._event.kind == qmfengine.ConsoleEvent.AGENT_HEARTBEAT:
+# trace.debug("Console Event AGENT_HEARTBEAT received")
+# if self._handler:
+# self._handler.agent_heartbeat(AgentProxy(self._event.agent, None), self._event.timestamp)
+# elif self._event.kind == qmfengine.ConsoleEvent.METHOD_RESPONSE:
+# trace.debug("Console Event METHOD_RESPONSE received")
+# else:
+# trace.debug("Console thread received unknown event: '%s'" % str(self._event.kind))
+# except e:
+# print "Exception caught in callback thread:", e
+# self.impl.popEvent()
+# valid = self.impl.getEvent(self._event)
+# return count
+
+
+
+
+
+# class Broker(ConnectionHandler):
+# # attr_reader :impl, :conn, :console, :broker_bank  (Ruby-ism kept from the original port)
+# def __init__(self, console, conn):
+# self.broker_bank = 1
+# self.console = console
+# self.conn = conn
+# self._session = None
+# self._cv = Condition()
+# self._stable = None
+# self._event = qmfengine.BrokerEvent()
+# self._xmtMessage = qmfengine.Message()
+# self.impl = qmfengine.BrokerProxy(self.console.impl)
+# self.console.impl.addConnection(self.impl, self)
+# self.conn.add_conn_handler(self)
+# self._operational = True
+
+
+# def shutdown(self):
+# trace.debug("broker.shutdown() called.")
+# self.console.impl.delConnection(self.impl)
+# self.conn.del_conn_handler(self)
+# if self._session:
+# self.impl.sessionClosed()
+# trace.debug("broker.shutdown() sessionClosed done.")
+# self._session.destroy()
+# trace.debug("broker.shutdown() session destroy done.")
+# self._session = None
+# self._operational = False
+# trace.debug("broker.shutdown() done.")
+
+
+# def wait_for_stable(self, timeout = None):
+# self._cv.acquire()
+# try:
+# if self._stable:
+# return
+# if timeout:
+# self._cv.wait(timeout)
+# if not self._stable:
+# raise Exception("Timed out: waiting for broker connection to become stable")
+# else:
+# while not self._stable:
+# self._cv.wait()
+# finally:
+# self._cv.release()
+
+
+# def send_query(self, query, ctx, agent):
+# agent_impl = None
+# if agent:
+# agent_impl = agent.impl
+# self.impl.sendQuery(query, ctx, agent_impl)
+# self.conn.kick()
+
+
+# def _do_broker_events(self):
+# count = 0
+# valid = self.impl.getEvent(self._event)
+# while valid:
+# count += 1
+# if self._event.kind == qmfengine.BrokerEvent.BROKER_INFO:
+# trace.debug("Broker Event BROKER_INFO received");
+# elif self._event.kind == qmfengine.BrokerEvent.DECLARE_QUEUE:
+# trace.debug("Broker Event DECLARE_QUEUE received");
+# self.conn.impl.declareQueue(self._session.handle, self._event.name)
+# elif self._event.kind == qmfengine.BrokerEvent.DELETE_QUEUE:
+# trace.debug("Broker Event DELETE_QUEUE received");
+# self.conn.impl.deleteQueue(self._session.handle, self._event.name)
+# elif self._event.kind == qmfengine.BrokerEvent.BIND:
+# trace.debug("Broker Event BIND received");
+# self.conn.impl.bind(self._session.handle, self._event.exchange, self._event.name, self._event.bindingKey)
+# elif self._event.kind == qmfengine.BrokerEvent.UNBIND:
+# trace.debug("Broker Event UNBIND received");
+# self.conn.impl.unbind(self._session.handle, self._event.exchange, self._event.name, self._event.bindingKey)
+# elif self._event.kind == qmfengine.BrokerEvent.SETUP_COMPLETE:
+# trace.debug("Broker Event SETUP_COMPLETE received");
+# self.impl.startProtocol()
+# elif self._event.kind == qmfengine.BrokerEvent.STABLE:
+# trace.debug("Broker Event STABLE received");
+# self._cv.acquire()
+# try:
+# self._stable = True
+# self._cv.notify()
+# finally:
+# self._cv.release()
+# elif self._event.kind == qmfengine.BrokerEvent.QUERY_COMPLETE:
+# result = []
+# for idx in range(self._event.queryResponse.getObjectCount()):
+# result.append(ConsoleObject(None, {"impl":self._event.queryResponse.getObject(idx), "broker":self}))
+# self.console._get_result(result, self._event.context)
+# elif self._event.kind == qmfengine.BrokerEvent.METHOD_RESPONSE:
+# obj = self._event.context
+# obj._method_result(MethodResponse(self._event.methodResponse()))
+
+# self.impl.popEvent()
+# valid = self.impl.getEvent(self._event)
+
+# return count
+
+
+# def _do_broker_messages(self):
+# count = 0
+# valid = self.impl.getXmtMessage(self._xmtMessage)
+# while valid:
+# count += 1
+# trace.debug("Broker: sending msg on connection")
+# self.conn.impl.sendMessage(self._session.handle, self._xmtMessage)
+# self.impl.popXmt()
+# valid = self.impl.getXmtMessage(self._xmtMessage)
+
+# return count
+
+
+# def _do_events(self):
+# while True:
+# self.console.start_console_events()
+# bcnt = self._do_broker_events()
+# mcnt = self._do_broker_messages()
+# if bcnt == 0 and mcnt == 0:
+# break;
+
+
+# def conn_event_connected(self):
+# trace.debug("Broker: Connection event CONNECTED")
+# self._session = Session(self.conn, "qmfc-%s.%d" % (socket.gethostname(), os.getpid()), self)
+# self.impl.sessionOpened(self._session.handle)
+# self._do_events()
+
+
+# def conn_event_disconnected(self, error):
+# trace.debug("Broker: Connection event DISCONNECTED")
+# pass
+
+
+# def conn_event_visit(self):
+# self._do_events()
+
+
+# def sess_event_session_closed(self, context, error):
+# trace.debug("Broker: Session event CLOSED")
+# self.impl.sessionClosed()
+
+
+# def sess_event_recv(self, context, message):
+# trace.debug("Broker: Session event MSG_RECV")
+# if not self._operational:
+# log.warning("Unexpected session event message received by Broker proxy: context='%s'" % str(context))
+# self.impl.handleRcvMessage(message)
+# self._do_events()
+
+
+
+################################################################################
+################################################################################
+################################################################################
+################################################################################
+# TEMPORARY TEST CODE - TO BE DELETED
+################################################################################
+################################################################################
+################################################################################
+################################################################################
+
+if __name__ == '__main__':
+ # temp test code
+ import logging
+ from common import (qmfTypes, SchemaProperty)
+
+ logging.getLogger().setLevel(logging.INFO)
+
+ logging.info( "************* Creating Async Console **************" )
+
+ class MyNotifier(Notifier):
+ def __init__(self, context):
+ self._myContext = context
+ self.WorkAvailable = False
+
+ def indication(self):
+ print("Indication received! context=%d" % self._myContext)
+ self.WorkAvailable = True
+
+ _noteMe = MyNotifier( 666 )
+
+ _myConsole = Console(notifier=_noteMe)
+
+ _myConsole.enable_agent_discovery()
+ logging.info("Waiting...")
+
+
+ logging.info( "Destroying console:" )
+ _myConsole.destroy( 10 )
+
+ logging.info( "******** Messing around with Schema ********" )
+
+ _sec = SchemaEventClass( _classId=SchemaClassId("myPackage", "myClass",
+ stype=SchemaClassId.TYPE_EVENT),
+ _desc="A typical event schema",
+ _props={"Argument-1": SchemaProperty(_type_code=qmfTypes.TYPE_UINT8,
+ kwargs = {"min":0,
+ "max":100,
+ "unit":"seconds",
+ "desc":"sleep value"}),
+ "Argument-2": SchemaProperty(_type_code=qmfTypes.TYPE_LSTR,
+ kwargs={"maxlen":100,
+ "desc":"a string argument"})})
+ print("_sec=%s" % _sec.get_class_id())
+ print("_sec.gePropertyCount()=%d" % _sec.get_property_count() )
+ print("_sec.getProperty('Argument-1`)=%s" % _sec.get_property('Argument-1') )
+ print("_sec.getProperty('Argument-2`)=%s" % _sec.get_property('Argument-2') )
+ try:
+ print("_sec.getProperty('not-found')=%s" % _sec.get_property('not-found') )
+ except:
+ pass
+ print("_sec.getProperties()='%s'" % _sec.get_properties())
+
+ print("Adding another argument")
+ _arg3 = SchemaProperty( _type_code=qmfTypes.TYPE_BOOL,
+ kwargs={"dir":"IO",
+ "desc":"a boolean argument"})
+ _sec.add_property('Argument-3', _arg3)
+ print("_sec=%s" % _sec.get_class_id())
+ print("_sec.getPropertyCount()=%d" % _sec.get_property_count() )
+ print("_sec.getProperty('Argument-1')=%s" % _sec.get_property('Argument-1') )
+ print("_sec.getProperty('Argument-2')=%s" % _sec.get_property('Argument-2') )
+ print("_sec.getProperty('Argument-3')=%s" % _sec.get_property('Argument-3') )
+
+ print("_arg3.mapEncode()='%s'" % _arg3.map_encode() )
+
+ _secmap = _sec.map_encode()
+ print("_sec.mapEncode()='%s'" % _secmap )
+
+ _sec2 = SchemaEventClass( _map=_secmap )
+
+ print("_sec=%s" % _sec.get_class_id())
+ print("_sec2=%s" % _sec2.get_class_id())
+
+ _soc = SchemaObjectClass( _map = {"_schema_id": {"_package_name": "myOtherPackage",
+ "_class_name": "myOtherClass",
+ "_type": "_data"},
+ "_desc": "A test data object",
+ "_values":
+ {"prop1": {"amqp_type": qmfTypes.TYPE_UINT8,
+ "access": "RO",
+ "index": True,
+ "unit": "degrees"},
+ "prop2": {"amqp_type": qmfTypes.TYPE_UINT8,
+ "access": "RW",
+ "index": True,
+ "desc": "The Second Property(tm)",
+ "unit": "radians"},
+ "statistics": { "amqp_type": qmfTypes.TYPE_DELTATIME,
+ "unit": "seconds",
+ "desc": "time until I retire"},
+ "meth1": {"_desc": "A test method",
+ "_arguments":
+ {"arg1": {"amqp_type": qmfTypes.TYPE_UINT32,
+ "desc": "an argument 1",
+ "dir": "I"},
+ "arg2": {"amqp_type": qmfTypes.TYPE_BOOL,
+ "dir": "IO",
+ "desc": "some weird boolean"}}},
+ "meth2": {"_desc": "A test method",
+ "_arguments":
+ {"m2arg1": {"amqp_type": qmfTypes.TYPE_UINT32,
+ "desc": "an 'nuther argument",
+ "dir":
+ "I"}}}},
+ "_subtypes":
+ {"prop1":"qmfProperty",
+ "prop2":"qmfProperty",
+ "statistics":"qmfProperty",
+ "meth1":"qmfMethod",
+ "meth2":"qmfMethod"},
+ "_primary_key_names": ["prop2", "prop1"]})
+
+ print("_soc='%s'" % _soc)
+
+ print("_soc.getPrimaryKeyList='%s'" % _soc.get_id_names())
+
+ print("_soc.getPropertyCount='%d'" % _soc.get_property_count())
+ print("_soc.getProperties='%s'" % _soc.get_properties())
+ print("_soc.getProperty('prop2')='%s'" % _soc.get_property('prop2'))
+
+ print("_soc.getMethodCount='%d'" % _soc.get_method_count())
+ print("_soc.getMethods='%s'" % _soc.get_methods())
+ print("_soc.getMethod('meth2')='%s'" % _soc.get_method('meth2'))
+
+ _socmap = _soc.map_encode()
+ print("_socmap='%s'" % _socmap)
+ _soc2 = SchemaObjectClass( _map=_socmap )
+ print("_soc='%s'" % _soc)
+ print("_soc2='%s'" % _soc2)
+
+ if _soc2.get_class_id() == _soc.get_class_id():
+ print("soc and soc2 are the same schema")
+
+
+ logging.info( "******** Messing around with ObjectIds ********" )
+
+
+ qd = QmfData( _values={"prop1":1, "prop2":True, "prop3": {"a":"map"}, "prop4": "astring"} )
+ print("qd='%s':" % qd)
+
+ print("prop1=%d prop2=%s prop3=%s prop4=%s" % (qd.prop1, qd.prop2, qd.prop3, qd.prop4))
+
+ print("qd map='%s'" % qd.map_encode())
+ print("qd getProperty('prop4')='%s'" % qd.get_value("prop4"))
+ qd.set_value("prop4", 4, "A test property called 4")
+ print("qd setProperty('prop4', 4)='%s'" % qd.get_value("prop4"))
+ qd.prop4 = 9
+ print("qd.prop4 = 9 ='%s'" % qd.prop4)
+ qd["prop4"] = 11
+ print("qd[prop4] = 11 ='%s'" % qd["prop4"])
+
+ print("qd.mapEncode()='%s'" % qd.map_encode())
+ _qd2 = QmfData( _map = qd.map_encode() )
+ print("_qd2.mapEncode()='%s'" % _qd2.map_encode())
+
+ _qmfDesc1 = QmfConsoleData( {"_values" : {"prop1": 1, "statistics": 666,
+ "prop2": 0}},
+ agent="some agent name?",
+ _schema = _soc)
+
+ print("_qmfDesc1 map='%s'" % _qmfDesc1.map_encode())
+
+ _qmfDesc1._set_schema( _soc )
+
+ print("_qmfDesc1 prop2 = '%s'" % _qmfDesc1.get_value("prop2"))
+ print("_qmfDesc1 primarykey = '%s'" % _qmfDesc1.get_object_id())
+ print("_qmfDesc1 classid = '%s'" % _qmfDesc1.get_schema_class_id())
+
+
+ _qmfDescMap = _qmfDesc1.map_encode()
+ print("_qmfDescMap='%s'" % _qmfDescMap)
+
+ _qmfDesc2 = QmfData( _map=_qmfDescMap, _schema=_soc )
+
+ print("_qmfDesc2 map='%s'" % _qmfDesc2.map_encode())
+ print("_qmfDesc2 prop2 = '%s'" % _qmfDesc2.get_value("prop2"))
+ print("_qmfDesc2 primary key = '%s'" % _qmfDesc2.get_object_id())
+
+
+ logging.info( "******** Messing around with QmfEvents ********" )
+
+
+ _qmfevent1 = QmfEvent( _timestamp = 1111,
+ _schema = _sec,
+ _values = {"Argument-1": 77,
+ "Argument-3": True,
+ "Argument-2": "a string"})
+ print("_qmfevent1.mapEncode()='%s'" % _qmfevent1.map_encode())
+ print("_qmfevent1.getTimestamp()='%s'" % _qmfevent1.get_timestamp())
+
+ _qmfevent1Map = _qmfevent1.map_encode()
+
+ _qmfevent2 = QmfEvent(_map=_qmfevent1Map, _schema=_sec)
+ print("_qmfevent2.mapEncode()='%s'" % _qmfevent2.map_encode())
+
+
+ logging.info( "******** Messing around with Queries ********" )
+
+ _q1 = QmfQuery.create_predicate(QmfQuery.TARGET_AGENT,
+ [QmfQuery.AND,
+ [QmfQuery.EQ, "vendor", [QmfQuery.QUOTE, "AVendor"]],
+ [QmfQuery.EQ, [QmfQuery.QUOTE, "SomeProduct"], "product"],
+ [QmfQuery.EQ, [QmfQuery.UNQUOTE, "name"], [QmfQuery.QUOTE, "Thingy"]],
+ [QmfQuery.OR,
+ [QmfQuery.LE, "temperature", -10],
+ [QmfQuery.FALSE],
+ [QmfQuery.EXISTS, "namey"]]])
+
+ print("_q1.mapEncode() = [%s]" % _q1.map_encode())
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/__init__.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/__init__.py
new file mode 100644
index 0000000000..eff9357e1f
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/__init__.py
@@ -0,0 +1,30 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import agent_discovery
+import basic_query
+import basic_method
+import obj_gets
+import events
+import multi_response
+import async_query
+import async_method
+import subscriptions
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_discovery.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_discovery.py
new file mode 100644
index 0000000000..2c20794aaa
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_discovery.py
@@ -0,0 +1,464 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+import time
+from threading import Thread, Event
+
+import qpid.messaging
+import qmf2.common
+import qmf2.console
+import qmf2.agent
+
+
+class _testNotifier(qmf2.common.Notifier):
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.timeout = 3
+ self.broker_url = broker_url
+ self.notifier = _testNotifier()
+ self.agent = qmf2.agent.Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat)
+ # No database needed for this test
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ self.running = False
+ # wake main thread
+        self.notifier.indication() # NOTE(review): may coincide with a daemon-thread indication -- appears benign (just re-sets the Event), confirm
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # Connect the agent to the broker,
+ # broker_url = "user/passwd@hostname:port"
+
+ conn = qpid.messaging.Connection(self.broker_url)
+ conn.open()
+ self.agent.set_connection(conn)
+ self.ready.set()
+
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ # done, cleanup agent
+ self.agent.remove_connection(self.timeout)
+ self.agent.destroy(self.timeout)
+
+
+class BaseTest(unittest.TestCase):
+ def configure(self, config):
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
+ def setUp(self):
+ # one second agent indication interval
+ self.agent_heartbeat = 1
+ self.agent1 = _agentApp("agent1", self.broker, self.agent_heartbeat)
+ self.agent1.start_app()
+ self.agent2 = _agentApp("agent2", self.broker, self.agent_heartbeat)
+ self.agent2.start_app()
+
+ def tearDown(self):
+ if self.agent1:
+ self.agent1.stop_app()
+ self.agent1 = None
+ if self.agent2:
+ self.agent2.stop_app()
+ self.agent2 = None
+
+ def test_discover_all(self):
+ """
+ create console
+ enable agent discovery
+ wait
+ expect agent add for agent1 and agent2
+ """
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+ self.console.enable_agent_discovery()
+
+ agent1_found = agent2_found = False
+ wi = self.console.get_next_workitem(timeout=3)
+ while wi and not (agent1_found and agent2_found):
+ if wi.get_type() == wi.AGENT_ADDED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = True
+ elif agent.get_name() == "agent2":
+ agent2_found = True
+ else:
+ self.fail("Unexpected agent name received: %s" %
+ agent.get_name())
+ if agent1_found and agent2_found:
+ break;
+
+ wi = self.console.get_next_workitem(timeout=3)
+
+ self.assertTrue(agent1_found and agent2_found, "All agents not discovered")
+
+ self.console.destroy(10)
+
+
+ def test_discover_one(self):
+ """
+ create console
+ enable agent discovery, filter for agent1 only
+ wait until timeout
+ expect agent add for agent1 only
+ """
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ query = qmf2.common.QmfQuery.create_predicate(
+ qmf2.common.QmfQuery.TARGET_AGENT,
+ [qmf2.common.QmfQuery.EQ, qmf2.common.QmfQuery.KEY_AGENT_NAME,
+ [qmf2.common.QmfQuery.QUOTE, "agent1"]])
+ self.console.enable_agent_discovery(query)
+
+ agent1_found = agent2_found = False
+ wi = self.console.get_next_workitem(timeout=3)
+ while wi:
+ if wi.get_type() == wi.AGENT_ADDED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = True
+ elif agent.get_name() == "agent2":
+ agent2_found = True
+ else:
+ self.fail("Unexpected agent name received: %s" %
+ agent.get_name())
+
+ wi = self.console.get_next_workitem(timeout=2)
+
+ self.assertTrue(agent1_found and not agent2_found, "Unexpected agent discovered")
+
+ self.console.destroy(10)
+
+
+ def test_heartbeat(self):
+ """
+ create console with 2 sec agent timeout
+ enable agent discovery, find all agents
+ stop agent1, expect timeout notification
+ stop agent2, expect timeout notification
+ """
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=2)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+ self.console.enable_agent_discovery()
+
+ agent1_found = agent2_found = False
+ wi = self.console.get_next_workitem(timeout=4)
+ while wi and not (agent1_found and agent2_found):
+ if wi.get_type() == wi.AGENT_ADDED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = True
+ elif agent.get_name() == "agent2":
+ agent2_found = True
+ else:
+ self.fail("Unexpected agent name received: %s" %
+ agent.get_name())
+ if agent1_found and agent2_found:
+ break;
+
+ wi = self.console.get_next_workitem(timeout=4)
+
+ self.assertTrue(agent1_found and agent2_found, "All agents not discovered")
+
+ # now kill agent1 and wait for expiration
+
+ agent1 = self.agent1
+ self.agent1 = None
+ agent1.stop_app()
+
+ wi = self.console.get_next_workitem(timeout=4)
+ while wi is not None:
+ if wi.get_type() == wi.AGENT_DELETED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = False
+ else:
+ self.fail("Unexpected agent_deleted received: %s" %
+ agent.get_name())
+ if not agent1_found:
+ break;
+
+ wi = self.console.get_next_workitem(timeout=4)
+
+ self.assertFalse(agent1_found, "agent1 did not delete!")
+
+ # now kill agent2 and wait for expiration
+
+ agent2 = self.agent2
+ self.agent2 = None
+ agent2.stop_app()
+
+ wi = self.console.get_next_workitem(timeout=4)
+ while wi is not None:
+ if wi.get_type() == wi.AGENT_DELETED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent2":
+ agent2_found = False
+ else:
+ self.fail("Unexpected agent_deleted received: %s" %
+ agent.get_name())
+ if not agent2_found:
+ break;
+
+ wi = self.console.get_next_workitem(timeout=4)
+
+ self.assertFalse(agent2_found, "agent2 did not delete!")
+
+ self.console.destroy(10)
+
+
+ def test_find_agent(self):
+ """
+ create console
+ do not enable agent discovery
+ find agent1, expect success
+ find agent-none, expect failure
+ find agent2, expect success
+ """
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ agent1 = self.console.find_agent("agent1", timeout=3)
+ self.assertTrue(agent1 and agent1.get_name() == "agent1")
+
+ no_agent = self.console.find_agent("agent-none", timeout=3)
+ self.assertTrue(no_agent == None)
+
+ agent2 = self.console.find_agent("agent2", timeout=3)
+ self.assertTrue(agent2 and agent2.get_name() == "agent2")
+
+ self.console.remove_connection(self.conn, 10)
+ self.console.destroy(10)
+
+
+ def test_heartbeat_x2(self):
+ """
+ create 2 consoles with 2 sec agent timeout
+ enable agent discovery, find all agents
+ stop agent1, expect timeout notification on both consoles
+ stop agent2, expect timeout notification on both consoles
+ """
+ console_count = 2
+ self.consoles = []
+ for i in range(console_count):
+ console = qmf2.console.Console("test-console-" + str(i),
+ notifier=_testNotifier(),
+ agent_timeout=2)
+ conn = qpid.messaging.Connection(self.broker)
+ conn.open()
+ console.add_connection(conn)
+ console.enable_agent_discovery()
+ self.consoles.append(console)
+
+ # now wait for all consoles to discover all agents,
+ # agents send a heartbeat once a second
+ for console in self.consoles:
+ agent1_found = agent2_found = False
+ wi = console.get_next_workitem(timeout=2)
+ while wi and not (agent1_found and agent2_found):
+ if wi.get_type() == wi.AGENT_ADDED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = True
+ elif agent.get_name() == "agent2":
+ agent2_found = True
+ else:
+ self.fail("Unexpected agent name received: %s" %
+ agent.get_name())
+ if agent1_found and agent2_found:
+ break;
+ wi = console.get_next_workitem(timeout=2)
+
+ self.assertTrue(agent1_found and agent2_found, "All agents not discovered")
+
+ # now kill agent1 and wait for expiration
+
+ agent1 = self.agent1
+ self.agent1 = None
+ agent1.stop_app()
+
+ for console in self.consoles:
+ agent1_found = True
+ wi = console.get_next_workitem(timeout=4)
+ while wi is not None:
+ if wi.get_type() == wi.AGENT_DELETED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent1":
+ agent1_found = False
+ break
+ else:
+ self.fail("Unexpected agent_deleted received: %s" %
+ agent.get_name())
+
+ wi = console.get_next_workitem(timeout=4)
+
+ self.assertFalse(agent1_found, "agent1 did not delete!")
+
+ # now kill agent2 and wait for expiration
+
+ agent2 = self.agent2
+ self.agent2 = None
+ agent2.stop_app()
+
+ for console in self.consoles:
+ agent2_found = True
+ wi = console.get_next_workitem(timeout=4)
+ while wi is not None:
+ if wi.get_type() == wi.AGENT_DELETED:
+ agent = wi.get_params().get("agent")
+ if not agent or not isinstance(agent, qmf2.console.Agent):
+ self.fail("Unexpected workitem from agent")
+ else:
+ if agent.get_name() == "agent2":
+ agent2_found = False
+ break
+ else:
+ self.fail("Unexpected agent_deleted received: %s" %
+ agent.get_name())
+
+ wi = console.get_next_workitem(timeout=4)
+
+ self.assertFalse(agent2_found, "agent2 did not delete!")
+
+
+ for console in self.consoles:
+ console.destroy(10)
+
+
+ def test_find_agent_x2(self):
+ """
+ create 2 consoles, do not enable agent discovery
+ console-1: find agent1, expect success
+ console-2: find agent2, expect success
+ Verify console-1 does -not- know agent2
+ Verify console-2 does -not- know agent1
+ """
+ console_count = 2
+ self.consoles = []
+ for i in range(console_count):
+ console = qmf2.console.Console("test-console-" + str(i),
+ notifier=_testNotifier(),
+ agent_timeout=2)
+ conn = qpid.messaging.Connection(self.broker)
+ conn.open()
+ console.add_connection(conn)
+ self.consoles.append(console)
+
+ agent1 = self.consoles[0].find_agent("agent1", timeout=3)
+ self.assertTrue(agent1 and agent1.get_name() == "agent1")
+
+ agent2 = self.consoles[1].find_agent("agent2", timeout=3)
+ self.assertTrue(agent2 and agent2.get_name() == "agent2")
+
+ # wait long enough for agent heartbeats to be sent...
+
+ time.sleep(self.agent_heartbeat * 2)
+
+ agents = self.consoles[0].get_agents()
+ self.assertTrue(len(agents) == 1 and agents[0].get_name() == "agent1")
+ agent1 = self.consoles[0].get_agent("agent1")
+ self.assertTrue(agent1 and agent1.get_name() == "agent1")
+
+
+ agents = self.consoles[1].get_agents()
+ self.assertTrue(len(agents) == 1 and agents[0].get_name() == "agent2")
+ agent2 = self.consoles[1].get_agent("agent2")
+ self.assertTrue(agent2 and agent2.get_name() == "agent2")
+
+ # verify no new agents were learned
+
+ for console in self.consoles:
+ console.destroy(10)
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_test.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_test.py
new file mode 100644
index 0000000000..14d8ada197
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/agent_test.py
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import logging
+import time
+import unittest
+from threading import Semaphore
+
+
+from qpid.messaging import *
+from qmf2.common import (qmfTypes, SchemaProperty, SchemaObjectClass, QmfData,
+ QmfEvent, SchemaMethod, Notifier, SchemaClassId,
+ WorkItem)
+from qmf2.agent import (Agent, QmfAgentData)
+
+
+
+class ExampleNotifier(Notifier):
+ def __init__(self):
+ self._sema4 = Semaphore(0) # locked
+
+ def indication(self):
+ self._sema4.release()
+
+ def waitForWork(self):
+ print("Waiting for event...")
+ self._sema4.acquire()
+ print("...event present")
+
+
+
+
+class QmfTest(unittest.TestCase):
+ def test_begin(self):
+ print("!!! being test")
+
+ def test_end(self):
+ print("!!! end test")
+
+
+#
+# An example agent application
+#
+
+
+if __name__ == '__main__':
+ _notifier = ExampleNotifier()
+ _agent = Agent( "qmf.testAgent", _notifier=_notifier )
+
+ # Dynamically construct a class schema
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage", "MyClass"),
+ _desc="A test data schema",
+ _object_id_names=["index1", "index2"] )
+ # add properties
+ _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+ _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # these two properties are statistics
+ _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # These two properties can be set via the method call
+ _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+
+ # add method
+ _meth = SchemaMethod( _desc="Method to set string and int in object." )
+ _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+ _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+ _schema.add_method( "set_meth", _meth )
+
+ # Add schema to Agent
+
+ _agent.register_object_class(_schema)
+
+ # instantiate managed data objects matching the schema
+
+ _obj1 = QmfAgentData( _agent, _schema=_schema )
+ _obj1.set_value("index1", 100)
+ _obj1.set_value("index2", "a name" )
+ _obj1.set_value("set_string", "UNSET")
+ _obj1.set_value("set_int", 0)
+ _obj1.set_value("query_count", 0)
+ _obj1.set_value("method_call_count", 0)
+ _agent.add_object( _obj1 )
+
+ _agent.add_object( QmfAgentData( _agent, _schema=_schema,
+ _values={"index1":99,
+ "index2": "another name",
+ "set_string": "UNSET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+ # add an "unstructured" object to the Agent
+ _obj2 = QmfAgentData(_agent, _object_id="01545")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 2)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ _agent.add_object(_obj2)
+
+
+ ## Now connect to the broker
+
+ _c = Connection("localhost")
+ _c.connect()
+ _agent.setConnection(_c)
+
+ _error_data = QmfData.create({"code": -1, "description": "You made a boo-boo."})
+
+ _done = False
+ while not _done:
+ # try:
+ _notifier.waitForWork()
+
+ _wi = _agent.get_next_workitem(timeout=0)
+ while _wi:
+
+ if _wi.get_type() == WorkItem.METHOD_CALL:
+ mc = _wi.get_params()
+
+ if mc.get_name() == "set_meth":
+ print("!!! Calling 'set_meth' on Object_id = %s" % mc.get_object_id())
+ print("!!! args='%s'" % str(mc.get_args()))
+ print("!!! userid=%s" % str(mc.get_user_id()))
+ print("!!! handle=%s" % _wi.get_handle())
+ _agent.method_response(_wi.get_handle(),
+ {"rc1": 100, "rc2": "Success"})
+ else:
+ print("!!! Unknown Method name = %s" % mc.get_name())
+ _agent.method_response(_wi.get_handle(), _error=_error_data)
+ else:
+ print("TBD: work item %d:%s" % (_wi.get_type(), str(_wi.get_params())))
+
+ _agent.release_workitem(_wi)
+ _wi = _agent.get_next_workitem(timeout=0)
+ # except:
+ # print( "shutting down...")
+ # _done = True
+
+ print( "Removing connection... TBD!!!" )
+ #_myConsole.remove_connection( _c, 10 )
+
+ print( "Destroying agent... TBD!!!" )
+ #_myConsole.destroy( 10 )
+
+ print( "******** agent test done ********" )
+
+
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_method.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_method.py
new file mode 100644
index 0000000000..2339fc71a9
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_method.py
@@ -0,0 +1,353 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData, WorkItem)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent, MethodCallParams)
+
+
+class _testNotifier(Notifier):
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat)
+
+ # Dynamically construct a management database
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage", "MyClass"),
+ _desc="A test data schema",
+ _object_id_names=["index1", "index2"] )
+ # add properties
+ _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+ _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # these two properties are statistics
+ _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # These two properties can be set via the method call
+ _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # add method
+ _meth = SchemaMethod( _desc="Method to set string and int in object." )
+ _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+ _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+ # the input value of cookie is returned in the response
+ _meth.add_argument( "cookie", SchemaProperty(qmfTypes.TYPE_LSTR,
+ kwargs={"dir":"IO"}))
+ _schema.add_method( "set_meth", _meth )
+
+ # Add schema to Agent
+
+ self.agent.register_object_class(_schema)
+
+ # instantiate managed data objects matching the schema
+
+ _obj1 = QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":100, "index2":"a name"})
+ _obj1.set_value("set_string", "UNSET")
+ _obj1.set_value("set_int", 0)
+ _obj1.set_value("query_count", 0)
+ _obj1.set_value("method_call_count", 0)
+ self.agent.add_object( _obj1 )
+
+ self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":99,
+ "index2": "another name",
+ "set_string": "UNSET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+ # add an "unstructured" object to the Agent
+ _obj2 = QmfAgentData(self.agent, _object_id="01545")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 2)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ self.agent.add_object(_obj2)
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ self.running = False
+ # wake main thread
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ # Agent application main processing loop
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ if wi.get_type() == WorkItem.METHOD_CALL:
+ mc = wi.get_params()
+ if not isinstance(mc, MethodCallParams):
+ raise Exception("Unexpected method call parameters")
+
+ if mc.get_name() == "set_meth":
+ obj = self.agent.get_object(mc.get_object_id(),
+ mc.get_schema_id())
+ if obj is None:
+ error_info = QmfData.create({"code": -2,
+ "description":
+ "Bad Object Id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ obj.inc_value("method_call_count")
+ out_args = {"code" : 0}
+ if "cookie" in mc.get_args():
+ out_args["cookie"] = mc.get_args()["cookie"]
+ if "arg_int" in mc.get_args():
+ obj.set_value("set_int", mc.get_args()["arg_int"])
+ if "arg_str" in mc.get_args():
+ obj.set_value("set_string", mc.get_args()["arg_str"])
+ self.agent.method_response(wi.get_handle(),
+ out_args)
+ elif mc.get_name() == "a_method":
+ obj = self.agent.get_object(mc.get_object_id(),
+ mc.get_schema_id())
+ if obj is None:
+ error_info = QmfData.create({"code": -3,
+ "description":
+ "Unknown object id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ elif obj.get_object_id() != "01545":
+ error_info = QmfData.create( {"code": -4,
+ "description":
+ "Unexpected id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ args = mc.get_args()
+ if ("arg1" in args and args["arg1"] == 1 and
+ "arg2" in args and args["arg2"] == "Now set!"
+ and "arg3" in args and args["arg3"] == 1966):
+ out_args = {"code" : 0}
+ if "cookie" in mc.get_args():
+ out_args["cookie"] = mc.get_args()["cookie"]
+ self.agent.method_response(wi.get_handle(),
+ out_args)
+ else:
+ error_info = QmfData.create(
+ {"code": -5,
+ "description":
+ "Bad Args."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ error_info = QmfData.create( {"code": -1,
+ "description":
+ "Unknown method call."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(), _error=error_info)
+
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+
+class BaseTest(unittest.TestCase):
+ def configure(self, config):
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
+ def setUp(self):
+ # one second agent heartbeat interval
+ self.agent_heartbeat = 1
+ self.agent1 = _agentApp("agent1", self.broker, self.agent_heartbeat)
+ self.agent1.start_app()
+ self.agent2 = _agentApp("agent2", self.broker, self.agent_heartbeat)
+ self.agent2.start_app()
+
+ def tearDown(self):
+ if self.agent1:
+ self.agent1.stop_app()
+ self.agent1 = None
+ if self.agent2:
+ self.agent2.stop_app()
+ self.agent2 = None
+
+ def test_described_obj(self):
+ # create console
+ # find agents
+ # synchronous query for all objects in schema
+ # method call on each object
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ i_count = 0
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
+
+ sid_list = self.console.do_query(agent, query)
+ self.assertTrue(sid_list and len(sid_list) == 1)
+ for sid in sid_list:
+ t_params = {QmfData.KEY_SCHEMA_ID: sid}
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
+ _target_params=t_params)
+ obj_list = self.console.do_query(agent, query)
+ self.assertTrue(len(obj_list) == 2)
+ for obj in obj_list:
+ cookie = "cookie-" + str(i_count)
+ i_count += 1
+ mr = obj.invoke_method( "set_meth",
+ {"arg_int": -99,
+ "arg_str": "Now set!",
+ "cookie": cookie},
+ _reply_handle=cookie,
+ _timeout=3)
+ self.assertTrue(mr)
+
+ # done, now wait for async responses
+
+ r_count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ r_count += 1
+ self.assertTrue(wi.get_type() == WorkItem.METHOD_RESPONSE)
+ reply = wi.get_params()
+ self.assertTrue(isinstance(reply, qmf2.console.MethodResult))
+ self.assertTrue(reply.succeeded())
+ self.assertTrue(reply.get_argument("cookie") == wi.get_handle())
+
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(r_count == i_count)
+
+ self.console.destroy(10)
+
+
+ def test_managed_obj(self):
+ # create console
+ # find agents
+ # synchronous query for a managed object
+ # method call on each object
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ i_count = 0
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ query = QmfQuery.create_id(QmfQuery.TARGET_OBJECT, "01545")
+ obj_list = self.console.do_query(agent, query)
+
+ self.assertTrue(isinstance(obj_list, type([])))
+ self.assertTrue(len(obj_list) == 1)
+ obj = obj_list[0]
+
+ cookie = "cookie-" + str(i_count)
+ i_count += 1
+ mr = obj.invoke_method("a_method",
+ {"arg1": 1,
+ "arg2": "Now set!",
+ "arg3": 1966,
+ "cookie": cookie},
+ _reply_handle=cookie,
+ _timeout=3)
+ self.assertTrue(mr)
+
+ # done, now wait for async responses
+
+ r_count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ r_count += 1
+ self.assertTrue(wi.get_type() == WorkItem.METHOD_RESPONSE)
+ reply = wi.get_params()
+ self.assertTrue(isinstance(reply, qmf2.console.MethodResult))
+ self.assertTrue(reply.succeeded())
+ self.assertTrue(reply.get_argument("cookie") == wi.get_handle())
+
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(r_count == i_count)
+
+ self.console.destroy(10)
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_query.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_query.py
new file mode 100644
index 0000000000..b1c01611f7
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/async_query.py
@@ -0,0 +1,444 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData, WorkItem)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+
+class _testNotifier(Notifier):
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat)
+
+ # Dynamically construct a management database
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage", "MyClass"),
+ _desc="A test data schema",
+ _object_id_names=["index1", "index2"] )
+ # add properties
+ _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+ _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # these two properties are statistics
+ _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # These two properties can be set via the method call
+ _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # add method
+ _meth = SchemaMethod( _desc="Method to set string and int in object." )
+ _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+ _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+ _schema.add_method( "set_meth", _meth )
+
+ # Add schema to Agent
+
+ self.agent.register_object_class(_schema)
+
+ # instantiate managed data objects matching the schema
+
+ _obj1 = QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":100, "index2":"a name"})
+ _obj1.set_value("set_string", "UNSET")
+ _obj1.set_value("set_int", 0)
+ _obj1.set_value("query_count", 0)
+ _obj1.set_value("method_call_count", 0)
+ self.agent.add_object( _obj1 )
+
+ self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":99,
+ "index2": "another name",
+ "set_string": "UNSET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+ self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":50,
+ "index2": "my name",
+ "set_string": "SET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+
+ # add an "unstructured" object to the Agent
+ _obj2 = QmfAgentData(self.agent, _object_id="01545")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 2)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ _obj2.set_value("index1", 50)
+ self.agent.add_object(_obj2)
+
+ _obj2 = QmfAgentData(self.agent, _object_id="01546")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 3)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ _obj2.set_value("index1", 51)
+ self.agent.add_object(_obj2)
+
+ _obj2 = QmfAgentData(self.agent, _object_id="01544")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 4)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ _obj2.set_value("index1", 49)
+ self.agent.add_object(_obj2)
+
+ _obj2 = QmfAgentData(self.agent, _object_id="01543")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 4)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ _obj2.set_value("index1", 48)
+ self.agent.add_object(_obj2)
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ self.running = False
+ # wake main thread
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+
+
+class BaseTest(unittest.TestCase):
+ def configure(self, config):
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
+ def setUp(self):
+ # one second agent indication interval
+ self.agent_heartbeat = 1
+ self.agent1 = _agentApp("agent1", self.broker, self.agent_heartbeat)
+ self.agent1.start_app()
+ self.agent2 = _agentApp("agent2", self.broker, self.agent_heartbeat)
+ self.agent2.start_app()
+
+ def tearDown(self):
+ if self.agent1:
+ self.agent1.stop_app()
+ self.agent1 = None
+ if self.agent2:
+ self.agent2.stop_app()
+ self.agent2 = None
+
+ def test_all_schema_ids(self):
+ # create console
+ # find agents
+ # asynchronous query for all schema ids
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # send queries
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
+ rc = self.console.do_query(agent, query,
+ _reply_handle=aname)
+ self.assertTrue(rc)
+
+ # done. Now wait for async responses
+
+ count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ count += 1
+ self.assertTrue(wi.get_type() == WorkItem.QUERY_COMPLETE)
+ self.assertTrue(wi.get_handle() == "agent1" or
+ wi.get_handle() == "agent2")
+ reply = wi.get_params()
+ self.assertTrue(len(reply) == 1)
+ self.assertTrue(isinstance(reply[0], SchemaClassId))
+ self.assertTrue(reply[0].get_package_name() == "MyPackage")
+ self.assertTrue(reply[0].get_class_name() == "MyClass")
+ self.console.release_workitem(wi)
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(count == 2)
+ self.console.destroy(10)
+
+
+
+ def test_undescribed_objs(self):
+ # create console
+ # find agents
+ # asynchronous query for all non-schema objects
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # send queries
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT)
+ rc = self.console.do_query(agent, query, _reply_handle=aname)
+ self.assertTrue(rc)
+
+ # done. Now wait for async responses
+
+ count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ count += 1
+ self.assertTrue(wi.get_type() == WorkItem.QUERY_COMPLETE)
+ self.assertTrue(wi.get_handle() == "agent1" or
+ wi.get_handle() == "agent2")
+ reply = wi.get_params()
+ self.assertTrue(len(reply) == 4)
+ self.assertTrue(isinstance(reply[0], qmf2.console.QmfConsoleData))
+ self.assertFalse(reply[0].is_described()) # no schema
+ self.console.release_workitem(wi)
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(count == 2)
+ self.console.destroy(10)
+
+
+
+ def test_described_objs(self):
+ # create console
+ # find agents
+ # asynchronous query for all schema-based objects
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ #
+ t_params = {QmfData.KEY_SCHEMA_ID: SchemaClassId("MyPackage", "MyClass")}
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT, t_params)
+ #
+ rc = self.console.do_query(agent, query, _reply_handle=aname)
+ self.assertTrue(rc)
+
+ # done. Now wait for async responses
+
+ count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ count += 1
+ self.assertTrue(wi.get_type() == WorkItem.QUERY_COMPLETE)
+ self.assertTrue(wi.get_handle() == "agent1" or
+ wi.get_handle() == "agent2")
+ reply = wi.get_params()
+ self.assertTrue(len(reply) == 3)
+ self.assertTrue(isinstance(reply[0], qmf2.console.QmfConsoleData))
+ self.assertTrue(reply[0].is_described()) # has schema
+ self.console.release_workitem(wi)
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(count == 2)
+ # @todo test if the console has learned the corresponding schemas....
+ self.console.destroy(10)
+
+
+
+ def test_all_schemas(self):
+ # create console
+ # find agents
+ # asynchronous query for all schemas
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ # test internal state using non-api calls:
+ # no schemas present yet
+ self.assertTrue(len(self.console._schema_cache) == 0)
+ # end test
+
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # send queries
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA)
+ rc = self.console.do_query(agent, query, _reply_handle=aname)
+ self.assertTrue(rc)
+
+ # done. Now wait for async responses
+
+ count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ count += 1
+ self.assertTrue(wi.get_type() == WorkItem.QUERY_COMPLETE)
+ self.assertTrue(wi.get_handle() == "agent1" or
+ wi.get_handle() == "agent2")
+ reply = wi.get_params()
+ self.assertTrue(len(reply) == 1)
+ self.assertTrue(isinstance(reply[0], qmf2.common.SchemaObjectClass))
+ self.assertTrue(reply[0].get_class_id().get_package_name() == "MyPackage")
+ self.assertTrue(reply[0].get_class_id().get_class_name() == "MyClass")
+ self.console.release_workitem(wi)
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(count == 2)
+
+ # test internal state using non-api calls:
+ # schema has been learned
+ self.assertTrue(len(self.console._schema_cache) == 1)
+ # end test
+
+ self.console.destroy(10)
+
+
+
+ def test_query_expiration(self):
+ # create console
+ # find agents
+ # kill the agents
+ # send async query
+ # wait for & verify expiration
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=30)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ # find the agents
+ agents = []
+ for aname in ["agent1", "agent2"]:
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+ agents.append(agent)
+
+ # now nuke the agents from orbit. It's the only way to be sure.
+
+ self.agent1.stop_app()
+ self.agent1 = None
+ self.agent2.stop_app()
+ self.agent2 = None
+
+ # now send queries to agents that no longer exist
+ for agent in agents:
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA)
+ rc = self.console.do_query(agent, query,
+ _reply_handle=agent.get_name(),
+ _timeout=2)
+ self.assertTrue(rc)
+
+ # done. Now wait for async responses due to timeouts
+
+ count = 0
+ while self.notifier.wait_for_work(3):
+ wi = self.console.get_next_workitem(timeout=0)
+ while wi is not None:
+ count += 1
+ self.assertTrue(wi.get_type() == WorkItem.QUERY_COMPLETE)
+ self.assertTrue(wi.get_handle() == "agent1" or
+ wi.get_handle() == "agent2")
+ reply = wi.get_params()
+ self.assertTrue(len(reply) == 0) # empty
+
+ self.console.release_workitem(wi)
+ wi = self.console.get_next_workitem(timeout=0)
+
+ self.assertTrue(count == 2)
+ self.console.destroy(10)
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_method.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_method.py
new file mode 100644
index 0000000000..8d038bc4c8
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_method.py
@@ -0,0 +1,391 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData, WorkItem)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent, MethodCallParams)
+
+
+class _testNotifier(Notifier):
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat)
+
+ # Dynamically construct a management database
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage", "MyClass"),
+ _desc="A test data schema",
+ _object_id_names=["index1", "index2"] )
+ # add properties
+ _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+ _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # these two properties are statistics
+ _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # These two properties can be set via the method call
+ _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # add method
+ _meth = SchemaMethod( _desc="Method to set string and int in object." )
+ _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+ _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+ _schema.add_method( "set_meth", _meth )
+
+ # Add schema to Agent
+
+ self.agent.register_object_class(_schema)
+
+ # instantiate managed data objects matching the schema
+
+ _obj1 = QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":100, "index2":"a name"})
+ _obj1.set_value("set_string", "UNSET")
+ _obj1.set_value("set_int", 0)
+ _obj1.set_value("query_count", 0)
+ _obj1.set_value("method_call_count", 0)
+ self.agent.add_object( _obj1 )
+
+ self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":99,
+ "index2": "another name",
+ "set_string": "UNSET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+ # add an "unstructured" object to the Agent
+ _obj2 = QmfAgentData(self.agent, _object_id="01545")
+ _obj2.set_value("field1", "a value")
+ _obj2.set_value("field2", 2)
+ _obj2.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj2.set_value("field4", ["a", "list", "value"])
+ self.agent.add_object(_obj2)
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ self.running = False
+ # wake main thread
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ # Agent application main processing loop
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ if wi.get_type() == WorkItem.METHOD_CALL:
+ mc = wi.get_params()
+ if not isinstance(mc, MethodCallParams):
+ raise Exception("Unexpected method call parameters")
+
+ if mc.get_name() == "set_meth":
+ obj = self.agent.get_object(mc.get_object_id(),
+ mc.get_schema_id())
+ if obj is None:
+ error_info = QmfData.create({"code": -2,
+ "description":
+ "Bad Object Id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ obj.inc_value("method_call_count")
+ if "arg_int" in mc.get_args():
+ obj.set_value("set_int", mc.get_args()["arg_int"])
+ if "arg_str" in mc.get_args():
+ obj.set_value("set_string", mc.get_args()["arg_str"])
+ self.agent.method_response(wi.get_handle(),
+ {"code" : 0})
+ elif mc.get_name() == "a_method":
+ obj = self.agent.get_object(mc.get_object_id(),
+ mc.get_schema_id())
+ if obj is None:
+ error_info = QmfData.create({"code": -3,
+ "description":
+ "Unknown object id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ elif obj.get_object_id() != "01545":
+ error_info = QmfData.create( {"code": -4,
+ "description":
+ "Unexpected id."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ args = mc.get_args()
+ if ("arg1" in args and args["arg1"] == 1 and
+ "arg2" in args and args["arg2"] == "Now set!"
+ and "arg3" in args and args["arg3"] == 1966):
+ self.agent.method_response(wi.get_handle(),
+ {"code" : 0})
+ else:
+ error_info = QmfData.create(
+ {"code": -5,
+ "description":
+ "Bad Args."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(),
+ _error=error_info)
+ else:
+ error_info = QmfData.create( {"code": -1,
+ "description":
+ "Unknown method call."},
+ _object_id="_error")
+ self.agent.method_response(wi.get_handle(), _error=error_info)
+
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+
class BaseTest(unittest.TestCase):
    # Integration tests for QMF method-call handling.  Each test creates a
    # console, connects it to the broker, locates the two agents started by
    # setUp(), queries for objects, and invokes methods on them.
    # NOTE(review): requires a running broker at self.broker — these are
    # broker-backed integration tests, not unit tests.

    def configure(self, config):
        # Invoked by the test harness to supply broker connection settings.
        self.config = config
        self.broker = config.broker
        self.defines = self.config.defines

    def setUp(self):
        # one second agent heartbeat interval
        self.agent_heartbeat = 1
        self.agent1 = _agentApp("agent1", self.broker, self.agent_heartbeat)
        self.agent1.start_app()
        self.agent2 = _agentApp("agent2", self.broker, self.agent_heartbeat)
        self.agent2.start_app()

    def tearDown(self):
        # Stop both agent threads; stop_app() raises if a thread does not
        # terminate within its join timeout.
        if self.agent1:
            self.agent1.stop_app()
            self.agent1 = None
        if self.agent2:
            self.agent2.stop_app()
            self.agent2 = None

    def test_described_obj(self):
        # create console
        # find agents
        # synchronous query for all objects in schema
        # method call on each object
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)

            sid_list = self.console.do_query(agent, query)
            self.assertTrue(sid_list and len(sid_list) == 1)
            for sid in sid_list:
                t_params = {QmfData.KEY_SCHEMA_ID: sid}
                query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
                                                 _target_params=t_params)
                obj_list = self.console.do_query(agent, query)
                self.assertTrue(len(obj_list) == 2)
                for obj in obj_list:
                    mr = obj.invoke_method( "set_meth", {"arg_int": -99,
                                                         "arg_str": "Now set!"},
                                            _timeout=3)
                    self.assertTrue(isinstance(mr, qmf2.console.MethodResult))
                    self.assertTrue(mr.succeeded())
                    self.assertTrue(mr.get_argument("code") == 0)

                    # the local copy still holds the pre-call values...
                    self.assertTrue(obj.get_value("method_call_count") == 0)
                    self.assertTrue(obj.get_value("set_string") == "UNSET")
                    self.assertTrue(obj.get_value("set_int") == 0)

                    obj.refresh()

                    # ...refresh() re-fetches the object from the agent,
                    # picking up the updates made by the method call.
                    self.assertTrue(obj.get_value("method_call_count") == 1)
                    self.assertTrue(obj.get_value("set_string") == "Now set!")
                    self.assertTrue(obj.get_value("set_int") == -99)

        self.console.destroy(10)


    def test_bad_method_schema(self):
        # create console
        # find agents
        # synchronous query for all objects with schema
        # invalid method call on each object
        # - should throw a ValueError - NOT YET.
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)

            sid_list = self.console.do_query(agent, query)
            self.assertTrue(sid_list and len(sid_list) == 1)
            for sid in sid_list:

                t_params = {QmfData.KEY_SCHEMA_ID: sid}
                query = QmfQuery.create_predicate(QmfQuery.TARGET_OBJECT,
                                                  [QmfQuery.TRUE],
                                                  _target_params=t_params)

                obj_list = self.console.do_query(agent, query)
                self.assertTrue(len(obj_list) == 2)
                for obj in obj_list:
                    # the agent replies with an error (-1) for an unknown
                    # method name rather than raising locally (see comment
                    # above: ValueError validation not yet implemented)
                    mr = obj.invoke_method("unknown_method",
                                           {"arg1": -99, "arg2": "Now set!"},
                                           _timeout=3)
                    # self.failUnlessRaises(ValueError,
                    #                       obj.invoke_method,
                    #                       "unknown_meth",
                    #                       {"arg1": -99, "arg2": "Now set!"},
                    #                       _timeout=3)
                    self.assertTrue(isinstance(mr, qmf2.console.MethodResult))
                    self.assertFalse(mr.succeeded())
                    self.assertTrue(isinstance(mr.get_exception(), QmfData))

        self.console.destroy(10)

    def test_bad_method_no_schema(self):
        # create console
        # find agents
        # synchronous query for all objects with no schema
        # invalid method call on each object
        # - should throw a ValueError
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT)

            obj_list = self.console.do_query(agent, query)
            self.assertTrue(len(obj_list) == 1)
            for obj in obj_list:
                # schema-less object: no schema id is available
                self.assertTrue(obj.get_schema_class_id() == None)
                mr = obj.invoke_method("unknown_meth",
                                       {"arg1": -99, "arg2": "Now set!"},
                                       _timeout=3)
                self.assertTrue(isinstance(mr, qmf2.console.MethodResult))
                self.assertFalse(mr.succeeded())
                self.assertTrue(isinstance(mr.get_exception(), QmfData))

        self.console.destroy(10)

    def test_managed_obj(self):
        # create console
        # find agents
        # synchronous query for a managed object
        # method call on each object
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # "01545" is the well-known id of the agent's unmanaged object
            query = QmfQuery.create_id(QmfQuery.TARGET_OBJECT, "01545")
            obj_list = self.console.do_query(agent, query)

            self.assertTrue(isinstance(obj_list, type([])))
            self.assertTrue(len(obj_list) == 1)
            obj = obj_list[0]

            mr = obj.invoke_method("a_method",
                                   {"arg1": 1,
                                    "arg2": "Now set!",
                                    "arg3": 1966},
                                   _timeout=3)
            self.assertTrue(isinstance(mr, qmf2.console.MethodResult))
            self.assertTrue(mr.succeeded())
            self.assertTrue(mr.get_argument("code") == 0)
            # @todo refresh and verify changes

        self.console.destroy(10)
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_query.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_query.py
new file mode 100644
index 0000000000..9f5dda6d54
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/basic_query.py
@@ -0,0 +1,492 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+
class _testNotifier(Notifier):
    """Notifier backed by a threading.Event, bridging the QMF daemon
    thread (which signals) and the application thread (which waits)."""

    def __init__(self):
        self._event = Event()

    def indication(self):
        # note: called by qmf daemon thread
        self._event.set()

    def wait_for_work(self, timeout):
        # note: called by application thread to wait
        # for qmf to generate work.  Returns True when work arrived,
        # False when the wait timed out.
        self._event.wait(timeout)
        if not self._event.isSet():
            return False
        self._event.clear()
        return True
+
+
class _agentApp(Thread):
    # QMF agent running on its own thread.  Publishes one schema
    # (MyPackage:MyClass), three described objects of that schema, and
    # four schema-less ("unstructured") objects, then services work items
    # until stop_app() is called.

    def __init__(self, name, broker_url, heartbeat):
        Thread.__init__(self)
        self.notifier = _testNotifier()
        self.broker_url = broker_url
        self.agent = Agent(name,
                           _notifier=self.notifier,
                           heartbeat_interval=heartbeat)

        # Dynamically construct a management database

        # object ids are derived from the two index properties
        _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage", "MyClass"),
                                     _desc="A test data schema",
                                     _object_id_names=["index1", "index2"] )
        # add properties
        _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
        _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))

        # these two properties are statistics
        _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
        _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))

        # These two properties can be set via the method call
        _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
        _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))

        # add method
        _meth = SchemaMethod( _desc="Method to set string and int in object." )
        _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
        _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
        _schema.add_method( "set_meth", _meth )

        # Add schema to Agent

        self.agent.register_object_class(_schema)

        # instantiate managed data objects matching the schema

        _obj1 = QmfAgentData( self.agent, _schema=_schema,
                              _values={"index1":100, "index2":"a name"})
        _obj1.set_value("set_string", "UNSET")
        _obj1.set_value("set_int", 0)
        _obj1.set_value("query_count", 0)
        _obj1.set_value("method_call_count", 0)
        self.agent.add_object( _obj1 )

        self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
                                             _values={"index1":99,
                                                      "index2": "another name",
                                                      "set_string": "UNSET",
                                                      "set_int": 0,
                                                      "query_count": 0,
                                                      "method_call_count": 0} ))

        self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
                                             _values={"index1":50,
                                                      "index2": "my name",
                                                      "set_string": "SET",
                                                      "set_int": 0,
                                                      "query_count": 0,
                                                      "method_call_count": 0} ))


        # add an "unstructured" object to the Agent
        _obj2 = QmfAgentData(self.agent, _object_id="01545")
        _obj2.set_value("field1", "a value")
        _obj2.set_value("field2", 2)
        _obj2.set_value("field3", {"a":1, "map":2, "value":3})
        _obj2.set_value("field4", ["a", "list", "value"])
        _obj2.set_value("index1", 50)
        self.agent.add_object(_obj2)

        _obj2 = QmfAgentData(self.agent, _object_id="01546")
        _obj2.set_value("field1", "a value")
        _obj2.set_value("field2", 3)
        _obj2.set_value("field3", {"a":1, "map":2, "value":3})
        _obj2.set_value("field4", ["a", "list", "value"])
        _obj2.set_value("index1", 51)
        self.agent.add_object(_obj2)

        _obj2 = QmfAgentData(self.agent, _object_id="01544")
        _obj2.set_value("field1", "a value")
        _obj2.set_value("field2", 4)
        _obj2.set_value("field3", {"a":1, "map":2, "value":3})
        _obj2.set_value("field4", ["a", "list", "value"])
        _obj2.set_value("index1", 49)
        self.agent.add_object(_obj2)

        _obj2 = QmfAgentData(self.agent, _object_id="01543")
        _obj2.set_value("field1", "a value")
        _obj2.set_value("field2", 4)
        _obj2.set_value("field3", {"a":1, "map":2, "value":3})
        _obj2.set_value("field4", ["a", "list", "value"])
        _obj2.set_value("index1", 48)
        self.agent.add_object(_obj2)

        self.running = False
        self.ready = Event()

    def start_app(self):
        # Start the thread and block until run() signals a successful
        # broker connection (or 10 seconds elapse).
        self.running = True
        self.start()
        self.ready.wait(10)
        if not self.ready.is_set():
            raise Exception("Agent failed to connect to broker.")

    def stop_app(self):
        # Ask run() to exit and wait for the thread to terminate.
        self.running = False
        # wake main thread
        self.notifier.indication() # hmmm... collide with daemon???
        self.join(10)
        if self.isAlive():
            raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")

    def run(self):
        # Thread body: connect, signal readiness, then drain work items
        # until stop_app() clears self.running.  No work items are
        # expected in this test, so any that arrive are logged as errors.
        # broker_url = "user/passwd@hostname:port"
        self.conn = qpid.messaging.Connection(self.broker_url)
        self.conn.open()
        self.agent.set_connection(self.conn)
        self.ready.set()

        while self.running:
            self.notifier.wait_for_work(None)
            wi = self.agent.get_next_workitem(timeout=0)
            while wi is not None:
                logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
                self.agent.release_workitem(wi)
                wi = self.agent.get_next_workitem(timeout=0)

        if self.conn:
            self.agent.remove_connection(10)
        self.agent.destroy(10)
+
+
+
+
class BaseTest(unittest.TestCase):
    # Integration tests for QMF query handling: object-id queries,
    # package queries, and predicate queries against the objects served
    # by the two _agentApp agent threads started in setUp().
    # NOTE(review): requires a running broker at self.broker.

    def configure(self, config):
        # Invoked by the test harness to supply broker connection settings.
        self.config = config
        self.broker = config.broker
        self.defines = self.config.defines

    def setUp(self):
        # one second agent indication interval
        self.agent_heartbeat = 1
        self.agent1 = _agentApp("agent1", self.broker, self.agent_heartbeat)
        self.agent1.start_app()
        self.agent2 = _agentApp("agent2", self.broker, self.agent_heartbeat)
        self.agent2.start_app()

    def tearDown(self):
        if self.agent1:
            self.agent1.stop_app()
            self.agent1 = None
        if self.agent2:
            self.agent2.stop_app()
            self.agent2 = None

    def test_all_oids(self):
        # create console
        # find agents
        # synchronous query for all schemas
        # synchronous query for all objects per schema
        # verify known object ids are returned
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # first, find objects per schema
            query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
            sid_list = self.console.do_query(agent, query)
            self.assertTrue(sid_list and len(sid_list) == 1)
            for sid in sid_list:
                t_params = {QmfData.KEY_SCHEMA_ID: sid}
                query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT_ID,
                                                 _target_params=t_params)

                oid_list = self.console.do_query(agent, query)

                self.assertTrue(isinstance(oid_list, type([])),
                                "Unexpected return type")
                self.assertTrue(len(oid_list) == 3, "Wrong count")
                # ids of described objects are the concatenated index values
                self.assertTrue('100a name' in oid_list)
                self.assertTrue('99another name' in oid_list)
                self.assertTrue('50my name' in oid_list)
                self.assertTrue('01545' not in oid_list)


            # now, find all unmanaged objects (no schema)
            query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT_ID)
            oid_list = self.console.do_query(agent, query)

            self.assertTrue(isinstance(oid_list, type([])),
                            "Unexpected return type")
            self.assertTrue(len(oid_list) == 4, "Wrong count")
            self.assertTrue('100a name' not in oid_list)
            self.assertTrue('99another name' not in oid_list)
            self.assertTrue('01545' in oid_list)
            self.assertTrue('01544' in oid_list)
            self.assertTrue('01543' in oid_list)
            self.assertTrue('01546' in oid_list)

        self.console.destroy(10)


    def test_direct_oids(self):
        # create console
        # find agents
        # synchronous query for each objects
        # verify objects and schemas are correct
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # first, find objects per schema
            query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
            sid_list = self.console.do_query(agent, query)
            self.assertTrue(sid_list and len(sid_list) == 1)

            for oid in ['100a name', '99another name']:
                query = QmfQuery.create_id_object(oid, sid_list[0])
                obj_list = self.console.do_query(agent, query)

                self.assertTrue(isinstance(obj_list, type([])),
                                "Unexpected return type")
                self.assertTrue(len(obj_list) == 1)
                obj = obj_list[0]
                self.assertTrue(isinstance(obj, QmfData))
                self.assertTrue(obj.get_object_id() == oid)
                self.assertTrue(obj.get_schema_class_id() == sid_list[0])
                schema_id = obj.get_schema_class_id()
                self.assertTrue(isinstance(schema_id, SchemaClassId))
                self.assertTrue(obj.is_described())

            # now find schema-less objects
            for oid in ['01545']:
                query = QmfQuery.create_id_object(oid)
                obj_list = self.console.do_query(agent, query)

                self.assertTrue(isinstance(obj_list, type([])),
                                "Unexpected return type")
                self.assertTrue(len(obj_list) == 1)
                obj = obj_list[0]
                self.assertTrue(isinstance(obj, QmfData))
                self.assertTrue(obj.get_object_id() == oid)
                self.assertFalse(obj.is_described())

        self.console.destroy(10)



    def test_packages(self):
        # create console
        # find agents
        # synchronous query all package names
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            query = QmfQuery.create_wildcard(QmfQuery.TARGET_PACKAGES)
            package_list = self.console.do_query(agent, query)
            self.assertTrue(len(package_list) == 1)
            self.assertTrue('MyPackage' in package_list)


        self.console.destroy(10)



    def test_predicate_schema_id(self):
        # create console
        # find agents
        # synchronous query for all schema by package name
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # predicate: schema's package name == "MyPackage"
            query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA,
                                              [QmfQuery.EQ,
                                               SchemaClassId.KEY_PACKAGE,
                                               [QmfQuery.QUOTE, "MyPackage"]])

            schema_list = self.console.do_query(agent, query)
            self.assertTrue(len(schema_list))
            for schema in schema_list:
                self.assertTrue(schema.get_class_id().get_package_name() ==
                                "MyPackage")


        self.console.destroy(10)



    def test_predicate_no_match(self):
        # create console
        # find agents
        # synchronous query for all schema by package name
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # no schema carries this package name, so the query must
            # return an empty list rather than fail
            query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA,
                                              [QmfQuery.EQ,
                                               [QmfQuery.UNQUOTE, SchemaClassId.KEY_PACKAGE],
                                               [QmfQuery.QUOTE, "No-Such-Package"]])

            schema_list = self.console.do_query(agent, query)
            self.assertTrue(len(schema_list) == 0)

        self.console.destroy(10)


    def test_predicate_match_string(self):
        # create console
        # find agents
        # synchronous query for all objects with a value named
        # set_string which is < or equal to "UNSET"
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # get the schema id for MyPackage:MyClass schema
            query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA_ID,
                                              [QmfQuery.AND,
                                               [QmfQuery.EQ, SchemaClassId.KEY_PACKAGE,
                                                [QmfQuery.QUOTE, "MyPackage"]],
                                               [QmfQuery.EQ, SchemaClassId.KEY_CLASS,
                                                [QmfQuery.QUOTE, "MyClass"]]])
            sid_list = self.console.do_query(agent, query)
            self.assertTrue(len(sid_list) == 1)

            # two of the three described objects have set_string == "UNSET"
            query = QmfQuery.create_predicate(QmfQuery.TARGET_OBJECT,
                                              [QmfQuery.AND,
                                               [QmfQuery.EXISTS, [QmfQuery.QUOTE, "set_string"]],
                                               [QmfQuery.EQ, "set_string", [QmfQuery.QUOTE, "UNSET"]]],
                                              _target_params={QmfData.KEY_SCHEMA_ID: sid_list[0]})
            obj_list = self.console.do_query(agent, query)
            self.assertTrue(len(obj_list) == 2)
            for obj in obj_list:
                self.assertTrue(obj.has_value("set_string"))
                self.assertTrue(obj.get_value("set_string") == "UNSET")

        self.console.destroy(10)



    def test_predicate_match_integer(self):
        # create console
        # find agents
        # synchronous query for all objects with a value named
        # "index1" which is < or equal to various values
        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        self.conn.open()
        self.console.add_connection(self.conn)

        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

            # Query the unmanaged (no schema) objects

            # == 50
            query = QmfQuery.create_predicate(QmfQuery.TARGET_OBJECT,
                                              [QmfQuery.AND,
                                               [QmfQuery.EXISTS, [QmfQuery.QUOTE, "index1"]],
                                               [QmfQuery.EQ, "index1", 50]])

            obj_list = self.console.do_query(agent, query)
            self.assertTrue(len(obj_list) == 1)
            self.assertTrue(obj_list[0].has_value("index1"))
            self.assertTrue(obj_list[0].get_value("index1") == 50)

            # <= 50
            query = QmfQuery.create_predicate(QmfQuery.TARGET_OBJECT,
                                              [QmfQuery.AND,
                                               [QmfQuery.EXISTS, [QmfQuery.QUOTE, "index1"]],
                                               [QmfQuery.LE, "index1", 50]])

            obj_list = self.console.do_query(agent, query)
            self.assertTrue(len(obj_list) == 3)
            for obj in obj_list:
                self.assertTrue(obj.has_value("index1"))
                self.assertTrue(obj.get_value("index1") <= 50)


            # > 50
            query = QmfQuery.create_predicate(QmfQuery.TARGET_OBJECT,
                                              [QmfQuery.AND,
                                               [QmfQuery.EXISTS, [QmfQuery.QUOTE, "index1"]],
                                               [QmfQuery.GT, "index1", 50]])

            obj_list = self.console.do_query(agent, query)
            self.assertTrue(len(obj_list) == 1)
            for obj in obj_list:
                self.assertTrue(obj.has_value("index1"))
                self.assertTrue(obj.get_value("index1") > 50)

        self.console.destroy(10)
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/console_test.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/console_test.py
new file mode 100644
index 0000000000..ac0e064f20
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/console_test.py
@@ -0,0 +1,175 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import logging
+import time
+from threading import Semaphore
+
+
+from qpid.messaging import *
+from qmf2.common import (Notifier, QmfQuery, QmfQueryPredicate, MsgKey,
+ SchemaClassId, SchemaClass, QmfData)
+from qmf2.console import Console
+
+
class ExampleNotifier(Notifier):
    # Simple Notifier implementation: a semaphore released by the QMF
    # daemon thread (indication) and acquired by the application thread
    # (waitForWork).
    def __init__(self):
        self._sema4 = Semaphore(0) # locked

    def indication(self):
        # called on the QMF daemon thread when work is available
        self._sema4.release()

    def waitForWork(self):
        # called by the application thread; blocks until indication() fires
        print("Waiting for event...")
        self._sema4.acquire()
        print("...event present")
+
+
# Demo/driver script: connects a Console to a local broker, restricts
# agent discovery to "qmf.testAgent", then loops processing work items —
# querying the discovered agent's objects and invoking methods on them.
logging.getLogger().setLevel(logging.INFO)

print( "Starting Connection" )
_c = Connection("localhost")
_c.connect()

print( "Starting Console" )

_notifier = ExampleNotifier()
_myConsole = Console(notifier=_notifier)
_myConsole.addConnection( _c )

# Allow discovery only for the agent named "qmf.testAgent"
# @todo: replace "manual" query construction with
# a formal class-based Query API
_query = QmfQuery.create_predicate(QmfQuery.TARGET_AGENT,
                                   QmfQueryPredicate({QmfQuery.CMP_EQ:
                                                      [QmfQuery.KEY_AGENT_NAME,
                                                       "qmf.testAgent"]}))
_myConsole.enable_agent_discovery(_query)

# NOTE(review): _done is never set inside the loop (the except handler
# that would set it is commented out), so the script runs until killed.
_done = False
while not _done:
#    try:
    _notifier.waitForWork()

    _wi = _myConsole.get_next_workitem(timeout=0)
    while _wi:
        print("!!! work item received %d:%s" % (_wi.get_type(),
                                                str(_wi.get_params())))


        if _wi.get_type() == _wi.AGENT_ADDED:
            _agent = _wi.get_params().get("agent")
            if not _agent:
                print("!!!! AGENT IN REPLY IS NULL !!! ")

            # fetch all object ids known to the new agent
            _query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT_ID)
            oid_list = _myConsole.doQuery(_agent, _query)

            print("!!!************************** REPLY=%s" % oid_list)

            for oid in oid_list:
                _query = QmfQuery.create_id(QmfQuery.TARGET_OBJECT,
                                            oid)
                obj_list = _myConsole.doQuery(_agent, _query)

                print("!!!************************** REPLY=%s" % obj_list)

                if obj_list is None:
                    obj_list={}

                for obj in obj_list:
                    resp = obj.invoke_method( "set_meth",
                                              {"arg_int": -11,
                                               "arg_str": "are we not goons?"},
                                              None,
                                              3)
                    if resp is None:
                        print("!!!*** NO RESPONSE FROM METHOD????")
                    else:
                        print("!!! method succeeded()=%s" % resp.succeeded())
                        print("!!! method exception()=%s" % resp.get_exception())
                        print("!!! method get args() = %s" % resp.get_arguments())

                    # exercise the error path: call a bogus method on
                    # schema-less objects
                    if not obj.is_described():
                        resp = obj.invoke_method( "bad method",
                                                  {"arg_int": -11,
                                                   "arg_str": "are we not goons?"},
                                                  None,
                                                  3)
                        if resp is None:
                            print("!!!*** NO RESPONSE FROM METHOD????")
                        else:
                            print("!!! method succeeded()=%s" % resp.succeeded())
                            print("!!! method exception()=%s" % resp.get_exception())
                            print("!!! method get args() = %s" % resp.get_arguments())


                    #---------------------------------
                    #_query = QmfQuery.create_id(QmfQuery.TARGET_OBJECT, "99another name")

                    #obj_list = _myConsole.doQuery(_agent, _query)

                    #---------------------------------

                    # _query = QmfQuery.create_wildcard(QmfQuery.TARGET_PACKAGES)

                    # package_list = _myConsole.doQuery(_agent, _query)

                    # for pname in package_list:
                    #     print("!!! Querying for schema from package: %s" % pname)
                    #     _query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA_ID,
                    #                                        QmfQueryPredicate(
                    #             {QmfQuery.CMP_EQ: [SchemaClassId.KEY_PACKAGE, pname]}))

                    #     schema_id_list = _myConsole.doQuery(_agent, _query)
                    #     for sid in schema_id_list:
                    #         _query = QmfQuery.create_predicate(QmfQuery.TARGET_SCHEMA,
                    #                                            QmfQueryPredicate(
                    #                 {QmfQuery.CMP_EQ: [SchemaClass.KEY_SCHEMA_ID,
                    #                                    sid.map_encode()]}))

                    #         schema_list = _myConsole.doQuery(_agent, _query)
                    #         for schema in schema_list:
                    #             sid = schema.get_class_id()
                    #             _query = QmfQuery.create_predicate(
                    #                 QmfQuery.TARGET_OBJECT_ID,
                    #                 QmfQueryPredicate({QmfQuery.CMP_EQ:
                    #                                    [QmfData.KEY_SCHEMA_ID,
                    #                                     sid.map_encode()]}))

                    #             oid_list = _myConsole.doQuery(_agent, _query)
                    #             for oid in oid_list:
                    #                 _query = QmfQuery.create_id(
                    #                     QmfQuery.TARGET_OBJECT, oid)
                    #                 _reply = _myConsole.doQuery(_agent, _query)

                    #                 print("!!!************************** REPLY=%s" % _reply)


        _myConsole.release_workitem(_wi)
        _wi = _myConsole.get_next_workitem(timeout=0)
#    except:
#        logging.info( "shutting down..." )
#        _done = True

print( "Removing connection" )
_myConsole.removeConnection( _c, 10 )

print( "Destroying console:" )
_myConsole.destroy( 10 )

print( "******** console test done ********" )
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/events.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/events.py
new file mode 100644
index 0000000000..624c9b3823
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/events.py
@@ -0,0 +1,202 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import time
+import datetime
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qpid.harness import Skipped
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData, SchemaEventClass,
+ QmfEvent)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+
class _testNotifier(Notifier):
    """Notifier backed by a threading.Event, bridging the QMF daemon
    thread (which signals) and the application thread (which waits)."""

    def __init__(self):
        self._event = Event()

    def indication(self):
        # note: called by qmf daemon thread
        self._event.set()

    def wait_for_work(self, timeout):
        # note: called by application thread to wait
        # for qmf to generate work.  Returns True when work arrived,
        # False when the wait timed out.
        self._event.wait(timeout)
        if not self._event.isSet():
            return False
        self._event.clear()
        return True
+
+
class _agentApp(Thread):
    # QMF agent running on its own thread.  Registers one event schema
    # (MyPackage:MyClass) and raises a warning event roughly once per
    # second until stop_app() is called.
    # NOTE(review): uses Python 2 syntax ("except E, e", long()) like the
    # rest of this prototype — not Python 3 compatible.

    def __init__(self, name, broker_url, heartbeat):
        Thread.__init__(self)
        self.timeout = 3
        self.broker_url = broker_url
        self.notifier = _testNotifier()
        self.agent = Agent(name,
                           _notifier=self.notifier,
                           heartbeat_interval=heartbeat)

        # Dynamically construct a management database

        _schema = SchemaEventClass(_classId=SchemaClassId("MyPackage",
                                                          "MyClass",
                                                          stype=SchemaClassId.TYPE_EVENT),
                                   _desc="A test event schema")
        # add properties
        _schema.add_property( "prop-1", SchemaProperty(qmfTypes.TYPE_UINT8))
        _schema.add_property( "prop-2", SchemaProperty(qmfTypes.TYPE_LSTR))

        # Add schema to Agent
        self.schema = _schema
        self.agent.register_object_class(_schema)

        self.running = False
        self.ready = Event()

    def start_app(self):
        # Start the thread and block until run() signals a successful
        # broker connection (or 10 seconds elapse).
        self.running = True
        self.start()
        self.ready.wait(10)
        if not self.ready.is_set():
            raise Exception("Agent failed to connect to broker.")
        # time.sleep(1)

    def stop_app(self):
        # Ask run() to exit and wait for the thread to terminate.
        self.running = False
        # wake main thread
        self.notifier.indication() # hmmm... collide with daemon???
        self.join(self.timeout)
        if self.isAlive():
            raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")

    def run(self):
        # Thread body: connect (skipping the test if no broker is up),
        # then raise one event per loop iteration, draining any
        # unexpected work items, until stop_app() clears self.running.
        # broker_url = "user/passwd@hostname:port"
        conn = qpid.messaging.Connection(self.broker_url)
        try:
            conn.open()
        except qpid.messaging.ConnectError, e:
            raise Skipped(e)

        self.agent.set_connection(conn)
        self.ready.set()

        counter = 1
        while self.running:
            # post an event every second
            event = QmfEvent.create(long(time.time() * 1000),
                                    QmfEvent.SEV_WARNING,
                                    {"prop-1": counter,
                                     "prop-2": str(datetime.datetime.utcnow())},
                                    _schema_id=self.schema.get_class_id())
            counter += 1
            self.agent.raise_event(event)
            wi = self.agent.get_next_workitem(timeout=0)
            while wi is not None:
                logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
                self.agent.release_workitem(wi)
                wi = self.agent.get_next_workitem(timeout=0)
            self.notifier.wait_for_work(1)

        self.agent.remove_connection(self.timeout)
        self.agent.destroy(self.timeout)
+
+
+
class BaseTest(unittest.TestCase):
    # Integration test for QMF event delivery: two agents raise periodic
    # events; the console must receive at least one event from each.
    # NOTE(review): Python 2 syntax ("except E, e") — matches the rest of
    # this prototype.

    def configure(self, config):
        # Invoked by the test harness to supply broker connection settings.
        self.config = config
        self.broker = config.broker
        self.defines = self.config.defines

    def setUp(self):
        # one second agent indication interval
        self.agent1 = _agentApp("agent1", self.broker, 1)
        self.agent1.start_app()
        self.agent2 = _agentApp("agent2", self.broker, 1)
        self.agent2.start_app()

    def tearDown(self):
        if self.agent1:
            self.agent1.stop_app()
            self.agent1 = None
        if self.agent2:
            self.agent2.stop_app()
            self.agent2 = None

    def test_get_events(self):
        # create console
        # find agents

        self.notifier = _testNotifier()
        self.console = qmf2.console.Console(notifier=self.notifier,
                                            agent_timeout=3)
        self.conn = qpid.messaging.Connection(self.broker)
        try:
            self.conn.open()
        except qpid.messaging.ConnectError, e:
            raise Skipped(e)

        self.console.add_connection(self.conn)

        # find the agents
        for aname in ["agent1", "agent2"]:
            agent = self.console.find_agent(aname, timeout=3)
            self.assertTrue(agent and agent.get_name() == aname)

        # now wait for events
        agent1_events = agent2_events = 0
        wi = self.console.get_next_workitem(timeout=4)
        while wi:
            if wi.get_type() == wi.EVENT_RECEIVED:
                event = wi.get_params().get("event")
                self.assertTrue(isinstance(event, QmfEvent))
                self.assertTrue(event.get_severity() == QmfEvent.SEV_WARNING)
                self.assertTrue(event.get_value("prop-1") > 0)

                agent = wi.get_params().get("agent")
                if not agent or not isinstance(agent, qmf2.console.Agent):
                    self.fail("Unexpected workitem from agent")
                else:
                    if agent.get_name() == "agent1":
                        agent1_events += 1
                    elif agent.get_name() == "agent2":
                        agent2_events += 1
                    else:
                        self.fail("Unexpected agent name received: %s" %
                                  agent.get_name())
                    # stop once both agents have been heard from
                    if agent1_events and agent2_events:
                        break;

            wi = self.console.get_next_workitem(timeout=4)

        self.assertTrue(agent1_events > 0 and agent2_events > 0)

        self.console.destroy(10)
+
+
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/multi_response.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/multi_response.py
new file mode 100644
index 0000000000..991fa0114e
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/multi_response.py
@@ -0,0 +1,280 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+# Test sizing: schema and object counts per agent must each exceed the
+# per-message object limit so that query replies are forced to span
+# multiple response messages (the behavior under test).
+_SCHEMAS_PER_AGENT=7
+_OBJS_PER_AGENT=19
+_MAX_OBJS_PER_MSG=3
+
+
+class _testNotifier(Notifier):
+ # Minimal Notifier implementation backed by a threading.Event:
+ # the QMF daemon thread signals work via indication(), and the
+ # application thread blocks in wait_for_work().
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ # Returns True if work was signalled, False on timeout.
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ # Test agent running on its own thread. Builds _SCHEMAS_PER_AGENT
+ # schemas, each with _OBJS_PER_AGENT managed objects, and caps the
+ # agent's reply size at _MAX_OBJS_PER_MSG objects per message so
+ # console queries exercise multi-message responses.
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.schema_count = _SCHEMAS_PER_AGENT
+ self.obj_count = _OBJS_PER_AGENT
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat,
+ max_msg_size=_MAX_OBJS_PER_MSG)
+
+ # Dynamically construct a management database
+ for i in range(self.schema_count):
+ _schema = SchemaObjectClass( _classId=SchemaClassId("MyPackage",
+ "MyClass-" + str(i)),
+ _desc="A test data schema",
+ _object_id_names=["index1", "index2"] )
+ # add properties; index1/index2 together form the object id
+ _schema.add_property( "index1", SchemaProperty(qmfTypes.TYPE_UINT8))
+ _schema.add_property( "index2", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # these two properties are statistics
+ _schema.add_property( "query_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "method_call_count", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # These two properties can be set via the method call
+ _schema.add_property( "set_string", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "set_int", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ # add method
+ _meth = SchemaMethod( _desc="Method to set string and int in object." )
+ _meth.add_argument( "arg_int", SchemaProperty(qmfTypes.TYPE_UINT32) )
+ _meth.add_argument( "arg_str", SchemaProperty(qmfTypes.TYPE_LSTR) )
+ _schema.add_method( "set_meth", _meth )
+
+ # Add schema to Agent
+
+ self.agent.register_object_class(_schema)
+
+ # instantiate managed data objects matching the schema
+
+ for j in range(self.obj_count):
+
+ self.agent.add_object( QmfAgentData( self.agent, _schema=_schema,
+ _values={"index1":j,
+ "index2": "name-" + str(j),
+ "set_string": "UNSET",
+ "set_int": 0,
+ "query_count": 0,
+ "method_call_count": 0} ))
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ # Launch the agent thread and block until it reports a broker
+ # connection (or the 10 second grace period expires).
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ # Request shutdown and wake the run() loop, then wait for the
+ # thread to exit.
+ self.running = False
+ # wake main thread.  NOTE(review): indication() is normally called
+ # by the qmf daemon thread; calling it here too is assumed safe
+ # since Event.set() is thread-safe -- confirm.
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ # This agent expects no workitems; any that arrive are logged as
+ # errors and released.
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+
+
+class BaseTest(unittest.TestCase):
+ # Verifies that console queries whose replies span multiple messages
+ # (agent max_msg_size < schema/object counts) still return complete
+ # result sets.
+ def configure(self, config):
+ self.agent_count = 2
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
+ def setUp(self):
+ # one second agent indication interval
+ self.agent_heartbeat = 1
+ self.agents = []
+ for a in range(self.agent_count):
+ agent = _agentApp("agent-" + str(a),
+ self.broker,
+ self.agent_heartbeat)
+ agent.start_app()
+ self.agents.append(agent)
+
+ def tearDown(self):
+ # stop every agent started by setUp()
+ for agent in self.agents:
+ if agent is not None:
+ agent.stop_app()
+
+ def test_all_schema_id(self):
+ # create console
+ # find agents
+ # synchronous query for all schemas_ids
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ agent = self.console.find_agent(agent_app.agent.get_name(), timeout=3)
+ self.assertTrue(agent and agent.get_name() == agent_app.agent.get_name())
+
+ # get a list of all schema_ids; the full count must survive the
+ # multi-message reply
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
+ sid_list = self.console.do_query(agent, query)
+ self.assertTrue(sid_list and len(sid_list) == _SCHEMAS_PER_AGENT)
+ for sid in sid_list:
+ self.assertTrue(isinstance(sid, SchemaClassId))
+ self.assertTrue(sid.get_package_name() == "MyPackage")
+ self.assertTrue(sid.get_class_name().split('-')[0] == "MyClass")
+
+ self.console.destroy(10)
+
+
+ def test_all_schema(self):
+ # create console
+ # find agents
+ # synchronous query for all schemas
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ agent = self.console.find_agent(agent_app.agent.get_name(), timeout=3)
+ self.assertTrue(agent and agent.get_name() == agent_app.agent.get_name())
+
+ # get a list of all schemas (full objects, not just ids)
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA)
+ schema_list = self.console.do_query(agent, query)
+ self.assertTrue(schema_list and
+ len(schema_list) == _SCHEMAS_PER_AGENT)
+ for schema in schema_list:
+ self.assertTrue(isinstance(schema, SchemaObjectClass))
+
+ self.console.destroy(10)
+
+
+ def test_all_object_id(self):
+ # create console
+ # find agents
+ # synchronous query for all object_ids by schema_id
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ agent = self.console.find_agent(agent_app.agent.get_name(), timeout=3)
+ self.assertTrue(agent and agent.get_name() == agent_app.agent.get_name())
+
+ # get a list of all schema_ids, then the object ids under each
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
+ sid_list = self.console.do_query(agent, query)
+ self.assertTrue(sid_list and len(sid_list) == _SCHEMAS_PER_AGENT)
+ for sid in sid_list:
+ query = QmfQuery.create_wildcard_object_id(sid)
+ oid_list = self.console.do_query(agent, query)
+ self.assertTrue(oid_list and
+ len(oid_list) == _OBJS_PER_AGENT)
+ for oid in oid_list:
+ self.assertTrue(isinstance(oid, basestring))
+
+ self.console.destroy(10)
+
+
+ def test_all_objects(self):
+ # create console
+ # find agents
+ # synchronous query for all objects by schema_id
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ agent = self.console.find_agent(agent_app.agent.get_name(), timeout=3)
+ self.assertTrue(agent and agent.get_name() == agent_app.agent.get_name())
+
+ # get a list of all schema_ids, then the full data objects under each
+ query = QmfQuery.create_wildcard(QmfQuery.TARGET_SCHEMA_ID)
+ sid_list = self.console.do_query(agent, query)
+ self.assertTrue(sid_list and len(sid_list) == _SCHEMAS_PER_AGENT)
+ for sid in sid_list:
+ query = QmfQuery.create_wildcard_object(sid)
+ obj_list = self.console.do_query(agent, query)
+ self.assertTrue(obj_list and
+ len(obj_list) == _OBJS_PER_AGENT)
+ for obj in obj_list:
+ self.assertTrue(isinstance(obj,
+ qmf2.console.QmfConsoleData))
+
+ self.console.destroy(10)
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/obj_gets.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/obj_gets.py
new file mode 100644
index 0000000000..695b096973
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/obj_gets.py
@@ -0,0 +1,581 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+import datetime
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+
+class _testNotifier(Notifier):
+ # Minimal Notifier implementation backed by a threading.Event:
+ # the QMF daemon thread signals work via indication(), and the
+ # application thread blocks in wait_for_work().
+ def __init__(self):
+ self._event = Event()
+
+ def indication(self):
+ # note: called by qmf daemon thread
+ self._event.set()
+
+ def wait_for_work(self, timeout):
+ # note: called by application thread to wait
+ # for qmf to generate work
+ # Returns True if work was signalled, False on timeout.
+ self._event.wait(timeout)
+ timed_out = self._event.isSet() == False
+ if not timed_out:
+ self._event.clear()
+ return True
+ return False
+
+
+class _agentApp(Thread):
+ # Test agent running on its own thread. Publishes a small, fixed
+ # management database (see layout below) used by the get_objects()
+ # console tests.
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat)
+
+ # Management Database
+ # - two different schema packages,
+ # - two classes within one schema package
+ # - multiple objects per schema package+class
+ # - two "undescribed" objects
+
+ # "package1/class1" - keyed by "key", two instances
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package1", "class1"),
+ _desc="A test data schema - one",
+ _object_id_names=["key"] )
+
+ _schema.add_property( "key", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "count1", SchemaProperty(qmfTypes.TYPE_UINT32))
+ _schema.add_property( "count2", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p1c1_key1"},
+ _schema=_schema)
+ _obj.set_value("count1", 0)
+ _obj.set_value("count2", 0)
+ self.agent.add_object( _obj )
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p1c1_key2"},
+ _schema=_schema )
+ _obj.set_value("count1", 9)
+ _obj.set_value("count2", 10)
+ self.agent.add_object( _obj )
+
+ # "package1/class2" - keyed by "name", one instance
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package1", "class2"),
+ _desc="A test data schema - two",
+ _object_id_names=["name"] )
+ # add properties
+ _schema.add_property( "name", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "string1", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"name":"p1c2_name1"},
+ _schema=_schema )
+ _obj.set_value("string1", "a data string")
+ self.agent.add_object( _obj )
+
+
+ # "package2/class1" - keyed by "key", two instances
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package2", "class1"),
+ _desc="A test data schema - second package",
+ _object_id_names=["key"] )
+
+ _schema.add_property( "key", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "counter", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p2c1_key1"},
+ _schema=_schema )
+ _obj.set_value("counter", 0)
+ self.agent.add_object( _obj )
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p2c1_key2"},
+ _schema=_schema )
+ _obj.set_value("counter", 2112)
+ self.agent.add_object( _obj )
+
+
+ # add two "unstructured" objects to the Agent (no schema)
+
+ _obj = QmfAgentData(self.agent, _object_id="undesc-1")
+ _obj.set_value("field1", "a value")
+ _obj.set_value("field2", 2)
+ _obj.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj.set_value("field4", ["a", "list", "value"])
+ self.agent.add_object(_obj)
+
+
+ _obj = QmfAgentData(self.agent, _object_id="undesc-2")
+ _obj.set_value("key-1", "a value")
+ _obj.set_value("key-2", 2)
+ self.agent.add_object(_obj)
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ # Launch the agent thread and block until it reports a broker
+ # connection (or the 10 second grace period expires).
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ # Request shutdown, wake the run() loop, and wait for thread exit.
+ self.running = False
+ # NOTE(review): indication() is normally called by the qmf daemon
+ # thread; assumed safe here since Event.set() is thread-safe.
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ # This agent expects no workitems; any that arrive are logged as
+ # errors and released.
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+class BaseTest(unittest.TestCase):
+ # Exercises Console.get_objects() filtering: by object id, package,
+ # package+class, explicit schema id, and agent subsets. Expected
+ # counts derive from the per-agent database built by _agentApp:
+ # package1 has 3 objects (2x class1, 1x class2), package2 has 2,
+ # plus 2 schema-less ("undescribed") objects per agent.
+ agent_count = 5
+
+ def configure(self, config):
+ self.config = config
+ self.broker = config.broker
+ self.defines = self.config.defines
+
+ def setUp(self):
+ # start agent_count agents, each with a 1 second heartbeat
+ self.agents = []
+ for i in range(self.agent_count):
+ agent = _agentApp("agent-" + str(i), self.broker, 1)
+ agent.start_app()
+ self.agents.append(agent)
+ #print("!!!! STARTING TEST: %s" % datetime.datetime.utcnow())
+
+ def tearDown(self):
+ #print("!!!! STOPPING TEST: %s" % datetime.datetime.utcnow())
+ for agent in self.agents:
+ if agent is not None:
+ agent.stop_app()
+
+
+ def test_all_agents(self):
+ # create console
+ # find all agents
+ # synchronous query for all objects by id
+ # verify known object ids are returned
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ aname = agent_app.agent.get_name()
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # console has discovered all agents, now query all undesc-2 objects
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_object_id="undesc-2", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_object_id() == "undesc-2")
+
+ # now query all objects from schema "package1" (3 per agent)
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package1", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == (self.agent_count * 3))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+
+ # now query all objects from schema "package2" (2 per agent)
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package2", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == (self.agent_count * 2))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+
+ # now query all objects from schema "package1/class2" (1 per agent)
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package1", _cname="class2",
+ _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class2")
+
+ # given the schema identifier from the last query, repeat using the
+ # specific schema id
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ schema_id = objs[0].get_schema_class_id()
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_schema_id=schema_id, _timeout=5)
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id() == schema_id)
+
+
+ self.console.destroy(10)
+
+
+
+ def test_agent_subset(self):
+ # create console
+ # find all agents
+ # synchronous query for all objects by id
+ # verify known object ids are returned
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ agent_list = []
+ for agent_app in self.agents:
+ aname = agent_app.agent.get_name()
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+ agent_list.append(agent)
+
+ # Only use a subset of the agents:
+ agent_list = agent_list[:len(agent_list)/2]
+
+ # console has discovered all agents, now query all undesc-2 objects
+ # (counts below scale with the subset size, not agent_count)
+ objs = self.console.get_objects(_object_id="undesc-2",
+ _agents=agent_list, _timeout=5)
+ self.assertTrue(len(objs) == len(agent_list))
+ for obj in objs:
+ self.assertTrue(obj.get_object_id() == "undesc-2")
+
+ # now query all objects from schema "package1"
+ objs = self.console.get_objects(_pname="package1",
+ _agents=agent_list,
+ _timeout=5)
+ self.assertTrue(len(objs) == (len(agent_list) * 3))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+
+ # now query all objects from schema "package2"
+ objs = self.console.get_objects(_pname="package2",
+ _agents=agent_list,
+ _timeout=5)
+ self.assertTrue(len(objs) == (len(agent_list) * 2))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+
+ # now query all objects from schema "package1/class2"
+ objs = self.console.get_objects(_pname="package1", _cname="class2",
+ _agents=agent_list,
+ _timeout=5)
+ self.assertTrue(len(objs) == len(agent_list))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class2")
+
+ # given the schema identifier from the last query, repeat using the
+ # specific schema id
+ schema_id = objs[0].get_schema_class_id()
+ objs = self.console.get_objects(_schema_id=schema_id,
+ _agents=agent_list,
+ _timeout=5)
+ self.assertTrue(len(objs) == len(agent_list))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id() == schema_id)
+
+
+ self.console.destroy(10)
+
+
+
+ def test_single_agent(self):
+ # create console
+ # find all agents
+ # synchronous query for all objects by id
+ # verify known object ids are returned
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ agent_list = []
+ for agent_app in self.agents:
+ aname = agent_app.agent.get_name()
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+ agent_list.append(agent)
+
+ # Only use one agent (counts below are per-agent counts)
+ agent = agent_list[0]
+
+ # console has discovered all agents, now query all undesc-2 objects
+ objs = self.console.get_objects(_object_id="undesc-2",
+ _agents=agent, _timeout=5)
+ self.assertTrue(len(objs) == 1)
+ for obj in objs:
+ self.assertTrue(obj.get_object_id() == "undesc-2")
+
+ # now query all objects from schema "package1"
+ objs = self.console.get_objects(_pname="package1",
+ _agents=agent,
+ _timeout=5)
+ self.assertTrue(len(objs) == 3)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+
+ # now query all objects from schema "package2"
+ objs = self.console.get_objects(_pname="package2",
+ _agents=agent,
+ _timeout=5)
+ self.assertTrue(len(objs) == 2)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+
+ # now query all objects from schema "package1/class2"
+ objs = self.console.get_objects(_pname="package1", _cname="class2",
+ _agents=agent,
+ _timeout=5)
+ self.assertTrue(len(objs) == 1)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class2")
+
+ # given the schema identifier from the last query, repeat using the
+ # specific schema id
+ schema_id = objs[0].get_schema_class_id()
+ objs = self.console.get_objects(_schema_id=schema_id,
+ _agents=agent,
+ _timeout=5)
+ self.assertTrue(len(objs) == 1)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id() == schema_id)
+
+
+ self.console.destroy(10)
+
+
+
+ def test_all_objs_by_oid(self):
+ # create console
+ # find all agents
+ # synchronous query for all described objects by:
+ # oid & schema_id
+ # oid & package name
+ # oid & package and class name
+ # verify known object ids are returned
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ aname = agent_app.agent.get_name()
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # now query all objects from schema "package1"
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package1",
+ _object_id="p1c1_key1", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+ self.assertTrue(obj.get_object_id() == "p1c1_key1")
+ # mooch the schema for a later test
+ schema_id_p1c1 = objs[0].get_schema_class_id()
+
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package1",
+ _object_id="p1c2_name1", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class2")
+ self.assertTrue(obj.get_object_id() == "p1c2_name1")
+
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package2", _cname="class1",
+ _object_id="p2c1_key1", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+ self.assertTrue(obj.get_object_id() == "p2c1_key1")
+
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_schema_id=schema_id_p1c1,
+ _object_id="p1c1_key2", _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+ self.assertTrue(obj.get_object_id() == "p1c1_key2")
+
+ # this should return all "undescribed" objects
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(len(objs) == (self.agent_count * 2))
+ for obj in objs:
+ self.assertTrue(obj.get_object_id() == "undesc-1" or
+ obj.get_object_id() == "undesc-2")
+
+ # these should fail (no matching object id anywhere)
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_schema_id=schema_id_p1c1,
+ _object_id="does not exist",
+ _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(objs == None)
+
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package2",
+ _object_id="does not exist",
+ _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(objs == None)
+
+ #print("!!!! STARTING GET: %s" % datetime.datetime.utcnow())
+ objs = self.console.get_objects(_pname="package3",
+ _object_id="does not exist",
+ _timeout=5)
+ #print("!!!! STOPPING GET: %s" % datetime.datetime.utcnow())
+ self.assertTrue(objs == None)
+
+ self.console.destroy(10)
+
+
+ def test_wildcard_schema_id(self):
+ # create console
+ # find all agents
+ # synchronous query for all described objects by:
+ # oid & wildcard schema_id
+ # wildcard schema_id
+ # verify known object ids are returned
+ self.notifier = _testNotifier()
+ self.console = qmf2.console.Console(notifier=self.notifier,
+ agent_timeout=3)
+ self.conn = qpid.messaging.Connection(self.broker)
+ self.conn.open()
+ self.console.add_connection(self.conn)
+
+ for agent_app in self.agents:
+ aname = agent_app.agent.get_name()
+ agent = self.console.find_agent(aname, timeout=3)
+ self.assertTrue(agent and agent.get_name() == aname)
+
+ # package1/class1 has two instances per agent
+ wild_schema_id = SchemaClassId("package1", "class1")
+ objs = self.console.get_objects(_schema_id=wild_schema_id, _timeout=5)
+ self.assertTrue(len(objs) == (self.agent_count * 2))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+
+ # package1/class2 has one instance per agent
+ wild_schema_id = SchemaClassId("package1", "class2")
+ objs = self.console.get_objects(_schema_id=wild_schema_id, _timeout=5)
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class2")
+ self.assertTrue(obj.get_object_id() == "p1c2_name1")
+
+ # package2/class1 has two instances per agent
+ wild_schema_id = SchemaClassId("package2", "class1")
+ objs = self.console.get_objects(_schema_id=wild_schema_id, _timeout=5)
+ self.assertTrue(len(objs) == (self.agent_count * 2))
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+
+ # narrow by schema id plus a specific object id
+ wild_schema_id = SchemaClassId("package1", "class1")
+ objs = self.console.get_objects(_schema_id=wild_schema_id,
+ _object_id="p1c1_key2", _timeout=5)
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package1")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+ self.assertTrue(obj.get_object_id() == "p1c1_key2")
+
+ # should fail
+ objs = self.console.get_objects(_schema_id=wild_schema_id,
+ _object_id="does not exist",
+ _timeout=5)
+ self.assertTrue(objs == None)
+
+ wild_schema_id = SchemaClassId("package2", "class1")
+ objs = self.console.get_objects(_schema_id=wild_schema_id,
+ _object_id="p2c1_key2", _timeout=5)
+ self.assertTrue(len(objs) == self.agent_count)
+ for obj in objs:
+ self.assertTrue(obj.get_schema_class_id().get_package_name() == "package2")
+ self.assertTrue(obj.get_schema_class_id().get_class_name() == "class1")
+ self.assertTrue(obj.get_object_id() == "p2c1_key2")
+
+ # should fail (class name not registered by any agent)
+ wild_schema_id = SchemaClassId("package1", "bad-class")
+ objs = self.console.get_objects(_schema_id=wild_schema_id,
+ _object_id="p1c1_key2", _timeout=5)
+ self.assertTrue(objs == None)
+
+ self.console.destroy(10)
+
diff --git a/qpid/extras/qmf/src/py/qmf2-prototype/tests/subscriptions.py b/qpid/extras/qmf/src/py/qmf2-prototype/tests/subscriptions.py
new file mode 100644
index 0000000000..5c39af4b32
--- /dev/null
+++ b/qpid/extras/qmf/src/py/qmf2-prototype/tests/subscriptions.py
@@ -0,0 +1,983 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import unittest
+import logging
+import datetime
+import time
+from threading import Thread, Event
+
+import qpid.messaging
+from qmf2.common import (Notifier, SchemaObjectClass, SchemaClassId,
+ SchemaProperty, qmfTypes, SchemaMethod, QmfQuery,
+ QmfData, WorkItem)
+import qmf2.console
+from qmf2.agent import(QmfAgentData, Agent)
+
+
+class _testNotifier(Notifier):
+    """Notifier that bridges the QMF daemon thread and the test's
+    application thread using a threading.Event."""
+    def __init__(self):
+        self._event = Event()
+
+    def indication(self):
+        # note: called by qmf daemon thread
+        self._event.set()
+
+    def wait_for_work(self, timeout):
+        # note: called by application thread to wait
+        # for qmf to generate work
+        # Returns True if work arrived (event was set, and is cleared
+        # here), False if the wait timed out.  A timeout of None blocks
+        # until the event is set.
+        self._event.wait(timeout)
+        timed_out = self._event.isSet() == False
+        if not timed_out:
+            self._event.clear()
+            return True
+        return False
+
+
+class _agentApp(Thread):
+ def __init__(self, name, broker_url, heartbeat):
+ Thread.__init__(self)
+ self.notifier = _testNotifier()
+ self.broker_url = broker_url
+ self.agent = Agent(name,
+ _notifier=self.notifier,
+ heartbeat_interval=heartbeat,
+ max_duration=10,
+ default_duration=7,
+ min_duration=5,
+ min_interval=1,
+ default_interval=2)
+
+ # Management Database
+ # - two different schema packages,
+ # - two classes within one schema package
+ # - multiple objects per schema package+class
+ # - two "undescribed" objects
+
+ # "package1/class1"
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package1", "class1"),
+ _desc="A test data schema - one",
+ _object_id_names=["key"] )
+
+ _schema.add_property( "key", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ # note: count1 is continuous, count2 is not
+ count1_prop = SchemaProperty.create(qmfTypes.TYPE_UINT32,
+ continuous=True)
+ _schema.add_property( "count1", count1_prop)
+ count2_prop = SchemaProperty.create(qmfTypes.TYPE_UINT32,
+ continuous=False)
+ _schema.add_property( "count2", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p1c1_key1"},
+ _schema=_schema)
+ _obj.set_value("count1", 0)
+ _obj.set_value("count2", 0)
+ self.agent.add_object( _obj )
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p1c1_key2"},
+ _schema=_schema )
+ _obj.set_value("count1", 9)
+ _obj.set_value("count2", 10)
+ self.agent.add_object( _obj )
+
+ # "package1/class2"
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package1", "class2"),
+ _desc="A test data schema - two",
+ _object_id_names=["name"] )
+ # add properties
+ _schema.add_property( "name", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "string1", SchemaProperty(qmfTypes.TYPE_LSTR))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"name":"p1c2_name1"},
+ _schema=_schema )
+ _obj.set_value("string1", "a data string")
+ self.agent.add_object( _obj )
+
+ _obj = QmfAgentData( self.agent,
+ _values={"name":"p1c2_name2"},
+ _schema=_schema )
+ _obj.set_value("string1", "another data string")
+ self.agent.add_object( _obj )
+
+
+ # "package2/class1"
+
+ _schema = SchemaObjectClass( _classId=SchemaClassId("package2", "class1"),
+ _desc="A test data schema - second package",
+ _object_id_names=["key"] )
+
+ _schema.add_property( "key", SchemaProperty(qmfTypes.TYPE_LSTR))
+ _schema.add_property( "counter", SchemaProperty(qmfTypes.TYPE_UINT32))
+
+ self.agent.register_object_class(_schema)
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p2c1_key1"},
+ _schema=_schema )
+ _obj.set_value("counter", 0)
+ self.agent.add_object( _obj )
+
+ _obj = QmfAgentData( self.agent,
+ _values={"key":"p2c1_key2"},
+ _schema=_schema )
+ _obj.set_value("counter", 2112)
+ self.agent.add_object( _obj )
+
+
+ # add two "unstructured" objects to the Agent
+
+ _obj = QmfAgentData(self.agent, _object_id="undesc-1")
+ _obj.set_value("field1", "a value")
+ _obj.set_value("field2", 2)
+ _obj.set_value("field3", {"a":1, "map":2, "value":3})
+ _obj.set_value("field4", ["a", "list", "value"])
+ self.agent.add_object(_obj)
+
+
+ _obj = QmfAgentData(self.agent, _object_id="undesc-2")
+ _obj.set_value("key-1", "a value")
+ _obj.set_value("key-2", 2)
+ self.agent.add_object(_obj)
+
+ self.running = False
+ self.ready = Event()
+
+ def start_app(self):
+ self.running = True
+ self.start()
+ self.ready.wait(10)
+ if not self.ready.is_set():
+ raise Exception("Agent failed to connect to broker.")
+
+ def stop_app(self):
+ self.running = False
+ self.notifier.indication() # hmmm... collide with daemon???
+ self.join(10)
+ if self.isAlive():
+ raise Exception("AGENT DID NOT TERMINATE AS EXPECTED!!!")
+
+ def run(self):
+ # broker_url = "user/passwd@hostname:port"
+ self.conn = qpid.messaging.Connection(self.broker_url)
+ self.conn.open()
+ self.agent.set_connection(self.conn)
+ self.ready.set()
+
+ while self.running:
+ self.notifier.wait_for_work(None)
+ wi = self.agent.get_next_workitem(timeout=0)
+ while wi is not None:
+ logging.error("UNEXPECTED AGENT WORKITEM RECEIVED=%s" % wi.get_type())
+ self.agent.release_workitem(wi)
+ wi = self.agent.get_next_workitem(timeout=0)
+
+ if self.conn:
+ self.agent.remove_connection(10)
+ self.agent.destroy(10)
+
+
+class BaseTest(unittest.TestCase):
+ agent_count = 5
+
+    def configure(self, config):
+        # Store the test-harness configuration (broker address and
+        # defines) supplied by the qpid test framework before setUp().
+        self.config = config
+        self.broker = config.broker
+        self.defines = self.config.defines
+
+    def setUp(self):
+        # Start agent_count agent applications, each connected to the
+        # broker with a 1-second heartbeat; start_app raises if any
+        # agent cannot connect.
+        self.agents = []
+        for i in range(self.agent_count):
+            agent = _agentApp("agent-" + str(i), self.broker, 1)
+            agent.start_app()
+            self.agents.append(agent)
+        #print("!!!! STARTING TEST: %s" % datetime.datetime.utcnow())
+
+    def tearDown(self):
+        #print("!!!! STOPPING TEST: %s" % datetime.datetime.utcnow())
+        # Stop every agent thread started in setUp; stop_app raises if
+        # an agent fails to terminate within its join timeout.
+        for agent in self.agents:
+            if agent is not None:
+                agent.stop_app()
+
+
+    def test_sync_by_schema(self):
+        """Blocking subscription on a schema-wildcard query against every
+        agent; expects exactly one publish (of both matching objects) per
+        subscription before the subscriptions lapse."""
+        # create console
+        # find all agents
+        # subscribe to changes to any object in package1/class1
+        # should succeed - verify 1 publish
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        subscriptions = []
+        index = 0
+
+        # query to match all objects in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        t_params = {QmfData.KEY_SCHEMA_ID: sid}
+        query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
+                                         _target_params=t_params)
+        for agent_app in self.agents:
+            aname = agent_app.agent.get_name()
+            agent = self.console.find_agent(aname, timeout=3)
+            self.assertTrue(agent and agent.get_name() == aname)
+
+            # now subscribe to agent
+
+            # the subscription handle is the agent's index into
+            # the subscriptions list
+            sp = self.console.create_subscription(agent,
+                                                  query,
+                                                  index)
+            self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+            self.assertTrue(sp.succeeded())
+            self.assertTrue(sp.get_error() == None)
+            self.assertTrue(sp.get_duration() == 10)
+            self.assertTrue(sp.get_publish_interval() == 2)
+
+            subscriptions.append([sp, 0])
+            index += 1
+
+        # now wait for the (2 * interval) and count the updates
+        r_count = 0
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                reply = wi.get_params()
+                self.assertTrue(isinstance(reply, type([])))
+                self.assertTrue(len(reply) == 2)
+                for obj in reply:
+                    self.assertTrue(isinstance(obj, QmfData))
+                    self.assertTrue(obj.get_object_id() == "p1c1_key2" or
+                                    obj.get_object_id() == "p1c1_key1")
+                sid = reply[0].get_schema_class_id()
+                self.assertTrue(isinstance(sid, SchemaClassId))
+                self.assertTrue(sid.get_package_name() == "package1")
+                self.assertTrue(sid.get_class_name() == "class1")
+
+                self.assertTrue(wi.get_handle() < len(subscriptions))
+                subscriptions[wi.get_handle()][1] += 1
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect 1 publish per subscription
+        self.assertTrue(r_count == 5)
+        for ii in range(len(subscriptions)):
+            self.assertTrue(subscriptions[ii][1] == 1)
+
+        self.console.destroy(10)
+
+
+    def test_sync_by_obj_id(self):
+        """Blocking subscription by object-id only (no schema) on the
+        schema-less object "undesc-2", against every agent; expects one
+        publish per subscription."""
+        # create console
+        # find all agents
+        # subscribe, by object-id only, to the schema-less object "undesc-2"
+        # should succeed
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        subscriptions = []
+        index = 0
+
+        # query to match the undescribed object "undesc-2" (no schema id)
+        # sid = SchemaClassId.create("package1", "class1")
+        # t_params = {QmfData.KEY_SCHEMA_ID: sid}
+        query = QmfQuery.create_id_object("undesc-2")
+
+        for agent_app in self.agents:
+            aname = agent_app.agent.get_name()
+            agent = self.console.find_agent(aname, timeout=3)
+            self.assertTrue(agent and agent.get_name() == aname)
+
+            # now subscribe to agent
+
+            sp = self.console.create_subscription(agent,
+                                                  query,
+                                                  index)
+            self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+            self.assertTrue(sp.succeeded())
+            self.assertTrue(sp.get_error() == None)
+
+            subscriptions.append([sp, 0])
+            index += 1
+
+        # now wait for all subscriptions to expire (2x interval w/o
+        # indications)
+        r_count = 0
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                reply = wi.get_params()
+                self.assertTrue(isinstance(reply, type([])))
+                self.assertTrue(len(reply) == 1)
+                self.assertTrue(isinstance(reply[0], QmfData))
+                self.assertTrue(reply[0].get_object_id() == "undesc-2")
+                self.assertTrue(wi.get_handle() < len(subscriptions))
+                subscriptions[wi.get_handle()][1] += 1
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect 1 publish per subscription
+        self.assertTrue(r_count == 5)
+        for ii in range(len(subscriptions)):
+            self.assertTrue(subscriptions[ii][1] == 1)
+
+        self.console.destroy(10)
+
+
+    def test_sync_by_obj_id_schema(self):
+        """Blocking subscription by object-id plus schema-id on object
+        "p2c1_key2" in package2/class1, against every agent; expects one
+        publish per subscription."""
+        # create console
+        # find all agents
+        # subscribe to object "p2c1_key2" in schema package2/class1
+        # should succeed
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        subscriptions = []
+        index = 0
+
+        # query to match object "p2c1_key2" in schema package2/class1
+        sid = SchemaClassId.create("package2", "class1")
+        query = QmfQuery.create_id_object("p2c1_key2", sid)
+
+        for agent_app in self.agents:
+            aname = agent_app.agent.get_name()
+            agent = self.console.find_agent(aname, timeout=3)
+            self.assertTrue(agent and agent.get_name() == aname)
+
+            # now subscribe to agent
+
+            sp = self.console.create_subscription(agent,
+                                                  query,
+                                                  index)
+            self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+            self.assertTrue(sp.succeeded())
+            self.assertTrue(sp.get_error() == None)
+
+            subscriptions.append([sp, 0])
+            index += 1
+
+        # now wait for all subscriptions to expire (2x interval w/o
+        # indications)
+        r_count = 0
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                reply = wi.get_params()
+                self.assertTrue(isinstance(reply, type([])))
+                self.assertTrue(len(reply) == 1)
+                self.assertTrue(isinstance(reply[0], QmfData))
+                self.assertTrue(reply[0].get_object_id() == "p2c1_key2")
+                sid = reply[0].get_schema_class_id()
+                self.assertTrue(isinstance(sid, SchemaClassId))
+                self.assertTrue(sid.get_package_name() == "package2")
+                self.assertTrue(sid.get_class_name() == "class1")
+                self.assertTrue(wi.get_handle() < len(subscriptions))
+                subscriptions[wi.get_handle()][1] += 1
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect 1 publish per subscription
+        self.assertTrue(r_count == 5)
+        for ii in range(len(subscriptions)):
+            self.assertTrue(subscriptions[ii][1] == 1)
+
+        self.console.destroy(10)
+
+
+
+    def test_sync_refresh(self):
+        """Blocking subscription refresh: repeatedly touch the continuous
+        property to force publishes, refresh the subscription after the
+        third indication, and verify publishes keep arriving (> 5)."""
+        # create console
+        # find one agent
+        # subscribe to object "p1c1_key2" in package1/class1
+        # after 3 data indications, refresh
+        # verify > 5 more data indications received
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        # query to match object "p1c1_key2" in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        query = QmfQuery.create_id_object("p1c1_key2", sid)
+
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        sp = self.console.create_subscription(agent,
+                                              query,
+                                              "my-handle")
+        self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+        self.assertTrue(sp.succeeded())
+        self.assertTrue(sp.get_error() == None)
+
+        # refresh after three subscribe indications, count all
+        # indications to verify refresh worked
+        r_count = 0
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                reply = wi.get_params()
+                self.assertTrue(isinstance(reply, type([])))
+                self.assertTrue(len(reply) == 1)
+                self.assertTrue(isinstance(reply[0], QmfData))
+                self.assertTrue(reply[0].get_object_id() == "p1c1_key2")
+                sid = reply[0].get_schema_class_id()
+                self.assertTrue(isinstance(sid, SchemaClassId))
+                self.assertTrue(sid.get_package_name() == "package1")
+                self.assertTrue(sid.get_class_name() == "class1")
+                self.assertTrue(wi.get_handle() == "my-handle")
+
+                # count1 is continuous, touching it will force a
+                # publish on the interval
+                self.assertTrue(sid is not None)
+                test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                self.assertTrue(test_obj is not None)
+                test_obj.set_value("count1", r_count)
+
+                self.console.release_workitem(wi)
+
+                if r_count == 3:
+                    rp = self.console.refresh_subscription(sp.get_subscription_id())
+                    self.assertTrue(rp)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect 5 publish per subscription, more if refreshed
+        self.assertTrue(r_count > 5)
+
+        self.console.destroy(10)
+
+
+
+    def test_sync_cancel(self):
+        """Blocking subscription cancel: touch the continuous property to
+        force publishes, cancel after the third indication, and verify no
+        further indications arrive (exactly 3)."""
+        # create console
+        # find one agent
+        # subscribe to object "p1c1_key2" in package1/class1
+        # after 3 data indications, cancel subscription
+        # verify exactly 3 data indications received
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        # query to match object "p1c1_key2" in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        query = QmfQuery.create_id_object("p1c1_key2", sid)
+
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        sp = self.console.create_subscription(agent,
+                                              query,
+                                              "my-handle")
+        self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+        self.assertTrue(sp.succeeded())
+        self.assertTrue(sp.get_error() == None)
+
+        # cancel after three subscribe indications, count all
+        # indications to verify the cancel stopped further publishes
+        r_count = 0
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                reply = wi.get_params()
+                self.assertTrue(isinstance(reply, type([])))
+                self.assertTrue(len(reply) == 1)
+                self.assertTrue(isinstance(reply[0], QmfData))
+                self.assertTrue(reply[0].get_object_id() == "p1c1_key2")
+                sid = reply[0].get_schema_class_id()
+                self.assertTrue(isinstance(sid, SchemaClassId))
+                self.assertTrue(sid.get_package_name() == "package1")
+                self.assertTrue(sid.get_class_name() == "class1")
+                self.assertTrue(wi.get_handle() == "my-handle")
+
+                # count1 is continuous, touching it will force a
+                # publish on the interval
+                self.assertTrue(sid is not None)
+                test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                self.assertTrue(test_obj is not None)
+                test_obj.set_value("count1", r_count)
+
+                self.console.release_workitem(wi)
+
+                if r_count == 3:
+                    self.console.cancel_subscription(sp.get_subscription_id())
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect only 3 publish received before cancel
+        self.assertTrue(r_count == 3)
+
+        self.console.destroy(10)
+
+
+    def test_async_by_obj_id_schema(self):
+        """Non-blocking subscription to object "p2c1_key2" in
+        package2/class1: expects one SUBSCRIBE_RESPONSE workitem followed
+        by one SUBSCRIBE_INDICATION publish."""
+        # create console
+        # find one agent
+        # async subscribe to object "p2c1_key2" in package2/class1
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        # query to match object "p2c1_key2" in schema package2/class1
+        sid = SchemaClassId.create("package2", "class1")
+        query = QmfQuery.create_id_object("p2c1_key2", sid)
+
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        rc = self.console.create_subscription(agent,
+                                              query,
+                                              "my-handle",
+                                              _blocking=False)
+        self.assertTrue(rc)
+
+        r_count = 0
+        sp = None
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                if wi.get_type() == WorkItem.SUBSCRIBE_RESPONSE:
+                    self.assertTrue(wi.get_handle() == "my-handle")
+                    sp = wi.get_params()
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    self.assertTrue(sp.succeeded())
+                    self.assertTrue(sp.get_error() == None)
+                else:
+                    self.assertTrue(wi.get_type() ==
+                                    WorkItem.SUBSCRIBE_INDICATION)
+                    # sp better be set up by now!
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 1)
+                    self.assertTrue(isinstance(reply[0], QmfData))
+                    self.assertTrue(reply[0].get_object_id() == "p2c1_key2")
+                    sid = reply[0].get_schema_class_id()
+                    self.assertTrue(isinstance(sid, SchemaClassId))
+                    self.assertTrue(sid.get_package_name() == "package2")
+                    self.assertTrue(sid.get_class_name() == "class1")
+                    self.assertTrue(wi.get_handle() == "my-handle")
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # one response + one publish = 2
+        self.assertTrue(r_count == 2)
+
+        self.console.destroy(10)
+
+    def test_async_refresh(self):
+        """Non-blocking subscription refresh: touch the continuous
+        property on each indication, refresh after the fourth workitem
+        (reply + 3 data), and verify more than 5 indications arrive."""
+        # create console
+        # find one agent
+        # async subscribe to object "p1c1_key2" in package1/class1
+        # refresh after third data indication
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        # query to match object "p1c1_key2" in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        query = QmfQuery.create_id_object("p1c1_key2", sid)
+
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        rc = self.console.create_subscription(agent,
+                                              query,
+                                              "my-handle",
+                                              _blocking=False)
+        self.assertTrue(rc)
+
+        # refresh after three subscribe indications, count all
+        # indications to verify refresh worked
+        r_count = 0
+        i_count = 0
+        sp = None
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                if wi.get_type() == WorkItem.SUBSCRIBE_RESPONSE:
+                    self.assertTrue(wi.get_handle() == "my-handle")
+                    sp = wi.get_params()
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    self.assertTrue(sp.succeeded())
+                    self.assertTrue(sp.get_error() == None)
+                else:
+                    self.assertTrue(wi.get_type() ==
+                                    WorkItem.SUBSCRIBE_INDICATION)
+                    i_count += 1
+                    # sp better be set up by now!
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 1)
+                    self.assertTrue(isinstance(reply[0], QmfData))
+                    self.assertTrue(reply[0].get_object_id() == "p1c1_key2")
+                    sid = reply[0].get_schema_class_id()
+                    self.assertTrue(isinstance(sid, SchemaClassId))
+                    self.assertTrue(sid.get_package_name() == "package1")
+                    self.assertTrue(sid.get_class_name() == "class1")
+                    self.assertTrue(wi.get_handle() == "my-handle")
+
+                    # count1 is continuous, touching it will force a
+                    # publish on the interval
+                    self.assertTrue(sid is not None)
+                    test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                    self.assertTrue(test_obj is not None)
+                    test_obj.set_value("count1", r_count)
+
+                if r_count == 4: # 3 data + 1 subscribe reply
+                    rp = self.console.refresh_subscription(sp.get_subscription_id())
+                    self.assertTrue(rp)
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect 5 publish per subscription, more if refreshed
+        self.assertTrue(sp is not None)
+        self.assertTrue(i_count > 5)
+
+        self.console.destroy(10)
+
+
+    def test_async_cancel(self):
+        """Non-blocking subscription cancel: touch the continuous
+        property on each indication, cancel after the third workitem
+        (reply + 2 data), and verify no further workitems arrive."""
+        # create console
+        # find one agent
+        # async subscribe to object "p1c1_key2" in package1/class1
+        # cancel after the third workitem (reply + 2 data indications)
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        # query to match object "p1c1_key2" in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        query = QmfQuery.create_id_object("p1c1_key2", sid)
+
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        rc = self.console.create_subscription(agent,
+                                              query,
+                                              "my-handle",
+                                              _blocking=False)
+        self.assertTrue(rc)
+
+        r_count = 0
+        sp = None
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                if wi.get_type() == WorkItem.SUBSCRIBE_RESPONSE:
+                    self.assertTrue(wi.get_handle() == "my-handle")
+                    sp = wi.get_params()
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    self.assertTrue(sp.succeeded())
+                    self.assertTrue(sp.get_error() == None)
+                else:
+                    self.assertTrue(wi.get_type() ==
+                                    WorkItem.SUBSCRIBE_INDICATION)
+                    # sp better be set up by now!
+                    self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 1)
+                    self.assertTrue(isinstance(reply[0], QmfData))
+                    self.assertTrue(reply[0].get_object_id() == "p1c1_key2")
+                    sid = reply[0].get_schema_class_id()
+                    self.assertTrue(isinstance(sid, SchemaClassId))
+                    self.assertTrue(sid.get_package_name() == "package1")
+                    self.assertTrue(sid.get_class_name() == "class1")
+                    self.assertTrue(wi.get_handle() == "my-handle")
+
+                    # count1 is continuous, touching it will force a
+                    # publish on the interval
+                    self.assertTrue(sid is not None)
+                    test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                    self.assertTrue(test_obj is not None)
+                    test_obj.set_value("count1", r_count)
+
+                if r_count == 3:
+                    self.console.cancel_subscription(sp.get_subscription_id())
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect cancel after 3 replies
+        self.assertTrue(r_count == 3)
+
+        self.console.destroy(10)
+
+
+
+
+    def test_sync_periodic_publish_continuous(self):
+        """Rate-limit check for a continuous property: update count1 on
+        every indication and verify the agent coalesces changes into at
+        most one publish per interval (fewer than 10 indications)."""
+        # create console
+        # find all agents
+        # subscribe to changes to any object in package1/class1
+        # should succeed - verify 1 publish
+        # Change continuous property on each publish,
+        # should only see 1 publish per interval
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        subscriptions = []
+        index = 0
+
+        # query to match all objects in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        t_params = {QmfData.KEY_SCHEMA_ID: sid}
+        query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
+                                         _target_params=t_params)
+        # find an agent
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        sp = self.console.create_subscription(agent,
+                                              query,
+                                              "some-handle")
+        self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+        self.assertTrue(sp.succeeded())
+        self.assertTrue(sp.get_error() == None)
+        self.assertTrue(sp.get_duration() == 10)
+        self.assertTrue(sp.get_publish_interval() == 2)
+
+        # now wait for the (2 * interval) and count the updates
+        r_count = 0
+        sid = None
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                self.assertTrue(wi.get_handle() == "some-handle")
+                if r_count == 1:
+                    # first indication - returns all matching objects
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 2)
+                    for obj in reply:
+                        self.assertTrue(isinstance(obj, QmfData))
+                        self.assertTrue(obj.get_object_id() == "p1c1_key2" or
+                                        obj.get_object_id() == "p1c1_key1")
+                        sid = obj.get_schema_class_id()
+                        self.assertTrue(isinstance(sid, SchemaClassId))
+                        self.assertTrue(sid.get_package_name() == "package1")
+                        self.assertTrue(sid.get_class_name() == "class1")
+
+                else:
+                    # verify publish of modified object only!
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 1)
+                    obj = reply[0]
+                    self.assertTrue(isinstance(obj, QmfData))
+                    self.assertTrue(obj.get_object_id() == "p1c1_key2")
+                    self.assertTrue(obj.get_value("count1") == r_count - 1)
+                    # fail test if we receive more than expected
+                    self.assertTrue(r_count < 10)
+
+
+                # now update one of the objects!
+                self.assertTrue(sid is not None)
+                test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                self.assertTrue(test_obj is not None)
+                test_obj.set_value("count1", r_count)
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect at most 1 publish per interval seen
+        self.assertTrue(r_count < 10)
+
+        self.console.destroy(10)
+
+
+
+
+    def test_sync_periodic_publish_noncontinuous(self):
+        """Publish-per-update check for a non-continuous property: update
+        count2 on each of the first 20 indications and verify each update
+        results in its own publish (more than 10 indications total)."""
+        # create console, find agent
+        # subscribe to changes to any object in package1/class1
+        # should succeed - verify 1 publish
+        # Change noncontinuous property on each publish,
+        # should only see 1 publish per each update
+        self.notifier = _testNotifier()
+        self.console = qmf2.console.Console(notifier=self.notifier,
+                                              agent_timeout=3)
+        self.conn = qpid.messaging.Connection(self.broker)
+        self.conn.open()
+        self.console.add_connection(self.conn)
+
+        subscriptions = []
+        index = 0
+
+        # query to match all objects in schema package1/class1
+        sid = SchemaClassId.create("package1", "class1")
+        t_params = {QmfData.KEY_SCHEMA_ID: sid}
+        query = QmfQuery.create_wildcard(QmfQuery.TARGET_OBJECT,
+                                         _target_params=t_params)
+        # find an agent
+        agent_app = self.agents[0]
+        aname = agent_app.agent.get_name()
+        agent = self.console.find_agent(aname, timeout=3)
+        self.assertTrue(agent and agent.get_name() == aname)
+
+        # setup subscription on agent
+
+        sp = self.console.create_subscription(agent,
+                                              query,
+                                              "some-handle")
+        self.assertTrue(isinstance(sp, qmf2.console.SubscribeParams))
+        self.assertTrue(sp.succeeded())
+        self.assertTrue(sp.get_error() == None)
+        self.assertTrue(sp.get_duration() == 10)
+        self.assertTrue(sp.get_publish_interval() == 2)
+
+        # now wait for the (2 * interval) and count the updates
+        r_count = 0
+        sid = None
+        while self.notifier.wait_for_work(4):
+            wi = self.console.get_next_workitem(timeout=0)
+            while wi is not None:
+                r_count += 1
+                self.assertTrue(wi.get_type() == WorkItem.SUBSCRIBE_INDICATION)
+                self.assertTrue(wi.get_handle() == "some-handle")
+                if r_count == 1:
+                    # first indication - returns all matching objects
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 2)
+                    for obj in reply:
+                        self.assertTrue(isinstance(obj, QmfData))
+                        self.assertTrue(obj.get_object_id() == "p1c1_key2" or
+                                        obj.get_object_id() == "p1c1_key1")
+                        sid = obj.get_schema_class_id()
+                        self.assertTrue(isinstance(sid, SchemaClassId))
+                        self.assertTrue(sid.get_package_name() == "package1")
+                        self.assertTrue(sid.get_class_name() == "class1")
+
+                else:
+                    # verify publish of modified object only!
+                    reply = wi.get_params()
+                    self.assertTrue(isinstance(reply, type([])))
+                    self.assertTrue(len(reply) == 1)
+                    obj = reply[0]
+                    self.assertTrue(isinstance(obj, QmfData))
+                    self.assertTrue(obj.get_object_id() == "p1c1_key2")
+                    self.assertTrue(obj.get_value("count2") == r_count - 1)
+                    # fail test if we receive more than expected
+                    self.assertTrue(r_count < 30)
+
+
+                # now update the noncontinuous field of one of the objects!
+                if r_count < 20:
+                    self.assertTrue(sid is not None)
+                    test_obj = agent_app.agent.get_object("p1c1_key2", sid)
+                    self.assertTrue(test_obj is not None)
+                    test_obj.set_value("count2", r_count)
+
+                self.console.release_workitem(wi)
+
+                wi = self.console.get_next_workitem(timeout=0)
+
+        # expect at least 1 publish per update
+        self.assertTrue(r_count > 10)
+
+        self.console.destroy(10)
diff --git a/qpid/extras/sasl/LICENSE b/qpid/extras/sasl/LICENSE
new file mode 100644
index 0000000000..cff2a5e25d
--- /dev/null
+++ b/qpid/extras/sasl/LICENSE
@@ -0,0 +1,234 @@
+=========================================================================
+== Apache License ==
+=========================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+=========================================================================
+== Boost License ==
+=========================================================================
+
+Boost Software License - Version 1.0 - August 17th, 2003
+
+Permission is hereby granted, free of charge, to any person or organization
+obtaining a copy of the software and accompanying documentation covered by
+this license (the "Software") to use, reproduce, display, distribute,
+execute, and transmit the Software, and to prepare derivative works of the
+Software, and to permit third-parties to whom the Software is furnished to
+do so, all subject to the following:
+
+The copyright notices in the Software and this entire statement, including
+the above license grant, this restriction and the following disclaimer,
+must be included in all copies of the Software, in whole or in part, and
+all derivative works of the Software, unless such copies or derivative
+works are solely in the form of machine-executable object code generated by
+a source language processor.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
diff --git a/qpid/extras/sasl/Makefile.am b/qpid/extras/sasl/Makefile.am
new file mode 100644
index 0000000000..efa6d4f2d3
--- /dev/null
+++ b/qpid/extras/sasl/Makefile.am
@@ -0,0 +1,30 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+LIBTOOL_DEPS = @LIBTOOL_DEPS@
+
+AUTOMAKE_OPTIONS = 1.9.2 foreign
+ACLOCAL_AMFLAGS = -I m4
+
+EXTRA_DIST = LICENSE
+SUBDIRS = src python ruby
+
+# Update libtool, if needed.
+libtool: $(LIBTOOL_DEPS)
+ $(SHELL) ./config.status --recheck
diff --git a/qpid/extras/sasl/bootstrap b/qpid/extras/sasl/bootstrap
new file mode 100755
index 0000000000..906e5a71e4
--- /dev/null
+++ b/qpid/extras/sasl/bootstrap
@@ -0,0 +1,33 @@
+#!/bin/sh
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+set -e
+aclocal -I m4
+autoheader
+libtoolize --automake
+
+automake --add-missing
+autoconf
+
+if [ "$1" = "-build" -o "$1" = "--build" ] ; then
+ shift
+ ./configure "$@"
+ make
+ make check
+fi
diff --git a/qpid/extras/sasl/build-aux/compile b/qpid/extras/sasl/build-aux/compile
new file mode 100755
index 0000000000..1b1d232169
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/compile
@@ -0,0 +1,142 @@
+#! /bin/sh
+# Wrapper for compilers which do not understand `-c -o'.
+
+scriptversion=2005-05-14.22
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc.
+# Written by Tom Tromey <tromey@cygnus.com>.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+case $1 in
+ '')
+ echo "$0: No command. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: compile [--help] [--version] PROGRAM [ARGS]
+
+Wrapper for compilers which do not understand `-c -o'.
+Remove `-o dest.o' from ARGS, run PROGRAM with the remaining
+arguments, and rename the output as expected.
+
+If you are trying to build a whole package this is not the
+right script to run: please start by reading the file `INSTALL'.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "compile $scriptversion"
+ exit $?
+ ;;
+esac
+
+ofile=
+cfile=
+eat=
+
+for arg
+do
+ if test -n "$eat"; then
+ eat=
+ else
+ case $1 in
+ -o)
+ # configure might choose to run compile as `compile cc -o foo foo.c'.
+ # So we strip `-o arg' only if arg is an object.
+ eat=1
+ case $2 in
+ *.o | *.obj)
+ ofile=$2
+ ;;
+ *)
+ set x "$@" -o "$2"
+ shift
+ ;;
+ esac
+ ;;
+ *.c)
+ cfile=$1
+ set x "$@" "$1"
+ shift
+ ;;
+ *)
+ set x "$@" "$1"
+ shift
+ ;;
+ esac
+ fi
+ shift
+done
+
+if test -z "$ofile" || test -z "$cfile"; then
+ # If no `-o' option was seen then we might have been invoked from a
+ # pattern rule where we don't need one. That is ok -- this is a
+ # normal compilation that the losing compiler can handle. If no
+ # `.c' file was seen then we are probably linking. That is also
+ # ok.
+ exec "$@"
+fi
+
+# Name of file we expect compiler to create.
+cofile=`echo "$cfile" | sed -e 's|^.*/||' -e 's/\.c$/.o/'`
+
+# Create the lock directory.
+# Note: use `[/.-]' here to ensure that we don't use the same name
+# that we are using for the .o file. Also, base the name on the expected
+# object file name, since that is what matters with a parallel build.
+lockdir=`echo "$cofile" | sed -e 's|[/.-]|_|g'`.d
+while true; do
+ if mkdir "$lockdir" >/dev/null 2>&1; then
+ break
+ fi
+ sleep 1
+done
+# FIXME: race condition here if user kills between mkdir and trap.
+trap "rmdir '$lockdir'; exit 1" 1 2 15
+
+# Run the compile.
+"$@"
+ret=$?
+
+if test -f "$cofile"; then
+ mv "$cofile" "$ofile"
+elif test -f "${cofile}bj"; then
+ mv "${cofile}bj" "$ofile"
+fi
+
+rmdir "$lockdir"
+exit $ret
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/qpid/extras/sasl/build-aux/config.guess b/qpid/extras/sasl/build-aux/config.guess
new file mode 100755
index 0000000000..c93201a4d2
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/config.guess
@@ -0,0 +1,1501 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
+# Inc.
+
+timestamp='2006-11-08'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner <per@bothner.com>.
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# This script attempts to guess a canonical system name similar to
+# config.sub. If it succeeds, it prints the system name on stdout, and
+# exits with 0. Otherwise, it exits with 1.
+#
+# The plan is that this can be called by configure scripts if you
+# don't specify an explicit build system type.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep __ELF__ >/dev/null
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE="alpha" ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE="alpha" ;;
+ "EV5 (21164)")
+ UNAME_MACHINE="alphaev5" ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE="alphaev56" ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE="alphapca56" ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE="alphapca57" ;;
+ "EV6 (21264)")
+ UNAME_MACHINE="alphaev6" ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE="alphaev67" ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE="alphaev68" ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE="alphaev69" ;;
+ "EV7 (21364)")
+ UNAME_MACHINE="alphaev7" ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE="alphaev79" ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ exit ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm:riscos:*:*|arm:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:SunOS:5.*:*)
+ echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[45])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH="hppa2.0n" ;;
+ 64) HP_ARCH="hppa2.0w" ;;
+ '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = "hppa2.0w" ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep __LP64__ >/dev/null
+ then
+ HP_ARCH="hppa2.0w"
+ else
+ HP_ARCH="hppa64"
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ case ${UNAME_MACHINE} in
+ pc98)
+ echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ i*:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ x86:Interix*:[3456]*)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ EM64T:Interix*:[3456]* | authenticamd:Interix*:[3456]*)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+	# change UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ arm*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ cris:Linux:*:*)
+ echo cris-axis-linux-gnu
+ exit ;;
+ crisv32:Linux:*:*)
+ echo crisv32-axis-linux-gnu
+ exit ;;
+ frv:Linux:*:*)
+ echo frv-unknown-linux-gnu
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ mips:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mipsel
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^CPU/{
+ s: ::g
+ p
+ }'`"
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef mips64
+ #undef mips64el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=mips64el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=mips64
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^CPU/{
+ s: ::g
+ p
+ }'`"
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
+ ;;
+ or32:Linux:*:*)
+ echo or32-unknown-linux-gnu
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-gnu
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-gnu
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
+ if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-gnu ;;
+ PA8*) echo hppa2.0-unknown-linux-gnu ;;
+ *) echo hppa-unknown-linux-gnu ;;
+ esac
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-gnu
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-gnu
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-gnu
+ exit ;;
+ x86_64:Linux:*:*)
+ echo x86_64-unknown-linux-gnu
+ exit ;;
+ i*86:Linux:*:*)
+ # The BFD linker knows what the default object file format is, so
+ # first see if it will tell us. cd to the root directory to prevent
+ # problems with other programs or directories called `ld' in the path.
+ # Set LC_ALL=C to ensure ld outputs messages in English.
+ ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
+ | sed -ne '/supported targets:/!d
+ s/[ ][ ]*/ /g
+ s/.*supported targets: *//
+ s/ .*//
+ p'`
+ case "$ld_supported_targets" in
+ elf32-i386)
+ TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
+ ;;
+ a.out-i386-linux)
+ echo "${UNAME_MACHINE}-pc-linux-gnuaout"
+ exit ;;
+ coff-i386)
+ echo "${UNAME_MACHINE}-pc-linux-gnucoff"
+ exit ;;
+ "")
+ # Either a pre-BFD a.out linker (linux-gnuoldld) or
+ # one that does not give us useful --help.
+ echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
+ exit ;;
+ esac
+ # Determine whether the default compiler is a.out or elf
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <features.h>
+ #ifdef __ELF__
+ # ifdef __GLIBC__
+ # if __GLIBC__ >= 2
+ LIBC=gnu
+ # else
+ LIBC=gnulibc1
+ # endif
+ # else
+ LIBC=gnulibc1
+ # endif
+ #else
+ #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ LIBC=gnu
+ #else
+ LIBC=gnuaout
+ #endif
+ #endif
+ #ifdef __dietlibc__
+ LIBC=dietlibc
+ #endif
+EOF
+ eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
+ /^LIBC/{
+ s: ::g
+ p
+ }'`"
+ test x"${LIBC}" != x && {
+ echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
+ exit
+ }
+ test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
+ ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i386.
+ echo i386-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ case $UNAME_PROCESSOR in
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = "x86"; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NSE-?:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = "386"; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+esac
+
+#echo '(No uname command or uname output not recognized.)' 1>&2
+#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
+
+eval $set_cc_for_build
+cat >$dummy.c <<EOF
+#ifdef _SEQUENT_
+# include <sys/types.h>
+# include <sys/utsname.h>
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (__arm) && defined (__acorn) && defined (__unix)
+ printf ("arm-acorn-riscix\n"); exit (0);
+#endif
+
+#if defined (hp300) && !defined (hpux)
+ printf ("m68k-hp-bsd\n"); exit (0);
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+
+#endif
+
+#if defined (vax)
+# if !defined (ultrix)
+# include <sys/param.h>
+# if defined (BSD)
+# if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+# else
+# if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# endif
+# else
+ printf ("vax-dec-bsd\n"); exit (0);
+# endif
+# else
+ printf ("vax-dec-ultrix\n"); exit (0);
+# endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+
+test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
+
+# Convex versions that predate uname can use getsysinfo(1)
+
+if [ -x /usr/convex/getsysinfo ]
+then
+ case `getsysinfo -f cpu_type` in
+ c1*)
+ echo c1-convex-bsd
+ exit ;;
+ c2*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ c34*)
+ echo c34-convex-bsd
+ exit ;;
+ c38*)
+ echo c38-convex-bsd
+ exit ;;
+ c4*)
+ echo c4-convex-bsd
+ exit ;;
+ esac
+fi
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
+and
+ http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/qpid/extras/sasl/build-aux/config.rpath b/qpid/extras/sasl/build-aux/config.rpath
new file mode 100755
index 0000000000..c492a93b66
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/config.rpath
@@ -0,0 +1,614 @@
+#! /bin/sh
+# Output a system dependent set of variables, describing how to set the
+# run time search path of shared libraries in an executable.
+#
+# Copyright 1996-2006 Free Software Foundation, Inc.
+# Taken from GNU libtool, 2001
+# Originally by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996
+#
+# This file is free software; the Free Software Foundation gives
+# unlimited permission to copy and/or distribute it, with or without
+# modifications, as long as this notice is preserved.
+#
+# The first argument passed to this file is the canonical host specification,
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld
+# should be set by the caller.
+#
+# The set of defined variables is at the end of this script.
+
+# Known limitations:
+# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer
+# than 256 bytes, otherwise the compiler driver will dump core. The only
+# known workaround is to choose shorter directory names for the build
+# directory and/or the installation directory.
+
+# All known linkers require a `.a' archive for static linking (except MSVC,
+# which needs '.lib').
+libext=a
+shrext=.so
+
+host="$1"
+host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
+host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
+host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
+
+# Code taken from libtool.m4's _LT_CC_BASENAME.
+
+for cc_temp in $CC""; do
+ case $cc_temp in
+ compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+ distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+ \-*) ;;
+ *) break;;
+ esac
+done
+cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'`
+
+# Code taken from libtool.m4's AC_LIBTOOL_PROG_COMPILER_PIC.
+
+wl=
+if test "$GCC" = yes; then
+ wl='-Wl,'
+else
+ case "$host_os" in
+ aix*)
+ wl='-Wl,'
+ ;;
+ darwin*)
+ case $cc_basename in
+ xlc*)
+ wl='-Wl,'
+ ;;
+ esac
+ ;;
+ mingw* | pw32* | os2*)
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ wl='-Wl,'
+ ;;
+ irix5* | irix6* | nonstopux*)
+ wl='-Wl,'
+ ;;
+ newsos6)
+ ;;
+ linux*)
+ case $cc_basename in
+ icc* | ecc*)
+ wl='-Wl,'
+ ;;
+ pgcc | pgf77 | pgf90)
+ wl='-Wl,'
+ ;;
+ ccc*)
+ wl='-Wl,'
+ ;;
+ como)
+ wl='-lopt='
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ wl='-Wl,'
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ osf3* | osf4* | osf5*)
+ wl='-Wl,'
+ ;;
+ sco3.2v5*)
+ ;;
+ solaris*)
+ wl='-Wl,'
+ ;;
+ sunos4*)
+ wl='-Qoption ld '
+ ;;
+ sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+ wl='-Wl,'
+ ;;
+ sysv4*MP*)
+ ;;
+ unicos*)
+ wl='-Wl,'
+ ;;
+ uts4*)
+ ;;
+ esac
+fi
+
+# Code taken from libtool.m4's AC_LIBTOOL_PROG_LD_SHLIBS.
+
+hardcode_libdir_flag_spec=
+hardcode_libdir_separator=
+hardcode_direct=no
+hardcode_minus_L=no
+
+case "$host_os" in
+ cygwin* | mingw* | pw32*)
+ # FIXME: the MSVC++ port hasn't been tested in a loooong time
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ if test "$GCC" != yes; then
+ with_gnu_ld=no
+ fi
+ ;;
+ interix*)
+ # we just hope/assume this is gcc and not c89 (= MSVC++)
+ with_gnu_ld=yes
+ ;;
+ openbsd*)
+ with_gnu_ld=no
+ ;;
+esac
+
+ld_shlibs=yes
+if test "$with_gnu_ld" = yes; then
+ # Set some defaults for GNU ld with shared library support. These
+ # are reset later if shared libraries are not supported. Putting them
+ # here allows them to be overridden if necessary.
+ # Unlike libtool, we use -rpath here, not --rpath, since the documented
+ # option of GNU ld is called -rpath, not --rpath.
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ case "$host_os" in
+ aix3* | aix4* | aix5*)
+ # On AIX/PPC, the GNU linker is very broken
+ if test "$host_cpu" != ia64; then
+ ld_shlibs=no
+ fi
+ ;;
+ amigaos*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # Samuel A. Falvo II <kc5tja@dolphin.openprojects.net> reports
+ # that the semantics of dynamic libraries on AmigaOS, at least up
+ # to version 4, is to share data among multiple programs linked
+ # with the same dynamic library. Since this doesn't match the
+ # behavior of shared libraries on other platforms, we cannot use
+ # them.
+ ld_shlibs=no
+ ;;
+ beos*)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ cygwin* | mingw* | pw32*)
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec='-L$libdir'
+ if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ interix3*)
+ hardcode_direct=no
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ linux*)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ netbsd*)
+ ;;
+ solaris*)
+ if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then
+ ld_shlibs=no
+ elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+ case `$LD -v 2>&1` in
+ *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
+ ld_shlibs=no
+ ;;
+ *)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`'
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ ;;
+ sunos4*)
+ hardcode_direct=yes
+ ;;
+ *)
+ if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then
+ :
+ else
+ ld_shlibs=no
+ fi
+ ;;
+ esac
+ if test "$ld_shlibs" = no; then
+ hardcode_libdir_flag_spec=
+ fi
+else
+ case "$host_os" in
+ aix3*)
+ # Note: this linker hardcodes the directories in LIBPATH if there
+ # are no directories specified by -L.
+ hardcode_minus_L=yes
+ if test "$GCC" = yes; then
+ # Neither direct hardcoding nor static linking is supported with a
+ # broken collect2.
+ hardcode_direct=unsupported
+ fi
+ ;;
+ aix4* | aix5*)
+ if test "$host_cpu" = ia64; then
+ # On IA64, the linker does run time linking by default, so we don't
+ # have to do anything special.
+ aix_use_runtimelinking=no
+ else
+ aix_use_runtimelinking=no
+ # Test if we are trying to use run time linking or normal
+ # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+ # need to do runtime linking.
+ case $host_os in aix4.[23]|aix4.[23].*|aix5*)
+ for ld_flag in $LDFLAGS; do
+ if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ aix_use_runtimelinking=yes
+ break
+ fi
+ done
+ ;;
+ esac
+ fi
+ hardcode_direct=yes
+ hardcode_libdir_separator=':'
+ if test "$GCC" = yes; then
+ case $host_os in aix4.[012]|aix4.[012].*)
+ collect2name=`${CC} -print-prog-name=collect2`
+ if test -f "$collect2name" && \
+ strings "$collect2name" | grep resolve_lib_name >/dev/null
+ then
+ # We have reworked collect2
+ hardcode_direct=yes
+ else
+ # We have old collect2
+ hardcode_direct=unsupported
+ hardcode_minus_L=yes
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_libdir_separator=
+ fi
+ ;;
+ esac
+ fi
+ # Begin _LT_AC_SYS_LIBPATH_AIX.
+ echo 'int main () { return 0; }' > conftest.c
+ ${CC} ${LDFLAGS} conftest.c -o conftest
+ aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
+}'`
+ if test -z "$aix_libpath"; then
+ aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; }
+}'`
+ fi
+ if test -z "$aix_libpath"; then
+ aix_libpath="/usr/lib:/lib"
+ fi
+ rm -f conftest.c conftest
+ # End _LT_AC_SYS_LIBPATH_AIX.
+ if test "$aix_use_runtimelinking" = yes; then
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ else
+ if test "$host_cpu" = ia64; then
+ hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ else
+ hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ fi
+ fi
+ ;;
+ amigaos*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ # see comment about different semantics on the GNU ld section
+ ld_shlibs=no
+ ;;
+ bsdi[45]*)
+ ;;
+ cygwin* | mingw* | pw32*)
+ # When not using gcc, we currently assume that we are using
+ # Microsoft Visual C++.
+ # hardcode_libdir_flag_spec is actually meaningless, as there is
+ # no search path for DLLs.
+ hardcode_libdir_flag_spec=' '
+ libext=lib
+ ;;
+ darwin* | rhapsody*)
+ hardcode_direct=no
+ if test "$GCC" = yes ; then
+ :
+ else
+ case $cc_basename in
+ xlc*)
+ ;;
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+ fi
+ ;;
+ dgux*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ ;;
+ freebsd1*)
+ ld_shlibs=no
+ ;;
+ freebsd2.2*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ freebsd2*)
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ ;;
+ freebsd* | kfreebsd*-gnu | dragonfly*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ hpux9*)
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ hpux10*)
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ fi
+ ;;
+ hpux11*)
+ if test "$with_gnu_ld" = no; then
+ hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ hardcode_libdir_separator=:
+ case $host_cpu in
+ hppa*64*|ia64*)
+ hardcode_direct=no
+ ;;
+ *)
+ hardcode_direct=yes
+ # hardcode_minus_L: Not really in the search PATH,
+ # but as the default location of the library.
+ hardcode_minus_L=yes
+ ;;
+ esac
+ fi
+ ;;
+ irix5* | irix6* | nonstopux*)
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ netbsd*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ hardcode_direct=yes
+ ;;
+ newsos6)
+ hardcode_direct=yes
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ openbsd*)
+ hardcode_direct=yes
+ if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ else
+ case "$host_os" in
+ openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ *)
+ hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
+ ;;
+ esac
+ fi
+ ;;
+ os2*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_minus_L=yes
+ ;;
+ osf3*)
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ hardcode_libdir_separator=:
+ ;;
+ osf4* | osf5*)
+ if test "$GCC" = yes; then
+ hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+ else
+ # Both cc and cxx compiler support -rpath directly
+ hardcode_libdir_flag_spec='-rpath $libdir'
+ fi
+ hardcode_libdir_separator=:
+ ;;
+ solaris*)
+ hardcode_libdir_flag_spec='-R$libdir'
+ ;;
+ sunos4*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ hardcode_direct=yes
+ hardcode_minus_L=yes
+ ;;
+ sysv4)
+ case $host_vendor in
+ sni)
+ hardcode_direct=yes # is this really true???
+ ;;
+ siemens)
+ hardcode_direct=no
+ ;;
+ motorola)
+ hardcode_direct=no #Motorola manual says yes, but my tests say they lie
+ ;;
+ esac
+ ;;
+ sysv4.3*)
+ ;;
+ sysv4*MP*)
+ if test -d /usr/nec; then
+ ld_shlibs=yes
+ fi
+ ;;
+ sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7*)
+ ;;
+ sysv5* | sco3.2v5* | sco5v6*)
+ hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`'
+ hardcode_libdir_separator=':'
+ ;;
+ uts4*)
+ hardcode_libdir_flag_spec='-L$libdir'
+ ;;
+ *)
+ ld_shlibs=no
+ ;;
+ esac
+fi
+
+# Check dynamic linker characteristics
+# Code taken from libtool.m4's AC_LIBTOOL_SYS_DYNAMIC_LINKER.
+libname_spec='lib$name'
+case "$host_os" in
+ aix3*)
+ ;;
+ aix4* | aix5*)
+ ;;
+ amigaos*)
+ ;;
+ beos*)
+ ;;
+ bsdi[45]*)
+ ;;
+ cygwin* | mingw* | pw32*)
+ shrext=.dll
+ ;;
+ darwin* | rhapsody*)
+ shrext=.dylib
+ ;;
+ dgux*)
+ ;;
+ freebsd1*)
+ ;;
+ kfreebsd*-gnu)
+ ;;
+ freebsd* | dragonfly*)
+ ;;
+ gnu*)
+ ;;
+ hpux9* | hpux10* | hpux11*)
+ case $host_cpu in
+ ia64*)
+ shrext=.so
+ ;;
+ hppa*64*)
+ shrext=.sl
+ ;;
+ *)
+ shrext=.sl
+ ;;
+ esac
+ ;;
+ interix3*)
+ ;;
+ irix5* | irix6* | nonstopux*)
+ case "$host_os" in
+ irix5* | nonstopux*)
+ libsuff= shlibsuff=
+ ;;
+ *)
+ case $LD in
+ *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;;
+ *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;;
+ *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;;
+ *) libsuff= shlibsuff= ;;
+ esac
+ ;;
+ esac
+ ;;
+ linux*oldld* | linux*aout* | linux*coff*)
+ ;;
+ linux*)
+ ;;
+ knetbsd*-gnu)
+ ;;
+ netbsd*)
+ ;;
+ newsos6)
+ ;;
+ nto-qnx*)
+ ;;
+ openbsd*)
+ ;;
+ os2*)
+ libname_spec='$name'
+ shrext=.dll
+ ;;
+ osf3* | osf4* | osf5*)
+ ;;
+ solaris*)
+ ;;
+ sunos4*)
+ ;;
+ sysv4 | sysv4.3*)
+ ;;
+ sysv4*MP*)
+ ;;
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ ;;
+ uts4*)
+ ;;
+esac
+
+sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
+escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"`
+shlibext=`echo "$shrext" | sed -e 's,^\.,,'`
+escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"`
+
+LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' <<EOF
+
+# How to pass a linker flag through the compiler.
+wl="$escaped_wl"
+
+# Static library suffix (normally "a").
+libext="$libext"
+
+# Shared library suffix (normally "so").
+shlibext="$shlibext"
+
+# Flag to hardcode \$libdir into a binary during linking.
+# This must work even if \$libdir does not exist.
+hardcode_libdir_flag_spec="$escaped_hardcode_libdir_flag_spec"
+
+# Whether we need a single -rpath flag with a separated argument.
+hardcode_libdir_separator="$hardcode_libdir_separator"
+
+# Set to yes if using DIR/libNAME.so during linking hardcodes DIR into the
+# resulting binary.
+hardcode_direct="$hardcode_direct"
+
+# Set to yes if using the -LDIR flag during linking hardcodes DIR into the
+# resulting binary.
+hardcode_minus_L="$hardcode_minus_L"
+
+EOF
diff --git a/qpid/extras/sasl/build-aux/config.sub b/qpid/extras/sasl/build-aux/config.sub
new file mode 100755
index 0000000000..7ccee73057
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/config.sub
@@ -0,0 +1,1619 @@
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
+# Inc.
+
+timestamp='2006-11-07'
+
+# This file is (in principle) common to ALL GNU software.
+# The presence of a machine in this file suggests that SOME GNU software
+# can handle that machine. It does not imply ALL GNU software can.
+#
+# This file is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
+# 02110-1301, USA.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Please send patches to <config-patches@gnu.org>. Submit a context
+# diff and a properly formatted ChangeLog entry.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS
+ $0 [OPTION] ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
+Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
+ uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray)
+ os=
+ basic_machine=$1
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
+ | bfin \
+ | c4x | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | fr30 | frv \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | mcore \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64vr | mips64vrel \
+ | mips64orion | mips64orionel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | mt \
+ | msp430 \
+ | nios | nios2 \
+ | ns16k | ns32k \
+ | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
+ | pyramid \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu | strongarm \
+ | tahoe | thumb | tic4x | tic80 | tron \
+ | v850 | v850e \
+ | we32k \
+ | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
+ | z8k)
+ basic_machine=$basic_machine-unknown
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12)
+ # Motorola 68HC11/12.
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
+ | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | elxsi-* \
+ | f30[01]-* | f700-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nios-* | nios2-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
+ | pyramid-* \
+ | romp-* | rs6000-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
+ | tahoe-* | thumb-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tron-* \
+ | v850-* | v850e-* | vax-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
+ | xstormy16-* | xtensa-* \
+ | ymp-* \
+ | z8k-*)
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16c)
+ basic_machine=cr16c-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+# I'm not sure what "Sysv32" means. Should this be sysv3.2?
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ mingw32)
+ basic_machine=i386-pc
+ os=-mingw32
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc) basic_machine=powerpc-unknown
+ ;;
+ ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle | ppc-le | powerpc-little)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little | ppc64-le | powerpc64-little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tic54x | c54x*)
+ basic_machine=tic54x-unknown
+ os=-coff
+ ;;
+ tic55x | c55x*)
+ basic_machine=tic55x-unknown
+ os=-coff
+ ;;
+ tic6x | c6x*)
+ basic_machine=tic6x-unknown
+ os=-coff
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems comes first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -openbsd* | -solidbsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* \
+ | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -kaos*)
+ os=-kaos
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine or put another way, the most popular os provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ # This also exists in the configure program, but was not the
+ # default.
+ # os=-sunos4
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the os, and the CPU type, but not the
+# manufacturer. We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/qpid/extras/sasl/build-aux/depcomp b/qpid/extras/sasl/build-aux/depcomp
new file mode 100755
index 0000000000..ca5ea4e1ef
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/depcomp
@@ -0,0 +1,584 @@
+#! /bin/sh
+# depcomp - compile a program generating dependencies as side-effects
+
+scriptversion=2006-10-15.18
+
+# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006 Free Software
+# Foundation, Inc.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>.
+
+case $1 in
+  '')
+     echo "$0: No command.  Try \`$0 --help' for more information." 1>&2
+     exit 1;
+     ;;
+  -h | --h*)
+    cat <<\EOF
+Usage: depcomp [--help] [--version] PROGRAM [ARGS]
+
+Run PROGRAMS ARGS to compile a file, generating dependencies
+as side-effects.
+
+Environment variables:
+  depmode     Dependency tracking mode.
+  source      Source file read by `PROGRAMS ARGS'.
+  object      Object file output by `PROGRAMS ARGS'.
+  DEPDIR      directory where to store dependencies.
+  depfile     Dependency file to output.
+  tmpdepfile  Temporary file to use when outputting dependencies.
+  libtool     Whether libtool is used (yes/no).
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+    exit $?
+    ;;
+  -v | --v*)
+    echo "depcomp $scriptversion"
+    exit $?
+    ;;
+esac
+
+if test -z "$depmode" || test -z "$source" || test -z "$object"; then
+ echo "depcomp: Variables source, object and depmode must be set" 1>&2
+ exit 1
+fi
+
+# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
+depfile=${depfile-`echo "$object" |
+ sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`}
+tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`}
+
+rm -f "$tmpdepfile"
+
+# Some modes work just like other modes, but use different flags. We
+# parameterize here, but still list the modes in the big case below,
+# to make depend.m4 easier to write. Note that we *cannot* use a case
+# here, because this file can only contain one case statement.
+if test "$depmode" = hp; then
+ # HP compiler uses -M and no extra arg.
+ gccflag=-M
+ depmode=gcc
+fi
+
+if test "$depmode" = dashXmstdout; then
+ # This is just like dashmstdout with a different argument.
+ dashmflag=-xM
+ depmode=dashmstdout
+fi
+
+case "$depmode" in
+gcc3)
+## gcc 3 implements dependency tracking that does exactly what
+## we want. Yay! Note: for some reason libtool 1.4 doesn't like
+## it if -MD -MP comes after the -MF stuff. Hmm.
+## Unfortunately, FreeBSD c89 acceptance of flags depends upon
+## the command line argument order; so add the flags where they
+## appear in depend2.am. Note that the slowdown incurred here
+## affects only configure: in makefiles, %FASTDEP% shortcuts this.
+ for arg
+ do
+ case $arg in
+ -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
+ *) set fnord "$@" "$arg" ;;
+ esac
+ shift # fnord
+ shift # $arg
+ done
+ "$@"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ mv "$tmpdepfile" "$depfile"
+ ;;
+
+gcc)
+## There are various ways to get dependency output from gcc. Here's
+## why we pick this rather obscure method:
+## - Don't want to use -MD because we'd like the dependencies to end
+## up in a subdir. Having to rename by hand is ugly.
+## (We might end up doing this anyway to support other compilers.)
+## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like
+## -MM, not -M (despite what the docs say).
+## - Using -M directly means running the compiler twice (even worse
+## than renaming).
+ if test -z "$gccflag"; then
+ gccflag=-MD,
+ fi
+ "$@" -Wp,"$gccflag$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz
+## The second -e expression handles DOS-style file names with drive letters.
+ sed -e 's/^[^:]*: / /' \
+ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
+## This next piece of magic avoids the `deleted header file' problem.
+## The problem is that when a header file which appears in a .P file
+## is deleted, the dependency causes make to die (because there is
+## typically no way to rebuild the header). We avoid this by adding
+## dummy dependencies for each header file. Too bad gcc doesn't do
+## this for us directly.
+ tr ' ' '
+' < "$tmpdepfile" |
+## Some versions of gcc put a space before the `:'. On the theory
+## that the space means something, we add a space to the output as
+## well.
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp)
+ # This case exists only to let depend.m4 do its work. It works by
+ # looking at the text of this script. This case will never be run,
+ # since it is checked for above.
+ exit 1
+ ;;
+
+sgi)
+  if test "$libtool" = yes; then
+    "$@" "-Wp,-MDupdate,$tmpdepfile"
+  else
+    "$@" -MDupdate "$tmpdepfile"
+  fi
+  stat=$?
+  if test $stat -eq 0; then :
+  else
+    rm -f "$tmpdepfile"
+    exit $stat
+  fi
+  rm -f "$depfile"
+
+  if test -f "$tmpdepfile"; then  # yes, the source file depends on other files
+    echo "$object : \\" > "$depfile"
+
+    # Clip off the initial element (the dependent).  Don't try to be
+    # clever and replace this with sed code, as IRIX sed won't handle
+    # lines with more than a fixed number of characters (4096 in
+    # IRIX 6.2 sed, 8192 in IRIX 6.5).  We also remove comment lines;
+    # the IRIX cc adds comments like `#:fec' to the end of the
+    # dependency line.
+    tr ' ' '
+' < "$tmpdepfile" \
+    | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
+    tr '
+' ' ' >> "$depfile"
+    echo >> "$depfile"
+
+    # The second pass generates a dummy entry for each header file.
+    tr ' ' '
+' < "$tmpdepfile" \
+   | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
+   >> "$depfile"
+  else
+    # The sourcefile does not contain any dependencies, so just
+    # store a dummy comment line, to avoid errors with the Makefile
+    # "include basename.Plo" scheme.
+    echo "#dummy" > "$depfile"
+  fi
+  rm -f "$tmpdepfile"
+  ;;
+
+aix)
+ # The C for AIX Compiler uses -M and outputs the dependencies
+ # in a .u file. In older versions, this file always lives in the
+ # current directory. Also, the AIX compiler puts `$object:' at the
+ # start of each line; $object doesn't have directory information.
+ # Version 6 uses the directory in both cases.
+ stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'`
+ tmpdepfile="$stripped.u"
+ if test "$libtool" = yes; then
+ "$@" -Wc,-M
+ else
+ "$@" -M
+ fi
+ stat=$?
+
+ if test -f "$tmpdepfile"; then :
+ else
+ stripped=`echo "$stripped" | sed 's,^.*/,,'`
+ tmpdepfile="$stripped.u"
+ fi
+
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+
+ if test -f "$tmpdepfile"; then
+ outname="$stripped.o"
+ # Each line is of the form `foo.o: dependent.h'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile"
+ sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile"
+ else
+ # The sourcefile does not contain any dependencies, so just
+ # store a dummy comment line, to avoid errors with the Makefile
+ # "include basename.Plo" scheme.
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+icc)
+ # Intel's C compiler understands `-MD -MF file'. However on
+ # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
+ # ICC 7.0 will fill foo.d with something like
+ # foo.o: sub/foo.c
+ # foo.o: sub/foo.h
+ # which is wrong. We want:
+ # sub/foo.o: sub/foo.c
+ # sub/foo.o: sub/foo.h
+ # sub/foo.c:
+ # sub/foo.h:
+ # ICC 7.1 will output
+ # foo.o: sub/foo.c sub/foo.h
+ # and will wrap long lines using \ :
+ # foo.o: sub/foo.c ... \
+ # sub/foo.h ... \
+ # ...
+
+ "$@" -MD -MF "$tmpdepfile"
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile"
+ exit $stat
+ fi
+ rm -f "$depfile"
+ # Each line is of the form `foo.o: dependent.h',
+ # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
+ # Do two passes, one to just change these to
+ # `$object: dependent.h' and one to simply `dependent.h:'.
+ sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
+ # Some versions of the HPUX 10.20 sed can't process this invocation
+ # correctly. Breaking it into two sed invocations is a workaround.
+ sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
+ sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+hp2)
+ # The "hp" stanza above does not work with aCC (C++) and HP's ia64
+ # compilers, which have integrated preprocessors. The correct option
+ # to use with these is +Maked; it writes dependencies to a file named
+ # 'foo.d', which lands next to the object file, wherever that
+ # happens to be.
+ # Much of this is similar to the tru64 case; see comments there.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+ if test "$libtool" = yes; then
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir.libs/$base.d
+ "$@" -Wc,+Maked
+ else
+ tmpdepfile1=$dir$base.d
+ tmpdepfile2=$dir$base.d
+ "$@" +Maked
+ fi
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
+ # Add `dependent.h:' lines.
+ sed -ne '2,${; s/^ *//; s/ \\*$//; s/$/:/; p;}' "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile" "$tmpdepfile2"
+ ;;
+
+tru64)
+ # The Tru64 compiler uses -MD to generate dependencies as a side
+ # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
+ # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
+ # dependencies in `foo.d' instead, so we check for that too.
+ # Subdirectories are respected.
+ dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
+ test "x$dir" = "x$object" && dir=
+ base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
+
+ if test "$libtool" = yes; then
+ # With Tru64 cc, shared objects can also be used to make a
+ # static library. This mechanism is used in libtool 1.4 series to
+ # handle both shared and static libraries in a single compilation.
+ # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
+ #
+ # With libtool 1.5 this exception was removed, and libtool now
+ # generates 2 separate objects for the 2 libraries. These two
+ # compilations output dependencies in $dir.libs/$base.o.d and
+ # in $dir$base.o.d. We have to check for both files, because
+ # one of the two compilations can be disabled. We should prefer
+ # $dir$base.o.d over $dir.libs/$base.o.d because the latter is
+ # automatically cleaned when .libs/ is deleted, while ignoring
+ # the former would cause a distcleancheck panic.
+ tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4
+ tmpdepfile2=$dir$base.o.d # libtool 1.5
+ tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5
+ tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504
+ "$@" -Wc,-MD
+ else
+ tmpdepfile1=$dir$base.o.d
+ tmpdepfile2=$dir$base.d
+ tmpdepfile3=$dir$base.d
+ tmpdepfile4=$dir$base.d
+ "$@" -MD
+ fi
+
+ stat=$?
+ if test $stat -eq 0; then :
+ else
+ rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ exit $stat
+ fi
+
+ for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4"
+ do
+ test -f "$tmpdepfile" && break
+ done
+ if test -f "$tmpdepfile"; then
+ sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
+ # That's a tab and a space in the [].
+ sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
+ else
+ echo "#dummy" > "$depfile"
+ fi
+ rm -f "$tmpdepfile"
+ ;;
+
+#nosideeffect)
+ # This comment above is used by automake to tell side-effect
+ # dependency tracking mechanisms from slower ones.
+
+dashmstdout)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ test -z "$dashmflag" && dashmflag=-M
+ # Require at least two characters before searching for `:'
+ # in the target name. This is to cope with DOS-style filenames:
+ # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
+ "$@" $dashmflag |
+ sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ tr ' ' '
+' < "$tmpdepfile" | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+dashXmstdout)
+ # This case only exists to satisfy depend.m4. It is never actually
+ # run, as this mode is specially recognized in the preamble.
+ exit 1
+ ;;
+
+makedepend)
+ "$@" || exit $?
+ # Remove any Libtool call
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+ # X makedepend
+ shift
+ cleared=no
+ for arg in "$@"; do
+ case $cleared in
+ no)
+ set ""; shift
+ cleared=yes ;;
+ esac
+ case "$arg" in
+ -D*|-I*)
+ set fnord "$@" "$arg"; shift ;;
+ # Strip any option that makedepend may not understand. Remove
+ # the object too, otherwise makedepend will parse it as a source file.
+ -*|$object)
+ ;;
+ *)
+ set fnord "$@" "$arg"; shift ;;
+ esac
+ done
+ obj_suffix="`echo $object | sed 's/^.*\././'`"
+ touch "$tmpdepfile"
+ ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
+ rm -f "$depfile"
+ cat < "$tmpdepfile" > "$depfile"
+ sed '1,2d' "$tmpdepfile" | tr ' ' '
+' | \
+## Some versions of the HPUX 10.20 sed can't process this invocation
+## correctly. Breaking it into two sed invocations is a workaround.
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile" "$tmpdepfile".bak
+ ;;
+
+cpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout.
+ "$@" || exit $?
+
+ # Remove the call to Libtool.
+ if test "$libtool" = yes; then
+ while test $1 != '--mode=compile'; do
+ shift
+ done
+ shift
+ fi
+
+ # Remove `-o $object'.
+ IFS=" "
+ for arg
+ do
+ case $arg in
+ -o)
+ shift
+ ;;
+ $object)
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift # fnord
+ shift # $arg
+ ;;
+ esac
+ done
+
+ "$@" -E |
+ sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \
+ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' |
+ sed '$ s: \\$::' > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ cat < "$tmpdepfile" >> "$depfile"
+ sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+msvisualcpp)
+ # Important note: in order to support this mode, a compiler *must*
+ # always write the preprocessed file to stdout, regardless of -o,
+ # because we must use -o when running libtool.
+ "$@" || exit $?
+ IFS=" "
+ for arg
+ do
+ case "$arg" in
+ "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
+ set fnord "$@"
+ shift
+ shift
+ ;;
+ *)
+ set fnord "$@" "$arg"
+ shift
+ shift
+ ;;
+ esac
+ done
+ "$@" -E |
+ sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile"
+ rm -f "$depfile"
+ echo "$object : \\" > "$depfile"
+ . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
+ echo " " >> "$depfile"
+ . "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile"
+ rm -f "$tmpdepfile"
+ ;;
+
+none)
+ exec "$@"
+ ;;
+
+*)
+ echo "Unknown depmode $depmode" 1>&2
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/qpid/extras/sasl/build-aux/install-sh b/qpid/extras/sasl/build-aux/install-sh
new file mode 100755
index 0000000000..4fbbae7b7f
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/install-sh
@@ -0,0 +1,507 @@
+#!/bin/sh
+# install - install a program, script, or datafile
+
+scriptversion=2006-10-14.15
+
+# This originates from X11R5 (mit/util/scripts/install.sh), which was
+# later released in X11R6 (xc/config/util/install.sh) with the
+# following copyright and license.
+#
+# Copyright (C) 1994 X Consortium
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
+# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+# Except as contained in this notice, the name of the X Consortium shall not
+# be used in advertising or otherwise to promote the sale, use or other deal-
+# ings in this Software without prior written authorization from the X Consor-
+# tium.
+#
+#
+# FSF changes to this file are in the public domain.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch.
+
+nl='
+'
+IFS=" "" $nl"
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+if test -z "$doit"; then
+ doit_exec=exec
+else
+ doit_exec=$doit
+fi
+
+# Put in absolute file names if you don't have them in your path;
+# or use environment vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+posix_glob=
+posix_mkdir=
+
+# Desired mode of installed file.
+mode=0755
+
+chmodcmd=$chmodprog
+chowncmd=
+chgrpcmd=
+stripcmd=
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=
+dst=
+dir_arg=
+dstarg=
+no_target_directory=
+
+usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
+ or: $0 [OPTION]... SRCFILES... DIRECTORY
+ or: $0 [OPTION]... -t DIRECTORY SRCFILES...
+ or: $0 [OPTION]... -d DIRECTORIES...
+
+In the 1st form, copy SRCFILE to DSTFILE.
+In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
+In the 4th, create DIRECTORIES.
+
+Options:
+-c (ignored)
+-d create directories instead of installing files.
+-g GROUP $chgrpprog installed files to GROUP.
+-m MODE $chmodprog installed files to MODE.
+-o USER $chownprog installed files to USER.
+-s $stripprog installed files.
+-t DIRECTORY install into DIRECTORY.
+-T report an error if DSTFILE is a directory.
+--help display this help and exit.
+--version display version info and exit.
+
+Environment variables override the default commands:
+ CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG
+"
+
+while test $# -ne 0; do
+ case $1 in
+ -c) shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ --help) echo "$usage"; exit $?;;
+
+ -m) mode=$2
+ shift
+ shift
+ case $mode in
+ *' '* | *' '* | *'
+'* | *'*'* | *'?'* | *'['*)
+ echo "$0: invalid mode: $mode" >&2
+ exit 1;;
+ esac
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd=$stripprog
+ shift
+ continue;;
+
+ -t) dstarg=$2
+ shift
+ shift
+ continue;;
+
+ -T) no_target_directory=true
+ shift
+ continue;;
+
+ --version) echo "$0 $scriptversion"; exit $?;;
+
+ --) shift
+ break;;
+
+ -*) echo "$0: invalid option: $1" >&2
+ exit 1;;
+
+ *) break;;
+ esac
+done
+
+if test $# -ne 0 && test -z "$dir_arg$dstarg"; then
+ # When -d is used, all remaining arguments are directories to create.
+ # When -t is used, the destination is already specified.
+ # Otherwise, the last argument is the destination. Remove it from $@.
+ for arg
+ do
+ if test -n "$dstarg"; then
+ # $@ is not empty: it contains at least $arg.
+ set fnord "$@" "$dstarg"
+ shift # fnord
+ fi
+ shift # arg
+ dstarg=$arg
+ done
+fi
+
+if test $# -eq 0; then
+ if test -z "$dir_arg"; then
+ echo "$0: no input file specified." >&2
+ exit 1
+ fi
+ # It's OK to call `install-sh -d' without argument.
+ # This can happen when creating conditional directories.
+ exit 0
+fi
+
+if test -z "$dir_arg"; then
+ trap '(exit $?); exit' 1 2 13 15
+
+ # Set umask so as not to create temps with too-generous modes.
+ # However, 'strip' requires both read and write access to temps.
+ case $mode in
+ # Optimize common cases.
+ *644) cp_umask=133;;
+ *755) cp_umask=22;;
+
+ *[0-7])
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw='% 200'
+ fi
+ cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
+ *)
+ if test -z "$stripcmd"; then
+ u_plus_rw=
+ else
+ u_plus_rw=,u+rw
+ fi
+ cp_umask=$mode$u_plus_rw;;
+ esac
+fi
+
+for src
+do
+ # Protect names starting with `-'.
+ case $src in
+ -*) src=./$src ;;
+ esac
+
+ if test -n "$dir_arg"; then
+ dst=$src
+ dstdir=$dst
+ test -d "$dstdir"
+ dstdir_status=$?
+ else
+
+ # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
+ # might cause directories to be created, which would be especially bad
+ # if $src (and thus $dsttmp) contains '*'.
+ if test ! -f "$src" && test ! -d "$src"; then
+ echo "$0: $src does not exist." >&2
+ exit 1
+ fi
+
+ if test -z "$dstarg"; then
+ echo "$0: no destination specified." >&2
+ exit 1
+ fi
+
+ dst=$dstarg
+ # Protect names starting with `-'.
+ case $dst in
+ -*) dst=./$dst ;;
+ esac
+
+ # If destination is a directory, append the input filename; won't work
+ # if double slashes aren't ignored.
+ if test -d "$dst"; then
+ if test -n "$no_target_directory"; then
+ echo "$0: $dstarg: Is a directory" >&2
+ exit 1
+ fi
+ dstdir=$dst
+ dst=$dstdir/`basename "$src"`
+ dstdir_status=0
+ else
+ # Prefer dirname, but fall back on a substitute if dirname fails.
+ dstdir=`
+ (dirname "$dst") 2>/dev/null ||
+ expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ X"$dst" : 'X\(//\)[^/]' \| \
+ X"$dst" : 'X\(//\)$' \| \
+ X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
+ echo X"$dst" |
+ sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)[^/].*/{
+ s//\1/
+ q
+ }
+ /^X\(\/\/\)$/{
+ s//\1/
+ q
+ }
+ /^X\(\/\).*/{
+ s//\1/
+ q
+ }
+ s/.*/./; q'
+ `
+
+ test -d "$dstdir"
+ dstdir_status=$?
+ fi
+ fi
+
+ obsolete_mkdir_used=false
+
+ if test $dstdir_status != 0; then
+ case $posix_mkdir in
+ '')
+ # Create intermediate dirs using mode 755 as modified by the umask.
+ # This is like FreeBSD 'install' as of 1997-10-28.
+ umask=`umask`
+ case $stripcmd.$umask in
+ # Optimize common cases.
+ *[2367][2367]) mkdir_umask=$umask;;
+ .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
+
+ *[0-7])
+ mkdir_umask=`expr $umask + 22 \
+ - $umask % 100 % 40 + $umask % 20 \
+ - $umask % 10 % 4 + $umask % 2
+ `;;
+ *) mkdir_umask=$umask,go-w;;
+ esac
+
+ # With -d, create the new directory with the user-specified mode.
+ # Otherwise, rely on $mkdir_umask.
+ if test -n "$dir_arg"; then
+ mkdir_mode=-m$mode
+ else
+ mkdir_mode=
+ fi
+
+ posix_mkdir=false
+ case $umask in
+ *[123567][0-7][0-7])
+ # POSIX mkdir -p sets u+wx bits regardless of umask, which
+ # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
+ ;;
+ *)
+ tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
+ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
+
+ if (umask $mkdir_umask &&
+ exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
+ then
+ if test -z "$dir_arg" || {
+ # Check for POSIX incompatibilities with -m.
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
+ # other-writeable bit of parent directory when it shouldn't.
+ # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
+ ls_ld_tmpdir=`ls -ld "$tmpdir"`
+ case $ls_ld_tmpdir in
+ d????-?r-*) different_mode=700;;
+ d????-?--*) different_mode=755;;
+ *) false;;
+ esac &&
+ $mkdirprog -m$different_mode -p -- "$tmpdir" && {
+ ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
+ test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
+ }
+ }
+ then posix_mkdir=:
+ fi
+ rmdir "$tmpdir/d" "$tmpdir"
+ else
+ # Remove any dirs left behind by ancient mkdir implementations.
+ rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
+ fi
+ trap '' 0;;
+ esac;;
+ esac
+
+ if
+ $posix_mkdir && (
+ umask $mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
+ )
+ then :
+ else
+
+ # The umask is ridiculous, or mkdir does not conform to POSIX,
+ # or it failed possibly due to a race condition. Create the
+ # directory the slow way, step by step, checking for races as we go.
+
+ case $dstdir in
+ /*) prefix=/ ;;
+ -*) prefix=./ ;;
+ *) prefix= ;;
+ esac
+
+ case $posix_glob in
+ '')
+ if (set -f) 2>/dev/null; then
+ posix_glob=true
+ else
+ posix_glob=false
+ fi ;;
+ esac
+
+ oIFS=$IFS
+ IFS=/
+ $posix_glob && set -f
+ set fnord $dstdir
+ shift
+ $posix_glob && set +f
+ IFS=$oIFS
+
+ prefixes=
+
+ for d
+ do
+ test -z "$d" && continue
+
+ prefix=$prefix$d
+ if test -d "$prefix"; then
+ prefixes=
+ else
+ if $posix_mkdir; then
+ (umask=$mkdir_umask &&
+ $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
+ # Don't fail if two instances are running concurrently.
+ test -d "$prefix" || exit 1
+ else
+ case $prefix in
+ *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
+ *) qprefix=$prefix;;
+ esac
+ prefixes="$prefixes '$qprefix'"
+ fi
+ fi
+ prefix=$prefix/
+ done
+
+ if test -n "$prefixes"; then
+ # Don't fail if two instances are running concurrently.
+ (umask $mkdir_umask &&
+ eval "\$doit_exec \$mkdirprog $prefixes") ||
+ test -d "$dstdir" || exit 1
+ obsolete_mkdir_used=true
+ fi
+ fi
+ fi
+
+ if test -n "$dir_arg"; then
+ { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
+ { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
+ { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
+ test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
+ else
+
+ # Make a couple of temp file names in the proper directory.
+ dsttmp=$dstdir/_inst.$$_
+ rmtmp=$dstdir/_rm.$$_
+
+ # Trap to clean up those temp files at exit.
+ trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
+
+ # Copy the file name to the temp name.
+ (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
+
+ # and set any options; do chmod last to preserve setuid bits.
+ #
+ # If any of these fail, we abort the whole thing. If we want to
+ # ignore errors from any of these, just make sure not to ignore
+ # errors from the above "$doit $cpprog $src $dsttmp" command.
+ #
+ { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \
+ && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \
+ && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \
+ && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
+
+ # Now rename the file to the real destination.
+ { $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null \
+ || {
+ # The rename failed, perhaps because mv can't rename something else
+ # to itself, or perhaps because mv is so ancient that it does not
+ # support -f.
+
+ # Now remove or move aside any old file at destination location.
+ # We try this two ways since rm can't unlink itself on some
+ # systems and the destination file might be busy for other
+ # reasons. In this case, the final cleanup might fail but the new
+ # file should still install successfully.
+ {
+ if test -f "$dst"; then
+ $doit $rmcmd -f "$dst" 2>/dev/null \
+ || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null \
+ && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }; }\
+ || {
+ echo "$0: cannot unlink or rename $dst" >&2
+ (exit 1); exit 1
+ }
+ else
+ :
+ fi
+ } &&
+
+ # Now rename the file to the real destination.
+ $doit $mvcmd "$dsttmp" "$dst"
+ }
+ } || exit 1
+
+ trap '' 0
+ fi
+done
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/qpid/extras/sasl/build-aux/mdate-sh b/qpid/extras/sasl/build-aux/mdate-sh
new file mode 100755
index 0000000000..cd916c0a34
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/mdate-sh
@@ -0,0 +1,201 @@
+#!/bin/sh
+# Get modification time of a file or directory and pretty-print it.
+
+scriptversion=2005-06-29.22
+
+# Copyright (C) 1995, 1996, 1997, 2003, 2004, 2005 Free Software
+# Foundation, Inc.
+# written by Ulrich Drepper <drepper@gnu.ai.mit.edu>, June 1995
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+# This file is maintained in Automake, please report
+# bugs to <bug-automake@gnu.org> or send patches to
+# <automake-patches@gnu.org>.
+
+case $1 in
+ '')
+ echo "$0: No file. Try \`$0 --help' for more information." 1>&2
+ exit 1;
+ ;;
+ -h | --h*)
+ cat <<\EOF
+Usage: mdate-sh [--help] [--version] FILE
+
+Pretty-print the modification time of FILE.
+
+Report bugs to <bug-automake@gnu.org>.
+EOF
+ exit $?
+ ;;
+ -v | --v*)
+ echo "mdate-sh $scriptversion"
+ exit $?
+ ;;
+esac
+
+# Prevent date giving response in another language.
+LANG=C
+export LANG
+LC_ALL=C
+export LC_ALL
+LC_TIME=C
+export LC_TIME
+
+# GNU ls changes its time format in response to the TIME_STYLE
+# variable. Since we cannot assume `unset' works, revert this
+# variable to its documented default.
+if test "${TIME_STYLE+set}" = set; then
+ TIME_STYLE=posix-long-iso
+ export TIME_STYLE
+fi
+
+save_arg1=$1
+
+# Find out how to get the extended ls output of a file or directory.
+if ls -L /dev/null 1>/dev/null 2>&1; then
+ ls_command='ls -L -l -d'
+else
+ ls_command='ls -l -d'
+fi
+
+# A `ls -l' line looks as follows on OS/2.
+# drwxrwx--- 0 Aug 11 2001 foo
+# This differs from Unix, which adds ownership information.
+# drwxrwx--- 2 root root 4096 Aug 11 2001 foo
+#
+# To find the date, we split the line on spaces and iterate on words
+# until we find a month. This cannot work with files whose owner is a
+# user named `Jan', or `Feb', etc. However, it's unlikely that `/'
+# will be owned by a user whose name is a month. So we first look at
+# the extended ls output of the root directory to decide how many
+# words should be skipped to get the date.
+
+# On HPUX /bin/sh, "set" interprets "-rw-r--r--" as options, so the "x" below.
+set x`ls -l -d /`
+
+# Find which argument is the month.
+month=
+command=
+until test $month
+do
+ shift
+ # Add another shift to the command.
+ command="$command shift;"
+ case $1 in
+ Jan) month=January; nummonth=1;;
+ Feb) month=February; nummonth=2;;
+ Mar) month=March; nummonth=3;;
+ Apr) month=April; nummonth=4;;
+ May) month=May; nummonth=5;;
+ Jun) month=June; nummonth=6;;
+ Jul) month=July; nummonth=7;;
+ Aug) month=August; nummonth=8;;
+ Sep) month=September; nummonth=9;;
+ Oct) month=October; nummonth=10;;
+ Nov) month=November; nummonth=11;;
+ Dec) month=December; nummonth=12;;
+ esac
+done
+
+# Get the extended ls output of the file or directory.
+set dummy x`eval "$ls_command \"\$save_arg1\""`
+
+# Remove all preceding arguments
+eval $command
+
+# Because of the dummy argument above, month is in $2.
+#
+# On a POSIX system, we should have
+#
+# $# = 5
+# $1 = file size
+# $2 = month
+# $3 = day
+# $4 = year or time
+# $5 = filename
+#
+# On Darwin 7.7.0 and 7.6.0, we have
+#
+# $# = 4
+# $1 = day
+# $2 = month
+# $3 = year or time
+# $4 = filename
+
+# Get the month.
+case $2 in
+ Jan) month=January; nummonth=1;;
+ Feb) month=February; nummonth=2;;
+ Mar) month=March; nummonth=3;;
+ Apr) month=April; nummonth=4;;
+ May) month=May; nummonth=5;;
+ Jun) month=June; nummonth=6;;
+ Jul) month=July; nummonth=7;;
+ Aug) month=August; nummonth=8;;
+ Sep) month=September; nummonth=9;;
+ Oct) month=October; nummonth=10;;
+ Nov) month=November; nummonth=11;;
+ Dec) month=December; nummonth=12;;
+esac
+
+case $3 in
+ ???*) day=$1;;
+ *) day=$3; shift;;
+esac
+
+# Here we have to deal with the problem that the ls output gives either
+# the time of day or the year.
+case $3 in
+ *:*) set `date`; eval year=\$$#
+ case $2 in
+ Jan) nummonthtod=1;;
+ Feb) nummonthtod=2;;
+ Mar) nummonthtod=3;;
+ Apr) nummonthtod=4;;
+ May) nummonthtod=5;;
+ Jun) nummonthtod=6;;
+ Jul) nummonthtod=7;;
+ Aug) nummonthtod=8;;
+ Sep) nummonthtod=9;;
+ Oct) nummonthtod=10;;
+ Nov) nummonthtod=11;;
+ Dec) nummonthtod=12;;
+ esac
+ # For the first six month of the year the time notation can also
+ # be used for files modified in the last year.
+ if (expr $nummonth \> $nummonthtod) > /dev/null;
+ then
+ year=`expr $year - 1`
+ fi;;
+ *) year=$3;;
+esac
+
+# The result.
+echo $day $month $year
+
+# Local Variables:
+# mode: shell-script
+# sh-indentation: 2
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/qpid/extras/sasl/build-aux/missing b/qpid/extras/sasl/build-aux/missing
new file mode 100755
index 0000000000..1c8ff7049d
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/missing
@@ -0,0 +1,367 @@
+#! /bin/sh
+# Common stub for a few missing GNU programs while installing.
+
+scriptversion=2006-05-10.23
+
+# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006
+# Free Software Foundation, Inc.
+# Originally by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+# 02110-1301, USA.
+
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+if test $# -eq 0; then
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+fi
+
+run=:
+sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p'
+sed_minuso='s/.* -o \([^ ]*\).*/\1/p'
+
+# In the cases where this matters, `missing' is being run in the
+# srcdir already.
+if test -f configure.ac; then
+ configure_ac=configure.ac
+else
+ configure_ac=configure.in
+fi
+
+msg="missing on your system"
+
+case $1 in
+--run)
+ # Try to run requested program, and just exit if it succeeds.
+ run=
+ shift
+ "$@" && exit 0
+ # Exit code 63 means version mismatch. This often happens
+ # when the user tries to use an ancient version of a tool on
+ # a file that requires a minimum version. In this case
+ # we should proceed as if the program had been absent, or
+ # if --run hadn't been passed.
+ if test $? = 63; then
+ run=:
+ msg="probably too old"
+ fi
+ ;;
+
+ -h|--h|--he|--hel|--help)
+ echo "\
+$0 [OPTION]... PROGRAM [ARGUMENT]...
+
+Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
+error status if there is no known handling for PROGRAM.
+
+Options:
+ -h, --help display this help and exit
+ -v, --version output version information and exit
+ --run try to run the given command, and emulate it if it fails
+
+Supported PROGRAM values:
+ aclocal touch file \`aclocal.m4'
+ autoconf touch file \`configure'
+ autoheader touch file \`config.h.in'
+ autom4te touch the output file, or create a stub one
+ automake touch all \`Makefile.in' files
+ bison create \`y.tab.[ch]', if possible, from existing .[ch]
+ flex create \`lex.yy.c', if possible, from existing .c
+ help2man touch the output file
+ lex create \`lex.yy.c', if possible, from existing .c
+ makeinfo touch the output file
+ tar try tar, gnutar, gtar, then tar without non-portable flags
+ yacc create \`y.tab.[ch]', if possible, from existing .[ch]
+
+Send bug reports to <bug-automake@gnu.org>."
+ exit $?
+ ;;
+
+ -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
+ echo "missing $scriptversion (GNU Automake)"
+ exit $?
+ ;;
+
+ -*)
+ echo 1>&2 "$0: Unknown \`$1' option"
+ echo 1>&2 "Try \`$0 --help' for more information"
+ exit 1
+ ;;
+
+esac
+
+# Now exit if we have it, but it failed. Also exit now if we
+# don't have it and --version was passed (most likely to detect
+# the program).
+case $1 in
+ lex|yacc)
+ # Not GNU programs, they don't have --version.
+ ;;
+
+ tar)
+ if test -n "$run"; then
+ echo 1>&2 "ERROR: \`tar' requires --run"
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ exit 1
+ fi
+ ;;
+
+ *)
+ if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
+ # We have it, but it failed.
+ exit 1
+ elif test "x$2" = "x--version" || test "x$2" = "x--help"; then
+ # Could not run --version or --help. This is probably someone
+ # running `$TOOL --version' or `$TOOL --help' to check whether
+ # $TOOL exists and not knowing $TOOL uses missing.
+ exit 1
+ fi
+ ;;
+esac
+
+# If it does not exist, or fails to run (possibly an outdated version),
+# try to emulate it.
+case $1 in
+ aclocal*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acinclude.m4' or \`${configure_ac}'. You might want
+ to install the \`Automake' and \`Perl' packages. Grab them from
+ any GNU archive site."
+ touch aclocal.m4
+ ;;
+
+ autoconf)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`${configure_ac}'. You might want to install the
+ \`Autoconf' and \`GNU m4' packages. Grab them from any GNU
+ archive site."
+ touch configure
+ ;;
+
+ autoheader)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`acconfig.h' or \`${configure_ac}'. You might want
+ to install the \`Autoconf' and \`GNU m4' packages. Grab them
+ from any GNU archive site."
+ files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
+ test -z "$files" && files="config.h"
+ touch_files=
+ for f in $files; do
+ case $f in
+ *:*) touch_files="$touch_files "`echo "$f" |
+ sed -e 's/^[^:]*://' -e 's/:.*//'`;;
+ *) touch_files="$touch_files $f.in";;
+ esac
+ done
+ touch $touch_files
+ ;;
+
+ automake*)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
+ You might want to install the \`Automake' and \`Perl' packages.
+ Grab them from any GNU archive site."
+ find . -type f -name Makefile.am -print |
+ sed 's/\.am$/.in/' |
+ while read f; do touch "$f"; done
+ ;;
+
+ autom4te)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, but is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them.
+ You can get \`$1' as part of \`Autoconf' from any GNU
+ archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo "#! /bin/sh"
+ echo "# Created by GNU Automake missing as a replacement of"
+ echo "# $ $@"
+ echo "exit 0"
+ chmod +x $file
+ exit 1
+ fi
+ ;;
+
+ bison|yacc)
+ echo 1>&2 "\
+WARNING: \`$1' $msg. You should only need it if
+ you modified a \`.y' file. You may need the \`Bison' package
+ in order for those modifications to take effect. You can get
+ \`Bison' from any GNU archive site."
+ rm -f y.tab.c y.tab.h
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.y)
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.c
+ fi
+ SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" y.tab.h
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f y.tab.h; then
+ echo >y.tab.h
+ fi
+ if test ! -f y.tab.c; then
+ echo 'main() { return 0; }' >y.tab.c
+ fi
+ ;;
+
+ lex|flex)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.l' file. You may need the \`Flex' package
+ in order for those modifications to take effect. You can get
+ \`Flex' from any GNU archive site."
+ rm -f lex.yy.c
+ if test $# -ne 1; then
+ eval LASTARG="\${$#}"
+ case $LASTARG in
+ *.l)
+ SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
+ if test -f "$SRCFILE"; then
+ cp "$SRCFILE" lex.yy.c
+ fi
+ ;;
+ esac
+ fi
+ if test ! -f lex.yy.c; then
+ echo 'main() { return 0; }' >lex.yy.c
+ fi
+ ;;
+
+ help2man)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a dependency of a manual page. You may need the
+ \`Help2man' package in order for those modifications to take
+ effect. You can get \`Help2man' from any GNU archive site."
+
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -f "$file"; then
+ touch $file
+ else
+ test -z "$file" || exec >$file
+ echo ".ab help2man is required to generate this page"
+ exit 1
+ fi
+ ;;
+
+ makeinfo)
+ echo 1>&2 "\
+WARNING: \`$1' is $msg. You should only need it if
+ you modified a \`.texi' or \`.texinfo' file, or any other file
+ indirectly affecting the aspect of the manual. The spurious
+ call might also be the consequence of using a buggy \`make' (AIX,
+ DU, IRIX). You might want to install the \`Texinfo' package or
+ the \`GNU make' package. Grab either from any GNU archive site."
+ # The file to touch is that specified with -o ...
+ file=`echo "$*" | sed -n "$sed_output"`
+ test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"`
+ if test -z "$file"; then
+ # ... or it is the one specified with @setfilename ...
+ infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
+ file=`sed -n '
+ /^@setfilename/{
+ s/.* \([^ ]*\) *$/\1/
+ p
+ q
+ }' $infile`
+ # ... or it is derived from the source name (dir/f.texi becomes f.info)
+ test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info
+ fi
+ # If the file does not exist, the user really needs makeinfo;
+ # let's fail without touching anything.
+ test -f $file || exit 1
+ touch $file
+ ;;
+
+ tar)
+ shift
+
+ # We have already tried tar in the generic part.
+ # Look for gnutar/gtar before invocation to avoid ugly error
+ # messages.
+ if (gnutar --version > /dev/null 2>&1); then
+ gnutar "$@" && exit 0
+ fi
+ if (gtar --version > /dev/null 2>&1); then
+ gtar "$@" && exit 0
+ fi
+ firstarg="$1"
+ if shift; then
+ case $firstarg in
+ *o*)
+ firstarg=`echo "$firstarg" | sed s/o//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ case $firstarg in
+ *h*)
+ firstarg=`echo "$firstarg" | sed s/h//`
+ tar "$firstarg" "$@" && exit 0
+ ;;
+ esac
+ fi
+
+ echo 1>&2 "\
+WARNING: I can't seem to be able to run \`tar' with the given arguments.
+ You may want to install GNU tar or Free paxutils, or check the
+ command line arguments."
+ exit 1
+ ;;
+
+ *)
+ echo 1>&2 "\
+WARNING: \`$1' is needed, and is $msg.
+ You might have modified some files without having the
+ proper tools for further handling them. Check the \`README' file,
+ it often tells you about the needed prerequisites for installing
+ this package. You may also peek at any GNU archive site, in case
+ some other package would contain this missing \`$1' program."
+ exit 1
+ ;;
+esac
+
+exit 0
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "scriptversion="
+# time-stamp-format: "%:y-%02m-%02d.%02H"
+# time-stamp-end: "$"
+# End:
diff --git a/qpid/extras/sasl/build-aux/py-compile b/qpid/extras/sasl/build-aux/py-compile
new file mode 120000
index 0000000000..1fad0975ce
--- /dev/null
+++ b/qpid/extras/sasl/build-aux/py-compile
@@ -0,0 +1 @@
+/usr/share/automake-1.11/py-compile \ No newline at end of file
diff --git a/qpid/extras/sasl/configure.ac b/qpid/extras/sasl/configure.ac
new file mode 100644
index 0000000000..206c2f497d
--- /dev/null
+++ b/qpid/extras/sasl/configure.ac
@@ -0,0 +1,317 @@
+dnl Process this file with autoconf to produce a configure script.
+dnl
+dnl This file is free software; as a special exception the author gives
+dnl unlimited permission to copy and/or distribute it, with or without
+dnl modifications, as long as this notice is preserved.
+dnl
+dnl This program is distributed in the hope that it will be useful, but
+dnl WITHOUT ANY WARRANTY, to the extent permitted by law; without even the
+dnl implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+dnl
+dnl When updating the name/version number here, also update it in
+dnl src/qpid/Version.h
+
+AC_INIT([saslwrapper], [0.1], [dev@qpid.apache.org])
+AC_CONFIG_AUX_DIR([build-aux])
+AM_INIT_AUTOMAKE([dist-bzip2 subdir-objects])
+
+# Minimum Autoconf version required.
+AC_PREREQ(2.59)
+
+AC_CONFIG_HEADERS([config.h])
+
+AC_PROG_CC_STDC
+AM_PROG_CC_C_O
+AC_PROG_CXX
+AC_LANG([C++])
+
+# Check for optional use of help2man
+AC_CHECK_PROG([HELP2MAN], [help2man], [help2man])
+AC_ARG_WITH([help2man],
+ [AS_HELP_STRING([--with-help2man], [Use help2man to generate man pages.])],
+ [case "$withval" in
+ yes) test -z "$HELP2MAN" && AC_MSG_ERROR([help2man not found.]) ;;
+ no) HELP2MAN="" ;;
+ *) AC_MSG_ERROR([Bad value ${withval} for --with-help2man.]) ;;
+ esac])
+AM_CONDITIONAL([HAVE_HELP2MAN], [test -n "$HELP2MAN"])
+
+# Check for optional use of doxygen
+AC_CHECK_PROG([DOXYGEN], [doxygen], [doxygen])
+AC_ARG_WITH([doxygen],
+ [AS_HELP_STRING([--with-doxygen], [Use doxygen to generate API documentation.])],
+ [case "$withval" in
+ yes) test -z "$DOXYGEN" && AC_MSG_ERROR([doxygen not found.]) ;;
+ no) DOXYGEN="" ;;
+ *) AC_MSG_ERROR([Bad value ${withval} for --with-doxygen.]) ;;
+ esac])
+AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"])
+
+AC_ARG_ENABLE(warnings,
+[ --enable-warnings turn on lots of compiler warnings (recommended)],
+[case "${enableval}" in
+ yes|no) ;;
+ *) AC_MSG_ERROR([bad value ${enableval} for warnings option]) ;;
+ esac],
+ [enableval=yes])
+
+# Set up for gcc as compiler
+if test x$GXX = xyes; then
+ # Warnings: Enable as many as possible, keep the code clean. Please
+ # do not disable warnings or remove -Werror without discussing on
+ # qpid-dev list.
+ #
+ # The following warnings are deliberately omitted, they warn on valid code.
+ # -Wunreachable-code -Wpadded -Winline
+ # -Wshadow - warns about boost headers.
+ if test "${enableval}" = yes; then
+ gl_COMPILER_FLAGS(-Werror)
+ gl_COMPILER_FLAGS(-pedantic)
+ gl_COMPILER_FLAGS(-Wall)
+ gl_COMPILER_FLAGS(-Wextra)
+ gl_COMPILER_FLAGS(-Wno-shadow)
+ gl_COMPILER_FLAGS(-Wpointer-arith)
+ gl_COMPILER_FLAGS(-Wcast-qual)
+ gl_COMPILER_FLAGS(-Wcast-align)
+ gl_COMPILER_FLAGS(-Wno-long-long)
+ gl_COMPILER_FLAGS(-Wvolatile-register-var)
+ gl_COMPILER_FLAGS(-Winvalid-pch)
+ gl_COMPILER_FLAGS(-Wno-system-headers)
+ gl_COMPILER_FLAGS(-Woverloaded-virtual)
+ AC_SUBST([WARNING_CFLAGS], [$COMPILER_FLAGS])
+ AC_DEFINE([lint], 1, [Define to 1 if the compiler is checking for lint.])
+ COMPILER_FLAGS=
+ fi
+else
+ AC_CHECK_DECL([__SUNPRO_CC], [SUNCC=yes], [SUNCC=no])
+
+ # Set up for sun CC compiler
+ if test x$SUNCC = xyes; then
+ if test "${enableval}" = yes; then
+ WARNING_FLAGS=+w
+ fi
+ CXXFLAGS="$CXXFLAGS -library=stlport4 -mt"
+ LD="$CXX"
+ LDFLAGS="$LDFLAGS -library=stlport4 -mt"
+ AC_SUBST([SUNCC_RUNTIME_LIBS], [-lCrun])
+ fi
+fi
+
+AC_DISABLE_STATIC
+AC_PROG_LIBTOOL
+AC_SUBST([LIBTOOL_DEPS])
+
+# For libraries (libcommon) that use dlopen, dlerror, etc.,
+# test whether we need to link with -ldl.
+gl_saved_libs=$LIBS
+ AC_SEARCH_LIBS(dlopen, [dl],
+ [test "$ac_cv_search_dlopen" = "none required" ||
+ LIB_DLOPEN=$ac_cv_search_dlopen])
+ AC_SUBST([LIB_DLOPEN])
+LIBS=$gl_saved_libs
+
+# Set the argument to be used in "libtool -version-info ARG".
+QPID_CURRENT=2
+QPID_REVISION=0
+QPID_AGE=0
+LIBTOOL_VERSION_INFO_ARG=$QPID_CURRENT:$QPID_REVISION:$QPID_AGE
+AC_SUBST(LIBTOOL_VERSION_INFO_ARG)
+
+gl_CLOCK_TIME
+
+# Enable Valgrind
+AC_ARG_ENABLE([valgrind],
+ [AS_HELP_STRING([--enable-valgrind],
+ [run valgrind memory checker on tests, if available (default yes)])],
+ [case $enableval in
+ yes|no) enable_VALGRIND=$enableval;;
+ *) AC_MSG_ERROR([Invalid value for --enable-valgrind: $enableval]);;
+ esac],
+ [enable_VALGRIND=yes]
+)
+
+# We use valgrind for the tests. See if it's available.
+# Check for it unconditionally, so we don't have to duplicate its
+# use of AC_SUBST([VALGRIND]).
+AC_CHECK_PROG([VALGRIND], [valgrind], [valgrind])
+test "$enable_VALGRIND" = no && VALGRIND=
+
+# If rpmlint is available we'll run it when building RPMs.
+AC_CHECK_PROG([RPMLINT], [rpmlint], [rpmlint])
+AM_CONDITIONAL([HAS_RPMLINT], [test -n "$RPMLINT"])
+
+# Code generation: generated code is included in the distribution
+# so code generation is only required in an svn checkout.
+# It requires several external tools and files, which we check for here.
+
+AC_CHECK_PROG([RUBY], [ruby], [ruby])
+test -n "$RUBY" && generate=yes
+test -z "$RUBY" && AC_MSG_ERROR([Missing ruby installation (try "yum install ruby").])
+
+# Swig binding generator is needed for the script (Ruby, Python, etc.) bindings.
+AC_PROG_SWIG(1.3.26)
+test ! -x "$SWIG" && SWIG=""
+AC_ARG_WITH([swig],
+ [AS_HELP_STRING([--with-swig], [Use swig to generate qmf bindings.])],
+ [case "$withval" in
+ yes) test -z "$SWIG" && AC_MSG_ERROR([swig not found.]) ;;
+ no) SWIG="" ;;
+ *) AC_MSG_ERROR([Bad value ${withval} for --with-swig.]) ;;
+ esac])
+AM_CONDITIONAL([HAVE_SWIG], [test -n "$SWIG"])
+
+# Ruby bindings: To build ruby wrappers, the ruby-devel files must be present.
+
+AC_PATH_PROGS(RUBY, [ruby1.8 ruby], [])
+AC_ARG_VAR(RUBY, [Ruby interpreter])
+if test -n "$RUBY" ; then
+ AC_ARG_VAR(RUBY_INC, [Directory where ruby.h can be found])
+ if test -z "$RUBY_INC" ; then
+ [RUBY_INC=`$RUBY -rrbconfig -e 'puts Config::CONFIG["rubyhdrdir"] || Config::CONFIG["archdir"]'`]
+ fi
+ AC_SUBST(RUBY_INC)
+
+ AC_ARG_VAR(RUBY_INC_ARCH, [Directory where ruby/config.h can be found (needed from Ruby 1.9)])
+ if test -z "$RUBY_INC_ARCH" ; then
+ [RUBY_INC_ARCH=`$RUBY -rrbconfig -e 'd = Config::CONFIG["rubyhdrdir"];if d != nil; print d + "/" + Config::CONFIG["arch"]; end'`]
+ dnl For earlier versions, just make it the same as RUBY_INC.
+ test x"$RUBY_INC_ARCH" != x || RUBY_INC_ARCH=$RUBY_INC
+ fi
+ AC_SUBST(RUBY_INC_ARCH)
+ AC_ARG_VAR(RUBY_LIB, [Directory to install ruby files into])
+ if test -z "$RUBY_LIB" ; then
+ dnl Kludge to install ruby files under $prefix
+ [RUBY_LIB=`$RUBY -rrbconfig -e 'puts Config::CONFIG["sitelibdir"].gsub("/usr", "${prefix}")'`]
+ fi
+ AC_SUBST(RUBY_LIB)
+
+ AC_ARG_VAR(RUBY_LIB_ARCH, [Directory to install ruby binary modules into])
+ if test -z "$RUBY_LIB_ARCH" ; then
+ dnl Kludge to install ruby files under $prefix
+ [RUBY_LIB_ARCH=`$RUBY -rrbconfig -e 'puts Config::CONFIG["sitearchdir"].gsub("/usr", "${prefix}")'`]
+ fi
+ AC_SUBST(RUBY_LIB_ARCH)
+
+ RUBY_LIBS=
+ case $host_os in
+ cygwin*) RUBY_LIBS=-lruby ;;
+ esac
+ AC_SUBST(RUBY_LIBS)
+
+ RUBY_DLEXT=`$RUBY -rrbconfig -e 'puts Config::CONFIG[["DLEXT"]]'`
+ AC_SUBST(RUBY_DLEXT)
+fi
+AM_CONDITIONAL([HAVE_RUBY_DEVEL], [test -f $RUBY_INC/ruby.h && test -n "$SWIG"])
+
+# Python bindings: To build python wrappers, the python-devel files must be present.
+
+AM_PATH_PYTHON()
+if test -n "$PYTHON" ; then
+ AC_MSG_CHECKING([$PYTHON include dir])
+ if $PYTHON -c 'import distutils.sysconfig' 2>/dev/null ; then
+ PYTHON_INC=`$PYTHON -c 'import os,distutils.sysconfig;print(distutils.sysconfig.get_python_inc().replace(os.sep,"/"))'`
+ AC_SUBST(PYTHON_INC)
+ else
+ if test yes = "$with_python" ; then
+ AC_MSG_ERROR([Couldn't import Python module distutils.sysconfig - you probably need to install a python-dev or python-devel package])
+ else
+ AC_MSG_WARN([Couldn't import Python module distutils.sysconfig - you probably don't have a python-dev or python-devel package installed])
+ fi
+ fi
+ AC_MSG_RESULT([$PYTHON_INC])
+ AC_MSG_CHECKING([for directory to install python bindings in])
+ if test -z "$PYTHON_LIB" ; then
+ PYTHON_LIB=`$PYTHON -c 'import os,distutils.sysconfig;print(distutils.sysconfig.get_python_lib(1).replace(os.sep,"/"))'`
+ fi
+ AC_MSG_RESULT([$PYTHON_LIB])
+ AC_ARG_VAR(PYTHON_LIB, [Directory to install python bindings in])
+
+ AC_MSG_CHECKING([for python libraries to link against])
+ PYTHON_LIBS=`$PYTHON -c 'import os,sys;print("-L"+os.path.join(sys.path[[3]],"config")+" -lpython"+sys.version[[:3]])'`
+ AC_SUBST(PYTHON_LIBS)
+ AC_MSG_RESULT([$PYTHON_LIBS])
+fi
+AM_CONDITIONAL([HAVE_PYTHON_DEVEL], [test -f $PYTHON_INC/Python.h && test -n "$SWIG"])
+
+
+LIBS=$tmp_LIBS
+
+# Setup --with-sasl/--without-sasl as arguments to configure
+AC_ARG_WITH([sasl],
+ [AS_HELP_STRING([--with-sasl], [Build with SASL authentication support])],
+ [WANT_SASL="$withval"],
+ [WANT_SASL=check])
+
+# Make sure --with-sasl/--without-sasl were only give yes|no|check
+AS_IF([test "x$WANT_SASL" != xyes -a \
+ "x$WANT_SASL" != xno -a \
+ "x$WANT_SASL" != xcheck],
+ [AC_MSG_ERROR([Bad value for --with-sasl: $withval])])
+
+# If we weren't explicitly asked /not/ to test, i.e. not given --without-sasl
+have_sasl=no
+AS_IF([test "x$WANT_SASL" != xno],
+ # Perform tests for headers and libraries. Remember, AC_CHECK_LIB
+ # will give you some useful default behavior, e.g. setup LDFLAGS, if
+ # you do not give it a second argument, so try not to
+ [AC_CHECK_HEADER([sasl/sasl.h], , [HAVE_SASL_H=no])
+ tmp_LIBS=$LIBS
+ AC_CHECK_LIB([sasl2], [sasl_checkpass], , [HAVE_SASL_LIB=no])
+ # Remove from LIBS, we will link it explicitly in make files.
+ LIBS=$tmp_LIBS
+ # If any of the tests failed
+ AS_IF([test "x$HAVE_SASL_H" = xno -o \
+ "x$HAVE_SASL_LIB" = xno],
+ # And we were given --with, then fail
+ [AS_IF([test "x$WANT_SASL" = xyes],
+ [AC_MSG_ERROR([sasl requested but not available])])],
+ # Otherwise, no tests failed, setup AC_SUBST/AC_DEFINE/vars for AM_CONDITIONALs
+ [AC_DEFINE([BROKER_SASL_NAME], ["qpidd"],
+ [The SASL app name for the qpid Broker])
+ AC_DEFINE([HAVE_SASL], [1], [Enable if libsasl is present])
+ have_sasl=yes])])
+AM_CONDITIONAL([HAVE_SASL], [test "x$have_sasl" = xyes])
+
+
+#Guess host architecture, to choose platform-dependent objects
+case "$host" in
+ *sun-solaris*)
+ arch=solaris
+ ;;
+esac
+AM_CONDITIONAL([SUNOS], [test x$arch = xsolaris])
+
+# Check for some syslog capabilities not present in all systems
+AC_TRY_COMPILE([#include <sys/syslog.h>],
+ [int v = LOG_AUTHPRIV;],
+ [AC_DEFINE([HAVE_LOG_AUTHPRIV], [1], [Set to 1 whether LOG_AUTHPRIV is supported.])],)
+
+AC_TRY_COMPILE([#include <sys/syslog.h>],
+ [int v = LOG_FTP;],
+ [AC_DEFINE([HAVE_LOG_FTP], [1], [Set to 1 whether LOG_FTP is supported.])],)
+
+#Check if we need to include libacl to provide acl API
+gl_saved_libs=$LIBS
+ AC_SEARCH_LIBS(acl, [acl],
+ [test "$ac_cv_search_acl" = "none required" ||
+ LIB_ACL=$ac_cv_search_acl])
+ AC_SUBST([LIB_ACL])
+LIBS=$gl_saved_libs
+
+SOCKLIBS=""
+AC_CHECK_LIB([socket],[socket],[SOCKET_LIB="-lsocket"],[SOCKET_LIB=""],[])
+AC_CHECK_LIB([nsl],[getipnodebyname],[NSL_LIB="-lnsl"],[NSL_LIB=""],[])
+SOCKLIBS="$SOCKET_LIB $NSL_LIB"
+AC_SUBST([SOCKLIBS])
+
+AM_PATH_PYTHON()
+
+# Files to generate
+AC_CONFIG_FILES([
+ Makefile
+ src/Makefile
+ python/Makefile
+ ruby/Makefile
+ ])
+AC_OUTPUT
+
diff --git a/qpid/extras/sasl/include/saslwrapper.h b/qpid/extras/sasl/include/saslwrapper.h
new file mode 100644
index 0000000000..bb2a9af7ff
--- /dev/null
+++ b/qpid/extras/sasl/include/saslwrapper.h
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+*/
+
+#include <stdint.h>
+#include <string>
+
+namespace saslwrapper {
+
+ /**
+ * The following type is used for output arguments (that are strings). The fact that it has
+ * a unique name is used in a SWIG typemap to indicate output arguments. For scripting languages
+     * such as Python and Ruby (which do not support output arguments), the outputs are placed in an
+     * array that is returned by the function.  For example, a function that looks like:
+ *
+ * bool function(const string& input, output_string& out1, output_string& out2);
+ *
+ * would be called (in Python) like this:
+ *
+ * boolResult, out1, out2 = function(input)
+ */
+ typedef std::string output_string;
+ class ClientImpl;
+
+ class Client {
+ public:
+
+ Client();
+ ~Client();
+
+ /**
+ * Set attributes to be used in authenticating the session. All attributes should be set
+ * before init() is called.
+ *
+ * @param key Name of attribute being set
+ * @param value Value of attribute being set
+ * @return true iff success. If false is returned, call getError() for error details.
+ *
+ * Available attribute keys:
+ *
+ * service - Name of the service being accessed
+ * username - User identity for authentication
+ * authname - User identity for authorization (if different from username)
+ * password - Password associated with username
+ * host - Fully qualified domain name of the server host
+ * maxbufsize - Maximum receive buffer size for the security layer
+ * minssf - Minimum acceptable security strength factor (integer)
+ * maxssf - Maximum acceptable security strength factor (integer)
+ * externalssf - Security strength factor supplied by external mechanism (i.e. SSL/TLS)
+ * externaluser - Authentication ID (of client) as established by external mechanism
+ */
+ bool setAttr(const std::string& key, const std::string& value);
+ bool setAttr(const std::string& key, uint32_t value);
+
+ /**
+ * Initialize the client object. This should be called after all of the properties have been set.
+ *
+ * @return true iff success. If false is returned, call getError() for error details.
+ */
+ bool init();
+
+ /**
+ * Start the SASL exchange with the server.
+ *
+ * @param mechList List of mechanisms provided by the server
+ * @param chosen The mechanism chosen by the client
+ * @param initialResponse Initial block of data to send to the server
+ *
+ * @return true iff success. If false is returned, call getError() for error details.
+ */
+ bool start(const std::string& mechList, output_string& chosen, output_string& initialResponse);
+
+ /**
+ * Step the SASL handshake.
+ *
+ * @param challenge The challenge supplied by the server
+ * @param response (output) The response to be sent back to the server
+ *
+ * @return true iff success. If false is returned, call getError() for error details.
+ */
+ bool step(const std::string& challenge, output_string& response);
+
+ /**
+ * Encode data for secure transmission to the server.
+ *
+ * @param clearText Clear text data to be encrypted
+ * @param cipherText (output) Encrypted data to be transmitted
+ *
+ * @return true iff success. If false is returned, call getError() for error details.
+ */
+ bool encode(const std::string& clearText, output_string& cipherText);
+
+ /**
+ * Decode data received from the server.
+ *
+ * @param cipherText Encrypted data received from the server
+ * @param clearText (output) Decrypted clear text data
+ *
+ * @return true iff success. If false is returned, call getError() for error details.
+ */
+ bool decode(const std::string& cipherText, output_string& clearText);
+
+ /**
+ * Get the user identity (used for authentication) associated with this session.
+ * Note that this is particularly useful for single-sign-on mechanisms in which the
+ * username is not supplied by the application.
+ *
+ * @param userId (output) Authenticated user ID for this session.
+ */
+ bool getUserId(output_string& userId);
+
+ /**
+ * Get error message for last error.
+ * This function will return the last error message then clear the error state.
+ * If there was no error or the error state has been cleared, this function will output
+ * an empty string.
+ *
+ * @param error Error message string
+ */
+ void getError(output_string& error);
+
+ private:
+ ClientImpl* impl;
+
+ // Declare private copy constructor and assignment operator. Ensure that this
+ // class is non-copyable.
+ Client(const Client&);
+ const Client& operator=(const Client&);
+ };
+
+}
diff --git a/qpid/extras/sasl/m4/ac_pkg_swig.m4 b/qpid/extras/sasl/m4/ac_pkg_swig.m4
new file mode 100644
index 0000000000..6e385c067c
--- /dev/null
+++ b/qpid/extras/sasl/m4/ac_pkg_swig.m4
@@ -0,0 +1,120 @@
+# ===========================================================================
+# http://www.nongnu.org/autoconf-archive/ac_pkg_swig.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AC_PROG_SWIG([major.minor.micro])
+#
+# DESCRIPTION
+#
+# This macro searches for a SWIG installation on your system. If found you
+# should call SWIG via $(SWIG). You can use the optional first argument to
+# check if the version of the available SWIG is greater than or equal to
+# the value of the argument. It should have the format: N[.N[.N]] (N is a
+# number between 0 and 999. Only the first N is mandatory.)
+#
+# If the version argument is given (e.g. 1.3.17), AC_PROG_SWIG checks that
+# the swig package is this version number or higher.
+#
+# In configure.in, use as:
+#
+# AC_PROG_SWIG(1.3.17)
+# SWIG_ENABLE_CXX
+# SWIG_MULTI_MODULE_SUPPORT
+# SWIG_PYTHON
+#
+# LICENSE
+#
+# Copyright (c) 2008 Sebastian Huber <sebastian-huber@web.de>
+# Copyright (c) 2008 Alan W. Irwin <irwin@beluga.phys.uvic.ca>
+# Copyright (c) 2008 Rafael Laboissiere <rafael@laboissiere.net>
+# Copyright (c) 2008 Andrew Collier <colliera@ukzn.ac.za>
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+# Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception, the respective Autoconf Macro's copyright owner
+# gives unlimited permission to copy, distribute and modify the configure
+# scripts that are the output of Autoconf when processing the Macro. You
+# need not follow the terms of the GNU General Public License when using
+# or distributing such scripts, even though portions of the text of the
+# Macro appear in them. The GNU General Public License (GPL) does govern
+# all other use of the material that constitutes the Autoconf Macro.
+#
+# This special exception to the GPL applies to versions of the Autoconf
+# Macro released by the Autoconf Archive. When you make and distribute a
+# modified version of the Autoconf Macro, you may extend this special
+# exception to the GPL to apply to your modified version as well.
+#
+# Fixed by Sandro Santilli to consider 2.0.0 > 1.3.37 (2010-06-15)
+
+AC_DEFUN([AC_PROG_SWIG],[
+ AC_PATH_PROG([SWIG],[swig])
+ if test -z "$SWIG" ; then
+ AC_MSG_WARN([cannot find 'swig' program. You should look at http://www.swig.org])
+ SWIG='echo "Error: SWIG is not installed. You should look at http://www.swig.org" ; false'
+ elif test -n "$1" ; then
+ AC_MSG_CHECKING([for SWIG version])
+ [swig_version=`$SWIG -version 2>&1 | grep 'SWIG Version' | sed 's/.*\([0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\).*/\1/g'`]
+ AC_MSG_RESULT([$swig_version])
+ if test -n "$swig_version" ; then
+ # Calculate the required version number components
+ [required=$1]
+ [required_major=`echo $required | sed 's/[^0-9].*//'`]
+ if test -z "$required_major" ; then
+ [required_major=0]
+ fi
+ [required=`echo $required | sed 's/[0-9]*[^0-9]//'`]
+ [required_minor=`echo $required | sed 's/[^0-9].*//'`]
+ if test -z "$required_minor" ; then
+ [required_minor=0]
+ fi
+ [required=`echo $required | sed 's/[0-9]*[^0-9]//'`]
+ [required_patch=`echo $required | sed 's/[^0-9].*//'`]
+ if test -z "$required_patch" ; then
+ [required_patch=0]
+ fi
+ # Calculate the available version number components
+ [available=$swig_version]
+ [available_major=`echo $available | sed 's/[^0-9].*//'`]
+ if test -z "$available_major" ; then
+ [available_major=0]
+ fi
+ [available=`echo $available | sed 's/[0-9]*[^0-9]//'`]
+ [available_minor=`echo $available | sed 's/[^0-9].*//'`]
+ if test -z "$available_minor" ; then
+ [available_minor=0]
+ fi
+ [available=`echo $available | sed 's/[0-9]*[^0-9]//'`]
+ [available_patch=`echo $available | sed 's/[^0-9].*//'`]
+ if test -z "$available_patch" ; then
+ [available_patch=0]
+ fi
+ [required_full=`printf %2.2d%2.2d%2.2d%2.2d $required_major $required_minor $required_patch]`
+ [available_full=`printf %2.2d%2.2d%2.2d%2.2d $available_major $available_minor $available_patch]`
+ if test $available_full -lt $required_full; then
+ AC_MSG_WARN([SWIG version >= $1 is required. You have $swig_version. You should look at http://www.swig.org])
+ SWIG='echo "Error: SWIG version >= $1 is required. You have '"$swig_version"'. You should look at http://www.swig.org" ; false'
+ else
+ AC_MSG_NOTICE([SWIG executable is '$SWIG'])
+ SWIG_LIB=`$SWIG -swiglib`
+ AC_MSG_NOTICE([SWIG library directory is '$SWIG_LIB'])
+ fi
+ else
+ AC_MSG_WARN([cannot determine SWIG version])
+ SWIG='echo "Error: Cannot determine SWIG version. You should look at http://www.swig.org" ; false'
+ fi
+ fi
+ AC_SUBST([SWIG_LIB])
+])
diff --git a/qpid/extras/sasl/m4/compiler-flags.m4 b/qpid/extras/sasl/m4/compiler-flags.m4
new file mode 100644
index 0000000000..01cb728f02
--- /dev/null
+++ b/qpid/extras/sasl/m4/compiler-flags.m4
@@ -0,0 +1,23 @@
+# serial 3
+# Find valid warning flags for the C Compiler. -*-Autoconf-*-
+dnl Copyright (C) 2001, 2002, 2006 Free Software Foundation, Inc.
+dnl This file is free software; the Free Software Foundation
+dnl gives unlimited permission to copy and/or distribute it,
+dnl with or without modifications, as long as this notice is preserved.
+dnl Written by Jesse Thilo.
+
+AC_DEFUN([gl_COMPILER_FLAGS],
+ [AC_MSG_CHECKING(whether compiler accepts $1)
+ AC_SUBST(COMPILER_FLAGS)
+ ac_save_CFLAGS="$CFLAGS"
+ CFLAGS="$CFLAGS $1"
+ ac_save_CXXFLAGS="$CXXFLAGS"
+ CXXFLAGS="$CXXFLAGS $1"
+ AC_TRY_COMPILE(,
+ [int x;],
+ COMPILER_FLAGS="$COMPILER_FLAGS $1"
+ AC_MSG_RESULT(yes),
+ AC_MSG_RESULT(no))
+ CFLAGS="$ac_save_CFLAGS"
+ CXXFLAGS="$ac_save_CXXFLAGS"
+ ])
diff --git a/qpid/extras/sasl/python/Makefile.am b/qpid/extras/sasl/python/Makefile.am
new file mode 100644
index 0000000000..7c61f37cee
--- /dev/null
+++ b/qpid/extras/sasl/python/Makefile.am
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if HAVE_SWIG
+if HAVE_PYTHON_DEVEL
+
+EXTRA_DIST = python.i
+INCLUDES = -I$(top_srcdir)/include
+
+generated_file_list = saslwrapper.cpp saslwrapper.py
+BUILT_SOURCES = $(generated_file_list)
+
+$(generated_file_list): python.i $(top_srcdir)/src/saslwrapper.i
+ $(SWIG) -c++ -python -Wall -I/usr/include $(INCLUDES) -o saslwrapper.cpp $(srcdir)/python.i
+
+pyexec_PYTHON = saslwrapper.py
+pyexec_LTLIBRARIES = _saslwrapper.la
+
+_saslwrapper_la_LDFLAGS = -avoid-version -module -shared
+_saslwrapper_la_LIBADD = $(PYTHON_LIBS) $(top_builddir)/src/libsaslwrapper.la -lsasl2
+_saslwrapper_la_CXXFLAGS = -I$(PYTHON_INC) -fno-strict-aliasing
+nodist__saslwrapper_la_SOURCES = saslwrapper.cpp
+
+CLEANFILES = $(generated_file_list)
+
+endif
+endif
diff --git a/qpid/extras/sasl/python/python.i b/qpid/extras/sasl/python/python.i
new file mode 100644
index 0000000000..9f4e7ee8af
--- /dev/null
+++ b/qpid/extras/sasl/python/python.i
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+%module saslwrapper
+
+
+/* unsigned32 Convert from Python --> C */
+%typemap(in) uint32_t {
+ if (PyInt_Check($input)) {
+ $1 = (uint32_t) PyInt_AsUnsignedLongMask($input);
+ } else if (PyLong_Check($input)) {
+ $1 = (uint32_t) PyLong_AsUnsignedLong($input);
+ } else {
+ SWIG_exception_fail(SWIG_ValueError, "unknown integer type");
+ }
+}
+
+/* unsigned32 Convert from C --> Python */
+%typemap(out) uint32_t {
+ $result = PyInt_FromLong((long)$1);
+}
+
+
+/* unsigned16 Convert from Python --> C */
+%typemap(in) uint16_t {
+ if (PyInt_Check($input)) {
+ $1 = (uint16_t) PyInt_AsUnsignedLongMask($input);
+ } else if (PyLong_Check($input)) {
+ $1 = (uint16_t) PyLong_AsUnsignedLong($input);
+ } else {
+ SWIG_exception_fail(SWIG_ValueError, "unknown integer type");
+ }
+}
+
+/* unsigned16 Convert from C --> Python */
+%typemap(out) uint16_t {
+ $result = PyInt_FromLong((long)$1);
+}
+
+
+/* signed32 Convert from Python --> C */
+%typemap(in) int32_t {
+ if (PyInt_Check($input)) {
+ $1 = (int32_t) PyInt_AsLong($input);
+ } else if (PyLong_Check($input)) {
+ $1 = (int32_t) PyLong_AsLong($input);
+ } else {
+ SWIG_exception_fail(SWIG_ValueError, "unknown integer type");
+ }
+}
+
+/* signed32 Convert from C --> Python */
+%typemap(out) int32_t {
+ $result = PyInt_FromLong((long)$1);
+}
+
+
+/* unsigned64 Convert from Python --> C */
+%typemap(in) uint64_t {
+%#ifdef HAVE_LONG_LONG
+ if (PyLong_Check($input)) {
+ $1 = (uint64_t)PyLong_AsUnsignedLongLong($input);
+ } else if (PyInt_Check($input)) {
+ $1 = (uint64_t)PyInt_AsUnsignedLongLongMask($input);
+ } else
+%#endif
+ {
+ SWIG_exception_fail(SWIG_ValueError, "unsupported integer size - uint64_t input too large");
+ }
+}
+
+/* unsigned64 Convert from C --> Python */
+%typemap(out) uint64_t {
+%#ifdef HAVE_LONG_LONG
+ $result = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)$1);
+%#else
+ SWIG_exception_fail(SWIG_ValueError, "unsupported integer size - uint64_t output too large");
+%#endif
+}
+
+/* signed64 Convert from Python --> C */
+%typemap(in) int64_t {
+%#ifdef HAVE_LONG_LONG
+ if (PyLong_Check($input)) {
+ $1 = (int64_t)PyLong_AsLongLong($input);
+ } else if (PyInt_Check($input)) {
+ $1 = (int64_t)PyInt_AsLong($input);
+ } else
+%#endif
+ {
+ SWIG_exception_fail(SWIG_ValueError, "unsupported integer size - int64_t input too large");
+ }
+}
+
+/* signed64 Convert from C --> Python */
+%typemap(out) int64_t {
+%#ifdef HAVE_LONG_LONG
+ $result = PyLong_FromLongLong((PY_LONG_LONG)$1);
+%#else
+ SWIG_exception_fail(SWIG_ValueError, "unsupported integer size - int64_t output too large");
+%#endif
+}
+
+
+/* Convert from Python --> C */
+%typemap(in) void * {
+ $1 = (void *)$input;
+}
+
+/* Convert from C --> Python */
+%typemap(out) void * {
+ $result = (PyObject *) $1;
+ Py_INCREF($result);
+}
+
+%typemap (typecheck, precedence=SWIG_TYPECHECK_UINT64) uint64_t {
+ $1 = PyLong_Check($input) ? 1 : 0;
+}
+
+%typemap (typecheck, precedence=SWIG_TYPECHECK_UINT32) uint32_t {
+ $1 = PyInt_Check($input) ? 1 : 0;
+}
+
+/* Handle output arguments of type "output_string" */
+%typemap(in, numinputs=0) saslwrapper::output_string& (std::string temp) {
+ $1 = &temp;
+}
+
+%typemap(argout) saslwrapper::output_string& {
+ // Append output value $1 to $result
+ PyObject *o, *o2, *o3;
+ o = PyString_FromStringAndSize($1->c_str(), $1->length());
+ if ((!$result) || ($result == Py_None)) {
+ $result = o;
+ } else {
+ if (!PyTuple_Check($result)) {
+ PyObject *o2 = $result;
+ $result = PyTuple_New(1);
+ PyTuple_SetItem($result,0,o2);
+ }
+ o3 = PyTuple_New(1);
+ PyTuple_SetItem(o3,0,o);
+ o2 = $result;
+ $result = PySequence_Concat(o2,o3);
+ Py_DECREF(o2);
+ Py_DECREF(o3);
+ }
+}
+
+
+
+%include "../src/saslwrapper.i"
+
diff --git a/qpid/extras/sasl/ruby/Makefile.am b/qpid/extras/sasl/ruby/Makefile.am
new file mode 100644
index 0000000000..85fde1085d
--- /dev/null
+++ b/qpid/extras/sasl/ruby/Makefile.am
@@ -0,0 +1,44 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if HAVE_SWIG
+if HAVE_RUBY_DEVEL
+
+EXTRA_DIST = ruby.i
+INCLUDES = -I$(top_srcdir)/include
+
+generated_file_list = saslwrapper.cpp
+BUILT_SOURCES = $(generated_file_list)
+
+$(generated_file_list): ruby.i $(top_srcdir)/src/saslwrapper.i
+ $(SWIG) -c++ -ruby -Wall -I/usr/include $(INCLUDES) -o saslwrapper.cpp $(srcdir)/ruby.i
+
+rubylibdir = $(RUBY_LIB)
+rubylibarchdir = $(RUBY_LIB_ARCH)
+rubylibarch_LTLIBRARIES = saslwrapper.la
+
+saslwrapper_la_LDFLAGS = -avoid-version -module -shared ".$(RUBY_DLEXT)"
+saslwrapper_la_LIBADD = $(RUBY_LIBS) $(top_builddir)/src/libsaslwrapper.la -lsasl2
+saslwrapper_la_CXXFLAGS = $(INCLUDES) -I$(RUBY_INC) -I$(RUBY_INC_ARCH) -fno-strict-aliasing
+nodist_saslwrapper_la_SOURCES = saslwrapper.cpp
+
+CLEANFILES = $(generated_file_list)
+
+endif
+endif
diff --git a/qpid/extras/sasl/ruby/ruby.i b/qpid/extras/sasl/ruby/ruby.i
new file mode 100644
index 0000000000..7c20f7f071
--- /dev/null
+++ b/qpid/extras/sasl/ruby/ruby.i
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+%module saslwrapper
+%include "typemaps.i"
+%include "stl.i"
+
+%{
+#include "saslwrapper.h"
+%}
+
+%typemap (in) void *
+{
+ $1 = (void *) $input;
+}
+
+%typemap (out) void *
+{
+ $result = (VALUE) $1;
+}
+
+%typemap (in) uint16_t
+{
+ $1 = NUM2UINT($input);
+}
+
+%typemap (out) uint16_t
+{
+ $result = UINT2NUM((uint16_t) $1);
+}
+
+%typemap (in) uint32_t
+{
+ if (TYPE($input) == T_BIGNUM)
+ $1 = NUM2UINT($input);
+ else
+ $1 = FIX2UINT($input);
+}
+
+%typemap (out) uint32_t
+{
+ $result = UINT2NUM((uint32_t) $1);
+}
+
+%typemap (in) int32_t
+{
+ if (TYPE($input) == T_BIGNUM)
+ $1 = NUM2INT($input);
+ else
+ $1 = FIX2INT($input);
+}
+
+%typemap (out) int32_t
+{
+ $result = INT2NUM((int32_t) $1);
+}
+
+%typemap (typecheck, precedence=SWIG_TYPECHECK_INTEGER) uint32_t {
+ $1 = FIXNUM_P($input);
+}
+
+%typemap (in) uint64_t
+{
+ if (TYPE($input) == T_BIGNUM)
+ $1 = NUM2ULL($input);
+ else
+ $1 = (uint64_t) FIX2LONG($input);
+}
+
+%typemap (out) uint64_t
+{
+ $result = ULL2NUM((uint64_t) $1);
+}
+
+%typemap (in) int64_t
+{
+ if (TYPE($input) == T_BIGNUM)
+ $1 = NUM2LL($input);
+ else
+ $1 = (int64_t) FIX2LONG($input);
+}
+
+%typemap (out) int64_t
+{
+ $result = LL2NUM((int64_t) $1);
+}
+
+%typemap (typecheck, precedence=SWIG_TYPECHECK_INTEGER) uint64_t {
+ $1 = FIXNUM_P($input);
+}
+
+namespace saslwrapper {
+ class Client {
+ public:
+
+ Client();
+ ~Client();
+ bool setAttr(const std::string& INPUT, const std::string& INPUT);
+ bool setAttr(const std::string& INPUT, uint32_t INPUT);
+ bool init();
+ bool start(const std::string& INPUT, std::string& OUTPUT, std::string& OUTPUT);
+ bool step(const std::string& INPUT, std::string& OUTPUT);
+ bool encode(const std::string& INPUT, std::string& OUTPUT);
+ bool decode(const std::string& INPUT, std::string& OUTPUT);
+ bool getUserId(std::string& OUTPUT);
+ void getError(std::string& OUTPUT);
+ };
+}
diff --git a/qpid/extras/sasl/src/Makefile.am b/qpid/extras/sasl/src/Makefile.am
new file mode 100644
index 0000000000..c2aa8dd188
--- /dev/null
+++ b/qpid/extras/sasl/src/Makefile.am
@@ -0,0 +1,40 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+EXTRA_DIST = saslwrapper.i
+INCLUDES = -I$(top_srcdir)/include
+
+nobase_include_HEADERS = ../include/saslwrapper.h
+
+lib_LTLIBRARIES = libsaslwrapper.la
+libsaslwrapper_la_SOURCES = cyrus/saslwrapper.cpp
+libsaslwrapper_la_CXXFLAGS = -fno-strict-aliasing
+
+# Library Version Information:
+#
+# CURRENT => API/ABI version. Bump this if the interface changes
+# REVISION => Version of underlying implementation.
+# Bump if implementation changes but API/ABI doesn't
+# AGE => Number of API/ABI versions this is backward compatible with
+#
+CURRENT = 1
+REVISION = 0
+AGE = 0
+
+libsaslwrapper_la_LDFLAGS = -version-info $(CURRENT):$(REVISION):$(AGE)
diff --git a/qpid/extras/sasl/src/cyrus/saslwrapper.cpp b/qpid/extras/sasl/src/cyrus/saslwrapper.cpp
new file mode 100644
index 0000000000..f8b08acfa6
--- /dev/null
+++ b/qpid/extras/sasl/src/cyrus/saslwrapper.cpp
@@ -0,0 +1,383 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+*/
+
+#include "saslwrapper.h"
+#include <sasl/sasl.h>
+#include <sstream>
+#include <malloc.h>
+#include <string.h>
+#include <unistd.h>
+#include <iostream>
+
+using namespace std;
+using namespace saslwrapper;
+
namespace saslwrapper {

    // Private implementation behind the public Client class (pimpl idiom).
    // Owns one Cyrus SASL client connection plus the configuration
    // attributes and the callback table used to drive the negotiation.
    class ClientImpl {
        friend class Client;
        ClientImpl() : conn(0), cbIndex(0), maxBufSize(65535), minSsf(0), maxSsf(65535), externalSsf(0), secret(0) {}
        ~ClientImpl() { if (conn) sasl_dispose(&conn); conn = 0; }
        // Configuration: string- and integer-valued attributes (accepted key
        // names are listed in the corresponding definitions below).
        bool setAttr(const string& key, const string& value);
        bool setAttr(const string& key, uint32_t value);
        // Library/connection initialization; must succeed before start().
        bool init();
        // Negotiation: pick a mechanism, then exchange challenge/response.
        bool start(const string& mechList, output_string& chosen, output_string& initialResponse);
        bool step(const string& challenge, output_string& response);
        // Security layer: protect/unprotect application data after a
        // successful negotiation.
        bool encode(const string& clearText, output_string& cipherText);
        bool decode(const string& cipherText, output_string& clearText);
        bool getUserId(output_string& userId);
        void getError(output_string& error);

        // Append one entry to callbacks[]; lastCallback() terminates the list.
        void addCallback(unsigned long id, void* proc);
        void lastCallback() { addCallback(SASL_CB_LIST_END, 0); }
        void setError(const string& context, int code, const string& text = "", const string& text2 = "");
        // Answer an interactive SASL prompt (password or free-form input).
        void interact(sasl_interact_t* prompt);

        // Static callbacks registered with Cyrus SASL; context is ClientImpl*.
        static int cbName(void *context, int id, const char **result, unsigned *len);
        static int cbPassword(sasl_conn_t *conn, void *context, int id, sasl_secret_t **psecret);

        static bool initialized;        // process-wide sasl_client_init() latch
        sasl_conn_t* conn;              // active SASL connection (0 when closed)
        sasl_callback_t callbacks[8];   // fixed-capacity callback table
        int cbIndex;                    // next free slot in callbacks[]
        string error;                   // last error text (cleared by getError)
        string serviceName;
        string userName;
        string authName;
        string password;
        string hostName;
        string externalUserName;
        uint32_t maxBufSize;            // SASL security-property buffer size
        uint32_t minSsf;                // minimum acceptable security strength factor
        uint32_t maxSsf;                // maximum acceptable security strength factor
        uint32_t externalSsf;           // SSF provided by an external layer (e.g. TLS)
        sasl_secret_t* secret;          // malloc'd secret handed out by cbPassword
    };
}
+
// Process-wide latch: sasl_client_init() must run at most once per process.
bool ClientImpl::initialized = false;
+
+bool ClientImpl::init()
+{
+ int result;
+
+ if (!initialized) {
+ initialized = true;
+ result = sasl_client_init(0);
+ if (result != SASL_OK) {
+ setError("sasl_client_init", result, sasl_errstring(result, 0, 0));
+ return false;
+ }
+ }
+
+ int cbIndex = 0;
+
+ addCallback(SASL_CB_GETREALM, 0);
+ if (!userName.empty()) {
+ addCallback(SASL_CB_USER, (void*) cbName);
+ addCallback(SASL_CB_AUTHNAME, (void*) cbName);
+
+ if (!password.empty())
+ addCallback(SASL_CB_PASS, (void*) cbPassword);
+ else
+ addCallback(SASL_CB_PASS, 0);
+ }
+ lastCallback();
+
+ unsigned flags;
+
+ flags = 0;
+ if (!authName.empty() && authName != userName)
+ flags |= SASL_NEED_PROXY;
+
+ result = sasl_client_new(serviceName.c_str(), hostName.c_str(), 0, 0, callbacks, flags, &conn);
+ if (result != SASL_OK) {
+ setError("sasl_client_new", result, sasl_errstring(result, 0, 0));
+ return false;
+ }
+
+ sasl_security_properties_t secprops;
+
+ secprops.min_ssf = minSsf;
+ secprops.max_ssf = maxSsf;
+ secprops.maxbufsize = maxBufSize;
+ secprops.property_names = 0;
+ secprops.property_values = 0;
+ secprops.security_flags = 0;
+
+ result = sasl_setprop(conn, SASL_SEC_PROPS, &secprops);
+ if (result != SASL_OK) {
+ setError("sasl_setprop(SASL_SEC_PROPS)", result);
+ sasl_dispose(&conn);
+ conn = 0;
+ return false;
+ }
+
+ if (!externalUserName.empty()) {
+ result = sasl_setprop(conn, SASL_AUTH_EXTERNAL, externalUserName.c_str());
+ if (result != SASL_OK) {
+ setError("sasl_setprop(SASL_AUTH_EXTERNAL)", result);
+ sasl_dispose(&conn);
+ conn = 0;
+ return false;
+ }
+
+ result = sasl_setprop(conn, SASL_SSF_EXTERNAL, &externalSsf);
+ if (result != SASL_OK) {
+ setError("sasl_setprop(SASL_SSF_EXTERNAL)", result);
+ sasl_dispose(&conn);
+ conn = 0;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool ClientImpl::setAttr(const string& key, const string& value)
+{
+ if (key == "service")
+ serviceName = value;
+ else if (key == "username")
+ userName = value;
+ else if (key == "authname")
+ authName = value;
+ else if (key == "password") {
+ password = value;
+ free(secret);
+ secret = (sasl_secret_t*) malloc(sizeof(sasl_secret_t) + password.length());
+ }
+ else if (key == "host")
+ hostName = value;
+ else if (key == "externaluser")
+ externalUserName = value;
+ else {
+ setError("setAttr", -1, "Unknown string attribute name", key);
+ return false;
+ }
+
+ return true;
+}
+
+bool ClientImpl::setAttr(const string& key, uint32_t value)
+{
+ if (key == "minssf")
+ minSsf = value;
+ else if (key == "maxssf")
+ maxSsf = value;
+ else if (key == "externalssf")
+ externalSsf = value;
+ else if (key == "maxbufsize")
+ maxBufSize = value;
+ else {
+ setError("setAttr", -1, "Unknown integer attribute name", key);
+ return false;
+ }
+
+ return true;
+}
+
+bool ClientImpl::start(const string& mechList, output_string& chosen, output_string& initialResponse)
+{
+ int result;
+ sasl_interact_t* prompt = 0;
+ const char* resp;
+ const char* mech;
+ unsigned int len;
+
+ do {
+ result = sasl_client_start(conn, mechList.c_str(), &prompt, &resp, &len, &mech);
+ if (result == SASL_INTERACT)
+ interact(prompt);
+ } while (result == SASL_INTERACT);
+ if (result != SASL_OK && result != SASL_CONTINUE) {
+ setError("sasl_client_start", result);
+ return false;
+ }
+
+ chosen = string(mech);
+ initialResponse = string(resp, len);
+ return true;
+}
+
+bool ClientImpl::step(const string& challenge, output_string& response)
+{
+ int result;
+ sasl_interact_t* prompt = 0;
+ const char* resp;
+ unsigned int len;
+
+ do {
+ result = sasl_client_step(conn, challenge.c_str(), challenge.size(), &prompt, &resp, &len);
+ if (result == SASL_INTERACT)
+ interact(prompt);
+ } while (result == SASL_INTERACT);
+ if (result != SASL_OK && result != SASL_CONTINUE) {
+ setError("sasl_client_step", result);
+ return false;
+ }
+
+ response = string(resp, len);
+ return true;
+}
+
+bool ClientImpl::encode(const string& clearText, output_string& cipherText)
+{
+ const char* output;
+ unsigned int outlen;
+ int result = sasl_encode(conn, clearText.c_str(), clearText.size(), &output, &outlen);
+ if (result != SASL_OK) {
+ setError("sasl_encode", result);
+ return false;
+ }
+ cipherText = string(output, outlen);
+ return true;
+}
+
+bool ClientImpl::decode(const string& cipherText, output_string& clearText)
+{
+ const char* input = cipherText.c_str();
+ unsigned int inLen = cipherText.size();
+ unsigned int remaining = inLen;
+ const char* cursor = input;
+ const char* output;
+ unsigned int outlen;
+
+ clearText = string();
+ while (remaining > 0) {
+ unsigned int segmentLen = (remaining < maxBufSize) ? remaining : maxBufSize;
+ int result = sasl_decode(conn, cursor, segmentLen, &output, &outlen);
+ if (result != SASL_OK) {
+ setError("sasl_decode", result);
+ return false;
+ }
+ clearText = clearText + string(output, outlen);
+ cursor += segmentLen;
+ remaining -= segmentLen;
+ }
+ return true;
+}
+
+bool ClientImpl::getUserId(output_string& userId)
+{
+ int result;
+ const char* operName;
+
+ result = sasl_getprop(conn, SASL_USERNAME, (const void**) &operName);
+ if (result != SASL_OK) {
+ setError("sasl_getprop(SASL_USERNAME)", result);
+ return false;
+ }
+
+ userId = string(operName);
+ return true;
+}
+
+void ClientImpl::getError(output_string& _error)
+{
+ _error = error;
+ error.clear();
+}
+
+void ClientImpl::addCallback(unsigned long id, void* proc)
+{
+ callbacks[cbIndex].id = id;
+ callbacks[cbIndex].proc = (int (*)()) proc;
+ callbacks[cbIndex].context = this;
+ cbIndex++;
+}
+
+void ClientImpl::setError(const string& context, int code, const string& text, const string& text2)
+{
+ stringstream err;
+ string etext(text.empty() ? sasl_errdetail(conn) : text);
+ err << "Error in " << context << " (" << code << ") " << etext;
+ if (!text2.empty())
+ err << " - " << text2;
+ error = err.str();
+}
+
+void ClientImpl::interact(sasl_interact_t* prompt)
+{
+ string output;
+ char* input;
+
+ if (prompt->id == SASL_CB_PASS) {
+ string ppt(prompt->prompt);
+ ppt += ": ";
+ char* pass = getpass(ppt.c_str());
+ output = string(pass);
+ } else {
+ cout << prompt->prompt;
+ if (prompt->defresult)
+ cout << " [" << prompt->defresult << "]";
+ cout << ": ";
+ cin >> output;
+ }
+ prompt->result = output.c_str();
+ prompt->len = output.length();
+}
+
+int ClientImpl::cbName(void *context, int id, const char **result, unsigned *len)
+{
+ ClientImpl* impl = (ClientImpl*) context;
+
+ if (id == SASL_CB_USER || (id == SASL_CB_AUTHNAME && impl->authName.empty())) {
+ *result = impl->userName.c_str();
+ //*len = impl->userName.length();
+ } else if (id == SASL_CB_AUTHNAME) {
+ *result = impl->authName.c_str();
+ //*len = impl->authName.length();
+ }
+
+ return SASL_OK;
+}
+
+int ClientImpl::cbPassword(sasl_conn_t *conn, void *context, int id, sasl_secret_t **psecret)
+{
+ ClientImpl* impl = (ClientImpl*) context;
+ size_t length = impl->password.length();
+
+ if (id == SASL_CB_PASS) {
+ impl->secret->len = length;
+ ::memcpy(impl->secret->data, impl->password.c_str(), length);
+ } else
+ impl->secret->len = 0;
+
+ *psecret = impl->secret;
+ return SASL_OK;
+}
+
+
+//==========================================================
+// WRAPPERS
+//==========================================================
+
// Public Client methods: thin one-line delegation to the private
// ClientImpl instance (pimpl idiom keeps <sasl/sasl.h> out of the
// public header).  Client owns impl and deletes it in its destructor.
Client::Client() : impl(new ClientImpl()) {}
Client::~Client() { delete impl; }
bool Client::setAttr(const string& key, const string& value) { return impl->setAttr(key, value); }
bool Client::setAttr(const string& key, uint32_t value) { return impl->setAttr(key, value); }
bool Client::init() { return impl->init(); }
bool Client::start(const string& mechList, output_string& chosen, output_string& initialResponse) { return impl->start(mechList, chosen, initialResponse); }
bool Client::step(const string& challenge, output_string& response) { return impl->step(challenge, response); }
bool Client::encode(const string& clearText, output_string& cipherText) { return impl->encode(clearText, cipherText); }
bool Client::decode(const string& cipherText, output_string& clearText) { return impl->decode(cipherText, clearText); }
bool Client::getUserId(output_string& userId) { return impl->getUserId(userId); }
void Client::getError(output_string& error) { impl->getError(error); }
+
diff --git a/qpid/extras/sasl/src/saslwrapper.i b/qpid/extras/sasl/src/saslwrapper.i
new file mode 100644
index 0000000000..533ac79ce5
--- /dev/null
+++ b/qpid/extras/sasl/src/saslwrapper.i
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+%{
+#include "saslwrapper.h"
+%}
+
+%include stl.i
+%include <saslwrapper.h>
+
+%inline {
+
+using namespace std;
+using namespace saslwrapper;
+
+namespace saslwrapper {
+
+}
+}
+
+%{
+
+%};
+