diff options
author | Justin Ross <jross@apache.org> | 2016-04-21 12:31:34 +0000 |
---|---|---|
committer | Justin Ross <jross@apache.org> | 2016-04-21 12:31:34 +0000 |
commit | 71149592670f7592886751a9a866459bef0f12cc (patch) | |
tree | e4d1fd948055e36d1560112a318e77a210506d06 /qpid/cpp | |
parent | a835fb2724824dcd8a470fb51424cedeb6b38f62 (diff) | |
download | qpid-python-71149592670f7592886751a9a866459bef0f12cc.tar.gz |
QPID-7207: Create independent cpp and python subtrees, with content from tools and extras
git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1740289 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'qpid/cpp')
153 files changed, 18437 insertions, 2593 deletions
diff --git a/qpid/cpp/CMakeLists.txt b/qpid/cpp/CMakeLists.txt index 56f09c27c3..12a0503398 100644 --- a/qpid/cpp/CMakeLists.txt +++ b/qpid/cpp/CMakeLists.txt @@ -51,6 +51,7 @@ include(BuildInstallSettings.cmake) enable_testing() include (CTest) +configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake ${CMAKE_BINARY_DIR}/CTestCustom.cmake) if (MSVC) # Chaxnge warning C4996 from level 1 to level 4. These are real and shouldn't @@ -80,7 +81,7 @@ endif (WIN32) set_absolute_install_path (QPIDC_CONF_FILE ${QPID_INSTALL_CONFDIR}/qpidc.conf) set_absolute_install_path (QPIDD_CONF_FILE ${QPID_INSTALL_CONFDIR}/qpidd.conf) -install(FILES LICENSE.txt NOTICE.txt DESTINATION ${QPID_INSTALL_DOCDIR}) +install(FILES LICENSE.txt NOTICE.txt DESTINATION ${QPID_INSTALL_DOCDIR}) install(FILES include/qmf/qmf2.i DESTINATION ${QPID_INSTALL_INCLUDEDIR}/qmf) @@ -217,6 +218,7 @@ if (MSVC) endif (MSVC) # Subdirectories +add_subdirectory(management/python) add_subdirectory(managementgen) add_subdirectory(src) add_subdirectory(etc) diff --git a/qpid/cpp/CTestCustom.cmake b/qpid/cpp/CTestCustom.cmake new file mode 100644 index 0000000000..57efd32a81 --- /dev/null +++ b/qpid/cpp/CTestCustom.cmake @@ -0,0 +1 @@ +set(CTEST_CUSTOM_PRE_TEST "python ${CMAKE_BINARY_DIR}/src/tests/check_dependencies.py") diff --git a/qpid/cpp/INSTALL.txt b/qpid/cpp/INSTALL.txt index 717c9b0908..cee7f1764e 100644 --- a/qpid/cpp/INSTALL.txt +++ b/qpid/cpp/INSTALL.txt @@ -194,7 +194,7 @@ a source distribution: (*) Boost 1.33 will also work. -Optional support for AMQP 1.0 requires (see AMQP_1.0 for details): +Optional support for AMQP 1.0 requires (see docs/amqp-1.0.txt for details): * Qpid proton-c <http://qpid.apache.org/proton> (0.5) Note: If Proton is installed in a non-standard location, there are two ways to locate it: 1. 
Recommended: use proton 0.7 or later and use the same install prefix @@ -206,7 +206,7 @@ Optional XML exchange requires: * xqilla <http://xqilla.sourceforge.net/HomePage> (2.0.0) * xerces-c <http://xerces.apache.org/xerces-c/> (2.7.0) -Optional SSL support requires: +Optional SSL support requires (see docs/ssl.txt for details): * nss <http://www.mozilla.org/projects/security/pki/nss/> * nspr <http://www.mozilla.org/projects/nspr/> diff --git a/qpid/cpp/management/python/.gitignore b/qpid/cpp/management/python/.gitignore new file mode 100644 index 0000000000..4fca027dea --- /dev/null +++ b/qpid/cpp/management/python/.gitignore @@ -0,0 +1,3 @@ +MANIFEST +build +dist diff --git a/qpid/cpp/management/python/CMakeLists.txt b/qpid/cpp/management/python/CMakeLists.txt new file mode 100644 index 0000000000..4e65958043 --- /dev/null +++ b/qpid/cpp/management/python/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +find_package(PythonInterp REQUIRED) + +add_custom_target(management_python_build ALL + COMMAND + ${PYTHON_EXECUTABLE} setup.py build + --build-base=${CMAKE_CURRENT_BINARY_DIR} + --build-scripts=${CMAKE_CURRENT_BINARY_DIR}/bin + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + +install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} setup.py install + --prefix=${CMAKE_INSTALL_PREFIX} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})") + + diff --git a/qpid/cpp/management/python/LICENSE.txt b/qpid/cpp/management/python/LICENSE.txt new file mode 100644 index 0000000000..6b0b1270ff --- /dev/null +++ b/qpid/cpp/management/python/LICENSE.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/qpid/cpp/management/python/MANIFEST.in b/qpid/cpp/management/python/MANIFEST.in new file mode 100644 index 0000000000..ab30e9acee --- /dev/null +++ b/qpid/cpp/management/python/MANIFEST.in @@ -0,0 +1 @@ +include *.txt diff --git a/qpid/cpp/management/python/NOTICE.txt b/qpid/cpp/management/python/NOTICE.txt new file mode 100644 index 0000000000..24512d0da9 --- /dev/null +++ b/qpid/cpp/management/python/NOTICE.txt @@ -0,0 +1,5 @@ +Apache Qpid Python Tools +Copyright 2006-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/qpid/cpp/management/python/bin/.gitignore b/qpid/cpp/management/python/bin/.gitignore new file mode 100644 index 0000000000..f99dba8c08 --- /dev/null +++ b/qpid/cpp/management/python/bin/.gitignore @@ -0,0 +1,13 @@ +qmf-toolc +qpid-configc +qpid-hac +qpid-printeventsc +qpid-qls-analyzec +qpid-queue-statsc +qpid-receivec +qpid-routec +qpid-sendc +qpid-statc +qpid-store-chkc +qpid-store-resizec +qpid-toolc diff --git a/qpid/cpp/management/python/bin/qmf-tool b/qpid/cpp/management/python/bin/qmf-tool new file mode 100755 index 0000000000..407ae74b10 --- /dev/null +++ b/qpid/cpp/management/python/bin/qmf-tool @@ -0,0 +1,775 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import optparse +import sys +import socket +from cmd import Cmd +from shlex import split +from threading import Lock +from time import strftime, gmtime +from qpid.disp import Display +import qpid_messaging +import qmf2 + +class OptsAndArgs(object): + + def __init__(self, argv): + self.argv = argv + self.usage = """qmf-tool [OPTIONS] [<broker-host>[:<port>]]""" + self.option_parser = optparse.OptionParser(usage=self.usage) + self.conn_group = optparse.OptionGroup(self.option_parser, "Connection Options") + self.conn_group.add_option("-u", "--user", action="store", type="string", help="User name for authentication") + self.conn_group.add_option("-p", "--password", action="store", type="string", help="Password for authentication") + self.conn_group.add_option("-t", "--transport", action="store", type="string", help="Transport type (tcp, ssl, rdma)") + self.conn_group.add_option("-m", "--mechanism", action="store", type="string", help="SASL Mechanism for security") + self.conn_group.add_option("-s", "--service", action="store", type="string", default="qpidd", help="SASL Service name") + self.conn_group.add_option("--min-ssf", action="store", type="int", metavar="<n>", help="Minimum acceptable security strength factor") + self.conn_group.add_option("--max-ssf", action="store", 
type="int", metavar="<n>", help="Maximum acceptable security strength factor") + self.conn_group.add_option("--conn-option", action="append", default=[], metavar="<NAME=VALUE>", help="Additional connection option(s)") + self.option_parser.add_option_group(self.conn_group) + + self.qmf_group = optparse.OptionGroup(self.option_parser, "QMF Session Options") + self.qmf_group.add_option("--domain", action="store", type="string", help="QMF Domain") + self.qmf_group.add_option("--agent-age", action="store", type="int", metavar="<n>", help="Time, in minutes, to age out non-communicating agents") + self.qmf_group.add_option("--qmf-option", action="append", default=[], metavar="<NAME=VALUE>", help="Additional QMF session option(s)") + self.option_parser.add_option_group(self.qmf_group) + + def parse(self): + host = "localhost" + conn_options = {} + qmf_options = [] + + options, encArgs = self.option_parser.parse_args(args=self.argv) + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if len(args) > 1: + host = args[1] + + if options.user: + conn_options["username"] = options.user + if options.password: + conn_options["password"] = options.password + if options.transport: + conn_options["transport"] = options.transport + if options.mechanism: + conn_options["sasl_mechanisms"] = options.mechanism + if options.service: + conn_options["sasl_service"] = options.service + if options.min_ssf: + conn_options["sasl_min_ssf"] = options.min_ssf + if options.max_ssf: + conn_options["sasl_max_ssf"] = options.max_ssf + for x in options.conn_option: + try: + key, val = x.split('=') + conn_options[key] = val + except: + raise Exception("Improperly formatted text for --conn-option: '%s'" % x) + + if options.domain: + qmf_options.append("domain:'%s'" % options.domain) + if options.agent_age: + qmf_options.append("max-agent-age:%d" % options.agent_age) + for x in options.qmf_option: + try: + key, val = x.split('=') + 
qmf_options.append("%s:%s" % (key, val)) + except: + raise Exception("Improperly formatted text for --qmf-option: '%s'" % x) + + qmf_string = '{' + first = True + for x in qmf_options: + if first: + first = None + else: + qmf_string += ',' + qmf_string += x + qmf_string += '}' + + return host, conn_options, qmf_string + + + +class Mcli(Cmd): + """ Management Command Interpreter """ + + def __init__(self, dataObject, dispObject): + Cmd.__init__(self) + self.dataObject = dataObject + self.dispObject = dispObject + self.dataObject.setCli(self) + self.prompt = "qmf: " + + def emptyline(self): + pass + + def setPromptMessage(self, p=None): + if p == None: + self.prompt = "qmf: " + else: + self.prompt = "qmf[%s]: " % p + + def do_help(self, data): + print "Management Tool for QMF" + print + print "Agent Commands:" + print " set filter <filter-string> - Filter the list of agents" + print " list agents - Print a list of the known Agents" + print " set default <item-number> - Set the default agent for operations" + print " show filter - Show the agent filter currently in effect" + print " show agent <item-number> - Print detailed information about an Agent" + print " show options - Show option strings used in the QMF session" + print + print "Schema Commands:" + print " list packages - Print a list of packages supported by the default agent" + print " list classes [<package-name>] - Print all classes supported byt the default agent" + print " show class <class-name> [<package-name>] - Show details of a class" + print + print "Data Commands:" + print " query <class-name> [<package-name>] [<predicate>] - Query for data from the agent" + print " list - List accumulated query results" + print " clear - Clear accumulated query results" + print " show <id> - Show details from a data object" + print " call <id> <method> [<args>] - Call a method on a data object" + print + print "General Commands:" + print " set time-format short - Select short timestamp format (default)" + print " 
set time-format long - Select long timestamp format" + print " quit or ^D - Exit the program" + print + + def complete_set(self, text, line, begidx, endidx): + """ Command completion for the 'set' command """ + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)] + if len(tokens) == 2 and tokens[1] == 'time-format': + return [i for i in ('long', 'short') if i.startswith(text)] + return [] + + def do_set(self, data): + tokens = split(data) + try: + if tokens[0] == "time-format": + self.dispObject.do_setTimeFormat(tokens[1]) + else: + self.dataObject.do_set(data) + except Exception, e: + print "Exception in set command:", e + + def complete_list(self, text, line, begidx, endidx): + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('agents', 'packages', 'classes ') if i.startswith(text)] + return [] + + def do_list(self, data): + try: + self.dataObject.do_list(data) + except Exception, e: + print "Exception in list command:", e + + def complete_show(self, text, line, begidx, endidx): + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('options', 'filter', 'agent ', 'class ') if i.startswith(text)] + return [] + + def do_show(self, data): + try: + self.dataObject.do_show(data) + except Exception, e: + print "Exception in show command:", e + + def complete_query(self, text, line, begidx, endidx): + return [] + + def do_query(self, data): + try: + self.dataObject.do_query(data) + except Exception, e: + if e.message.__class__ == qmf2.Data: + e = e.message.getProperties() + print "Exception in query command:", e + + def do_call(self, data): + try: + self.dataObject.do_call(data) + except Exception, e: + if e.message.__class__ == qmf2.Data: + e = e.message.getProperties() + print "Exception in call command:", e + + def do_clear(self, data): + try: + self.dataObject.do_clear(data) + except Exception, e: + print "Exception in clear command:", e 
+ + def do_EOF(self, data): + print "quit" + try: + self.dataObject.do_exit() + except: + pass + return True + + def do_quit(self, data): + try: + self.dataObject.do_exit() + except: + pass + return True + + def postcmd(self, stop, line): + return stop + + def postloop(self): + print "Exiting..." + self.dataObject.close() + + +#====================================================================================================== +# QmfData +#====================================================================================================== +class QmfData: + """ + """ + def __init__(self, disp, url, conn_options, qmf_options): + self.disp = disp + self.url = url + self.conn_options = conn_options + self.qmf_options = qmf_options + self.agent_filter = '[]' + self.connection = qpid_messaging.Connection(self.url, **self.conn_options) + self.connection.open() + self.session = qmf2.ConsoleSession(self.connection, self.qmf_options) + self.session.setAgentFilter(self.agent_filter) + self.session.open() + self.lock = Lock() + self.cli = None + self.agents = {} # Map of number => agent object + self.deleted_agents = {} # Map of number => agent object + self.agent_numbers = {} # Map of agent name => number + self.next_number = 1 + self.focus_agent = None + self.data_list = {} + self.next_data_index = 1 + + #======================= + # Methods to support CLI + #======================= + def setCli(self, cli): + self.cli = cli + + def close(self): + try: + self.session.close() + self.connection.close() + except: + pass # we're shutting down - ignore any errors + + def do_list(self, data): + tokens = data.split() + if len(tokens) == 0: + self.listData() + elif tokens[0] == 'agents' or tokens[0] == 'agent': + self.listAgents() + elif tokens[0] == 'packages' or tokens[0] == 'package': + self.listPackages() + elif tokens[0] == 'classes' or tokens[0] == 'class': + self.listClasses(tokens[1:]) + + def do_set(self, data): + tokens = split(data) + if len(tokens) == 0: + print "What 
do you want to set? type 'help' for more information." + return + if tokens[0] == 'filter': + if len(tokens) == 2: + self.setAgentFilter(tokens[1]) + elif tokens[0] == 'default': + if len(tokens) == 2: + self.updateAgents() + number = int(tokens[1]) + self.focus_agent = self.agents[number] + print "Default Agent: %s" % self.focus_agent.getName() + + def do_show(self, data): + tokens = split(data) + if len(tokens) == 0: + print "What do you want to show? Type 'help' for more information." + return + + if tokens[0] == 'options': + print "Options used in this session:" + print " Connection Options : %s" % self.scrubConnOptions() + print " QMF Session Options: %s" % self.qmf_options + return + + if tokens[0] == 'agent': + self.showAgent(tokens[1:]) + return + + if tokens[0] == 'filter': + print self.agent_filter + return + + if tokens[0] == "default": + if not self.focus_agent: + self.updateAgents() + if self.focus_agent: + print "Default Agent: %s" % self.focus_agent.getName() + else: + print "Default Agent not set" + return + + if tokens[0] == "class": + self.showClass(tokens[1:]) + return + + if tokens[0].isdigit(): + self.showData(tokens[0]) + return + + print "What do you want to show? Type 'help' for more information." + return + + def do_query(self, data): + tokens = split(data) + if len(tokens) == 0: + print "Class name not specified." 
+ return + cname = tokens[0] + pname = None + pred = None + if len(tokens) >= 2: + if tokens[1][0] == '[': + pred = tokens[1] + else: + pname = tokens[1] + if len(tokens) >= 3: + pred = tokens[2] + query = "{class:'%s'" % cname + if pname: + query += ",package:'%s'" % pname + if pred: + query += ",where:%s" % pred + query += "}" + if not self.focus_agent: + self.updateAgents() + d_list = self.focus_agent.query(query) + local_data_list = {} + for d in d_list: + local_data_list[self.next_data_index] = d + self.next_data_index += 1 + rows = [] + for index,val in local_data_list.items(): + rows.append((index, val.getAddr().getName())) + self.data_list[index] = val + self.disp.table("Data Objects Returned: %d:" % len(d_list), ("Number", "Data Address"), rows) + + def do_call(self, data): + tokens = split(data) + if len(tokens) < 2: + print "Data ID and method-name not specified." + return + idx = int(tokens[0]) + methodName = tokens[1] + args = [] + for arg in tokens[2:]: + ## + ## If the argument is a map, list, boolean, integer, or floating (one decimal point), + ## run it through the Python evaluator so it is converted to the correct type. 
+ ## + ## TODO: use a regex for this instead of this convoluted logic + if arg[0] == '{' or arg[0] == '[' or arg == "True" or arg == "False" or \ + ((arg.count('.') < 2 and (arg.count('-') == 0 or \ + (arg.count('-') == 1 and arg[0] == '-')) and \ + arg.replace('.','').replace('-','').isdigit())): + args.append(eval(arg)) + else: + args.append(arg) + + if not idx in self.data_list: + print "Unknown data index, run 'query' to get a list of data indices" + return + + data = self.data_list[idx] + data._getSchema() + result = data._invoke(methodName, args, {}) + rows = [] + for k,v in result.items(): + rows.append((k,v)) + self.disp.table("Output Parameters:", ("Name", "Value"), rows) + + def do_clear(self, data): + self.data_list = {} + self.next_data_index = 1 + print "Accumulated query results cleared" + + def do_exit(self): + pass + + #==================== + # Sub-Command Methods + #==================== + def setAgentFilter(self, filt): + self.agent_filter = filt + self.session.setAgentFilter(filt) + + def updateAgents(self): + agents = self.session.getAgents() + number_list = [] + for agent in agents: + if agent.getName() not in self.agent_numbers: + number = self.next_number + number_list.append(number) + self.next_number += 1 + self.agent_numbers[agent.getName()] = number + self.agents[number] = agent + else: + ## Track seen agents so we can clean out deleted ones + number = self.agent_numbers[agent.getName()] + number_list.append(number) + if number in self.deleted_agents: + self.agents[number] = self.deleted_agents.pop(number) + deleted = [] + for number in self.agents: + if number not in number_list: + deleted.append(number) + for number in deleted: + self.deleted_agents[number] = self.agents.pop(number) + if not self.focus_agent: + self.focus_agent = self.session.getConnectedBrokerAgent() + + def listAgents(self): + self.updateAgents() + rows = [] + for number in self.agents: + agent = self.agents[number] + if self.focus_agent and agent.getName() == 
self.focus_agent.getName(): + d = '*' + else: + d = '' + rows.append((d, number, agent.getVendor(), agent.getProduct(), agent.getInstance(), agent.getEpoch())) + self.disp.table("QMF Agents:", ("", "Id", "Vendor", "Product", "Instance", "Epoch"), rows) + + def listPackages(self): + if not self.focus_agent: + raise "Default Agent not set - use 'set default'" + self.focus_agent.loadSchemaInfo() + packages = self.focus_agent.getPackages() + for p in packages: + print " %s" % p + + def getClasses(self, tokens): + if not self.focus_agent: + raise "Default Agent not set - use 'set default'" + return + self.focus_agent.loadSchemaInfo() + if len(tokens) == 1: + classes = self.focus_agent.getSchemaIds(tokens[0]); + else: + packages = self.focus_agent.getPackages() + classes = [] + for p in packages: + classes.extend(self.focus_agent.getSchemaIds(p)) + return classes + + def listClasses(self, tokens): + classes = self.getClasses(tokens) + rows = [] + for c in classes: + rows.append((c.getPackageName(), c.getName(), self.classTypeName(c.getType()))) + self.disp.table("Classes:", ("Package", "Class", "Type"), rows) + + def showClass(self, tokens): + if len(tokens) < 1: + return + classes = self.getClasses([]) + c = tokens[0] + p = None + if len(tokens) == 2: + p = tokens[1] + schema = None + sid = None + for cls in classes: + if c == cls.getName(): + if not p or p == cls.getPackageName(): + schema = self.focus_agent.getSchema(cls) + sid = cls + break + if not sid: + return + print "Class: %s:%s (%s) - %s" % \ + (sid.getPackageName(), sid.getName(), self.classTypeName(sid.getType()), schema.getDesc()) + print " hash: %r" % sid.getHash() + props = schema.getProperties() + methods = schema.getMethods() + rows = [] + for prop in props: + name = prop.getName() + dtype = self.typeName(prop.getType()) + if len(prop.getSubtype()) > 0: + dtype += "(%s)" % prop.getSubtype() + access = self.accessName(prop.getAccess()) + idx = self.yes_blank(prop.isIndex()) + opt = 
self.yes_blank(prop.isOptional()) + unit = prop.getUnit() + desc = prop.getDesc() + rows.append((name, dtype, idx, access, opt, unit, desc)) + self.disp.table("Properties:", ("Name", "Type", "Index", "Access", "Optional", "Unit", "Description"), rows) + if len(methods) > 0: + for meth in methods: + name = meth.getName() + desc = meth.getDesc() + if len(desc) > 0: + desc = " - " + desc + args = meth.getArguments() + rows = [] + for prop in args: + aname = prop.getName() + dtype = self.typeName(prop.getType()) + if len(prop.getSubtype()) > 0: + dtype += "(%s)" % prop.getSubtype() + unit = prop.getUnit() + adesc = prop.getDesc() + io = self.dirName(prop.getDirection()) + rows.append((aname, dtype, io, unit, adesc)) + print + print " Method: %s%s" % (name, desc) + self.disp.table("Arguments:", ("Name", "Type", "Dir", "Unit", "Description"), rows) + + def showAgent(self, tokens): + self.updateAgents() + for token in tokens: + number = int(token) + agent = self.agents[number] + print + print " ==================================================================================" + print " Agent Id: %d" % number + print " Agent Name: %s" % agent.getName() + print " Epoch: %d" % agent.getEpoch() + print " Attributes:" + attrs = agent.getAttributes() + keys = attrs.keys() + keys.sort() + pairs = [] + for key in keys: + if key == '_timestamp' or key == '_schema_updated': + val = disp.timestamp(attrs[key]) + else: + val = attrs[key] + pairs.append((key, val)) + self.printAlignedPairs(pairs) + agent.loadSchemaInfo() + print " Packages:" + packages = agent.getPackages() + for package in packages: + print " %s" % package + + def showData(self, idx): + num = int(idx) + if not num in self.data_list: + print "Data ID not known, run 'query' first to get data" + return + data = self.data_list[num] + props = data.getProperties() + rows = [] + for k,v in props.items(): + rows.append((k, v)) + self.disp.table("Properties:", ("Name", "Value"), rows) + + def listData(self): + if 
len(self.data_list) == 0: + print "No Query Results - Use the 'query' command" + return + rows = [] + for index,val in self.data_list.items(): + rows.append((index, val.getAgent().getName(), val.getAddr().getName())) + self.disp.table("Accumulated Query Results:", ('Number', 'Agent', 'Data Address'), rows) + + def printAlignedPairs(self, rows, indent=8): + maxlen = 0 + for first, second in rows: + if len(first) > maxlen: + maxlen = len(first) + maxlen += indent + for first, second in rows: + for i in range(maxlen - len(first)): + print "", + print "%s : %s" % (first, second) + + def classTypeName(self, code): + if code == qmf2.SCHEMA_TYPE_DATA: return "Data" + if code == qmf2.SCHEMA_TYPE_EVENT: return "Event" + return "Unknown" + + def typeName (self, typecode): + """ Convert type-codes to printable strings """ + if typecode == qmf2.SCHEMA_DATA_VOID: return "void" + elif typecode == qmf2.SCHEMA_DATA_BOOL: return "bool" + elif typecode == qmf2.SCHEMA_DATA_INT: return "int" + elif typecode == qmf2.SCHEMA_DATA_FLOAT: return "float" + elif typecode == qmf2.SCHEMA_DATA_STRING: return "string" + elif typecode == qmf2.SCHEMA_DATA_MAP: return "map" + elif typecode == qmf2.SCHEMA_DATA_LIST: return "list" + elif typecode == qmf2.SCHEMA_DATA_UUID: return "uuid" + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def valueByType(self, typecode, val): + if typecode == 1: return "%d" % val + elif typecode == 2: return "%d" % val + elif typecode == 3: return "%d" % val + elif typecode == 4: return "%d" % val + elif typecode == 6: return val + elif typecode == 7: return val + elif typecode == 8: return strftime("%c", gmtime(val / 1000000000)) + elif typecode == 9: + if val < 0: val = 0 + sec = val / 1000000000 + min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % 
(sec % 60) + return result + + elif typecode == 10: return str(self.idRegistry.displayId(val)) + elif typecode == 11: + if val: + return "True" + else: + return "False" + + elif typecode == 12: return "%f" % val + elif typecode == 13: return "%f" % val + elif typecode == 14: return "%r" % val + elif typecode == 15: return "%r" % val + elif typecode == 16: return "%d" % val + elif typecode == 17: return "%d" % val + elif typecode == 18: return "%d" % val + elif typecode == 19: return "%d" % val + elif typecode == 20: return "%r" % val + elif typecode == 21: return "%r" % val + elif typecode == 22: return "%r" % val + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def accessName (self, code): + """ Convert element access codes to printable strings """ + if code == qmf2.ACCESS_READ_CREATE: return "ReadCreate" + elif code == qmf2.ACCESS_READ_WRITE: return "ReadWrite" + elif code == qmf2.ACCESS_READ_ONLY: return "ReadOnly" + else: + raise ValueError ("Invalid access code: %s" % str(code)) + + def dirName(self, io): + if io == qmf2.DIR_IN: return "in" + elif io == qmf2.DIR_OUT: return "out" + elif io == qmf2.DIR_IN_OUT: return "in_out" + else: + raise ValueError("Invalid direction code: %r" % io) + + def notNone (self, text): + if text == None: + return "" + else: + return text + + def yes_blank(self, val): + if val: + return "Y" + return "" + + def objectIndex(self, obj): + if obj._objectId.isV2: + return obj._objectId.getObject() + result = "" + first = True + props = obj.getProperties() + for prop in props: + if prop[0].index: + if not first: + result += "." 
+ result += self.valueByType(prop[0].type, prop[1]) + first = None + return result + + def scrubConnOptions(self): + scrubbed = {} + for key, val in self.conn_options.items(): + if key == "password": + val = "***" + scrubbed[key] = val + return str(scrubbed) + + +#========================================================= +# Main Program +#========================================================= +try: + oa = OptsAndArgs(sys.argv) + host, conn_options, qmf_options = oa.parse() +except Exception, e: + print "Parse Error: %s" % e + sys.exit(1) + +disp = Display() + +# Attempt to make a connection to the target broker +try: + data = QmfData(disp, host, conn_options, qmf_options) +except Exception, e: + if str(e).find("Exchange not found") != -1: + print "Management not enabled on broker: Use '-m yes' option on broker startup." + else: + print "Failed: %s - %s" % (e.__class__.__name__, e) + sys.exit(1) + +# Instantiate the CLI interpreter and launch it. +cli = Mcli(data, disp) +print("Management Tool for QMF") +try: + cli.cmdloop() +except KeyboardInterrupt: + print + print "Exiting..." +except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + +# alway attempt to cleanup broker resources +data.close() diff --git a/qpid/cpp/management/python/bin/qpid-config b/qpid/cpp/management/python/bin/qpid-config new file mode 100755 index 0000000000..3d4bb6036a --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-config @@ -0,0 +1,878 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +import pdb + +import os +from optparse import OptionParser, OptionGroup, IndentedHelpFormatter +import sys +import locale + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpid.messaging import Connection, ConnectionError +from qpidtoollibs import BrokerAgent +from qpidtoollibs import Display, Header + +usage = """ +Usage: qpid-config [OPTIONS] + qpid-config [OPTIONS] exchanges [filter-string] + qpid-config [OPTIONS] queues [filter-string] + qpid-config [OPTIONS] add exchange <type> <name> [AddExchangeOptions] + qpid-config [OPTIONS] del exchange <name> + qpid-config [OPTIONS] add queue <name> [AddQueueOptions] + qpid-config [OPTIONS] del queue <name> [DelQueueOptions] + qpid-config [OPTIONS] bind <exchange-name> <queue-name> [binding-key] + <for type xml> [-f -|filename] + <for type header> [all|any] k1=v1 [, k2=v2...] 
+ qpid-config [OPTIONS] unbind <exchange-name> <queue-name> [binding-key] + qpid-config [OPTIONS] reload-acl + qpid-config [OPTIONS] add <type> <name> [--argument <property-name>=<property-value>] + qpid-config [OPTIONS] del <type> <name> + qpid-config [OPTIONS] list <type> [--show-property <property-name>] + qpid-config [OPTIONS] log [<logstring>] + qpid-config [OPTIONS] shutdown""" + +description = """ +Examples: + +$ qpid-config add queue q +$ qpid-config add exchange direct d -a localhost:5672 +$ qpid-config exchanges -b 10.1.1.7:10000 +$ qpid-config queues -b guest/guest@broker-host:10000 + +Add Exchange <type> values: + + direct Direct exchange for point-to-point communication + fanout Fanout exchange for broadcast communication + topic Topic exchange that routes messages using binding keys with wildcards + headers Headers exchange that matches header fields against the binding keys + xml XML Exchange - allows content filtering using an XQuery + + +Queue Limit Actions: + + none (default) - Use broker's default policy + reject - Reject enqueued messages + ring - Replace oldest unacquired message with new + +Replication levels: + + none - no replication + configuration - replicate queue and exchange existence and bindings, but not messages. + all - replicate configuration and messages + +Log <logstring> value: + + Comma separated <module>:<level> pairs, e.g. 
'info+,debug+:Broker,trace+:Queue' +""" + +REPLICATE_LEVELS= ["none", "configuration", "all"] +DEFAULT_PROPERTIES = {"exchange":["name", "type", "durable"], "queue":["name", "durable", "autoDelete"]} + +def get_value(r): + if len(r) == 2: + try: + value = int(r[1]) + except: + value = r[1] + else: value = None + return value + +class Config: + def __init__(self): + self._recursive = False + self._host = "localhost" + self._connTimeout = 10 + self._ignoreDefault = False + self._altern_ex = None + self._durable = False + self._replicate = None + self._if_empty = True + self._if_unused = True + self._fileCount = None + self._fileSize = None + self._efp_partition_num = None + self._efp_pool_file_size = None + self._maxQueueSize = None + self._maxQueueCount = None + self._limitPolicy = None + self._msgSequence = False + self._lvq_key = None + self._ive = False + self._eventGeneration = None + self._file = None + self._flowStopCount = None + self._flowResumeCount = None + self._flowStopSize = None + self._flowResumeSize = None + self._msgGroupHeader = None + self._sharedMsgGroup = False + self._extra_arguments = [] + self._start_replica = None + self._returnCode = 0 + self._list_properties = [] + + def getOptions(self): + options = {} + for a in self._extra_arguments: + r = a.split("=", 1) + options[r[0]] = get_value(r) + return options + + +config = Config() +conn_options = {} + +FILECOUNT = "qpid.file_count" +FILESIZE = "qpid.file_size" +EFP_PARTITION_NUM = "qpid.efp_partition_num" +EFP_POOL_FILE_SIZE = "qpid.efp_pool_file_size" +MAX_QUEUE_SIZE = "qpid.max_size" +MAX_QUEUE_COUNT = "qpid.max_count" +POLICY_TYPE = "qpid.policy_type" +LVQ_KEY = "qpid.last_value_queue_key" +MSG_SEQUENCE = "qpid.msg_sequence" +IVE = "qpid.ive" +FLOW_STOP_COUNT = "qpid.flow_stop_count" +FLOW_RESUME_COUNT = "qpid.flow_resume_count" +FLOW_STOP_SIZE = "qpid.flow_stop_size" +FLOW_RESUME_SIZE = "qpid.flow_resume_size" +MSG_GROUP_HDR_KEY = "qpid.group_header_key" +SHARED_MSG_GROUP = 
"qpid.shared_msg_group" +REPLICATE = "qpid.replicate" +#There are various arguments to declare that have specific program +#options in this utility. However there is now a generic mechanism for +#passing arguments as well. The SPECIAL_ARGS list contains the +#arguments for which there are specific program options defined +#i.e. the arguments for which there is special processing on add and +#list +SPECIAL_ARGS=[ + FILECOUNT,FILESIZE,EFP_PARTITION_NUM,EFP_POOL_FILE_SIZE, + MAX_QUEUE_SIZE,MAX_QUEUE_COUNT,POLICY_TYPE, + LVQ_KEY,MSG_SEQUENCE,IVE, + FLOW_STOP_COUNT,FLOW_RESUME_COUNT,FLOW_STOP_SIZE,FLOW_RESUME_SIZE, + MSG_GROUP_HDR_KEY,SHARED_MSG_GROUP,REPLICATE] + +class JHelpFormatter(IndentedHelpFormatter): + """Format usage and description without stripping newlines from usage strings + """ + + def format_usage(self, usage): + return usage + + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +def Usage(): + print usage + sys.exit(-1) + +def OptionsAndArguments(argv): + """ Set global variables for options, return arguments """ + + global config + + + parser = OptionParser(usage=usage, + description=description, + formatter=JHelpFormatter()) + + group1 = OptionGroup(parser, "General Options") + group1.add_option("-t", "--timeout", action="store", type="int", default=10, metavar="<secs>", help="Maximum time to wait for broker connection (in seconds)") + group1.add_option("-r", "--recursive", action="store_true", help="Show bindings in queue or exchange list") + group1.add_option("-b", "--broker", action="store", type="string", metavar="<address>", help="Address of qpidd broker with syntax: [username/password@] hostname | ip-address [:<port>]") + group1.add_option("-a", "--broker-addr", action="store", type="string", metavar="<address>") + group1.add_option("--sasl-mechanism", action="store", type="string", metavar="<mech>", help="SASL mechanism for authentication (e.g. 
EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + group1.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + group1.add_option("--ssl-certificate", action="store", type="string", metavar="<cert>", help="Client SSL certificate (PEM Format)") + group1.add_option("--ssl-key", action="store", type="string", metavar="<key>", help="Client SSL private key (PEM Format)") + group1.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + parser.add_option_group(group1) + + group_ls = OptionGroup(parser, "Options for Listing Exchanges and Queues") + group_ls.add_option("--ignore-default", action="store_true", help="Ignore the default exchange in exchange or queue list") + parser.add_option_group(group_ls) + + group2 = OptionGroup(parser, "Options for Adding Exchanges and Queues") + group2.add_option("--alternate-exchange", action="store", type="string", metavar="<aexname>", help="Name of the alternate-exchange for the new queue or exchange. Exchanges route messages to the alternate exchange if they are unable to route them elsewhere. Queues route messages to the alternate exchange if they are rejected by a subscriber or orphaned by queue deletion.") + group2.add_option("--durable", action="store_true", help="The new queue or exchange is durable.") + group2.add_option("--replicate", action="store", metavar="<level>", help="Enable automatic replication in a HA cluster. 
<level> is 'none', 'configuration' or 'all').") + parser.add_option_group(group2) + + group3 = OptionGroup(parser, "Options for Adding Queues") + group3.add_option("--file-count", action="store", type="int", metavar="<n>", help="[legacystore] Number of files in queue's persistence journal") + group3.add_option("--file-size", action="store", type="int", metavar="<n>", help="[legactystore] File size in pages (64KiB/page)") + group3.add_option("--efp-partition-num", action="store", type="int", metavar="<n>", help="[linearstore] EFP partition number") + group3.add_option("--efp-pool-file-size", action="store", type="int", metavar="<n>", help="[linearstore] EFP file size (KiB)") + group3.add_option("--max-queue-size", action="store", type="int", metavar="<n>", help="Maximum in-memory queue size as bytes") + group3.add_option("--max-queue-count", action="store", type="int", metavar="<n>", help="Maximum in-memory queue size as a number of messages") + group3.add_option("--limit-policy", action="store", choices=["none", "reject", "ring", "ring-strict"], metavar="<policy>", help="Action to take when queue limit is reached") + group3.add_option("--lvq-key", action="store", metavar="<key>", help="Last Value Queue key") + group3.add_option("--flow-stop-size", action="store", type="int", metavar="<n>", + help="Turn on sender flow control when the number of queued bytes exceeds this value.") + group3.add_option("--flow-resume-size", action="store", type="int", metavar="<n>", + help="Turn off sender flow control when the number of queued bytes drops below this value.") + group3.add_option("--flow-stop-count", action="store", type="int", metavar="<n>", + help="Turn on sender flow control when the number of queued messages exceeds this value.") + group3.add_option("--flow-resume-count", action="store", type="int", metavar="<n>", + help="Turn off sender flow control when the number of queued messages drops below this value.") + group3.add_option("--group-header", action="store", 
type="string", metavar="<header-name>", + help="Enable message groups. Specify name of header that holds group identifier.") + group3.add_option("--shared-groups", action="store_true", + help="Allow message group consumption across multiple consumers.") + group3.add_option("--argument", dest="extra_arguments", action="append", default=[], + metavar="<NAME=VALUE>", help="Specify a key-value pair to add to queue arguments") + group3.add_option("--start-replica", metavar="<broker-url>", help="Start replication from the same-named queue at <broker-url>") + # no option for declaring an exclusive queue - which can only be used by the session that creates it. + parser.add_option_group(group3) + + group4 = OptionGroup(parser, "Options for Adding Exchanges") + group4.add_option("--sequence", action="store_true", help="Exchange will insert a 'qpid.msg_sequence' field in the message header") + group4.add_option("--ive", action="store_true", help="Exchange will behave as an 'initial-value-exchange', keeping a reference to the last message forwarded and enqueuing that message to newly bound queues.") + parser.add_option_group(group4) + + group5 = OptionGroup(parser, "Options for Deleting Queues") + group5.add_option("--force", action="store_true", help="Force delete of queue even if it's currently used or it's not empty") + group5.add_option("--force-if-not-empty", action="store_true", help="Force delete of queue even if it's not empty") + group5.add_option("--force-if-used", action="store_true", help="Force delete of queue even if it's currently used") + parser.add_option_group(group5) + + group6 = OptionGroup(parser, "Options for Declaring Bindings") + group6.add_option("-f", "--file", action="store", type="string", metavar="<file.xq>", help="For XML Exchange bindings - specifies the name of a file containing an XQuery.") + parser.add_option_group(group6) + + group_7 = OptionGroup(parser, "Formatting options for 'list' action") + group_7.add_option("--show-property", 
dest="list_properties", action="append", default=[], + metavar="<property-name>", help="Specify a property of an object to be included in output") + parser.add_option_group(group_7) + + opts, encArgs = parser.parse_args(args=argv) + + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if opts.recursive: + config._recursive = True + if opts.broker: + config._host = opts.broker + if opts.broker_addr: + config._host = opts.broker_addr + if config._host is None: config._host="localhost:5672" + if opts.timeout is not None: + config._connTimeout = opts.timeout + if config._connTimeout == 0: + config._connTimeout = None + if opts.ignore_default: + config._ignoreDefault = True + if opts.alternate_exchange: + config._altern_ex = opts.alternate_exchange + if opts.durable: + config._durable = True + if opts.replicate: + if not opts.replicate in REPLICATE_LEVELS: + raise Exception("Invalid replication level '%s', should be one of: %s" % (opts.replicate, ", ".join(REPLICATE_LEVELS))) + config._replicate = opts.replicate + if opts.ha_admin: config._ha_admin = True + if opts.file: + config._file = opts.file + if opts.file_count is not None: + config._fileCount = opts.file_count + if opts.file_size is not None: + config._fileSize = opts.file_size + if opts.efp_partition_num is not None: + config._efp_partition_num = opts.efp_partition_num + if opts.efp_pool_file_size is not None: + config._efp_pool_file_size = opts.efp_pool_file_size + if opts.max_queue_size is not None: + config._maxQueueSize = opts.max_queue_size + if opts.max_queue_count is not None: + config._maxQueueCount = opts.max_queue_count + if opts.limit_policy: + config._limitPolicy = opts.limit_policy + if opts.sequence: + config._msgSequence = True + if opts.lvq_key: + config._lvq_key = opts.lvq_key + if opts.ive: + config._ive = True + if opts.force: + config._if_empty = False + config._if_unused = False + if opts.force_if_not_empty: + 
config._if_empty = False + if opts.force_if_used: + config._if_unused = False + if opts.sasl_mechanism: + config._sasl_mechanism = opts.sasl_mechanism + if opts.flow_stop_size is not None: + config._flowStopSize = opts.flow_stop_size + if opts.flow_resume_size is not None: + config._flowResumeSize = opts.flow_resume_size + if opts.flow_stop_count is not None: + config._flowStopCount = opts.flow_stop_count + if opts.flow_resume_count is not None: + config._flowResumeCount = opts.flow_resume_count + if opts.group_header: + config._msgGroupHeader = opts.group_header + if opts.shared_groups: + config._sharedMsgGroup = True + if opts.extra_arguments: + config._extra_arguments = opts.extra_arguments + if opts.start_replica: + config._start_replica = opts.start_replica + if opts.list_properties: + config._list_properties = opts.list_properties + + if opts.sasl_mechanism: + conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.ha_admin: + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + return args + + +# +# helpers for the arg parsing in bind(). return multiple values; "ok" +# followed by the resultant args + +# +# accept -f followed by either +# a filename or "-", for stdin. pull the bits into a string, to be +# passed to the xml binding. 
+# +def snarf_xquery_args(): + if not config._file: + print "Invalid args to bind xml: need an input file or stdin" + return [False] + if config._file == "-": + res = sys.stdin.read() + else: + f = open(config._file) # let this signal if it can't find it + res = f.read() + f.close() + return [True, res] + +# +# look for "any"/"all" and grok the rest of argv into a map +# +def snarf_header_args(args): + + if len(args) < 2: + print "Invalid args to bind headers: need 'any'/'all' plus conditions" + return [False] + op = args[0] + if op == "all" or op == "any": + kv = {} + for thing in args[1:]: + k_and_v = thing.split("=") + kv[k_and_v[0]] = k_and_v[1] + return [True, op, kv] + else: + print "Invalid condition arg to bind headers, need 'any' or 'all', not '" + op + "'" + return [False] + +class BrokerManager: + def __init__(self): + self.brokerName = None + self.conn = None + self.broker = None + + def SetBroker(self, brokerUrl): + self.url = brokerUrl + self.conn = Connection.establish(self.url, **conn_options) + self.broker = BrokerAgent(self.conn) + + def Disconnect(self, ignore=True): + if self.conn: + try: + self.conn.close() + except Exception, e: + if ignore: + # suppress close errors to avoid + # tracebacks when a previous + # exception will be printed to stdout + pass + else: + # raise last exception so complete + # trackback is preserved + raise + + def Overview(self): + exchanges = self.broker.getAllExchanges() + queues = self.broker.getAllQueues() + print "Total Exchanges: %d" % len(exchanges) + etype = {} + for ex in exchanges: + if ex.type not in etype: + etype[ex.type] = 1 + else: + etype[ex.type] = etype[ex.type] + 1 + for typ in etype: + print "%15s: %d" % (typ, etype[typ]) + + print + print " Total Queues: %d" % len(queues) + durable = 0 + for queue in queues: + if queue.durable: + durable = durable + 1 + print " durable: %d" % durable + print " non-durable: %d" % (len(queues) - durable) + + def ExchangeList(self, filter): + exchanges = 
self.broker.getAllExchanges() + caption1 = "Type " + caption2 = "Exchange Name" + maxNameLen = len(caption2) + found = False + for ex in exchanges: + if self.match(ex.name, filter): + if len(ex.name) > maxNameLen: maxNameLen = len(ex.name) + found = True + if not found: + global config + config._returnCode = 1 + return + + print "%s%-*s Attributes" % (caption1, maxNameLen, caption2) + line = "" + for i in range(((maxNameLen + len(caption1)) / 5) + 5): + line += "=====" + print line + + for ex in exchanges: + if config._ignoreDefault and not ex.name: continue + if self.match(ex.name, filter): + print "%-10s%-*s " % (ex.type, maxNameLen, ex.name), + args = ex.arguments + if not args: args = {} + if ex.durable: print "--durable", + if REPLICATE in args: print "--replicate=%s" % args[REPLICATE], + if MSG_SEQUENCE in args and args[MSG_SEQUENCE]: print "--sequence", + if IVE in args and args[IVE]: print "--ive", + if ex.altExchange: + print "--alternate-exchange=%s" % ex.altExchange, + print + + def ExchangeListRecurse(self, filter): + exchanges = self.broker.getAllExchanges() + bindings = self.broker.getAllBindings() + queues = self.broker.getAllQueues() + for ex in exchanges: + if config._ignoreDefault and not ex.name: continue + if self.match(ex.name, filter): + print "Exchange '%s' (%s)" % (ex.name, ex.type) + for bind in bindings: + if bind.exchangeRef == ex.name: + qname = "<unknown>" + queue = self.findById(queues, bind.queueRef) + if queue != None: + qname = queue.name + if bind.arguments: + print " bind [%s] => %s %s" % (bind.bindingKey, qname, bind.arguments) + else: + print " bind [%s] => %s" % (bind.bindingKey, qname) + + + def QueueList(self, filter): + queues = self.broker.getAllQueues() + caption = "Queue Name" + maxNameLen = len(caption) + found = False + for q in queues: + if self.match(q.name, filter): + if len(q.name) > maxNameLen: maxNameLen = len(q.name) + found = True + if not found: + global config + config._returnCode = 1 + return + + print "%-*s 
Attributes" % (maxNameLen, caption) + line = "" + for i in range((maxNameLen / 5) + 5): + line += "=====" + print line + + for q in queues: + if self.match(q.name, filter): + print "%-*s " % (maxNameLen, q.name), + args = q.arguments + if not args: args = {} + if q.durable: print "--durable", + if REPLICATE in args: print "--replicate=%s" % args[REPLICATE], + if q.autoDelete: print "auto-del", + if q.exclusive: print "excl", + if FILESIZE in args: print "--file-size=%s" % args[FILESIZE], + if FILECOUNT in args: print "--file-count=%s" % args[FILECOUNT], + if EFP_PARTITION_NUM in args: print "--efp-partition-num=%s" % args[EFP_PARTITION_NUM], + if EFP_POOL_FILE_SIZE in args: print "--efp-pool-file-size=%s" % args[EFP_POOL_FILE_SIZE], + if MAX_QUEUE_SIZE in args: print "--max-queue-size=%s" % args[MAX_QUEUE_SIZE], + if MAX_QUEUE_COUNT in args: print "--max-queue-count=%s" % args[MAX_QUEUE_COUNT], + if POLICY_TYPE in args: print "--limit-policy=%s" % args[POLICY_TYPE].replace("_", "-"), + if LVQ_KEY in args: print "--lvq-key=%s" % args[LVQ_KEY], + if q.altExchange: + print "--alternate-exchange=%s" % q.altExchange, + if FLOW_STOP_SIZE in args: print "--flow-stop-size=%s" % args[FLOW_STOP_SIZE], + if FLOW_RESUME_SIZE in args: print "--flow-resume-size=%s" % args[FLOW_RESUME_SIZE], + if FLOW_STOP_COUNT in args: print "--flow-stop-count=%s" % args[FLOW_STOP_COUNT], + if FLOW_RESUME_COUNT in args: print "--flow-resume-count=%s" % args[FLOW_RESUME_COUNT], + if MSG_GROUP_HDR_KEY in args: print "--group-header=%s" % args[MSG_GROUP_HDR_KEY], + if SHARED_MSG_GROUP in args and args[SHARED_MSG_GROUP] == 1: print "--shared-groups", + print " ".join(["--argument %s=%s" % (k, v) for k,v in args.iteritems() if not k in SPECIAL_ARGS]) + + def QueueListRecurse(self, filter): + exchanges = self.broker.getAllExchanges() + bindings = self.broker.getAllBindings() + queues = self.broker.getAllQueues() + for queue in queues: + if self.match(queue.name, filter): + print "Queue '%s'" % 
queue.name + for bind in bindings: + if bind.queueRef == queue.name: + ename = "<unknown>" + ex = self.findById(exchanges, bind.exchangeRef) + if ex != None: + ename = ex.name + if ename == "": + if config._ignoreDefault: continue + ename = "''" + if bind.arguments: + print " bind [%s] => %s %s" % (bind.bindingKey, ename, bind.arguments) + else: + print " bind [%s] => %s" % (bind.bindingKey, ename) + + def AddExchange(self, args): + if len(args) < 2: + Usage() + etype = args[0] + ename = args[1] + declArgs = {} + for a in config._extra_arguments: + r = a.split("=", 1) + declArgs[r[0]] = get_value(r) + + if config._msgSequence: + declArgs[MSG_SEQUENCE] = 1 + if config._ive: + declArgs[IVE] = 1 + if config._altern_ex: + declArgs['alternate-exchange'] = config._altern_ex + if config._durable: + declArgs['durable'] = 1 + if config._replicate: + declArgs[REPLICATE] = config._replicate + self.broker.addExchange(etype, ename, declArgs) + + + def DelExchange(self, args): + if len(args) < 1: + Usage() + ename = args[0] + self.broker.delExchange(ename) + + + def AddQueue(self, args): + if len(args) < 1: + Usage() + qname = args[0] + declArgs = {} + for a in config._extra_arguments: + r = a.split("=", 1) + declArgs[r[0]] = get_value(r) + + if config._durable: + # allow the default fileCount and fileSize specified + # in qpid config file to take prededence + if config._fileCount: + declArgs[FILECOUNT] = config._fileCount + if config._fileSize: + declArgs[FILESIZE] = config._fileSize + if config._efp_partition_num: + declArgs[EFP_PARTITION_NUM] = config._efp_partition_num + if config._efp_pool_file_size: + declArgs[EFP_POOL_FILE_SIZE] = config._efp_pool_file_size + + if config._maxQueueSize is not None: + declArgs[MAX_QUEUE_SIZE] = config._maxQueueSize + if config._maxQueueCount is not None: + declArgs[MAX_QUEUE_COUNT] = config._maxQueueCount + if config._limitPolicy: + if config._limitPolicy == "none": + pass + elif config._limitPolicy == "reject": + declArgs[POLICY_TYPE] = 
"reject" + elif config._limitPolicy == "ring": + declArgs[POLICY_TYPE] = "ring" + + if config._lvq_key: + declArgs[LVQ_KEY] = config._lvq_key + + if config._flowStopSize is not None: + declArgs[FLOW_STOP_SIZE] = config._flowStopSize + if config._flowResumeSize is not None: + declArgs[FLOW_RESUME_SIZE] = config._flowResumeSize + if config._flowStopCount is not None: + declArgs[FLOW_STOP_COUNT] = config._flowStopCount + if config._flowResumeCount is not None: + declArgs[FLOW_RESUME_COUNT] = config._flowResumeCount + + if config._msgGroupHeader: + declArgs[MSG_GROUP_HDR_KEY] = config._msgGroupHeader + if config._sharedMsgGroup: + declArgs[SHARED_MSG_GROUP] = 1 + + if config._altern_ex: + declArgs['alternate-exchange'] = config._altern_ex + if config._durable: + declArgs['durable'] = 1 + if config._replicate: + declArgs[REPLICATE] = config._replicate + self.broker.addQueue(qname, declArgs) + if config._start_replica: # Start replication + self.broker._method("replicate", {"broker":config._start_replica, "queue":qname}, "org.apache.qpid.ha:habroker:ha-broker") + + def DelQueue(self, args): + if len(args) < 1: + Usage() + qname = args[0] + self.broker.delQueue(qname, if_empty=config._if_empty, if_unused=config._if_unused) + + + + def Bind(self, args): + if len(args) < 2: + Usage() + ename = args[0] + qname = args[1] + key = "" + if len(args) > 2: + key = args[2] + + # query the exchange to determine its type. + res = self.broker.getExchange(ename) + + # type of the xchg determines the processing of the rest of + # argv. if it's an xml xchg, we want to find a file + # containing an x-query, and pass that. if it's a headers + # exchange, we need to pass either "any" or all, followed by a + # map containing key/value pairs. if neither of those, extra + # args are ignored. 
+ ok = True + _args = {} + if not res: + pass + elif res.type == "xml": + # this checks/imports the -f arg + [ok, xquery] = snarf_xquery_args() + _args = { "xquery" : xquery } + else: + if res.type == "headers": + [ok, op, kv] = snarf_header_args(args[3:]) + _args = kv + _args["x-match"] = op + + if not ok: + sys.exit(1) + + self.broker.bind(ename, qname, key, _args) + + def Unbind(self, args): + if len(args) < 2: + Usage() + ename = args[0] + qname = args[1] + key = "" + if len(args) > 2: + key = args[2] + self.broker.unbind(ename, qname, key) + + def ReloadAcl(self): + try: + self.broker.reloadAclFile() + except Exception, e: + if str(e).find('No object found') != -1: + print "Failed: ACL Module Not Loaded in Broker" + else: + raise + + def findById(self, items, id): + for item in items: + if item.name == id: + return item + return None + + def match(self, name, filter): + if filter == "": + return True + if name.find(filter) == -1: + return False + return True + +def YN(bool): + if bool: + return 'Y' + return 'N' + +def _clean_ref(o): + if isinstance(o, dict) and "_object_name" in o: + fqn = o["_object_name"] + parts = fqn.split(":",2) + return parts[len(parts)-1] + else: + return o + +def main(argv=None): + args = OptionsAndArguments(argv) + bm = BrokerManager() + + try: + bm.SetBroker(config._host) + if len(args) == 0: + bm.Overview() + else: + cmd = args[0] + modifier = "" + if len(args) > 1: + modifier = args[1] + if cmd == "exchanges": + if config._recursive: + bm.ExchangeListRecurse(modifier) + else: + bm.ExchangeList(modifier) + elif cmd == "queues": + if config._recursive: + bm.QueueListRecurse(modifier) + else: + bm.QueueList(modifier) + elif cmd == "add": + if modifier == "exchange": + bm.AddExchange(args[2:]) + elif modifier == "queue": + bm.AddQueue(args[2:]) + elif len(args) > 2: + bm.broker.create(modifier, args[2], config.getOptions()) + else: + Usage() + elif cmd == "del": + if modifier == "exchange": + bm.DelExchange(args[2:]) + elif modifier == 
"queue": + bm.DelQueue(args[2:]) + elif len(args) > 2: + bm.broker.delete(modifier, args[2], {}) + else: + Usage() + elif cmd == "bind": + bm.Bind(args[1:]) + elif cmd == "unbind": + bm.Unbind(args[1:]) + elif cmd == "reload-acl": + bm.ReloadAcl() + elif cmd == "list" and len(args) > 1: + # fetch objects + objects = bm.broker.list(modifier) + + # collect available attributes + attributes = [] + for o in objects: + for k in o.keys(): + if k == "name" and k not in attributes: + attributes.insert(0, k) + elif k not in attributes: + attributes.append(k) + + # determine which attributes to display + desired = [] + if len(config._list_properties): + for p in config._list_properties: + if p not in attributes: print "Warning: No such property '%s' for type '%s'" % (p, modifier) + else: desired.append(p) + elif modifier in DEFAULT_PROPERTIES: + desired = DEFAULT_PROPERTIES[modifier] + else: + desired = attributes[:6] + + # display + display = Display(prefix=" ") + headers = [Header(a) for a in desired] + rows = [tuple([_clean_ref(o.get(a, "n/a")) for a in desired]) for o in objects] + display.formattedTable("Objects of type '%s'" % modifier, headers, rows) + elif cmd == "log" and len (args) == 1: + print "Log level:", bm.broker.getLogLevel()["level"] + elif cmd == "log" and len (args) == 2: + bm.broker.setLogLevel(args[1]) + elif cmd == "shutdown": + try: + bm.broker._method("shutdown", {}) + except ConnectionError: + pass # Normal, the broker has been shut down! 
+ bm.conn = None # Don't try to close again + else: + Usage() + except KeyboardInterrupt: + print + except IOError, e: + print e + bm.Disconnect() + return 1 + except SystemExit, e: + bm.Disconnect() + return 1 + except Exception,e: + if e.__class__.__name__ != "Timeout": + # ignore Timeout exception, handle in the loop below + print "Failed: %s: %s" % (e.__class__.__name__, e) + bm.Disconnect() + return 1 + + while True: + # some commands take longer than the default amqp timeout to complete, + # so attempt to disconnect until successful, ignoring Timeouts + try: + bm.Disconnect(ignore=False) + break + except Exception, e: + if e.__class__.__name__ != "Timeout": + print "Failed: %s: %s" % (e.__class__.__name__, e) + return 1 + return config._returnCode + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/qpid/cpp/management/python/bin/qpid-config.bat b/qpid/cpp/management/python/bin/qpid-config.bat new file mode 100644 index 0000000000..0ab000f5d3 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-config.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-config %* diff --git a/qpid/cpp/management/python/bin/qpid-ha b/qpid/cpp/management/python/bin/qpid-ha new file mode 100755 index 0000000000..1c07658d34 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-ha @@ -0,0 +1,299 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import optparse, sys, time, os, re, math +from qpid.messaging import Connection +from qpid.messaging import Message as QpidMessage +from qpid.util import URL +from qpidtoollibs.broker import BrokerAgent +from qpidtoollibs.config import parse_qpidd_conf +try: + from uuid import uuid4 +except ImportError: + from qpid.datatypes import uuid4 + +# QMF address for the HA broker object. +HA_BROKER = "org.apache.qpid.ha:habroker:ha-broker" +# Define these defaults here rather than in add_option because we want +# to use qpidd.conf for defaults if --config is specified and +# these defaults otherwise: +DEFAULTS = { "broker":"0.0.0.0", "timeout":10.0} + +class ExitStatus(Exception): + """Raised if a command want's a non-0 exit status from the script""" + def __init__(self, status): self.status = status + +def find_qpidd_conf(): + """Return the path to the local qpid.conf file or None if it is not found""" + p = os.path + prefix, bin = p.split(p.dirname(__file__)) + if bin == "bin": # Installed in a standard place. + conf = p.join(prefix, "etc", "qpid", "qpidd.conf") + if p.isfile(conf): return conf + return None + +class Command(object): + """ + Common options and logic for all commands. Subclasses provide additional + options and execution logic. 
+ """ + + commands = [] + + def __init__(self, name, help, arg_names=[], connect_agent=True): + """@param connect_agent true if we should establish a QMF agent connection""" + Command.commands.append(self) + self.name = name + self.connect_agent = connect_agent + self.arg_names = arg_names + usage="%s [options] %s\n\n%s"%(name, " ".join(arg_names), help) + self.help = help + self.op=optparse.OptionParser(usage) + common = optparse.OptionGroup(self.op, "Broker connection options") + def help_default(what): return " (Default %s)"%DEFAULTS[what] + common.add_option("-b", "--broker", metavar="<address>", help="Address of qpidd broker with syntax: [username/password@] hostname | ip-address [:<port>]"+help_default("broker")) + common.add_option("--timeout", type="float", metavar="<seconds>", help="Give up if the broker does not respond within the timeout. 0 means wait forever"+help_default("timeout")) + common.add_option("--sasl-mechanism", metavar="<mech>", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override") + common.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + common.add_option("--ssl-certificate", metavar="<cert>", help="Client SSL certificate (PEM Format)") + common.add_option("--ssl-key", metavar="<key>", help="Client SSL private key (PEM Format)") + common.add_option("--config", metavar="<path/to/qpidd.conf>", help="Read default connection configuration from the qpidd.conf broker configuration file. 
Defaults are overridden by command-line options.)") + self.op.add_option_group(common) + + def connect(self, opts): + conn_options = {} + if not opts.broker: + opts.broker = DEFAULTS["broker"] + # If we are connecting locally, use local qpidd.conf by default + if not opts.config: opts.config = find_qpidd_conf() + url = URL(opts.broker) + if opts.config: # Use broker config file for defaults + config = parse_qpidd_conf(opts.config) + if not url.user: url.user = config.get("ha-username") + if not url.password: url.password = config.get("ha-password") + if not url.port: url.port = config.get("port") + opts.broker = str(url) + if not opts.sasl_mechanism: opts.sasl_mechanism = config.get("ha-mechanism") + if not opts.timeout: + timeout = config.get("ha-heartbeat-interval") or config.get("link-heartbeat-interval") + if timeout: opts.timeout = float(timeout) + else: # Use DEFAULTS + if not opts.timeout: opts.timeout = DEFAULTS["timeout"] + if opts.sasl_mechanism: conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + self.op.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + if opts.timeout: + conn_options['timeout'] = opts.timeout + conn_options['heartbeat'] = int(math.ceil(opts.timeout/2)) + connection = Connection.establish(opts.broker, **conn_options) + qmf_broker = self.connect_agent and BrokerAgent(connection) + ha_broker = self.connect_agent and qmf_broker.getHaBroker() + return (connection, qmf_broker, ha_broker) + + def all_brokers(self, ha_broker, opts, func): + """@return: List of (broker_addr, ha_broker) for all brokers in the cluster. + Returns (broker_addr, Exception) if an exception is raised accessing a broker. 
+ """ + # The brokersUrl setting is not in python URL format, simpler parsing here. + result = [] + brokers = filter(None, re.sub(r'(^amqps?:)|(tcp:)', "", ha_broker.brokersUrl).split(",")) + if brokers and opts.all: + if "@" in opts.broker: userpass = opts.broker.split("@")[0] + else: userpass = None + for b in brokers: + if userpass and not "@" in b: opts.broker = userpass+"@"+b + else: opts.broker = b + try: + connection, qmf_broker, ha_broker = self.connect(opts) + func(ha_broker, b) + except Exception,e: + func(ha_broker, b, e) + else: + func(ha_broker) + + def execute(self, args): + opts, args = self.op.parse_args(args) + if len(args) != len(self.arg_names)+1: + self.op.print_help() + raise Exception("Wrong number of arguments") + self.connection, qmf_broker, ha_broker = self.connect(opts) + if self.connect_agent and not ha_broker: + raise Exception("HA module is not loaded on broker at %s" % opts.broker) + try: self.do_execute(qmf_broker, ha_broker, opts, args) + finally: self.connection.close() + + def do_execute(self, qmf_broker, opts, args): + raise Exception("Command '%s' is not yet implemented"%self.name) + +class ManagerCommand(Command): + """ + Base for commands that should only be used by a cluster manager tool that ensures + cluster consistency. + """ + + manager_commands = [] # Cluster manager commands + + def __init__(self, name, help, arg_names=[], connect_agent=True): + """@param connect_agent true if we should establish a QMF agent connection""" + super(ManagerCommand, self).__init__(name, "[Cluster manager only] "+help, arg_names, connect_agent) + self.commands.remove(self) # Not a user command + self.manager_commands.append(self) + + +class PingCmd(Command): + def __init__(self): + Command.__init__(self, "ping","Check if the broker is alive and responding", connect_agent=False) + def do_execute(self, qmf_broker, ha_broker, opts, args): + self.connection.session() # Make sure we can establish a session. 
+PingCmd() + +class PromoteCmd(ManagerCommand): + def __init__(self): + super(PromoteCmd, self).__init__("promote", "Promote a backup broker to primary. This command should *only* be used by a cluster manager (such as rgmanager) that ensures only one broker is primary at a time. Promoting more than one broker to primary at the same time will make the cluster inconsistent and will cause data loss and unexpected behavior.") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + qmf_broker._method("promote", {}, HA_BROKER, timeout=opts.timeout) + +PromoteCmd() + + +class StatusCmd(Command): + def __init__(self): + Command.__init__(self, "status", "Print HA status") + self.op.add_option( + "--expect", metavar="<status>", + help="Don't print status. Return 0 if it matches <status>, 1 otherwise") + self.op.add_option( + "--is-primary", action="store_true", default=False, + help="Don't print status. Return 0 if the broker is primary, 1 otherwise") + self.op.add_option( + "--all", action="store_true", default=False, + help="Print status for all brokers in the cluster") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + if opts.is_primary: + if not ha_broker.status in ["active", "recovering"]: raise ExitStatus(1) + return + if opts.expect: + if opts.expect != ha_broker.status: raise ExitStatus(1) + return + + def status(hb, b=None, ex=None): + if ex: print b, ex + elif b: print b, hb.status + else: print hb.status + self.all_brokers(ha_broker, opts, status) + +StatusCmd() + +class ReplicateCmd(Command): + def __init__(self): + Command.__init__(self, "replicate", "Set up replication from <queue> on <remote-broker> to <queue> on the current broker.", ["<queue>", "<remote-broker>"]) + def do_execute(self, qmf_broker, ha_broker, opts, args): + qmf_broker._method("replicate", {"broker":args[1], "queue":args[2]}, HA_BROKER, timeout=opts.timeout) +ReplicateCmd() + +class QueryCmd(Command): + def __init__(self): + Command.__init__(self, "query", "Print HA 
configuration and status") + self.op.add_option( + "--all", action="store_true", default=False, + help="Print configuration and status for all brokers in the cluster") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + def query(hb, b=None, ex=None): + if ex: + print "%s %s\n" % (b, ex) + else: + if b: + print "%-20s %s"%("Address:", b) + for x in [("Status:", hb.status), + ("Broker ID:", hb.systemId), + ("Brokers URL:", hb.brokersUrl), + ("Public URL:", hb.publicUrl), + ("Replicate: ", hb.replicateDefault) + ]: + print "%-20s %s"%x + if b: print + self.all_brokers(ha_broker, opts, query) + + +QueryCmd() + +def print_usage(prog): + print "usage: %s <command> [<arguments>]\n\nCommands are:\n"%prog + for cmd in Command.commands: + print " %-12s %s."%(cmd.name, cmd.help.split(".")[0]) + print "\nFor help with a command type: %s <command> --help\n"%prog + +def find_command(args, commands): + """Find a command among the arguments and options""" + for arg in args: + cmds = [cmd for cmd in commands if cmd.name == arg] + if cmds: return cmds[0] + return None + +def main_except(argv): + """This version of main raises exceptions""" + args = argv[1:] + commands = Command.commands + if "--cluster-manager" in args: + commands += ManagerCommand.manager_commands + args.remove("--cluster-manager") + if len(args) and args[0] in ['help', '--help', '-help', '-h', 'help-all', '--help-all']: + if 'help-all' in args[0]: + for c in commands: c.op.print_help(); print + else: + print_usage(os.path.basename(argv[0])); + else: + command = find_command(args, commands) + if command: + command.execute(args) + else: + # Check for attempt to use a manager command without --cluster-manager + command = find_command(args, ManagerCommand.manager_commands) + if command: + message="""'%s' should only be called by the cluster manager. +Incorrect use of '%s' will cause cluster malfunction. +To call from a cluster manager use '%s --cluster-manager'. 
""" + raise Exception(message%((command.name,)*3)) + else: + print_usage(os.path.basename(argv[0])); + raise Exception("No valid command") + +def main(argv): + try: + main_except(argv) + return 0 + except ExitStatus, e: + return e.status + except Exception, e: + print "%s: %s"%(type(e).__name__, e) + return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/qpid/cpp/management/python/bin/qpid-ha.bat b/qpid/cpp/management/python/bin/qpid-ha.bat new file mode 100644 index 0000000000..29a77a0fb4 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-ha.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-ha %* diff --git a/qpid/cpp/management/python/bin/qpid-printevents b/qpid/cpp/management/python/bin/qpid-printevents new file mode 100755 index 0000000000..f702ca91e8 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-printevents @@ -0,0 +1,191 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import os +import optparse +import sys +from optparse import IndentedHelpFormatter +from time import time, strftime, gmtime, sleep +from threading import Lock, Condition, Thread +from qpid.messaging import Connection +import qpid.messaging.exceptions + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpidtoollibs.broker import EventHelper + + +class Printer(object): + """ + This class serializes printed lines so that events coming from different + threads don't overlap each other. + """ + def __init__(self): + self.lock = Lock() + + def pr(self, text): + self.lock.acquire() + try: + print text + finally: + self.lock.release() + sys.stdout.flush() + + +class EventReceiver(Thread): + """ + One instance of this class is created for each broker that is being monitored. + This class does not use the "reconnect" option because it needs to report as + events when the connection is established and when it's lost. 
+ """ + def __init__(self, printer, url, options): + Thread.__init__(self) + self.printer = printer + self.url = url + self.options = options + self.running = True + self.helper = EventHelper() + + def cancel(self): + self.running = False + + def run(self): + isOpen = False + while self.running: + try: + conn = Connection.establish(self.url, **self.options) + isOpen = True + self.printer.pr(strftime("%c", gmtime(time())) + " NOTIC qpid-printevents:brokerConnected broker=%s" % self.url) + + sess = conn.session() + rx = sess.receiver(self.helper.eventAddress()) + + while self.running: + try: + msg = rx.fetch(1) + event = self.helper.event(msg) + self.printer.pr(event.__repr__()) + sess.acknowledge() + except qpid.messaging.exceptions.Empty: + pass + + except Exception, e: + if isOpen: + self.printer.pr(strftime("%c", gmtime(time())) + " NOTIC qpid-printevents:brokerDisconnected broker=%s" % self.url) + isOpen = False + sleep(1) + + +class JHelpFormatter(IndentedHelpFormatter): + """ + Format usage and description without stripping newlines from usage strings + """ + def format_usage(self, usage): + return usage + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +_usage = "%prog [options] [broker-addr]..." + +_description = \ +""" +Collect and print events from one or more Qpid message brokers. + +If no broker-addr is supplied, %prog connects to 'localhost:5672'. + +[broker-addr] syntax: + + [username/password@] hostname + ip-address [:<port>] + +Examples: + +$ %prog localhost:5672 +$ %prog 10.1.1.7:10000 +$ %prog guest/guest@broker-host:10000 +""" + +def main(argv=None): + p = optparse.OptionParser(usage=_usage, description=_description, formatter=JHelpFormatter()) + p.add_option("--heartbeats", action="store_true", default=False, help="Use heartbeats.") + p.add_option("--sasl-mechanism", action="store", type="string", metavar="<mech>", help="SASL mechanism for authentication (e.g. 
EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + p.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + p.add_option("--ssl-certificate", action="store", type="string", metavar="<cert>", help="Client SSL certificate (PEM Format)") + p.add_option("--ssl-key", action="store", type="string", metavar="<key>", help="Client SSL private key (PEM Format)") + p.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + + options, arguments = p.parse_args(args=argv) + if len(arguments) == 0: + arguments.append("localhost") + + brokers = [] + conn_options = {} + props = {} + printer = Printer() + + if options.sasl_mechanism: + conn_options['sasl_mechanisms'] = options.sasl_mechanism + if options.sasl_service_name: + conn_options['sasl_service'] = options.sasl_service_name + if options.ssl_certificate: + conn_options['ssl_certfile'] = options.ssl_certificate + if options.ssl_key: + if not options.ssl_certificate: + p.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = options.ssl_key + if options.ha_admin: + props['qpid.ha-admin'] = 1 + if options.heartbeats: + props['heartbeat'] = 5 + + if len(props) > 0: + conn_options['client_properties'] = props + + try: + try: + for host in arguments: + er = EventReceiver(printer, host, conn_options) + brokers.append(er) + er.start() + + while (True): + sleep(10) + + except KeyboardInterrupt: + print + return 0 + + except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + return 1 + finally: + for b in brokers: + b.cancel() + for b in brokers: + b.join() + +if __name__ == '__main__': + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-printevents.bat b/qpid/cpp/management/python/bin/qpid-printevents.bat new file mode 100644 index 0000000000..3486bed39d --- /dev/null +++ 
b/qpid/cpp/management/python/bin/qpid-printevents.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-printevents %* diff --git a/qpid/cpp/management/python/bin/qpid-qls-analyze b/qpid/cpp/management/python/bin/qpid-qls-analyze new file mode 100755 index 0000000000..7fbf6b1bb2 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-qls-analyze @@ -0,0 +1,114 @@ +#!/usr/bin/env python + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +""" +qpid-qls-analyze + +Reads and analyzes a Qpid Linear Store (QLS) store directory. +""" + +import os.path +import sys + +default = os.path.normpath('/usr/share/qpid-tools') +home = os.environ.get('QPID_TOOLS_HOME', default) +sys.path.append(os.path.join(home, 'python')) + +import argparse +import os +import qlslibs.analyze +import qlslibs.efp + +class QlsAnalyzerArgParser(argparse.ArgumentParser): + """ + Class to handle command-line arguments. 
+ """ + def __init__(self): + argparse.ArgumentParser.__init__(self, description='Qpid Linear Store Analyzer', prog='qpid-qls-analyze') + self.add_argument('qls_dir', metavar='DIR', + help='Qpid Linear Store (QLS) directory to be analyzed') + self.add_argument('--efp', action='store_true', + help='Analyze the Emtpy File Pool (EFP) and show stats') + self.add_argument('--show-recovered-recs', action='store_true', + help='Show only recovered records') + self.add_argument('--show-recovery-recs', action='store_true', + help='Show material records found during recovery') + self.add_argument('--show-all-recs', action='store_true', + help='Show all records (including fillers) found during recovery') + self.add_argument('--show-xids', action='store_true', + help='Show xid as hex number, otherwise show only xid length. Only has effect when records are shown') +# TODO: Add ability to show xid as an index rather than a value, helps analysis when xid is a long value with +# small differences which cannot easily be seen when looking at an output. Also prints a table of indeces vs xid values. +# self.add_argument('--show-xid-index', action='store_true', +# help='Show xids by index rather than by their value. Useful for long xids. Prints xid index table') + self.add_argument('--show-data', action='store_true', + help='Show data, otherwise show only data length. Only has effect when records are shown') + self.add_argument('--stats', action='store_true', + help='Print journal record stats') + self.add_argument('--txtest', action='store_true', + help='Show qpid-txtest message number as the message content when viewing records. 
Only has effect when records are shown') + self.add_argument('--txn', action='store_true', + help='Reconcile incomplete transactions') + self.add_argument('--version', action='version', + version='%(prog)s ' + QqpdLinearStoreAnalyzer.QLS_ANALYZE_VERSION) + def parse_args(self, args=None, namespace=None): + args = argparse.ArgumentParser.parse_args(self, args, namespace) + # If required, perform additional validity checks here, raise errors if req'd + return args + +class QqpdLinearStoreAnalyzer(object): + """ + Top-level store analyzer. Will analyze the directory in args.qls_dir as the top-level Qpid Linear Store (QLS) + directory. The following may be analyzed: + * The Empty File Pool (if --efp is specified in the arguments) + * The Linear Store + * The Transaction Prepared List (TPL) + """ + QLS_ANALYZE_VERSION = '1.0' + def __init__(self): + self.args = None + self._process_args() + self.qls_dir = os.path.abspath(self.args.qls_dir) + self.efp_manager = qlslibs.efp.EfpManager(self.qls_dir, None) + self.jrnl_recovery_mgr = qlslibs.analyze.JournalRecoveryManager(self.qls_dir, self.args) + def _process_args(self): + """ Create arg parser and process args """ + parser = QlsAnalyzerArgParser() + self.args = parser.parse_args() + if not os.path.exists(self.args.qls_dir): + parser.error('Journal path "%s" does not exist' % self.args.qls_dir) + def report(self): + """ Create a report on the linear store previously analyzed using analyze() """ + if self.args.efp: + self.efp_manager.report() + self.jrnl_recovery_mgr.report() + def run(self): + """ Run the analyzer, which reads and analyzes the linear store """ + if self.args.efp: + self.efp_manager.run(None) + self.jrnl_recovery_mgr.run() + +#============================================================================== +# main program +#============================================================================== + +if __name__ == "__main__": + M = QqpdLinearStoreAnalyzer() + M.run() + M.report() diff --git 
a/qpid/cpp/management/python/bin/qpid-queue-stats b/qpid/cpp/management/python/bin/qpid-queue-stats new file mode 100755 index 0000000000..ca78f9b602 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-queue-stats @@ -0,0 +1,159 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import optparse +import sys +import re +import socket +import qpid +from threading import Condition +from qmf.console import Session, Console +from qpid.peer import Closed +from qpid.connection import Connection, ConnectionFailed +from time import sleep + +class BrokerManager(Console): + def __init__(self, host, conn_options): + self.url = host + self.objects = {} + self.filter = None + self.session = Session(self, rcvEvents=False, rcvHeartbeats=False, + userBindings=True, manageConnections=True) + self.broker = self.session.addBroker(self.url, **conn_options) + self.firstError = True + + def setFilter(self,filter): + self.filter = filter + + def brokerConnected(self, broker): + if not self.firstError: + print "*** Broker connected" + self.firstError = False + + def brokerDisconnected(self, broker): + print "*** Broker connection lost - %s, retrying..." 
% broker.getError() + self.firstError = False + self.objects.clear() + + def objectProps(self, broker, record): + className = record.getClassKey().getClassName() + if className != "queue": + return + + id = record.getObjectId().__repr__() + if id not in self.objects: + self.objects[id] = (record.name, None, None) + + def objectStats(self, broker, record): + className = record.getClassKey().getClassName() + if className != "queue": + return + + id = record.getObjectId().__repr__() + if id not in self.objects: + return + + (name, first, last) = self.objects[id] + if first == None: + self.objects[id] = (name, record, None) + return + + if len(self.filter) > 0 : + match = False + + for x in self.filter: + if x.match(name): + match = True + break + if match == False: + return + + if last == None: + lastSample = first + else: + lastSample = last + + self.objects[id] = (name, first, record) + + deltaTime = float (record.getTimestamps()[0] - lastSample.getTimestamps()[0]) + if deltaTime < 1000000000.0: + return + enqueueRate = float (record.msgTotalEnqueues - lastSample.msgTotalEnqueues) / \ + (deltaTime / 1000000000.0) + dequeueRate = float (record.msgTotalDequeues - lastSample.msgTotalDequeues) / \ + (deltaTime / 1000000000.0) + print "%-41s%10.2f%11d%13.2f%13.2f" % \ + (name, deltaTime / 1000000000, record.msgDepth, enqueueRate, dequeueRate) + sys.stdout.flush() + + + def Display (self): + self.session.bindClass("org.apache.qpid.broker", "queue") + print "Queue Name Sec Depth Enq Rate Deq Rate" + print "========================================================================================" + sys.stdout.flush() + try: + while True: + sleep (1) + if self.firstError and self.broker.getError(): + self.firstError = False + print "*** Error: %s, retrying..." 
% self.broker.getError() + except KeyboardInterrupt: + print + self.session.delBroker(self.broker) + +def main(argv=None): + p = optparse.OptionParser() + p.add_option('--broker-address','-a', default='localhost' , help='broker-addr is in the form: [username/password@] hostname | ip-address [:<port>] \n ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost') + p.add_option('--filter','-f' ,default=None ,help='a list of comma separated queue names (regex are accepted) to show') + p.add_option("--sasl-mechanism", action="store", type="string", metavar="<mech>", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + p.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + p.add_option("--ssl-certificate", action="store", type="string", metavar="<cert>", help="Client SSL certificate (PEM Format)") + p.add_option("--ssl-key", action="store", type="string", metavar="<key>", help="Client SSL private key (PEM Format)") + + options, arguments = p.parse_args(args=argv) + + conn_options = {} + if options.sasl_mechanism: + conn_options['mechanisms'] = options.sasl_mechanism + if options.sasl_service_name: + conn_options['service'] = options.sasl_service_name + if options.ssl_certificate: + conn_options['ssl_certfile'] = options.ssl_certificate + if options.ssl_key: + if not options.ssl_certificate: + p.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = options.ssl_key + + host = options.broker_address + filter = [] + if options.filter != None: + for s in options.filter.split(","): + filter.append(re.compile(s)) + + bm = BrokerManager(host, conn_options) + bm.setFilter(filter) + bm.Display() + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/qpid/cpp/management/python/bin/qpid-queue-stats.bat 
b/qpid/cpp/management/python/bin/qpid-queue-stats.bat new file mode 100644 index 0000000000..24290d46b3 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-queue-stats.bat @@ -0,0 +1,3 @@ +@echo off +python %~dp0\qpid-queue-stats %* + diff --git a/qpid/cpp/management/python/bin/qpid-receive b/qpid/cpp/management/python/bin/qpid-receive new file mode 100755 index 0000000000..f14df277ac --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-receive @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import optparse, sys, time +import statistics +from qpid.messaging import * + +SECOND = 1000 +TIME_SEC = 1000000000 + +op = optparse.OptionParser(usage="usage: %prog [options]", description="Drains messages from the specified address") +op.add_option("-b", "--broker", default="localhost:5672", type="str", help="url of broker to connect to") +op.add_option("-a", "--address", type="str", help="address to receive from") +op.add_option("--connection-options", default={}, help="options for the connection") +op.add_option("-m", "--messages", default=0, type="int", help="stop after N messages have been received, 0 means no limit") +op.add_option("--timeout", default=0, type="int", help="timeout in seconds to wait before exiting") +op.add_option("-f", "--forever", default=False, action="store_true", help="ignore timeout and wait forever") +op.add_option("--ignore-duplicates", default=False, action="store_true", help="Detect and ignore duplicates (by checking 'sn' header)") +op.add_option("--verify-sequence", default=False, action="store_true", help="Verify there are no gaps in the message sequence (by checking 'sn' header)") +op.add_option("--check-redelivered", default=False, action="store_true", help="Fails with exception if a duplicate is not marked as redelivered (only relevant when ignore-duplicates is selected)") +op.add_option("--capacity", default=1000, type="int", help="size of the senders outgoing message queue") +op.add_option("--ack-frequency", default=100, type="int", help="Ack frequency (0 implies none of the messages will get accepted)") +op.add_option("--tx", default=0, type="int", help="batch size for transactions (0 implies transaction are not used)") +op.add_option("--rollback-frequency", default=0, type="int", help="rollback frequency (0 implies no transaction will be rolledback)") +op.add_option("--print-content", type="str", default="yes", help="print out message content") +op.add_option("--print-headers", type="str", default="no", help="print 
out message headers") +op.add_option("--failover-updates", default=False, action="store_true", help="Listen for membership updates distributed via amq.failover") +op.add_option("--report-total", default=False, action="store_true", help="Report total throughput statistics") +op.add_option("--report-every", default=0, type="int", help="Report throughput statistics every N messages") +op.add_option("--report-header", type="str", default="yes", help="Headers on report") +op.add_option("--ready-address", type="str", help="send a message to this address when ready to receive") +op.add_option("--receive-rate", default=0, type="int", help="Receive at rate of N messages/second. 0 means receive as fast as possible") +#op.add_option("--help", default=False, action="store_true", help="print this usage statement") + +def getTimeout(timeout, forever): + if forever: + return None + else: + return SECOND*timeout + + +EOS = "eos" +SN = "sn" + +# Check for duplicate or dropped messages by sequence number +class SequenceTracker: + def __init__(self, opts): + self.opts = opts + self.lastSn = 0 + + # Return True if the message should be procesed, false if it should be ignored. 
+ def track(self, message): + if not(self.opts.verify_sequence) or (self.opts.ignore_duplicates): + return True + sn = message.properties[SN] + duplicate = (sn <= lastSn) + dropped = (sn > lastSn+1) + if self.opts.verify_sequence and dropped: + raise Exception("Gap in sequence numbers %s-%s" %(lastSn, sn)) + ignore = (duplicate and self.opts.ignore_duplicates) + if ignore and self.opts.check_redelivered and (not msg.redelivered): + raise Exception("duplicate sequence number received, message not marked as redelivered!") + if not duplicate: + lastSn = sn + return (not(ignore)) + + +def main(): + opts, args = op.parse_args() + if not opts.address: + raise Exception("Address must be specified!") + + broker = opts.broker + address = opts.address + connection = Connection(opts.broker, **opts.connection_options) + + try: + connection.open() + if opts.failover_updates: + auto_fetch_reconnect_urls(connection) + session = connection.session(transactional=(opts.tx)) + receiver = session.receiver(opts.address) + if opts.capacity > 0: + receiver.capacity = opts.capacity + msg = Message() + count = 0 + txCount = 0 + sequenceTracker = SequenceTracker(opts) + timeout = getTimeout(opts.timeout, opts.forever) + done = False + stats = statistics.ThroughputAndLatency() + reporter = statistics.Reporter(opts.report_every, opts.report_header == "yes", stats) + + if opts.ready_address is not None: + session.sender(opts.ready_address).send(msg) + if opts.tx > 0: + session.commit() + # For receive rate calculation + start = time.time()*TIME_SEC + interval = 0 + if opts.receive_rate > 0: + interval = TIME_SEC / opts.receive_rate + + replyTo = {} # a dictionary of reply-to address -> sender mapping + + while (not done): + try: + msg = receiver.fetch(timeout=timeout) + reporter.message(msg) + if sequenceTracker.track(msg): + if msg.content == EOS: + done = True + else: + count+=1 + if opts.print_headers == "yes": + if msg.subject is not None: + print "Subject: %s" %msg.subject + if 
msg.reply_to is not None: + print "ReplyTo: %s" %msg.reply_to + if msg.correlation_id is not None: + print "CorrelationId: %s" %msg.correlation_id + if msg.user_id is not None: + print "UserId: %s" %msg.user_id + if msg.ttl is not None: + print "TTL: %s" %msg.ttl + if msg.priority is not None: + print "Priority: %s" %msg.priority + if msg.durable: + print "Durable: true" + if msg.redelivered: + print "Redelivered: true" + print "Properties: %s" %msg.properties + print + if opts.print_content == "yes": + print msg.content + if (opts.messages > 0) and (count >= opts.messages): + done = True + # end of "if sequenceTracker.track(msg):" + if (opts.tx > 0) and (count % opts.tx == 0): + txCount+=1 + if (opts.rollback_frequency > 0) and (txCount % opts.rollback_frequency == 0): + session.rollback() + else: + session.commit() + elif (opts.ack_frequency > 0) and (count % opts.ack_frequency == 0): + session.acknowledge() + if msg.reply_to is not None: # Echo message back to reply-to address. + if msg.reply_to not in replyTo: + replyTo[msg.reply_to] = session.sender(msg.reply_to) + replyTo[msg.reply_to].capacity = opts.capacity + replyTo[msg.reply_to].send(msg) + if opts.receive_rate > 0: + delay = start + count*interval - time.time()*TIME_SEC + if delay > 0: + time.sleep(delay) + # Clear out message properties & content for next iteration. 
+ msg = Message() + except Empty: # no message fetched => break the while cycle + break + # end of while cycle + if opts.report_total: + reporter.report() + if opts.tx > 0: + txCount+=1 + if opts.rollback_frequency and (txCount % opts.rollback_frequency == 0): + session.rollback() + else: + session.commit() + else: + session.acknowledge() + session.close() + connection.close() + except Exception,e: + print e + connection.close() + +if __name__ == "__main__": main() diff --git a/qpid/cpp/management/python/bin/qpid-route b/qpid/cpp/management/python/bin/qpid-route new file mode 100755 index 0000000000..f51d2493e9 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-route @@ -0,0 +1,635 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from optparse import OptionParser, OptionGroup, IndentedHelpFormatter +import sys +import os +import locale +from qmf.console import Session, BrokerURL +from time import sleep + +usage = """ +Usage: qpid-route [OPTIONS] dynamic add <dest-broker> <src-broker> <exchange> [tag] [exclude-list] [mechanism] + qpid-route [OPTIONS] dynamic del <dest-broker> <src-broker> <exchange> + + qpid-route [OPTIONS] route add <dest-broker> <src-broker> <exchange> <routing-key> [tag] [exclude-list] [mechanism] + qpid-route [OPTIONS] route del <dest-broker> <src-broker> <exchange> <routing-key> + qpid-route [OPTIONS] queue add <dest-broker> <src-broker> <exchange> <queue> [mechanism] + qpid-route [OPTIONS] queue del <dest-broker> <src-broker> <exchange> <queue> + qpid-route [OPTIONS] route list [<dest-broker>] + qpid-route [OPTIONS] route flush [<dest-broker>] + qpid-route [OPTIONS] route map [<broker>] + + qpid-route [OPTIONS] link add <dest-broker> <src-broker> [mechanism] + qpid-route [OPTIONS] link del <dest-broker> <src-broker> + qpid-route [OPTIONS] link list [<dest-broker>]""" + +description = """ +ADDRESS syntax: + + [username/password@] hostname + ip-address [:<port>]""" + +def Usage(): + print usage + +class Config: + def __init__(self): + self._verbose = False + self._quiet = False + self._durable = False + self._dellink = False + self._srclocal = False + self._transport = "tcp" + self._ack = 0 + self._credit = 0xFFFFFFFF # unlimited + self._connTimeout = 10 + self._conn_options = {} + +config = Config() + +class JHelpFormatter(IndentedHelpFormatter): + """Format usage and description without stripping newlines from usage strings + """ + + def format_usage(self, usage): + return usage + + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +def OptionsAndArguments(argv): + parser = OptionParser(usage=usage, + description=description, + formatter=JHelpFormatter()) + + parser.add_option("--timeout", 
action="store", type="int", default=10, metavar="<secs>", help="Maximum time to wait for broker connection (in seconds)") + parser.add_option("-v", "--verbose", action="store_true", help="Verbose output") + parser.add_option("-q", "--quiet", action="store_true", help="Quiet output, don't print duplicate warnings") + parser.add_option("-d", "--durable", action="store_true", help="Added configuration shall be durable") + + parser.add_option("-e", "--del-empty-link", action="store_true", help="Delete link after deleting last route on the link") + parser.add_option("-s", "--src-local", action="store_true", help="Make connection to source broker (push route)") + + parser.add_option("--ack", action="store", type="int", metavar="<n>", help="Acknowledge transfers over the bridge in batches of N") + parser.add_option("--credit", action="store", type="int", default=0xFFFFFFFF, metavar="<msgs>", + help="Maximum number of messages a sender can have outstanding (0=unlimited)") + parser.add_option("-t", "--transport", action="store", type="string", default="tcp", metavar="<transport>", help="Transport to use for links, defaults to tcp") + + parser.add_option("--client-sasl-mechanism", action="store", type="string", metavar="<mech>", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). Used when the client connects to the destination broker (not for authentication between the source and destination brokers - that is specified using the [mechanisms] argument to 'add route'). 
SASL automatically picks the most secure available mechanism - use this option to override.") + parser.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + parser.add_option("--ssl-certificate", action="store", type="string", metavar="<cert>", help="Client SSL certificate (PEM Format)") + parser.add_option("--ssl-key", action="store", type="string", metavar="<key>", help="Client SSL private key (PEM Format)") + parser.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + opts, encArgs = parser.parse_args(args=argv) + + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if opts.timeout: + config._connTimeout = opts.timeout + if config._connTimeout == 0: + config._connTimeout = None + + if opts.verbose: + config._verbose = True + + if opts.quiet: + config._quiet = True + + if opts.durable: + config._durable = True + + if opts.del_empty_link: + config._dellink = True + + if opts.src_local: + config._srclocal = True + + if opts.transport: + config._transport = opts.transport + + if opts.ha_admin: + config._conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + if opts.ack: + config._ack = opts.ack + + config._credit = opts.credit + + if opts.client_sasl_mechanism: + config._conn_options['mechanisms'] = opts.client_sasl_mechanism + if opts.sasl_service_name: + config._conn_options['service'] = opts.sasl_service_name + + if opts.ssl_certificate: + config._conn_options['ssl_certfile'] = opts.ssl_certificate + + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + config._conn_options['ssl_keyfile'] = opts.ssl_key + + return args + + +class RouteManager: + def __init__(self, localBroker): + self.brokerList = {} + self.local = BrokerURL(localBroker) + self.remote = None + self.qmf = Session() + self.broker = 
self.qmf.addBroker(localBroker, config._connTimeout, **config._conn_options) + self.broker._waitForStable() + self.agent = self.broker.getBrokerAgent() + + def disconnect(self): + try: + if self.broker: + self.qmf.delBroker(self.broker) + self.broker = None + while len(self.brokerList): + b = self.brokerList.popitem() + if b[0] != self.local.name(): + self.qmf.delBroker(b[1]) + except: + pass # ignore errors while shutting down + + def getLink(self): + links = self.agent.getObjects(_class="link") + for link in links: + if self.remote.match(link.host, link.port): + return link + return None + + def checkLink(self, link): + retry = 3 + while link is None or (link.state in ("Waiting", "Connecting", "Closing") and retry > 0): + sleep(1) + link = self.getLink() + retry -= 1 + + if link == None: + raise Exception("Link failed to create") + + if link.state == "Failed": + raise Exception("Link failed to create %s" % (link.lastError or "")) + elif config._verbose: + print "Link state is", link.state + + def addLink(self, remoteBroker, interbroker_mechanism=""): + self.remote = BrokerURL(remoteBroker) + if self.local.match(self.remote.host, self.remote.port): + raise Exception("Linking broker to itself is not permitted") + + brokers = self.agent.getObjects(_class="broker") + broker = brokers[0] + link = self.getLink() + if link == None: + res = broker.connect(self.remote.host, self.remote.port, config._durable, + interbroker_mechanism, self.remote.authName or "", self.remote.authPass or "", + config._transport) + + def delLink(self, remoteBroker): + self.remote = BrokerURL(remoteBroker) + brokers = self.agent.getObjects(_class="broker") + broker = brokers[0] + link = self.getLink() + if link == None: + raise Exception("Link not found") + + res = link.close() + if config._verbose: + print "Close method returned:", res.status, res.text + + def listLinks(self): + links = self.agent.getObjects(_class="link") + if len(links) == 0: + print "No Links Found" + else: + print + print 
"Host Port Transport Durable State Last Error" + print "=============================================================================" + for link in links: + print "%-16s%-8d%-13s%c %-18s%s" % \ + (link.host, link.port, link.transport, YN(link.durable), link.state, link.lastError) + + def mapRoutes(self): + print + print "Finding Linked Brokers:" + + self.brokerList[self.local.name()] = self.broker + print " %s:%s... Ok" % (self.local.host, self.local.port) + + added = True + while added: + added = False + links = self.qmf.getObjects(_class="link") + for link in links: + url = BrokerURL(host=link.host, port=link.port, user=self.broker.authUser, password=self.broker.authPass) + if url.name() not in self.brokerList: + print " %s:%s..." % (link.host, link.port) + try: + url.authName = self.local.authName + url.authPass = self.local.authPass + b = self.qmf.addBroker(url, config._connTimeout, **config._conn_options) + self.brokerList[url.name()] = b + added = True + print "Ok" + except Exception, e: + print e + + print + print "Dynamic Routes:" + bridges = self.qmf.getObjects(_class="bridge", dynamic=True) + fedExchanges = [] + for bridge in bridges: + if bridge.src not in fedExchanges: + fedExchanges.append(bridge.src) + if len(fedExchanges) == 0: + print " none found" + print + + for ex in fedExchanges: + print " Exchange %s:" % ex + pairs = [] + for bridge in bridges: + if bridge.src == ex: + link = bridge._linkRef_ + fromUrl = BrokerURL(host=link.host, port=link.port) + toUrl = bridge.getBroker().getUrl() + found = False + for pair in pairs: + if pair.matches(fromUrl, toUrl): + found = True + if not found: + pairs.append(RoutePair(fromUrl, toUrl)) + for pair in pairs: + print " %s" % pair + print + + print "Static Routes:" + bridges = self.qmf.getObjects(_class="bridge", dynamic=False) + if len(bridges) == 0: + print " none found" + print + + for bridge in bridges: + link = bridge._linkRef_ + fromUrl = "%s:%s" % (link.host, link.port) + toUrl = 
bridge.getBroker().getUrl() + leftType = "ex" + rightType = "ex" + if bridge.srcIsLocal: + arrow = "=>" + left = bridge.src + right = bridge.dest + if bridge.srcIsQueue: + leftType = "queue" + else: + arrow = "<=" + left = bridge.dest + right = bridge.src + if bridge.srcIsQueue: + rightType = "queue" + + if bridge.srcIsQueue: + print " %s(%s=%s) %s %s(%s=%s)" % \ + (toUrl, leftType, left, arrow, fromUrl, rightType, right) + else: + print " %s(%s=%s) %s %s(%s=%s) key=%s" % \ + (toUrl, leftType, left, arrow, fromUrl, rightType, right, bridge.key) + print + + while len(self.brokerList): + b = self.brokerList.popitem() + if b[0] != self.local.name(): + self.qmf.delBroker(b[1]) + + def addRoute(self, remoteBroker, exchange, routingKey, tag, excludes, interbroker_mechanism="", dynamic=False): + if dynamic and config._srclocal: + raise Exception("--src-local is not permitted on dynamic routes") + + self.addLink(remoteBroker, interbroker_mechanism) + link = self.getLink() + self.checkLink(link) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.key == routingKey and not bridge.srcIsQueue: + if not config._quiet: + raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, routingKey)) + sys.exit(0) + + if config._verbose: + print "Creating inter-broker binding..." 
+ res = link.bridge(config._durable, exchange, exchange, routingKey, tag, + excludes, False, config._srclocal, dynamic, + config._ack, credit=config._credit) + if res.status != 0: + raise Exception(res.text) + if config._verbose: + print "Bridge method returned:", res.status, res.text + + def addQueueRoute(self, remoteBroker, interbroker_mechanism, exchange, queue ): + self.addLink(remoteBroker, interbroker_mechanism) + link = self.getLink() + self.checkLink(link) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue: + if not config._quiet: + raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, queue)) + sys.exit(0) + + if config._verbose: + print "Creating inter-broker binding..." + res = link.bridge(config._durable, queue, exchange, "", "", "", True, + config._srclocal, False, config._ack, credit=config._credit) + if res.status != 0: + raise Exception(res.text) + if config._verbose: + print "Bridge method returned:", res.status, res.text + + def delQueueRoute(self, remoteBroker, exchange, queue): + self.remote = BrokerURL(remoteBroker) + link = self.getLink() + if link == None: + if not config._quiet: + raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name())) + sys.exit(0) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue: + if config._verbose: + print "Closing bridge..." + res = bridge.close() + if res.status != 0: + raise Exception("Error closing bridge: %d - %s" % (res.status, res.text)) + if len(bridges) == 1 and config._dellink: + link = self.getLink() + if link == None: + sys.exit(0) + if config._verbose: + print "Last bridge on link, closing link..." 
+ res = link.close() + if res.status != 0: + raise Exception("Error closing link: %d - %s" % (res.status, res.text)) + sys.exit(0) + if not config._quiet: + raise Exception("Route not found") + + def delRoute(self, remoteBroker, exchange, routingKey, dynamic=False): + self.remote = BrokerURL(remoteBroker) + link = self.getLink() + if link == None: + if not config._quiet: + raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name())) + sys.exit(0) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and bridge.dest == exchange and bridge.key == routingKey \ + and bridge.dynamic == dynamic: + if config._verbose: + print "Closing bridge..." + res = bridge.close() + if res.status != 0: + raise Exception("Error closing bridge: %d - %s" % (res.status, res.text)) + if len(bridges) == 1 and config._dellink: + link = self.getLink() + if link == None: + sys.exit(0) + if config._verbose: + print "Last bridge on link, closing link..." 
+ res = link.close() + if res.status != 0: + raise Exception("Error closing link: %d - %s" % (res.status, res.text)) + return + if not config._quiet: + raise Exception("Route not found") + + def listRoutes(self): + links = self.qmf.getObjects(_class="link") + bridges = self.qmf.getObjects(_class="bridge") + + for bridge in bridges: + myLink = None + for link in links: + if bridge.linkRef == link.getObjectId(): + myLink = link + break + if myLink != None: + if bridge.dynamic: + keyText = "<dynamic>" + else: + keyText = bridge.key + print "%s %s:%d %s %s" % (self.local.name(), myLink.host, myLink.port, bridge.dest, keyText) + + def clearAllRoutes(self): + links = self.qmf.getObjects(_class="link") + bridges = self.qmf.getObjects(_class="bridge") + + for bridge in bridges: + if config._verbose: + myLink = None + for link in links: + if bridge.linkRef == link.getObjectId(): + myLink = link + break + if myLink != None: + print "Deleting Bridge: %s:%d %s %s... " % (myLink.host, myLink.port, bridge.dest, bridge.key), + res = bridge.close() + if res.status != 0: + print "Error: %d - %s" % (res.status, res.text) + elif config._verbose: + print "Ok" + + if config._dellink: + links = self.qmf.getObjects(_class="link") + for link in links: + if config._verbose: + print "Deleting Link: %s:%d... 
" % (link.host, link.port), + res = link.close() + if res.status != 0: + print "Error: %d - %s" % (res.status, res.text) + elif config._verbose: + print "Ok" + +class RoutePair: + def __init__(self, fromUrl, toUrl): + self.fromUrl = fromUrl + self.toUrl = toUrl + self.bidir = False + + def __repr__(self): + if self.bidir: + delimit = "<=>" + else: + delimit = " =>" + return "%s %s %s" % (self.fromUrl, delimit, self.toUrl) + + def matches(self, fromUrl, toUrl): + if fromUrl == self.fromUrl and toUrl == self.toUrl: + return True + if toUrl == self.fromUrl and fromUrl == self.toUrl: + self.bidir = True + return True + return False + + +def YN(val): + if val == 1: + return 'Y' + return 'N' + + +def main(argv=None): + + args = OptionsAndArguments(argv) + nargs = len(args) + if nargs < 2: + Usage() + return(-1) + + if nargs == 2: + localBroker = "localhost" + else: + if config._srclocal: + localBroker = args[3] + remoteBroker = args[2] + else: + localBroker = args[2] + if nargs > 3: + remoteBroker = args[3] + + group = args[0] + cmd = args[1] + + rm = None + try: + rm = RouteManager(localBroker) + if group == "link": + if cmd == "add": + if nargs < 3 or nargs > 5: + Usage() + return(-1) + interbroker_mechanism = "" + if nargs > 4: interbroker_mechanism = args[4] + rm.addLink(remoteBroker, interbroker_mechanism) + rm.checkLink(rm.getLink()) + elif cmd == "del": + if nargs != 4: + Usage() + return(-1) + rm.delLink(remoteBroker) + elif cmd == "list": + rm.listLinks() + + elif group == "dynamic": + if cmd == "add": + if nargs < 5 or nargs > 8: + Usage() + return(-1) + + tag = "" + excludes = "" + interbroker_mechanism = "" + if nargs > 5: tag = args[5] + if nargs > 6: excludes = args[6] + if nargs > 7: interbroker_mechanism = args[7] + rm.addRoute(remoteBroker, args[4], "", tag, excludes, interbroker_mechanism, dynamic=True) + elif cmd == "del": + if nargs != 5: + Usage() + return(-1) + else: + rm.delRoute(remoteBroker, args[4], "", dynamic=True) + + elif group == "route": + 
if cmd == "add": + if nargs < 6 or nargs > 9: + Usage() + return(-1) + + tag = "" + excludes = "" + interbroker_mechanism = "" + if nargs > 6: tag = args[6] + if nargs > 7: excludes = args[7] + if nargs > 8: interbroker_mechanism = args[8] + rm.addRoute(remoteBroker, args[4], args[5], tag, excludes, interbroker_mechanism, dynamic=False) + elif cmd == "del": + if nargs != 6: + Usage() + return(-1) + rm.delRoute(remoteBroker, args[4], args[5], dynamic=False) + elif cmd == "map": + rm.mapRoutes() + else: + if cmd == "list": + rm.listRoutes() + elif cmd == "flush": + rm.clearAllRoutes() + else: + Usage() + return(-1) + + elif group == "queue": + if nargs < 6 or nargs > 7: + Usage() + return(-1) + if cmd == "add": + interbroker_mechanism = "" + if nargs > 6: interbroker_mechanism = args[6] + rm.addQueueRoute(remoteBroker, interbroker_mechanism, exchange=args[4], queue=args[5] ) + elif cmd == "del": + rm.delQueueRoute(remoteBroker, exchange=args[4], queue=args[5]) + else: + Usage() + return(-1) + else: + Usage() + return(-1) + + except Exception,e: + if rm: + rm.disconnect() # try to release broker resources + print "Failed: %s - %s" % (e.__class__.__name__, e) + return 1 + + rm.disconnect() + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-route.bat b/qpid/cpp/management/python/bin/qpid-route.bat new file mode 100644 index 0000000000..ae8e9fe63c --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-route.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-route %* diff --git a/qpid/cpp/management/python/bin/qpid-send b/qpid/cpp/management/python/bin/qpid-send new file mode 100755 index 0000000000..b0105e41a6 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-send @@ -0,0 +1,281 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import optparse, random, os, time, uuid +from qpid.messaging import * +import statistics + +EOS = "eos" +SN = "sn" +TS = "ts" + +TIME_SEC = 1000000000 +SECOND = 1000 + +def nameval(st): + idx = st.find("=") + if idx >= 0: + name = st[0:idx] + value = st[idx+1:] + else: + name = st + value = None + return name, value + + +op = optparse.OptionParser(usage="usage: %prog [options]", description="Spouts messages to the specified address") +op.add_option("-b", "--broker", default="localhost:5672", type="str", help="url of broker to connect to") +op.add_option("-a", "--address", type="str", help="address to send to") +op.add_option("--connection-options", default={}, help="options for the connection") +op.add_option("-m", "--messages", default=1, type="int", help="stop after N messages have been sent, 0 means no limit") +op.add_option("-i", "--id", type="str", help="use the supplied id instead of generating one") +op.add_option("--reply-to", type="str", help="specify reply-to address") +op.add_option("--send-eos", default=0, type="int", help="Send N EOS messages to mark end of input") +op.add_option("--durable", default=False, action="store_true", help="Mark messages as durable") +op.add_option("--ttl", default=0, type="int", help="Time-to-live for messages, in milliseconds") 
+op.add_option("--priority", default=0, type="int", help="Priority for messages (higher value implies higher priority)") +op.add_option("-P", "--property", default=[], action="append", type="str", help="specify message property") +op.add_option("--correlation-id", type="str", help="correlation-id for message") +op.add_option("--user-id", type="str", help="userid for message") +op.add_option("--content-string", type="str", help="use CONTENT as message content") +op.add_option("--content-size", default=0, type="int", help="create an N-byte message content") +op.add_option("-M", "--content-map", default=[], action="append", type="str", help="specify entry for map content") +op.add_option("--content-stdin", default=False, action="store_true", help="read message content from stdin, one line per message") +op.add_option("--capacity", default=1000, type="int", help="size of the senders outgoing message queue") +op.add_option("--tx", default=0, type="int", help="batch size for transactions (0 implies transaction are not used)") +op.add_option("--rollback-frequency", default=0, type="int", help="rollback frequency (0 implies no transaction will be rolledback)") +op.add_option("--failover-updates", default=False, action="store_true", help="Listen for membership updates distributed via amq.failover") +op.add_option("--report-total", default=False, action="store_true", help="Report total throughput statistics") +op.add_option("--report-every", default=0, type="int", help="Report throughput statistics every N messages") +op.add_option("--report-header", type="str", default="yes", help="Headers on report") +op.add_option("--send-rate", default=0, type="int", help="Send at rate of N messages/second. 0 means send as fast as possible") +op.add_option("--flow-control", default=0, type="int", help="Do end to end flow control to limit queue depth to 2*N. 
0 means no flow control.") +op.add_option("--sequence", type="str", default="yes", help="Add a sequence number messages property (required for duplicate/lost message detection)") +op.add_option("--timestamp", type="str", default="yes", help="Add a time stamp messages property (required for latency measurement)") +op.add_option("--group-key", type="str", help="Generate groups of messages using message header 'KEY' to hold the group identifier") +op.add_option("--group-prefix", default="GROUP-", type="str", help="Generate group identifers with 'STRING' prefix (if group-key specified)") +op.add_option("--group-size", default=10, type="int", help="Number of messages per a group (if group-key specified)") +op.add_option("--group-randomize-size", default=False, action="store_true", help="Randomize the number of messages per group to [1...group-size] (if group-key specified)") +op.add_option("--group-interleave", default=1, type="int", help="Simultaineously interleave messages from N different groups (if group-key specified)") + + +class ContentGenerator: + def setContent(self, msg): + return + +class GetlineContentGenerator(ContentGenerator): + def setContent(self, msg): + content = sys.stdin.readline() + got = (not line) + if (got): + msg.content = content + return got + +class FixedContentGenerator(ContentGenerator): + def __init__(self, content=None): + self.content = content + + def setContent(self, msg): + msg.content = self.content + return True + +class MapContentGenerator(ContentGenerator): + def __init__(self, opts=None): + self.opts = opts + + def setContent(self, msg): + self.content = {} + for e in self.opts.content_map: + name, val = nameval(p) + content[name] = val + msg.content = self.content + return True + + +# tag each generated message with a group identifer +class GroupGenerator: + def __init__(self, key, prefix, size, randomize, interleave): + groupKey = key + groupPrefix = prefix + groupSize = size + randomizeSize = randomize + groupSuffix = 0 + if 
(randomize > 0): + random.seed(os.getpid()) + + for i in range(0, interleave): + self.newGroup() + current = 0 + + def setGroupInfo(self, msg): + if (current == len(groups)): + current = 0 + my_group = groups[current] + msg.properties[groupKey] = my_group[id]; + # print "SENDING GROUPID=[%s]\n" % my_group[id] + my_group[count]=my_group[count]+1 + if (my_group[count] == my_group[size]): + self.newGroup() + del groups[current] + else: + current+=1 + + def newGroup(self): + groupId = "%s%s" % (groupPrefix, groupSuffix) + groupSuffix+=1 + size = groupSize + if (randomizeSize == True): + size = random.randint(1,groupSize) + # print "New group: GROUPID=["%s] size=%s" % (groupId, size) + groups.append({'id':groupId, 'size':size, 'count':0}) + + + +def main(): + opts, args = op.parse_args() + if not opts.address: + raise Exception("Address must be specified!") + + broker = opts.broker + address = opts.address + connection = Connection(opts.broker, **opts.connection_options) + + try: + connection.open() + if (opts.failover_updates): + auto_fetch_reconnect_urls(connection) + session = connection.session(transactional=(opts.tx)) + sender = session.sender(opts.address) + if (opts.capacity>0): + sender.capacity = opts.capacity + sent = 0 + txCount = 0 + stats = statistics.Throughput() + reporter = statistics.Reporter(opts.report_every, opts.report_header == "yes", stats) + + contentGen = ContentGenerator() + content = "" # auxiliary variable for determining content type of message - needs to be changed to {} for Map message + if opts.content_stdin: + opts.messages = 0 # Don't limit number of messages sent. 
+ contentGen = GetlineContentGenerator() + elif opts.content_map is not None: + contentGen = MapContentGenerator(opts) + content = {} + elif opts.content_size is not None: + contentGen = FixedContentGenerator('X' * opts.content_size) + else: + contentGen = FixedContentGenerator(opts.content_string) + if opts.group_key is not None: + groupGen = GroupGenerator(opts.group_key, opts.group_prefix, opts.group_size, opts.group_random_size, opts.group_interleave) + + msg = Message(content=content) + msg.durable = opts.durable + if opts.ttl: + msg.ttl = opts.ttl/1000.0 + if opts.priority: + msg.priority = opts.priority + if opts.reply_to is not None: + if opts.flow_control > 0: + raise Exception("Can't use reply-to and flow-control together") + msg.reply_to = opts.reply_to + if opts.user_id is not None: + msg.user_id = opts.user_id + if opts.correlation_id is not None: + msg.correlation_id = opts.correlation_id + for p in opts.property: + name, val = nameval(p) + msg.properties[name] = val + + start = time.time()*TIME_SEC + interval = 0 + if opts.send_rate > 0: + interval = TIME_SEC/opts.send_rate + + flowControlAddress = "flow-" + str(uuid.uuid1()) + ";{create:always,delete:always}" + flowSent = 0 + if opts.flow_control > 0: + flowControlReceiver = session.receiver(flowControlAddress) + flowControlReceiver.capacity = 2 + + while (contentGen.setContent(msg) == True): + sent+=1 + if opts.sequence == "yes": + msg.properties[SN] = sent + + if opts.flow_control > 0: + if (sent % opts.flow_control == 0): + msg.reply_to = flowControlAddress + flowSent+=1 + else: + msg.reply_to = "" # Clear the reply address. 
+ + if 'groupGen' in vars(): + groupGen.setGroupInfo(msg) + + if (opts.timestamp == "yes"): + msg.properties[TS] = int(time.time()*TIME_SEC) + sender.send(msg) + reporter.message(msg) + + if ((opts.tx > 0) and (sent % opts.tx == 0)): + txCount+=1 + if ((opts.rollbackFrequency > 0) and (txCount % opts.rollbackFrequency == 0)): + session.rollback() + else: + session.commit() + if ((opts.messages > 0) and (sent >= opts.messages)): + break + + if (opts.flow_control > 0) and (flowSent == 2): + flowControlReceiver.fetch(timeout=SECOND) + flowSent -= 1 + + if (opts.send_rate > 0): + delay = start + sent*interval - time.time()*TIME_SEC + if (delay > 0): + time.sleep(delay) + #end of while + + while flowSent > 0: + flowControlReceiver.fetch(timeout=SECOND) + flowSent -= 1 + + if (opts.report_total): + reporter.report() + for i in reversed(range(1,opts.send_eos+1)): + if (opts.sequence == "yes"): + sent+=1 + msg.properties[SN] = sent + msg.properties[EOS] = True #TODO (also in C++ client): add in ability to send digest or similar + sender.send(msg) + if ((opts.tx > 0) and (sent % opts.tx == 0)): + txCount+=1 + if ((opts.rollback_frequency > 0) and (txCount % opts.rollback_frequency == 0)): + session.rollback() + else: + session.commit() + session.sync() + session.close() + connection.close() + except Exception,e: + print e + connection.close() + +if __name__ == "__main__": main() diff --git a/qpid/cpp/management/python/bin/qpid-stat b/qpid/cpp/management/python/bin/qpid-stat new file mode 100755 index 0000000000..1780c4a819 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-stat @@ -0,0 +1,514 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +from optparse import OptionParser, OptionGroup +import sys +import locale +import socket +import re +from qpid.messaging import Connection + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpidtoollibs import BrokerAgent +from qpidtoollibs import Display, Header, Sorter, YN, Commas, TimeLong + + +class Config: + def __init__(self): + self._host = "localhost" + self._connTimeout = 10 + self._types = "" + self._limit = 50 + self._increasing = False + self._sortcol = None + +config = Config() +conn_options = {} + +def OptionsAndArguments(argv): + """ Set global variables for options, return arguments """ + + global config + global conn_options + + usage = \ +"""%prog -g [options] + %prog -c [options] + %prog -e [options] + %prog -q [options] [queue-name] + %prog -u [options] + %prog -m [options] + %prog --acl [options]""" + + parser = OptionParser(usage=usage) + + group1 = OptionGroup(parser, "General Options") + group1.add_option("-b", "--broker", action="store", type="string", default="localhost", metavar="<url>", + help="URL of the broker to query") + group1.add_option("-t", "--timeout", action="store", type="int", default=10, metavar="<secs>", + help="Maximum time to wait for broker connection (in seconds)") + group1.add_option("--sasl-mechanism", action="store", type="string", 
metavar="<mech>", + help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + group1.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + group1.add_option("--ssl-certificate", action="store", type="string", metavar="<cert>", help="Client SSL certificate (PEM Format)") + group1.add_option("--ssl-key", action="store", type="string", metavar="<key>", help="Client SSL private key (PEM Format)") + group1.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + parser.add_option_group(group1) + + group2 = OptionGroup(parser, "Command Options") + group2.add_option("-g", "--general", help="Show General Broker Stats", action="store_const", const="g", dest="show") + group2.add_option("-c", "--connections", help="Show Connections", action="store_const", const="c", dest="show") + group2.add_option("-e", "--exchanges", help="Show Exchanges", action="store_const", const="e", dest="show") + group2.add_option("-q", "--queues", help="Show Queues", action="store_const", const="q", dest="show") + group2.add_option("-u", "--subscriptions", help="Show Subscriptions", action="store_const", const="u", dest="show") + group2.add_option("-m", "--memory", help="Show Broker Memory Stats", action="store_const", const="m", dest="show") + group2.add_option( "--acl", help="Show Access Control List Stats", action="store_const", const="acl", dest="show") + parser.add_option_group(group2) + + group3 = OptionGroup(parser, "Display Options") + group3.add_option("-S", "--sort-by", metavar="<colname>", help="Sort by column name") + group3.add_option("-I", "--increasing", action="store_true", default=False, help="Sort by increasing value (default = decreasing)") + group3.add_option("-L", "--limit", type="int", default=50, metavar="<n>", help="Limit output to n rows") + 
parser.add_option_group(group3) + + opts, args = parser.parse_args(args=argv) + + if not opts.show: + parser.error("You must specify one of these options: -g, -c, -e, -q, -m, or -u. For details, try $ qpid-stat --help") + + config._types = opts.show + config._sortcol = opts.sort_by + config._host = opts.broker + config._connTimeout = opts.timeout + config._increasing = opts.increasing + config._limit = opts.limit + + if opts.sasl_mechanism: + conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.ha_admin: + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + return args + +class BrokerManager: + def __init__(self): + self.brokerName = None + self.connection = None + self.broker = None + self.cluster = None + + def SetBroker(self, brokerUrl): + self.url = brokerUrl + self.connection = Connection.establish(self.url, **conn_options) + self.broker = BrokerAgent(self.connection) + + def Disconnect(self): + """ Release any allocated brokers. Ignore any failures as the tool is + shutting down. 
+ """ + try: + self.connection.close() + except: + pass + + def displayBroker(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header('uptime', Header.DURATION)) + heads.append(Header('cluster', Header.NONE)) + heads.append(Header('connections', Header.COMMAS)) + heads.append(Header('sessions', Header.COMMAS)) + heads.append(Header('exchanges', Header.COMMAS)) + heads.append(Header('queues', Header.COMMAS)) + rows = [] + broker = self.broker.getBroker() + cluster = self.broker.getCluster() + clusterInfo = cluster and cluster.clusterName + "<" + cluster.status + ">" or "<standalone>" + connections = self.getConnectionMap() + sessions = self.getSessionMap() + exchanges = self.getExchangeMap() + queues = self.getQueueMap() + row = (broker.getUpdateTime() - broker.getCreateTime(), + clusterInfo, + len(connections), len(sessions), + len(exchanges), len(queues)) + rows.append(row) + disp.formattedTable('Broker Summary:', heads, rows) + + if 'queueCount' not in broker.values: + return + + print + heads = [] + heads.append(Header('Statistic')) + heads.append(Header('Messages', Header.COMMAS)) + heads.append(Header('Bytes', Header.COMMAS)) + rows = [] + rows.append(['queue-depth', broker.msgDepth, broker.byteDepth]) + rows.append(['total-enqueues', broker.msgTotalEnqueues, broker.byteTotalEnqueues]) + rows.append(['total-dequeues', broker.msgTotalDequeues, broker.byteTotalDequeues]) + rows.append(['persistent-enqueues', broker.msgPersistEnqueues, broker.bytePersistEnqueues]) + rows.append(['persistent-dequeues', broker.msgPersistDequeues, broker.bytePersistDequeues]) + rows.append(['transactional-enqueues', broker.msgTxnEnqueues, broker.byteTxnEnqueues]) + rows.append(['transactional-dequeues', broker.msgTxnDequeues, broker.byteTxnDequeues]) + rows.append(['flow-to-disk-depth', broker.msgFtdDepth, broker.byteFtdDepth]) + rows.append(['flow-to-disk-enqueues', broker.msgFtdEnqueues, broker.byteFtdEnqueues]) + rows.append(['flow-to-disk-dequeues', 
broker.msgFtdDequeues, broker.byteFtdDequeues]) + rows.append(['acquires', broker.acquires, None]) + rows.append(['releases', broker.releases, None]) + rows.append(['discards-no-route', broker.discardsNoRoute, None]) + rows.append(['discards-ttl-expired', broker.discardsTtl, None]) + rows.append(['discards-limit-overflow', broker.discardsOverflow, None]) + rows.append(['discards-ring-overflow', broker.discardsRing, None]) + rows.append(['discards-lvq-replace', broker.discardsLvq, None]) + rows.append(['discards-subscriber-reject', broker.discardsSubscriber, None]) + rows.append(['discards-purged', broker.discardsPurge, None]) + rows.append(['reroutes', broker.reroutes, None]) + rows.append(['abandoned', broker.abandoned, None]) + rows.append(['abandoned-via-alt', broker.abandonedViaAlt, None]) + disp.formattedTable('Aggregate Broker Statistics:', heads, rows) + + + def displayConn(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header('connection')) + heads.append(Header('cproc')) + heads.append(Header('cpid')) + heads.append(Header('mech')) + heads.append(Header('auth')) + heads.append(Header('connected', Header.DURATION)) + heads.append(Header('idle', Header.DURATION)) + heads.append(Header('msgIn', Header.KMG)) + heads.append(Header('msgOut', Header.KMG)) + rows = [] + connections = self.broker.getAllConnections() + broker = self.broker.getBroker() + for conn in connections: + row = [] + row.append(conn.address) + if conn.remoteProcessName: row.append(conn.remoteProcessName) + else: row.append("-") + row.append(conn.remotePid) + if conn.saslMechanism: row.append(conn.saslMechanism) + else: row.append("-") + if conn.authIdentity: row.append(conn.authIdentity) + else: row.append("-") + row.append(broker.getUpdateTime() - conn.getCreateTime()) + row.append(broker.getUpdateTime() - conn.getUpdateTime()) + row.append(conn.msgsFromClient) + row.append(conn.msgsToClient) + rows.append(row) + title = "Connections" + if config._sortcol: + sorter = 
Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displaySession(self): + disp = Display(prefix=" ") + + def displayExchange(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("exchange")) + heads.append(Header("type")) + heads.append(Header("dur", Header.Y)) + heads.append(Header("bind", Header.KMG)) + heads.append(Header("msgIn", Header.KMG)) + heads.append(Header("msgOut", Header.KMG)) + heads.append(Header("msgDrop", Header.KMG)) + heads.append(Header("byteIn", Header.KMG)) + heads.append(Header("byteOut", Header.KMG)) + heads.append(Header("byteDrop", Header.KMG)) + rows = [] + exchanges = self.broker.getAllExchanges() + for ex in exchanges: + row = [] + row.append(ex.name) + row.append(ex.type) + row.append(ex.durable) + row.append(ex.bindingCount) + row.append(ex.msgReceives) + row.append(ex.msgRoutes) + row.append(ex.msgDrops) + row.append(ex.byteReceives) + row.append(ex.byteRoutes) + row.append(ex.byteDrops) + rows.append(row) + title = "Exchanges" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displayQueues(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("queue")) + heads.append(Header("dur", Header.Y)) + heads.append(Header("autoDel", Header.Y)) + heads.append(Header("excl", Header.Y)) + heads.append(Header("msg", Header.KMG)) + heads.append(Header("msgIn", Header.KMG)) + heads.append(Header("msgOut", Header.KMG)) + heads.append(Header("bytes", Header.KMG)) + heads.append(Header("bytesIn", Header.KMG)) + heads.append(Header("bytesOut", Header.KMG)) + heads.append(Header("cons", Header.KMG)) + heads.append(Header("bind", Header.KMG)) + rows = [] + queues = self.broker.getAllQueues() + for q in queues: + row = [] + 
row.append(q.name) + row.append(q.durable) + row.append(q.autoDelete) + row.append(q.exclusive) + row.append(q.msgDepth) + row.append(q.msgTotalEnqueues) + row.append(q.msgTotalDequeues) + row.append(q.byteDepth) + row.append(q.byteTotalEnqueues) + row.append(q.byteTotalDequeues) + row.append(q.consumerCount) + row.append(q.bindingCount) + rows.append(row) + title = "Queues" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + + def displayQueue(self, name): + queue = self.broker.getQueue(name) + if not queue: + print "Queue '%s' not found" % name + return + + disp = Display(prefix=" ") + heads = [] + heads.append(Header('Name')) + heads.append(Header('Durable', Header.YN)) + heads.append(Header('AutoDelete', Header.YN)) + heads.append(Header('Exclusive', Header.YN)) + heads.append(Header('FlowStopped', Header.YN)) + heads.append(Header('FlowStoppedCount', Header.COMMAS)) + heads.append(Header('Consumers', Header.COMMAS)) + heads.append(Header('Bindings', Header.COMMAS)) + rows = [] + rows.append([queue.name, queue.durable, queue.autoDelete, queue.exclusive, + queue.flowStopped, queue.flowStoppedCount, + queue.consumerCount, queue.bindingCount]) + disp.formattedTable("Properties:", heads, rows) + print + + heads = [] + heads.append(Header('Property')) + heads.append(Header('Value')) + rows = [] + rows.append(['arguments', queue.arguments]) + rows.append(['alt-exchange', queue.altExchange]) + disp.formattedTable("Optional Properties:", heads, rows) + print + + heads = [] + heads.append(Header('Statistic')) + heads.append(Header('Messages', Header.COMMAS)) + heads.append(Header('Bytes', Header.COMMAS)) + rows = [] + rows.append(['queue-depth', queue.msgDepth, queue.byteDepth]) + rows.append(['total-enqueues', queue.msgTotalEnqueues, queue.byteTotalEnqueues]) + rows.append(['total-dequeues', 
queue.msgTotalDequeues, queue.byteTotalDequeues]) + rows.append(['persistent-enqueues', queue.msgPersistEnqueues, queue.bytePersistEnqueues]) + rows.append(['persistent-dequeues', queue.msgPersistDequeues, queue.bytePersistDequeues]) + rows.append(['transactional-enqueues', queue.msgTxnEnqueues, queue.byteTxnEnqueues]) + rows.append(['transactional-dequeues', queue.msgTxnDequeues, queue.byteTxnDequeues]) + rows.append(['flow-to-disk-depth', queue.msgFtdDepth, queue.byteFtdDepth]) + rows.append(['flow-to-disk-enqueues', queue.msgFtdEnqueues, queue.byteFtdEnqueues]) + rows.append(['flow-to-disk-dequeues', queue.msgFtdDequeues, queue.byteFtdDequeues]) + rows.append(['acquires', queue.acquires, None]) + rows.append(['releases', queue.releases, None]) + rows.append(['discards-ttl-expired', queue.discardsTtl, None]) + rows.append(['discards-limit-overflow', queue.discardsOverflow, None]) + rows.append(['discards-ring-overflow', queue.discardsRing, None]) + rows.append(['discards-lvq-replace', queue.discardsLvq, None]) + rows.append(['discards-subscriber-reject', queue.discardsSubscriber, None]) + rows.append(['discards-purged', queue.discardsPurge, None]) + rows.append(['reroutes', queue.reroutes, None]) + disp.formattedTable("Statistics:", heads, rows) + + + def displaySubscriptions(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("subscr")) + heads.append(Header("queue")) + heads.append(Header("conn")) + heads.append(Header("procName")) + heads.append(Header("procId")) + heads.append(Header("browse", Header.Y)) + heads.append(Header("acked", Header.Y)) + heads.append(Header("excl", Header.Y)) + heads.append(Header("creditMode")) + heads.append(Header("delivered", Header.COMMAS)) + heads.append(Header("sessUnacked", Header.COMMAS)) + rows = [] + subscriptions = self.broker.getAllSubscriptions() + sessions = self.getSessionMap() + connections = self.getConnectionMap() + for s in subscriptions: + row = [] + try: + row.append(s.name) + 
row.append(s.queueRef) + session = sessions[s.sessionRef] + connection = connections[session.connectionRef] + row.append(connection.address) + if connection.remoteProcessName: row.append(connection.remoteProcessName) + else: row.append("-") + row.append(connection.remotePid) + row.append(s.browsing) + row.append(s.acknowledged) + row.append(s.exclusive) + row.append(s.creditMode) + row.append(s.delivered) + row.append(session.unackedMessages) + rows.append(row) + except: + pass + title = "Subscriptions" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displayMemory(self): + disp = Display(prefix=" ") + heads = [Header('Statistic'), Header('Value', Header.COMMAS)] + rows = [] + memory = self.broker.getMemory() + for k,v in memory.values.items(): + if k != 'name': + rows.append([k, v]) + disp.formattedTable('Broker Memory Statistics:', heads, rows) + + def displayAcl(self): + acl = self.broker.getAcl() + if not acl: + print "ACL Policy Module is not installed" + return + disp = Display(prefix=" ") + heads = [Header('Statistic'), Header('Value')] + rows = [] + rows.append(['policy-file', acl.policyFile]) + rows.append(['enforcing', YN(acl.enforcingAcl)]) + rows.append(['has-transfer-acls', YN(acl.transferAcl)]) + rows.append(['last-acl-load', TimeLong(acl.lastAclLoad)]) + rows.append(['acl-denials', Commas(acl.aclDenyCount)]) + disp.formattedTable('ACL Policy Statistics:', heads, rows) + + def getExchangeMap(self): + exchanges = self.broker.getAllExchanges() + emap = {} + for e in exchanges: + emap[e.name] = e + return emap + + def getQueueMap(self): + queues = self.broker.getAllQueues() + qmap = {} + for q in queues: + qmap[q.name] = q + return qmap + + def getSessionMap(self): + sessions = self.broker.getAllSessions() + smap = {} + for s in sessions: + smap[s.name] = s + return smap + + def 
getConnectionMap(self): + connections = self.broker.getAllConnections() + cmap = {} + for c in connections: + cmap[c.address] = c + return cmap + + def displayMain(self, names, main): + if main == 'g': self.displayBroker() + elif main == 'c': self.displayConn() + elif main == 's': self.displaySession() + elif main == 'e': self.displayExchange() + elif main == 'q': + if len(names) >= 1: + self.displayQueue(names[0]) + else: + self.displayQueues() + elif main == 'u': self.displaySubscriptions() + elif main == 'm': self.displayMemory() + elif main == 'acl': self.displayAcl() + + def display(self, names): + self.displayMain(names, config._types) + + +def main(argv=None): + + args = OptionsAndArguments(argv) + bm = BrokerManager() + + try: + bm.SetBroker(config._host) + bm.display(args) + bm.Disconnect() + return 0 + except KeyboardInterrupt: + print + except Exception,e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + + bm.Disconnect() # try to deallocate brokers + return 1 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-stat.bat b/qpid/cpp/management/python/bin/qpid-stat.bat new file mode 100644 index 0000000000..0a03d5177c --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-stat.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-stat %* diff --git a/qpid/cpp/management/python/bin/qpid-store-chk b/qpid/cpp/management/python/bin/qpid-store-chk new file mode 100755 index 0000000000..f6d70cb3c6 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-store-chk @@ -0,0 +1,332 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from qpidstore import jerr, jrnl, janal +import optparse, os, sys + + +#== class StoreChk ============================================================ + +class StoreChk(object): + """ + This class: + 1. Reads a journal jinf file, and from its info: + 2. Analyzes the journal data files to determine which is the last to be written, then + 3. Reads and analyzes all the records in the journal files. + The only public method is run() which kicks off the analysis. + """ + + def __init__(self): + """Constructor""" + # params + self.opts = None + + self._jdir = None + + # recovery analysis objects +# self._jrnl_info = None +# self.jrnl_rdr = None + + self._process_args() + self._jrnl_info = jrnl.JrnlInfo(self._jdir, self.opts.bfn) + # FIXME: This is a hack... find an elegant way of getting the file size to jrec! 
+ jrnl.JRNL_FILE_SIZE = self._jrnl_info.get_jrnl_file_size_bytes() + self.jrnl_anal = janal.JrnlAnalyzer(self._jrnl_info) + self.jrnl_rdr = janal.JrnlReader(self._jrnl_info, self.jrnl_anal, self.opts.qflag, self.opts.rflag, + self.opts.vflag) + + def run(self): + """Run the store check""" + if not self.opts.qflag: + print self._jrnl_info + print self.jrnl_anal + self.jrnl_rdr.run() + self._report() + + def _report(self): + """Print the results of the store check""" + if not self.opts.qflag: + print + print " === REPORT ====" + print + print "Records: %8d non-transactional" % \ + (self.jrnl_rdr.get_msg_cnt() - self.jrnl_rdr.get_txn_msg_cnt()) + print " %8d transactional" % self.jrnl_rdr.get_txn_msg_cnt() + print " %8d total" % self.jrnl_rdr.get_msg_cnt() + print + print "Transactions: %8d aborts" % self.jrnl_rdr.get_abort_cnt() + print " %8d commits" % self.jrnl_rdr.get_commit_cnt() + print " %8d total" % (self.jrnl_rdr.get_abort_cnt() + self.jrnl_rdr.get_commit_cnt()) + print + if self.jrnl_rdr.emap().size() > 0: + print "Remaining enqueued records (sorted by rid): " + rid_list = self.jrnl_rdr.emap().rids() + rid_list.sort() + for rid in rid_list: + l = self.jrnl_rdr.emap().get(rid) + locked = "" + if l[2]: + locked += " (locked)" + print " fid=%d %s%s" % (l[0], l[1], locked) + print "WARNING: Enqueue-Dequeue mismatch, %d enqueued records remain." % self.jrnl_rdr.emap().size() + else: + print "No remaining enqueued records found (emap empty)." + print + if self.jrnl_rdr.tmap().size() > 0: + txn_rec_cnt = 0 + print "Incomplete transactions: " + for xid in self.jrnl_rdr.tmap().xids(): + jrnl.Utils.format_xid(xid) + recs = self.jrnl_rdr.tmap().get(xid) + for l in recs: + print " fid=%d %s" % (l[0], l[1]) + print " Total: %d records for %s" % (len(recs), jrnl.Utils.format_xid(xid)) + print + txn_rec_cnt += len(recs) + print "WARNING: Incomplete transactions found, %d xids remain containing a total of %d records." 
% \ + (self.jrnl_rdr.tmap().size(), txn_rec_cnt) + else: + print "No incomplete transactions found (tmap empty)." + print + print "%d enqueues, %d journal records processed." % \ + (self.jrnl_rdr.get_msg_cnt(), self.jrnl_rdr.get_rec_cnt()) + + + def _process_args(self): + """Process the command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print all records and transactions (including consumed/closed)") + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self.opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self.opts.qflag and self.opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self.opts.qflag and self.opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + + +#== class CsvStoreChk ========================================================= + +class CsvStoreChk(StoreChk): + """ + This class, in addition to analyzing a journal, can compare the journal footprint (ie enqueued/dequeued/transaction + record counts) to expected values from a CSV file. This can be used for additional automated testing, and is + currently in use in the long store tests for journal encode testing. 
+ """ + + # CSV file cols + TEST_NUM_COL = 0 + NUM_MSGS_COL = 5 + MIN_MSG_SIZE_COL = 7 + MAX_MSG_SIZE_COL = 8 + MIN_XID_SIZE_COL = 9 + MAX_XID_SIZE_COL = 10 + AUTO_DEQ_COL = 11 + TRANSIENT_COL = 12 + EXTERN_COL = 13 + COMMENT_COL = 20 + + def __init__(self): + """Constructor""" + StoreChk.__init__(self) + + # csv params + self.num_msgs = None + self.msg_len = None + self.auto_deq = None + self.xid_len = None + self.transient = None + self.extern = None + + self._warning = [] + + self.jrnl_rdr.set_callbacks(self, CsvStoreChk._csv_pre_run_chk, CsvStoreChk._csv_enq_chk, + CsvStoreChk._csv_deq_chk, CsvStoreChk._csv_txn_chk, CsvStoreChk._csv_post_run_chk) + self._get_csv_test() + + def _get_csv_test(self): + """Get a test from the CSV reader""" + if self.opts.csvfn != None and self.opts.tnum != None: + tparams = self._read_csv_file(self.opts.csvfn, self.opts.tnum) + if tparams == None: + print "ERROR: Test %d not found in CSV file \"%s\"" % (self.opts.tnum, self.opts.csvfn) + sys.exit(1) + self.num_msgs = tparams["num_msgs"] + if tparams["min_size"] == tparams["max_size"]: + self.msg_len = tparams["max_size"] + else: + self.msg_len = 0 + self.auto_deq = tparams["auto_deq"] + if tparams["xid_min_size"] == tparams["xid_max_size"]: + self.xid_len = tparams["xid_max_size"] + else: + self.xid_len = 0 + self.transient = tparams["transient"] + self.extern = tparams["extern"] + + def _read_csv_file(self, filename, tnum): + """Read the CSV test parameter file""" + try: + csvf = open(filename, "r") + except IOError: + print "ERROR: Unable to open CSV file \"%s\"" % filename + sys.exit(1) + for line in csvf: + str_list = line.strip().split(",") + if len(str_list[0]) > 0 and str_list[0][0] != "\"": + try: + if (int(str_list[self.TEST_NUM_COL]) == tnum): + return { "num_msgs": int(str_list[self.NUM_MSGS_COL]), + "min_size": int(str_list[self.MIN_MSG_SIZE_COL]), + "max_size": int(str_list[self.MAX_MSG_SIZE_COL]), + "auto_deq": not (str_list[self.AUTO_DEQ_COL] == "FALSE" or + 
str_list[self.AUTO_DEQ_COL] == "0"), + "xid_min_size": int(str_list[self.MIN_XID_SIZE_COL]), + "xid_max_size": int(str_list[self.MAX_XID_SIZE_COL]), + "transient": not (str_list[self.TRANSIENT_COL] == "FALSE" or + str_list[self.TRANSIENT_COL] == "0"), + "extern": not (str_list[self.EXTERN_COL] == "FALSE" or + str_list[self.EXTERN_COL] == "0"), + "comment": str_list[self.COMMENT_COL] } + except Exception: + pass + return None + + def _process_args(self): + """Process command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-c", "--csv-filename", + action="store", dest="csvfn", + help="CSV filename containing test parameters") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print all records and transactions (including consumed/closed)") + opt.add_option("-t", "--test-num", + action="store", type="int", dest="tnum", + help="Test number from CSV file - only valid if CSV file named") + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self.opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self.opts.qflag and self.opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self.opts.qflag and self.opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + + # Callbacks for checking against CSV test parameters. 
Return False if ok, True to raise error. + + #@staticmethod + def _csv_pre_run_chk(csv_store_chk): + """Check performed before a test runs""" + if csv_store_chk.num_msgs == None: + return + if csv_store_chk.jrnl_anal.is_empty() and csv_store_chk.num_msgs > 0: + raise jerr.AllJrnlFilesEmptyCsvError(csv_store_chk.get_opts().tnum, csv_store_chk.num_msgs) + return False + _csv_pre_run_chk = staticmethod(_csv_pre_run_chk) + + #@staticmethod + def _csv_enq_chk(csv_store_chk, hdr): + """Check performed before each enqueue operation""" + #if csv_store_chk.num_msgs == None: return + # + if csv_store_chk.extern != None: + if csv_store_chk.extern != hdr.extern: + raise jerr.ExternFlagCsvError(csv_store_chk.opts.tnum, csv_store_chk.extern) + if hdr.extern and hdr.data != None: + raise jerr.ExternFlagWithDataCsvError(csv_store_chk.opts.tnum) + if csv_store_chk.msg_len != None and csv_store_chk.msg_len > 0 and hdr.data != None and \ + len(hdr.data) != csv_store_chk.msg_len: + raise jerr.MessageLengthCsvError(csv_store_chk.opts.tnum, csv_store_chk.msg_len, len(hdr.data)) + if csv_store_chk.xid_len != None and csv_store_chk.xid_len > 0 and len(hdr.xid) != csv_store_chk.xid_len: + raise jerr.XidLengthCsvError(csv_store_chk.opts.tnum, csv_store_chk.xid_len, len(hdr.xid)) + if csv_store_chk.transient != None and hdr.transient != csv_store_chk.transient: + raise jerr.TransactionCsvError(csv_store_chk.opts.tnum, csv_store_chk.transient) + return False + _csv_enq_chk = staticmethod(_csv_enq_chk) + + #@staticmethod + def _csv_deq_chk(csv_store_chk, hdr): + """Check performed before each dequeue operation""" + if csv_store_chk.auto_deq != None and not csv_store_chk.auto_deq: + raise jerr.JWarning("[CSV %d] WARNING: Dequeue record rid=%d found in non-dequeue test - ignoring." % + (csv_store_chk.opts.tnum, hdr.rid)) + #self._warning.append("[CSV %d] WARNING: Dequeue record rid=%d found in non-dequeue test - ignoring." 
% + # (csv_store_chk.opts.tnum, hdr.rid)) + return False + _csv_deq_chk = staticmethod(_csv_deq_chk) + + #@staticmethod + def _csv_txn_chk(csv_store_chk, hdr): + """Check performed before each transaction commit/abort""" + return False + _csv_txn_chk = staticmethod(_csv_txn_chk) + + #@staticmethod + def _csv_post_run_chk(csv_store_chk): + """Check performed after the completion of the test""" + # Exclude this check if lastFileFlag is set - the count may be less than the number of msgs sent because + # of journal overwriting + if csv_store_chk.num_msgs != None and not csv_store_chk.jrnl_rdr.is_last_file() and \ + csv_store_chk.num_msgs != csv_store_chk.jrnl_rdr.get_msg_cnt(): + raise jerr.NumMsgsCsvError(csv_store_chk.opts.tnum, csv_store_chk.num_msgs, + csv_store_chk.jrnl_rdr.get_msg_cnt()) + return False + _csv_post_run_chk = staticmethod(_csv_post_run_chk) + +#============================================================================== +# main program +#============================================================================== + +if __name__ == "__main__": + M = CsvStoreChk() + try: + M.run() + except Exception, e: + sys.exit(e) diff --git a/qpid/cpp/management/python/bin/qpid-store-resize b/qpid/cpp/management/python/bin/qpid-store-resize new file mode 100755 index 0000000000..38d8eaf1ad --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-store-resize @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from qpidstore import jerr, jrnl, janal +import glob, optparse, os, sys, time + + +#== class Resize ============================================================== + +class Resize(object): + """ + Creates a new store journal and copies records from old journal to new. The new journal may be of + different size from the old one. The records are packed into the new journal (ie only remaining + enqueued records and associated transactions - if any - are copied over without spaces between them). + + The default action is to push the old journal down into a 'bak' sub-directory and then create a + new journal of the same size and pack it with the records from the old. However, it is possible to + suppress the pushdown (using --no-pushdown), in which case either a new journal id (using + --new-base-filename) or an old journal id (using --old-base-filename) must be supplied. In the former + case, a new journal will be created using the new base file name alongside the old one. In the latter + case, the old journal will be renamed to the supplied name, and the new one will take the default. + Note that both can be specified together with the --no-pushdown option. + + To resize the journal, use the optional --num-jfiles and/or --jfile-size parameters. These + should be large enough to write all the records or an error will result. If the size is large enough + to write all records, but too small to keep below the enqueue threshold, a warning will be printed. 
+ Note that as any valid size will be accepted, a journal can also be shrunk, as long as it is sufficiently + big to accept the transferred records. + """ + + BAK_DIR = "bak" + JFILE_SIZE_PGS_MIN = 1 + JFILE_SIZE_PGS_MAX = 32768 + NUM_JFILES_MIN = 4 + NUM_JFILES_MAX = 64 + + def __init__(self): + """Constructor""" + self._opts = None + self._jdir = None + self._fname = None + self._fnum = None + self._file = None + self._file_rec_wr_cnt = None + self._filler_wr_cnt = None + self._last_rec_fid = None + self._last_rec_offs = None + self._rec_wr_cnt = None + + self._jrnl_info = None + self._jrnl_analysis = None + self._jrnl_reader = None + + self._process_args() + self._jrnl_info = jrnl.JrnlInfo(self._jdir, self._opts.bfn) + # FIXME: This is a hack... find an elegant way of getting the file size to jrec! + jrnl.JRNL_FILE_SIZE = self._jrnl_info.get_jrnl_file_size_bytes() + self._jrnl_analysis = janal.JrnlAnalyzer(self._jrnl_info) + self._jrnl_reader = janal.JrnlReader(self._jrnl_info, self._jrnl_analysis, self._opts.qflag, self._opts.rflag, + self._opts.vflag) + + def run(self): + """Perform the action of resizing the journal""" + if not self._opts.qflag: + print self._jrnl_analysis + self._jrnl_reader.run() + if self._opts.vflag: + print self._jrnl_info + if not self._opts.qflag: + print self._jrnl_reader.report(self._opts.vflag, self._opts.rflag) + self._handle_old_files() + self._create_new_files() + if not self._opts.qflag: + print "Transferred %d records to new journal." % self._rec_wr_cnt + self._chk_free() + + def _chk_free(self): + """Check if sufficient space is available in resized journal to be able to enqueue. 
Raise a warning if not.""" + if self._last_rec_fid == None or self._last_rec_offs == None: + return + wr_capacity_bytes = self._last_rec_fid * self._jrnl_info.get_jrnl_data_size_bytes() + self._last_rec_offs + tot_capacity_bytes = self._jrnl_info.get_tot_jrnl_data_size_bytes() + percent_full = 100.0 * wr_capacity_bytes / tot_capacity_bytes + if percent_full > 80.0: + raise jerr.JWarning("WARNING: Journal %s is %2.1f%% full and will likely not allow enqueuing of new records" + " until some existing records are dequeued." % + (self._jrnl_info.get_jrnl_id(), percent_full)) + + def _create_new_files(self): + """Create new journal files""" + # Assemble records to be transferred + master_record_list = {} + txn_record_list = self._jrnl_reader.txn_obj_list() + if self._opts.vflag and self._jrnl_reader.emap().size() > 0: + print "* Assembling %d records from emap" % self._jrnl_reader.emap().size() + for tup in self._jrnl_reader.emap().get_rec_list(): + hdr = tup[1] + hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[long(hdr.rid)] = hdr + if hdr.xidsize > 0 and hdr.xid in txn_record_list: + txn_hdr = txn_record_list[hdr.xid] + del(txn_record_list[hdr.xid]) + txn_hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[long(txn_hdr.rid)] = txn_hdr + if self._opts.vflag and self._jrnl_reader.tmap().size() > 0: + print "* Assembling %d records from tmap" % self._jrnl_reader.tmap().size() + for xid in self._jrnl_reader.tmap().xids(): + for l in self._jrnl_reader.tmap().get(xid): + hdr = l[1] + hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[hdr.rid] = hdr + rid_list = master_record_list.keys() + rid_list.sort() + + # get base filename + bfn = self._opts.bfn + if self._opts.nbfn != None: + bfn = self._opts.nbfn + + # write jinf file + self._jrnl_info.resize(self._opts.njf, self._opts.jfs) + self._jrnl_info.write(self._jdir, bfn) + + # write records + if self._opts.vflag: + print "* Transferring records to new journal files" + fro = 
self._jrnl_info.get_jrnl_sblk_size_bytes() + while len(rid_list) > 0: + hdr = master_record_list[rid_list.pop(0)] + rec = hdr.encode() + pos = 0 + while pos < len(rec): + if self._file == None or self._file.tell() >= self._jrnl_info.get_jrnl_file_size_bytes(): + if self._file == None: + rid = hdr.rid + elif len(rid_list) == 0: + rid = 0 + else: + rid = rid_list[0] + if not self._rotate_file(rid, fro): + raise jerr.JournalSpaceExceededError() + if len(rec) - pos <= self._jrnl_info.get_jrnl_file_size_bytes() - self._file.tell(): + self._file.write(rec[pos:]) + self._fill_file(jrnl.Utils.size_in_bytes_to_blk(self._file.tell(), + self._jrnl_info.get_jrnl_dblk_size_bytes())) + pos = len(rec) + fro = self._jrnl_info.get_jrnl_sblk_size_bytes() + else: + flen = self._jrnl_info.get_jrnl_file_size_bytes() - self._file.tell() + self._file.write(rec[pos:pos + flen]) + pos += flen + rem = len(rec) - pos + if rem <= self._jrnl_info.get_jrnl_data_size_bytes(): + fro = (jrnl.Utils.size_in_bytes_to_blk(self._jrnl_info.get_jrnl_sblk_size_bytes() + rem, + self._jrnl_info.get_jrnl_dblk_size_bytes())) + else: + fro = 0 + self._rec_wr_cnt += 1 + self._file_rec_wr_cnt += 1 + self._fill_file(add_filler_recs = True) + while self._rotate_file(): + pass + + def _fill_file(self, to_posn = None, add_filler_recs = False): + """Fill a file to a known offset""" + if self._file == None: + return + if add_filler_recs: + nfr = int(jrnl.Utils.rem_bytes_in_blk(self._file, self._jrnl_info.get_jrnl_sblk_size_bytes()) / + self._jrnl_info.get_jrnl_dblk_size_bytes()) + if nfr > 0: + self._filler_wr_cnt = nfr + for i in range(0, nfr): + self._file.write("RHMx") + self._fill_file(jrnl.Utils.size_in_bytes_to_blk(self._file.tell(), + self._jrnl_info.get_jrnl_dblk_size_bytes())) + self._last_rec_fid = self._fnum + self._last_rec_offs = self._file.tell() + if to_posn == None: + to_posn = self._jrnl_info.get_jrnl_file_size_bytes() + elif to_posn > self._jrnl_info.get_jrnl_file_size_bytes(): + raise 
jerr.FillExceedsFileSizeError(to_posn, self._jrnl_info.get_jrnl_file_size_bytes()) + diff = to_posn - self._file.tell() + self._file.write(str("\0" * diff)) + #DEBUG + if self._file.tell() != to_posn: + raise jerr.FillSizeError(self._file.tell(), to_posn) + + def _rotate_file(self, rid = None, fro = None): + """Switch to the next logical file""" + if self._file != None: + self._file.close() + if self._opts.vflag: + if self._file_rec_wr_cnt == 0: + print " (empty)" + elif self._filler_wr_cnt == None: + print " (%d records)" % self._file_rec_wr_cnt + else: + print " (%d records + %d filler(s))" % (self._file_rec_wr_cnt, self._filler_wr_cnt) + if self._fnum == None: + self._fnum = 0 + self._rec_wr_cnt = 0 + elif self._fnum == self._jrnl_info.get_num_jrnl_files() - 1: + return False + else: + self._fnum += 1 + self._file_rec_wr_cnt = 0 + self._fname = os.path.join(self._jrnl_info.get_jrnl_dir(), "%s.%04x.jdat" % + (self._jrnl_info.get_jrnl_base_name(), self._fnum)) + if self._opts.vflag: + print "* Opening file %s" % self._fname, + self._file = open(self._fname, "w") + if rid == None or fro == None: + self._fill_file() + else: + now = time.time() + fhdr = jrnl.FileHdr(0, "RHMf", jrnl.Hdr.HDR_VER, int(jrnl.Hdr.BIG_ENDIAN), 0, rid) + fhdr.init(self._file, 0, self._fnum, self._fnum, fro, int(now), 1000000000*(now - int(now))) + self._file.write(fhdr.encode()) + self._fill_file(self._jrnl_info.get_jrnl_sblk_size_bytes()) + return True + + def _handle_old_files(self): + """Push old journal down into a backup directory""" + target_dir = self._jdir + if not self._opts.npd: + target_dir = os.path.join(self._jdir, self.BAK_DIR) + if os.path.exists(target_dir): + if self._opts.vflag: + print "* Pushdown directory %s exists, deleting content" % target_dir + for fname in glob.glob(os.path.join(target_dir, "*")): + os.unlink(fname) + else: + if self._opts.vflag: + print "* Creating new pushdown directory %s" % target_dir + os.mkdir(target_dir) + + if not self._opts.npd or 
self._opts.obfn != None: + if self._opts.obfn != None and self._opts.vflag: + print "* Renaming old journal files using base name %s" % self._opts.obfn + # .jdat files + for fname in glob.glob(os.path.join(self._jdir, "%s.*.jdat" % self._opts.bfn)): + tbfn = os.path.basename(fname) + if self._opts.obfn != None: + per1 = tbfn.rfind(".") + if per1 >= 0: + per2 = tbfn.rfind(".", 0, per1) + if per2 >= 0: + tbfn = "%s%s" % (self._opts.obfn, tbfn[per2:]) + os.rename(fname, os.path.join(target_dir, tbfn)) + # .jinf file + self._jrnl_info.write(target_dir, self._opts.obfn) + os.unlink(os.path.join(self._jdir, "%s.jinf" % self._opts.bfn)) + + def _print_options(self): + """Print program options""" + if self._opts.vflag: + print "Journal dir: %s" % self._jdir + print "Options: Base filename: %s" % self._opts.bfn + print " New base filename: %s" % self._opts.nbfn + print " Old base filename: %s" % self._opts.obfn + print " Pushdown: %s" % self._opts.npd + print " No. journal files: %d" % self._opts.njf + print " Journal file size: %d 64kiB blocks" % self._opts.jfs + print " Show records flag: %s" % self._opts.rflag + print " Verbose flag: %s" % True + print + + def _process_args(self): + """Process the command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-B", "--new-base-filename", + action="store", dest="nbfn", + help="Base filename for new journal files") + opt.add_option("-n", "--no-pushdown", + action="store_true", dest="npd", + help="Suppress pushdown of old files into \"bak\" dir; old files will remain in existing dir") + opt.add_option("-N", "--num-jfiles", + action="store", type="int", dest="njf", default=8, + help="Number of files for new journal (%d-%d)" % (self.NUM_JFILES_MIN, self.NUM_JFILES_MAX)) + opt.add_option("-o", "--old-base-filename", + 
action="store", dest="obfn", + help="Base filename for old journal files") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print remaining records and transactions") + opt.add_option("-s", "--jfile-size-pgs", + action="store", type="int", dest="jfs", default=24, + help="Size of each new journal file in 64kiB blocks (%d-%d)" % + (self.JFILE_SIZE_PGS_MIN, self.JFILE_SIZE_PGS_MAX)) + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self._opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self._opts.qflag and self._opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self._opts.qflag and self._opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + if self._opts.njf != None and (self._opts.njf < self.NUM_JFILES_MIN or self._opts.njf > self.NUM_JFILES_MAX): + opt.error("Number of files (%d) is out of range (%d-%d)" % + (self._opts.njf, self.NUM_JFILES_MIN, self.NUM_JFILES_MAX)) + if self._opts.jfs != None and (self._opts.jfs < self.JFILE_SIZE_PGS_MIN or + self._opts.jfs > self.JFILE_SIZE_PGS_MAX): + opt.error("File size (%d) is out of range (%d-%d)" % + (self._opts.jfs, self.JFILE_SIZE_PGS_MIN, self.JFILE_SIZE_PGS_MAX)) + if self._opts.npd != None and (self._opts.nbfn == None and self._opts.obfn == None): + opt.error("If (-n/--no-pushdown) is used, then at least one of (-B/--new-base-filename) and" + " (-o/--old-base-filename) must be used.") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + self._print_options() + 
+#============================================================================== +# main program +#============================================================================== + +if __name__ == "__main__": + R = Resize() + try: + R.run() + except Exception, e: + sys.exit(e) diff --git a/qpid/cpp/management/python/bin/qpid-tool b/qpid/cpp/management/python/bin/qpid-tool new file mode 100755 index 0000000000..09ca2b8c13 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-tool @@ -0,0 +1,799 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import os +import optparse +import sys +import socket +import locale +from types import * +from cmd import Cmd +from shlex import split +from threading import Lock +from time import strftime, gmtime +from qpidtoollibs import Display +from qmf.console import Session, Console, SchemaClass, ObjectId + +class Mcli(Cmd): + """ Management Command Interpreter """ + + def __init__(self, dataObject, dispObject): + Cmd.__init__(self) + self.dataObject = dataObject + self.dispObject = dispObject + self.dataObject.setCli(self) + self.prompt = "qpid: " + + def emptyline(self): + pass + + def setPromptMessage(self, p): + if p == None: + self.prompt = "qpid: " + else: + self.prompt = "qpid[%s]: " % p + + def do_help(self, data): + print "Management Tool for QPID" + print + print "Commands:" + print " agents - Print a list of the known Agents" + print " list - Print summary of existing objects by class" + print " list <className> - Print list of objects of the specified class" + print " list <className> active - Print list of non-deleted objects of the specified class" +# print " show <className> - Print contents of all objects of specified class" +# print " show <className> active - Print contents of all non-deleted objects of specified class" + print " show <ID> - Print contents of an object (infer className)" +# print " show <className> <list-of-IDs> - Print contents of one or more objects" +# print " list is space-separated, ranges may be specified (i.e. 
1004-1010)" + print " call <ID> <methodName> [<args>] - Invoke a method on an object" + print " schema - Print summary of object classes seen on the target" + print " schema <className> - Print details of an object class" + print " set time-format short - Select short timestamp format (default)" + print " set time-format long - Select long timestamp format" + print " quit or ^D - Exit the program" + print + + def complete_set(self, text, line, begidx, endidx): + """ Command completion for the 'set' command """ + tokens = split(line) + if len(tokens) < 2: + return ["time-format "] + elif tokens[1] == "time-format": + if len(tokens) == 2: + return ["long", "short"] + elif len(tokens) == 3: + if "long".find(text) == 0: + return ["long"] + elif "short".find(text) == 0: + return ["short"] + elif "time-format".find(text) == 0: + return ["time-format "] + return [] + + def do_set(self, data): + tokens = split(data) + try: + if tokens[0] == "time-format": + self.dispObject.do_setTimeFormat(tokens[1]) + except: + pass + + def complete_schema(self, text, line, begidx, endidx): + tokens = split(line) + if len(tokens) > 2: + return [] + return self.dataObject.classCompletions(text) + + def do_schema(self, data): + try: + self.dataObject.do_schema(data) + except Exception, e: + print "Exception in do_schema: %r" % e + + def do_agents(self, data): + try: + self.dataObject.do_agents(data) + except Exception, e: + print "Exception in do_agents: %r" % e + + def do_id(self, data): + try: + self.dataObject.do_id(data) + except Exception, e: + print "Exception in do_id: %r" % e + + def complete_list(self, text, line, begidx, endidx): + tokens = split(line) + if len(tokens) > 2: + return [] + return self.dataObject.classCompletions(text) + + def do_list(self, data): + try: + self.dataObject.do_list(data) + except Exception, e: + print "Exception in do_list: %r" % e + + def do_show(self, data): + try: + self.dataObject.do_show(data) + except Exception, e: + print "Exception in do_show: 
%r" % e + + def do_call(self, data): + try: + self.dataObject.do_call(data) + except Exception, e: + print "Exception in do_call: %r" % e + + def do_EOF(self, data): + print "quit" + try: + self.dataObject.do_exit() + except: + pass + return True + + def do_quit(self, data): + try: + self.dataObject.do_exit() + except: + pass + return True + + def postcmd(self, stop, line): + return stop + + def postloop(self): + print "Exiting..." + self.dataObject.close() + +#====================================================================================================== +# QmfData +#====================================================================================================== +class QmfData(Console): + """ + """ + def __init__(self, disp, url, conn_options): + self.disp = disp + self.url = url + self.session = Session(self, manageConnections=True) + self.broker = self.session.addBroker(self.url, **conn_options) + self.lock = Lock() + self.connected = None + self.closing = None + self.first_connect = True + self.cli = None + self.idRegistry = IdRegistry() + self.objects = {} + + #======================= + # Methods to support CLI + #======================= + def setCli(self, cli): + self.cli = cli + + def close(self): + try: + self.closing = True + if self.session and self.broker: + self.session.delBroker(self.broker) + except: + pass # we're shutting down - ignore any errors + + def classCompletions(self, text): + pass + + def do_schema(self, data): + if data == "": + self.schemaSummary() + else: + self.schemaTable(data) + + def do_agents(self, data): + agents = self.session.getAgents() + rows = [] + for agent in agents: + version = 1 + if agent.isV2: + version = 2 + rows.append(("%d.%s" % (agent.getBrokerBank(), agent.getAgentBank()), agent.label, agent.epoch, version)) + self.disp.table("QMF Agents:", ("Agent Name", "Label", "Epoch", "QMF Version"), rows) + + def do_id(self, data): + tokens = data.split() + for token in tokens: + if not token.isdigit(): + print 
"Value %s is non-numeric" % token + return + title = "Translation of Display IDs:" + heads = ('DisplayID', 'Epoch', 'Agent', 'ObjectName') + if len(tokens) == 0: + tokens = self.idRegistry.getDisplayIds() + rows = [] + for token in tokens: + rows.append(self.idRegistry.getIdInfo(int(token))) + self.disp.table(title, heads, rows) + + def do_list(self, data): + tokens = data.split() + if len(tokens) == 0: + self.listClasses() + else: + self.listObjects(tokens) + + def do_show(self, data): + tokens = data.split() + if len(tokens) == 0: + print "Missing Class or ID" + return + keys = self.classKeysByToken(tokens[0]) + if keys: + self.showObjectsByKey(keys) + elif tokens[0].isdigit(): + self.showObjectById(int(tokens[0])) + + def _build_object_name(self, obj): + values = [] + for p,v in obj.getProperties(): + if p.name != "vhostRef" and p.index == 1: + if p.name == "brokerRef": # reference to broker + values.append('org.apache.qpid.broker:broker:amqp-broker') + else: + values.append(str(v)) + + object_key = ",".join(values) + class_key = obj.getClassKey(); + return class_key.getPackageName() + ":" + class_key.getClassName() + ":" + object_key + + + def do_call(self, data): + tokens = data.split() + if len(tokens) < 2: + print "Not enough arguments supplied" + return + displayId = long(tokens[0]) + methodName = tokens[1] + args = [] + for arg in tokens[2:]: + ## + ## If the argument is a map, list, boolean, integer, or floating (one decimal point), + ## run it through the Python evaluator so it is converted to the correct type. 
+ ## + ## TODO: use a regex for this instead of this convoluted logic, + ## or even consider passing all args through eval() [which would + ## be a minor change to the interface as string args would then + ## always need to be quoted as strings within a map/list would + ## now] + if arg[0] == '{' or arg[0] == '[' or arg[0] == '"' or arg[0] == '\'' or arg == "True" or arg == "False" or \ + ((arg.count('.') < 2 and (arg.count('-') == 0 or \ + (arg.count('-') == 1 and arg[0] == '-')) and \ + arg.replace('.','').replace('-','').isdigit())): + args.append(eval(arg)) + else: + args.append(arg) + + obj = None + try: + self.lock.acquire() + if displayId not in self.objects: + print "Unknown ID" + return + obj = self.objects[displayId] + finally: + self.lock.release() + + object_id = obj.getObjectId(); + if not object_id.isV2 and obj.getAgent().isV2: + object_name = self._build_object_name(obj) + object_id = ObjectId.create(object_id.agentName, object_name) + + self.session._sendMethodRequest(self.broker, obj.getClassKey(), object_id, methodName, args) + + + def do_exit(self): + pass + + #==================== + # Sub-Command Methods + #==================== + def schemaSummary(self, package_filter=None): + rows = [] + packages = self.session.getPackages() + for package in packages: + if package_filter and package_filter != package: + continue + keys = self.session.getClasses(package) + for key in keys: + kind = "object" + schema = self.session.getSchema(key) + if schema: + if schema.kind == SchemaClass.CLASS_KIND_EVENT: + kind = "event" + if schema.kind == SchemaClass.CLASS_KIND_TABLE: + # + # Don't display event schemata. This will be a future feature. 
+ # + rows.append((package, key.getClassName(), kind)) + self.disp.table("QMF Classes:", ("Package", "Name", "Kind"), rows) + + def schemaTable(self, text): + packages = self.session.getPackages() + if text in packages: + self.schemaSummary(package_filter=text) + for package in packages: + keys = self.session.getClasses(package) + for key in keys: + if text == key.getClassName() or text == package + ":" + key.getClassName(): + schema = self.session.getSchema(key) + if schema.kind == SchemaClass.CLASS_KIND_TABLE: + self.schemaObject(schema) + else: + self.schemaEvent(schema) + + def schemaObject(self, schema): + rows = [] + title = "Object Class: %s" % schema.__repr__() + heads = ("Element", "Type", "Access", "Unit", "Notes", "Description") + for prop in schema.getProperties(): + notes = "" + if prop.index : notes += "index " + if prop.optional : notes += "optional " + row = (prop.name, self.typeName(prop.type), self.accessName(prop.access), + self.notNone(prop.unit), notes, self.notNone(prop.desc)) + rows.append(row) + for stat in schema.getStatistics(): + row = (stat.name, self.typeName(stat.type), "", self.notNone(stat.unit), "", self.notNone(stat.desc)) + rows.append(row) + self.disp.table(title, heads, rows) + + for method in schema.methods: + rows = [] + heads = ("Argument", "Type", "Direction", "Unit", "Description") + title = " Method: %s" % method.name + for arg in method.arguments: + row = (arg.name, self.typeName(arg.type), arg.dir, self.notNone(arg.unit), self.notNone(arg.desc)) + rows.append(row) + print + self.disp.table(title, heads, rows) + + def schemaEvent(self, schema): + rows = [] + title = "Event Class: %s" % schema.__repr__() + heads = ("Element", "Type", "Unit", "Description") + for arg in schema.arguments: + row = (arg.name, self.typeName(arg.type), self.notNone(arg.unit), self.notNone(arg.desc)) + rows.append(row) + self.disp.table(title, heads, rows) + + def listClasses(self): + title = "Summary of Objects by Type:" + heads = ("Package", 
"Class", "Active", "Deleted") + rows = [] + totals = {} + try: + self.lock.acquire() + for dispId in self.objects: + obj = self.objects[dispId] + key = obj.getClassKey() + index = (key.getPackageName(), key.getClassName()) + if index in totals: + stats = totals[index] + else: + stats = (0, 0) + if obj.isDeleted(): + stats = (stats[0], stats[1] + 1) + else: + stats = (stats[0] + 1, stats[1]) + totals[index] = stats + finally: + self.lock.release() + + for index in totals: + stats = totals[index] + rows.append((index[0], index[1], stats[0], stats[1])) + self.disp.table(title, heads, rows) + + def listObjects(self, tokens): + ckeys = self.classKeysByToken(tokens[0]) + show_deleted = True + if len(tokens) > 1 and tokens[1] == 'active': + show_deleted = None + heads = ("ID", "Created", "Destroyed", "Index") + rows = [] + try: + self.lock.acquire() + for dispId in self.objects: + obj = self.objects[dispId] + if obj.getClassKey() in ckeys: + utime, ctime, dtime = obj.getTimestamps() + dtimestr = self.disp.timestamp(dtime) + if dtime == 0: + dtimestr = "-" + if dtime == 0 or (dtime > 0 and show_deleted): + row = (dispId, self.disp.timestamp(ctime), dtimestr, self.objectIndex(obj)) + rows.append(row) + finally: + self.lock.release() + self.disp.table("Object Summary:", heads, rows) + + def showObjectsByKey(self, key): + pass + + def showObjectById(self, dispId): + heads = ("Attribute", str(dispId)) + rows = [] + try: + self.lock.acquire() + if dispId in self.objects: + obj = self.objects[dispId] + caption = "Object of type: %r" % obj.getClassKey() + for prop in obj.getProperties(): + row = (prop[0].name, self.valueByType(prop[0].type, prop[1])) + rows.append(row) + for stat in obj.getStatistics(): + row = (stat[0].name, self.valueByType(stat[0].type, stat[1])) + rows.append(row) + else: + print "No object found with ID %d" % dispId + return + finally: + self.lock.release() + self.disp.table(caption, heads, rows) + + def classKeysByToken(self, token): + """ + Given a token, 
return a list of matching class keys (if found): + token formats: <class-name> + <package-name>:<class-name> + """ + pname = None + cname = None + parts = token.split(':') + if len(parts) == 1: + cname = parts[0] + elif len(parts) == 2: + pname = parts[0] + cname = parts[1] + else: + raise ValueError("Invalid Class Name: %s" % token) + + keys = [] + packages = self.session.getPackages() + for p in packages: + if pname == None or pname == p: + classes = self.session.getClasses(p) + for key in classes: + if key.getClassName() == cname: + keys.append(key) + return keys + + def typeName (self, typecode): + """ Convert type-codes to printable strings """ + if typecode == 1: return "uint8" + elif typecode == 2: return "uint16" + elif typecode == 3: return "uint32" + elif typecode == 4: return "uint64" + elif typecode == 5: return "bool" + elif typecode == 6: return "short-string" + elif typecode == 7: return "long-string" + elif typecode == 8: return "abs-time" + elif typecode == 9: return "delta-time" + elif typecode == 10: return "reference" + elif typecode == 11: return "boolean" + elif typecode == 12: return "float" + elif typecode == 13: return "double" + elif typecode == 14: return "uuid" + elif typecode == 15: return "field-table" + elif typecode == 16: return "int8" + elif typecode == 17: return "int16" + elif typecode == 18: return "int32" + elif typecode == 19: return "int64" + elif typecode == 20: return "object" + elif typecode == 21: return "list" + elif typecode == 22: return "array" + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def valueByType(self, typecode, val): + if type(val) is type(None): + return "absent" + if typecode == 1: return "%d" % val + elif typecode == 2: return "%d" % val + elif typecode == 3: return "%d" % val + elif typecode == 4: return "%d" % val + elif typecode == 6: return val + elif typecode == 7: return val + elif typecode == 8: return strftime("%c", gmtime(val / 1000000000)) + elif typecode == 9: + if 
val < 0: val = 0 + sec = val / 1000000000 + min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % (sec % 60) + return result + + elif typecode == 10: return str(self.idRegistry.displayId(val)) + elif typecode == 11: + if val: + return "True" + else: + return "False" + + elif typecode == 12: return "%f" % val + elif typecode == 13: return "%f" % val + elif typecode == 14: return "%r" % val + elif typecode == 15: return "%r" % val + elif typecode == 16: return "%d" % val + elif typecode == 17: return "%d" % val + elif typecode == 18: return "%d" % val + elif typecode == 19: return "%d" % val + elif typecode == 20: return "%r" % val + elif typecode == 21: return "%r" % val + elif typecode == 22: return "%r" % val + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def accessName (self, code): + """ Convert element access codes to printable strings """ + if code == '1': return "ReadCreate" + elif code == '2': return "ReadWrite" + elif code == '3': return "ReadOnly" + else: + raise ValueError ("Invalid access code: %s" % str(code)) + + def notNone (self, text): + if text == None: + return "" + else: + return text + + def objectIndex(self, obj): + if obj._objectId.isV2: + return obj._objectId.getObject() + result = "" + first = True + props = obj.getProperties() + for prop in props: + if prop[0].index: + if not first: + result += "." 
+ result += self.valueByType(prop[0].type, prop[1]) + first = None + return result + + + #===================== + # Methods from Console + #===================== + def brokerConnectionFailed(self, broker): + """ Invoked when a connection to a broker fails """ + if self.first_connect: + self.first_connect = None + print "Failed to connect: ", broker.error + + def brokerConnected(self, broker): + """ Invoked when a connection is established to a broker """ + try: + self.lock.acquire() + self.connected = True + finally: + self.lock.release() + if not self.first_connect: + print "Broker connected:", broker + self.first_connect = None + + def brokerDisconnected(self, broker): + """ Invoked when the connection to a broker is lost """ + try: + self.lock.acquire() + self.connected = None + finally: + self.lock.release() + if not self.closing: + print "Broker disconnected:", broker + + def objectProps(self, broker, record): + """ Invoked when an object is updated. """ + oid = record.getObjectId() + dispId = self.idRegistry.displayId(oid) + try: + self.lock.acquire() + if dispId in self.objects: + self.objects[dispId].mergeUpdate(record) + else: + self.objects[dispId] = record + finally: + self.lock.release() + + def objectStats(self, broker, record): + """ Invoked when an object is updated. """ + oid = record.getObjectId() + dispId = self.idRegistry.displayId(oid) + try: + self.lock.acquire() + if dispId in self.objects: + self.objects[dispId].mergeUpdate(record) + finally: + self.lock.release() + + def event(self, broker, event): + """ Invoked when an event is raised. 
""" + pass + + def methodResponse(self, broker, seq, response): + print response + + +#====================================================================================================== +# IdRegistry +#====================================================================================================== +class IdRegistry(object): + """ + """ + def __init__(self): + self.next_display_id = 101 + self.oid_to_display = {} + self.display_to_oid = {} + self.lock = Lock() + + def displayId(self, oid): + try: + self.lock.acquire() + if oid in self.oid_to_display: + return self.oid_to_display[oid] + newId = self.next_display_id + self.next_display_id += 1 + self.oid_to_display[oid] = newId + self.display_to_oid[newId] = oid + return newId + finally: + self.lock.release() + + def objectId(self, displayId): + try: + self.lock.acquire() + if displayId in self.display_to_oid: + return self.display_to_oid[displayId] + return None + finally: + self.lock.release() + + def getDisplayIds(self): + result = [] + for displayId in self.display_to_oid: + result.append(str(displayId)) + return result + + def getIdInfo(self, displayId): + """ + Given a display ID, return a tuple of (displayID, bootSequence/Durable, AgentBank/Name, ObjectName) + """ + oid = self.objectId(displayId) + if oid == None: + return (displayId, "?", "unknown", "unknown") + bootSeq = oid.getSequence() + if bootSeq == 0: + bootSeq = '<durable>' + agent = oid.getAgentBank() + if agent == '0': + agent = 'Broker' + return (displayId, bootSeq, agent, oid.getObject()) + +#========================================================= +# Option Parsing +#========================================================= + +def parse_options( argv ): + _usage = """qpid-tool [OPTIONS] [[<username>/<password>@]<target-host>[:<tcp-port>]]""" + + parser = optparse.OptionParser(usage=_usage) + parser.add_option("-b", "--broker", action="store", type="string", metavar="<address>", help="Address of qpidd broker with syntax: 
[username/password@] hostname | ip-address [:<port>]") + parser.add_option("--sasl-mechanism", action="store", type="string", metavar="<mech>", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + parser.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + parser.add_option("--ssl-certificate", + action="store", type="string", metavar="<path>", + help="SSL certificate for client authentication") + parser.add_option("--ssl-key", + action="store", type="string", metavar="<path>", + help="Private key (if not contained in certificate)") + + opts, encArgs = parser.parse_args(args=argv) + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + conn_options = {} + broker_option = None + if opts.broker: + broker_option = opts.broker + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.sasl_mechanism: + conn_options['mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['service'] = opts.sasl_service_name + return broker_option, conn_options, args[1:] + +#========================================================= +# Main Program +#========================================================= + + +# Get options specified on the command line +broker_option, conn_options, cargs = parse_options(sys.argv) + +_host = "localhost" +if broker_option is not None: + _host = broker_option +elif len(cargs) > 0: + _host = cargs[0] + +# note: prior to supporting options, qpid-tool assumed positional parameters. +# the first argument was assumed to be the broker address. 
The second argument +# was optional, and, if supplied, was assumed to be the path to the +# certificate. To preserve backward compatibility, accept the certificate if +# supplied via the second parameter. +# +if 'ssl_certfile' not in conn_options: + if len(cargs) > 1: + conn_options['ssl_certfile'] = cargs[1] + +disp = Display() + +# Attempt to make a connection to the target broker +try: + data = QmfData(disp, _host, conn_options) +except Exception, e: + if str(e).find("Exchange not found") != -1: + print "Management not enabled on broker: Use '-m yes' option on broker startup." + else: + print "Failed: %s - %s" % (e.__class__.__name__, e) + sys.exit(1) + +# Instantiate the CLI interpreter and launch it. +cli = Mcli(data, disp) +print("Management Tool for QPID") +try: + cli.cmdloop() +except KeyboardInterrupt: + print + print "Exiting..." +except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + +# alway attempt to cleanup broker resources +data.close() diff --git a/qpid/cpp/management/python/bin/qpid-tool.bat b/qpid/cpp/management/python/bin/qpid-tool.bat new file mode 100644 index 0000000000..7eb0210da2 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-tool.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-tool %* diff --git a/qpid/cpp/src/tests/multiq_perftest b/qpid/cpp/management/python/lib/.gitignore index 9673dd2e6d..628d81888c 100755..100644 --- a/qpid/cpp/src/tests/multiq_perftest +++ b/qpid/cpp/management/python/lib/.gitignore @@ -1,22 +1,22 @@ -#!/usr/bin/env bash # -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at +# # -# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an +# http://www.apache.org/licenses/LICENSE-2.0 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# "License"); you may not use this file except in compliance # KIND, either express or implied. See the License for the +# Licensed to the Apache Software Foundation (ASF) under one +# Unless required by applicable law or agreed to in writing, +# distributed with this work for additional information +# or more contributor license agreements. See the NOTICE file +# regarding copyright ownership. The ASF licenses this file +# software distributed under the License is distributed on an # specific language governing permissions and limitations +# to you under the Apache License, Version 2.0 (the # under the License. -# - -exec `dirname $0`/run_perftest 10000 --mode shared --qt 16 +# with the License. You may obtain a copy of the License at +/qpid-configc +/qpid-hac +/qpid-routec diff --git a/qpid/cpp/management/python/lib/README.txt b/qpid/cpp/management/python/lib/README.txt new file mode 100644 index 0000000000..cabeb1be02 --- /dev/null +++ b/qpid/cpp/management/python/lib/README.txt @@ -0,0 +1,4 @@ +To run these programs, please set PYTHONPATH to include: + + qpid/python + qpid/extras/qmf/src/py diff --git a/qpid/cpp/src/tests/config.null b/qpid/cpp/management/python/lib/qlslibs/__init__.py index e2f355768b..d8a500d9d8 100644 --- a/qpid/cpp/src/tests/config.null +++ b/qpid/cpp/management/python/lib/qlslibs/__init__.py @@ -17,5 +17,3 @@ # under the License. # -# Deliberately empty configuration file for tests. 
- diff --git a/qpid/cpp/management/python/lib/qlslibs/analyze.py b/qpid/cpp/management/python/lib/qlslibs/analyze.py new file mode 100644 index 0000000000..8c5de05b9e --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/analyze.py @@ -0,0 +1,606 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.analyze + +Classes for recovery and analysis of a Qpid Linear Store (QLS). 
+""" + +import os.path +import qlslibs.err +import qlslibs.jrnl +import qlslibs.utils + +class HighCounter(object): + def __init__(self): + self.num = 0 + def check(self, num): + if self.num < num: + self.num = num + def get(self): + return self.num + def get_next(self): + self.num += 1 + return self.num + +class JournalRecoveryManager(object): + TPL_DIR_NAME = 'tpl2' + JRNL_DIR_NAME = 'jrnl2' + def __init__(self, directory, args): + if not os.path.exists(directory): + raise qlslibs.err.InvalidQlsDirectoryNameError(directory) + self.directory = directory + self.args = args + self.tpl = None + self.journals = {} + self.high_rid_counter = HighCounter() + self.prepared_list = None + def report(self): + self._reconcile_transactions(self.prepared_list, self.args.txn) + if self.tpl is not None: + self.tpl.report(self.args) + for queue_name in sorted(self.journals.keys()): + self.journals[queue_name].report(self.args) + def run(self): + tpl_dir = os.path.join(self.directory, JournalRecoveryManager.TPL_DIR_NAME) + if os.path.exists(tpl_dir): + self.tpl = Journal(tpl_dir, None, self.args) + self.tpl.recover(self.high_rid_counter) + if self.args.show_recovery_recs or self.args.show_all_recs: + print + jrnl_dir = os.path.join(self.directory, JournalRecoveryManager.JRNL_DIR_NAME) + self.prepared_list = self.tpl.txn_map.get_prepared_list() if self.tpl is not None else {} + if os.path.exists(jrnl_dir): + for dir_entry in sorted(os.listdir(jrnl_dir)): + jrnl = Journal(os.path.join(jrnl_dir, dir_entry), self.prepared_list, self.args) + jrnl.recover(self.high_rid_counter) + self.journals[jrnl.get_queue_name()] = jrnl + if self.args.show_recovery_recs or self.args.show_all_recs: + print + print + def _reconcile_transactions(self, prepared_list, txn_flag): + print 'Transaction reconciliation report:' + print '==================================' + print 'Transaction Prepared List (TPL) contains %d open transaction(s):' % len(prepared_list) + for xid in prepared_list.keys(): + 
commit_flag = prepared_list[xid] + if commit_flag is None: + status = '[Prepared, neither committed nor aborted - assuming commit]' + elif commit_flag: + status = '[Prepared, but interrupted during commit phase]' + else: + status = '[Prepared, but interrupted during abort phase]' + print ' ', qlslibs.utils.format_xid(xid), status + if prepared_list[xid] is None: # Prepared, but not committed or aborted + enqueue_record = self.tpl.get_txn_map_record(xid)[0][1] + dequeue_record = qlslibs.utils.create_record(qlslibs.jrnl.DequeueRecord.MAGIC, \ + qlslibs.jrnl.DequeueRecord.TXN_COMPLETE_COMMIT_FLAG, \ + self.tpl.current_journal_file, \ + self.high_rid_counter.get_next(), \ + enqueue_record.record_id, xid, None) + if txn_flag: + self.tpl.add_record(dequeue_record) + print + print 'Open transactions found in queues:' + print '----------------------------------' + for queue_name in sorted(self.journals.keys()): + self.journals[queue_name].reconcile_transactions(prepared_list, txn_flag) + print + if len(prepared_list) > 0: + print 'Creating commit records for the following prepared transactions in TPL:' + for xid in prepared_list.keys(): + print ' ', qlslibs.utils.format_xid(xid) + transaction_record = qlslibs.utils.create_record(qlslibs.jrnl.TransactionRecord.MAGIC_COMMIT, 0, \ + self.tpl.current_journal_file, \ + self.high_rid_counter.get_next(), None, xid, None) + if txn_flag: + self.tpl.add_record(transaction_record) + print + +class EnqueueMap(object): + """ + Map of enqueued records in a QLS journal + """ + def __init__(self, journal): + self.journal = journal + self.enq_map = {} + def add(self, journal_file, enq_record, locked_flag): + if enq_record.record_id in self.enq_map: + raise qlslibs.err.DuplicateRecordIdError(self.journal.current_file_header, enq_record) + self.enq_map[enq_record.record_id] = [journal_file, enq_record, locked_flag] + def contains(self, rid): + """Return True if the map contains the given rid""" + return rid in self.enq_map + def delete(self, 
journal_file, deq_record): + if deq_record.dequeue_record_id in self.enq_map: + enq_list = self.enq_map[deq_record.dequeue_record_id] + del self.enq_map[deq_record.dequeue_record_id] + return enq_list + else: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, deq_record) + def get(self, record_id): + if record_id in self.enq_map: + return self.enq_map[record_id] + return None + def lock(self, journal_file, dequeue_record): + if dequeue_record.dequeue_record_id not in self.enq_map: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, dequeue_record) + self.enq_map[dequeue_record.dequeue_record_id][2] = True + def report_str(self, args): + """Return a string containing a text report for all records in the map""" + if len(self.enq_map) == 0: + return 'No enqueued records found.' + rstr = '%d enqueued records found' % len(self.enq_map) + if args.show_recovered_recs: + rstr += ":" + rid_list = self.enq_map.keys() + rid_list.sort() + for rid in rid_list: + journal_file, record, locked_flag = self.enq_map[rid] + rstr += '\n 0x%x:' % journal_file.file_header.file_num + rstr += record.to_string(args.show_xids, args.show_data, args.txtest) + if locked_flag: + rstr += ' [LOCKED]' + else: + rstr += '.' 
+ return rstr + def unlock(self, journal_file, dequeue_record): + """Set the transaction lock for a given record_id to False""" + if dequeue_record.dequeue_record_id in self.enq_map: + if self.enq_map[dequeue_record.dequeue_record_id][2]: + self.enq_map[dequeue_record.dequeue_record_id][2] = False + else: + raise qlslibs.err.RecordNotLockedError(journal_file.file_header, dequeue_record) + else: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, dequeue_record) + +class TransactionMap(object): + """ + Map of open transactions used while recovering a QLS journal + """ + def __init__(self, enq_map): + self.txn_map = {} + self.enq_map = enq_map + def abort(self, xid): + """Perform an abort operation for the given xid record""" + for journal_file, record, _ in self.txn_map[xid]: + if isinstance(record, qlslibs.jrnl.DequeueRecord): + if self.enq_map.contains(record.dequeue_record_id): + self.enq_map.unlock(journal_file, record) + else: + journal_file.decr_enq_cnt(record) + del self.txn_map[xid] + def add(self, journal_file, record): + if record.xid is None: + raise qlslibs.err.NonTransactionalRecordError(journal_file.file_header, record, 'TransactionMap.add()') + if isinstance(record, qlslibs.jrnl.DequeueRecord): + try: + self.enq_map.lock(journal_file, record) + except qlslibs.err.RecordIdNotFoundError: + # Not in emap, look for rid in tmap - should not happen in practice + txn_op = self._find_record_id(record.xid, record.dequeue_record_id) + if txn_op != None: + if txn_op[2]: + raise qlslibs.err.AlreadyLockedError(journal_file.file_header, record) + txn_op[2] = True + if record.xid in self.txn_map: + self.txn_map[record.xid].append([journal_file, record, False]) # append to existing list + else: + self.txn_map[record.xid] = [[journal_file, record, False]] # create new list + def commit(self, xid): + """Perform a commit operation for the given xid record""" + mismatch_list = [] + for journal_file, record, lock in self.txn_map[xid]: + if 
isinstance(record, qlslibs.jrnl.EnqueueRecord): + self.enq_map.add(journal_file, record, lock) # Transfer enq to emap + else: + if self.enq_map.contains(record.dequeue_record_id): + self.enq_map.unlock(journal_file, record) + self.enq_map.delete(journal_file, record)[0].decr_enq_cnt(record) + else: + mismatch_list.append('0x%x' % record.dequeue_record_id) + del self.txn_map[xid] + return mismatch_list + def contains(self, xid): + """Return True if the xid exists in the map; False otherwise""" + return xid in self.txn_map + def delete(self, journal_file, transaction_record): + """Remove a transaction record from the map using either a commit or abort header""" + if transaction_record.magic[-1] == 'c': + return self.commit(transaction_record.xid) + if transaction_record.magic[-1] == 'a': + self.abort(transaction_record.xid) + else: + raise qlslibs.err.InvalidRecordTypeError(journal_file.file_header, transaction_record, + 'delete from Transaction Map') + def get(self, xid): + if xid in self.txn_map: + return self.txn_map[xid] + return None + def get_prepared_list(self): + """ + Prepared list is a map of xid(key) to one of None, True or False. These represent respectively: + None: prepared, but neither committed or aborted (interrupted before commit or abort) + False: prepared and aborted (interrupted before abort complete) + True: prepared and committed (interrupted before commit complete) + """ + prepared_list = {} + for xid in self.get_xid_list(): + for _, record, _ in self.txn_map[xid]: + if isinstance(record, qlslibs.jrnl.EnqueueRecord): + prepared_list[xid] = None + else: + prepared_list[xid] = record.is_transaction_complete_commit() + return prepared_list + def get_xid_list(self): + return self.txn_map.keys() + def report_str(self, args): + """Return a string containing a text report for all records in the map""" + if len(self.txn_map) == 0: + return 'No outstanding transactions found.' 
+ rstr = '%d outstanding transaction(s)' % len(self.txn_map) + if args.show_recovered_recs: + rstr += ':' + for xid, op_list in self.txn_map.iteritems(): + rstr += '\n %s containing %d operations:' % (qlslibs.utils.format_xid(xid), len(op_list)) + for journal_file, record, _ in op_list: + rstr += '\n 0x%x:' % journal_file.file_header.file_num + rstr += record.to_string(args.show_xids, args.show_data, args.txtest) + else: + rstr += '.' + return rstr + def _find_record_id(self, xid, record_id): + """ Search for and return map list with supplied rid.""" + if xid in self.txn_map: + for txn_op in self.txn_map[xid]: + if txn_op[1].record_id == record_id: + return txn_op + for this_xid in self.txn_map.iterkeys(): + for txn_op in self.txn_map[this_xid]: + if txn_op[1].record_id == record_id: + return txn_op + return None + +class JournalStatistics(object): + """Journal statistics""" + def __init__(self): + self.total_record_count = 0 + self.transient_record_count = 0 + self.filler_record_count = 0 + self.enqueue_count = 0 + self.dequeue_count = 0 + self.transaction_record_count = 0 + self.transaction_enqueue_count = 0 + self.transaction_dequeue_count = 0 + self.transaction_commit_count = 0 + self.transaction_abort_count = 0 + self.transaction_operation_count = 0 + def __str__(self): + fstr = 'Total record count: %d\n' + \ + 'Transient record count: %d\n' + \ + 'Filler_record_count: %d\n' + \ + 'Enqueue_count: %d\n' + \ + 'Dequeue_count: %d\n' + \ + 'Transaction_record_count: %d\n' + \ + 'Transaction_enqueue_count: %d\n' + \ + 'Transaction_dequeue_count: %d\n' + \ + 'Transaction_commit_count: %d\n' + \ + 'Transaction_abort_count: %d\n' + \ + 'Transaction_operation_count: %d\n' + return fstr % (self.total_record_count, + self.transient_record_count, + self.filler_record_count, + self.enqueue_count, + self.dequeue_count, + self.transaction_record_count, + self.transaction_enqueue_count, + self.transaction_dequeue_count, + self.transaction_commit_count, + 
self.transaction_abort_count, + self.transaction_operation_count) + +class Journal(object): + """ + Instance of a Qpid Linear Store (QLS) journal. + """ + JRNL_SUFFIX = 'jrnl' + def __init__(self, directory, xid_prepared_list, args): + self.directory = directory + self.queue_name = os.path.basename(directory) + self.files = {} + self.file_num_list = None + self.file_num_itr = None + self.enq_map = EnqueueMap(self) + self.txn_map = TransactionMap(self.enq_map) + self.current_journal_file = None + self.first_rec_flag = None + self.statistics = JournalStatistics() + self.xid_prepared_list = xid_prepared_list # This is None for the TPL instance only + self.args = args + self.last_record_offset = None # TODO: Move into JournalFile + self.num_filler_records_required = None # TODO: Move into JournalFile + self.fill_to_offset = None + def add_record(self, record): + """Used for reconciling transactions only - called from JournalRecoveryManager._reconcile_transactions()""" + if isinstance(record, qlslibs.jrnl.EnqueueRecord) or isinstance(record, qlslibs.jrnl.DequeueRecord): + if record.xid_size > 0: + self.txn_map.add(self.current_journal_file, record) + else: + self.enq_map.add(self.current_journal_file, record, False) + elif isinstance(record, qlslibs.jrnl.TransactionRecord): + self.txn_map.delete(self.current_journal_file, record) + else: + raise qlslibs.err.InvalidRecordTypeError(self.current_journal_file, record, 'add to Journal') + def get_enq_map_record(self, rid): + return self.enq_map.get(rid) + def get_txn_map_record(self, xid): + return self.txn_map.get(xid) + def get_outstanding_txn_list(self): + return self.txn_map.get_xid_list() + def get_queue_name(self): + return self.queue_name + def recover(self, high_rid_counter): + print 'Recovering %s...' 
% self.queue_name, + self._analyze_files() + try: + while self._get_next_record(high_rid_counter): + pass + self._check_alignment() + except qlslibs.err.NoMoreFilesInJournalError: + print 'No more files in journal' + except qlslibs.err.FirstRecordOffsetMismatchError as err: + print '0x%08x: **** FRO ERROR: queue=\"%s\" fid=0x%x fro actual=0x%08x expected=0x%08x' % \ + (err.get_expected_fro(), err.get_queue_name(), err.get_file_number(), err.get_record_offset(), + err.get_expected_fro()) + print 'done' + def reconcile_transactions(self, prepared_list, txn_flag): + xid_list = self.txn_map.get_xid_list() + if len(xid_list) > 0: + print self.queue_name, 'contains', len(xid_list), 'open transaction(s):' + for xid in xid_list: + if xid in prepared_list.keys(): + commit_flag = prepared_list[xid] + if commit_flag is None: + print ' ', qlslibs.utils.format_xid(xid), '- Assuming commit after prepare' + if txn_flag: + self.txn_map.commit(xid) + elif commit_flag: + print ' ', qlslibs.utils.format_xid(xid), '- Completing interrupted commit operation' + if txn_flag: + self.txn_map.commit(xid) + else: + print ' ', qlslibs.utils.format_xid(xid), '- Completing interrupted abort operation' + if txn_flag: + self.txn_map.abort(xid) + else: + print ' ', qlslibs.utils.format_xid(xid), '- Ignoring, not in prepared transaction list' + if txn_flag: + self.txn_map.abort(xid) + def report(self, args): + print 'Journal "%s":' % self.queue_name + print '=' * (11 + len(self.queue_name)) + if args.stats: + print str(self.statistics) + print self.enq_map.report_str(args) + print self.txn_map.report_str(args) + JournalFile.report_header() + for file_num in sorted(self.files.keys()): + self.files[file_num].report() + #TODO: move this to JournalFile, append to file info + if self.num_filler_records_required is not None and self.fill_to_offset is not None: + print '0x%x:0x%08x: %d filler records required for DBLK alignment to 0x%08x' % \ + (self.current_journal_file.file_header.file_num, 
self.last_record_offset, + self.num_filler_records_required, self.fill_to_offset) + print + #--- protected functions --- + def _analyze_files(self): + for dir_entry in os.listdir(self.directory): + dir_entry_bits = dir_entry.split('.') + if len(dir_entry_bits) == 2 and dir_entry_bits[1] == Journal.JRNL_SUFFIX: + fq_file_name = os.path.join(self.directory, dir_entry) + file_handle = open(fq_file_name) + args = qlslibs.utils.load_args(file_handle, qlslibs.jrnl.RecordHeader) + file_hdr = qlslibs.jrnl.FileHeader(*args) + file_hdr.init(file_handle, *qlslibs.utils.load_args(file_handle, qlslibs.jrnl.FileHeader)) + if file_hdr.is_header_valid(file_hdr): + file_hdr.load(file_handle) + if file_hdr.is_valid(False): + qlslibs.utils.skip(file_handle, + file_hdr.file_header_size_sblks * qlslibs.utils.DEFAULT_SBLK_SIZE) + self.files[file_hdr.file_num] = JournalFile(file_hdr) + self.file_num_list = sorted(self.files.keys()) + self.file_num_itr = iter(self.file_num_list) + def _check_alignment(self): # TODO: Move into JournalFile + if self.last_record_offset is None: # Empty file, _check_file() never run + return + remaining_sblks = self.last_record_offset % qlslibs.utils.DEFAULT_SBLK_SIZE + if remaining_sblks == 0: + self.num_filler_records_required = 0 + else: + self.num_filler_records_required = (qlslibs.utils.DEFAULT_SBLK_SIZE - remaining_sblks) / \ + qlslibs.utils.DEFAULT_DBLK_SIZE + self.fill_to_offset = self.last_record_offset + \ + (self.num_filler_records_required * qlslibs.utils.DEFAULT_DBLK_SIZE) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:0x%08x: %d filler records required for DBLK alignment to 0x%08x' % \ + (self.current_journal_file.file_header.file_num, self.last_record_offset, + self.num_filler_records_required, self.fill_to_offset) + def _check_file(self): + if self.current_journal_file is not None: + if not self.current_journal_file.file_header.is_end_of_file(): + return True + if 
self.current_journal_file.file_header.is_end_of_file(): + self.last_record_offset = self.current_journal_file.file_header.file_handle.tell() + if not self._get_next_file(): + return False + fhdr = self.current_journal_file.file_header + fhdr.file_handle.seek(fhdr.first_record_offset) + return True + def _get_next_file(self): + if self.current_journal_file is not None: + file_handle = self.current_journal_file.file_header.file_handle + if not file_handle.closed: # sanity check, should not be necessary + file_handle.close() + file_num = 0 + try: + while file_num == 0: + file_num = self.file_num_itr.next() + except StopIteration: + pass + if file_num == 0: + return False + self.current_journal_file = self.files[file_num] + self.first_rec_flag = True + if self.args.show_recovery_recs or self.args.show_all_recs: + file_header = self.current_journal_file.file_header + print '0x%x:%s' % (file_header.file_num, file_header.to_string()) + return True + def _get_next_record(self, high_rid_counter): + if not self._check_file(): + return False + self.last_record_offset = self.current_journal_file.file_header.file_handle.tell() + this_record = qlslibs.utils.load(self.current_journal_file.file_header.file_handle, qlslibs.jrnl.RecordHeader) + if not this_record.is_header_valid(self.current_journal_file.file_header): + return False + if self.first_rec_flag: + if this_record.file_offset != self.current_journal_file.file_header.first_record_offset: + raise qlslibs.err.FirstRecordOffsetMismatchError(self.current_journal_file.file_header, this_record) + self.first_rec_flag = False + self.statistics.total_record_count += 1 + start_journal_file = self.current_journal_file + if isinstance(this_record, qlslibs.jrnl.EnqueueRecord): + ok_flag = self._handle_enqueue_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, \ + 
this_record.to_string(self.args.show_xids, self.args.show_data, self.args.txtest)) + elif isinstance(this_record, qlslibs.jrnl.DequeueRecord): + ok_flag = self._handle_dequeue_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record.to_string(self.args.show_xids, None, None)) + elif isinstance(this_record, qlslibs.jrnl.TransactionRecord): + ok_flag = self._handle_transaction_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record.to_string(self.args.show_xids, None, None)) + else: + self.statistics.filler_record_count += 1 + ok_flag = True + if self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record) + qlslibs.utils.skip(self.current_journal_file.file_header.file_handle, qlslibs.utils.DEFAULT_DBLK_SIZE) + return ok_flag + def _handle_enqueue_record(self, enqueue_record, start_journal_file): + while enqueue_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + enqueue_record.truncated_flag = True + return False + if not enqueue_record.is_valid(start_journal_file): + return False + if enqueue_record.is_external() and enqueue_record.data != None: + raise qlslibs.err.ExternalDataError(self.current_journal_file.file_header, enqueue_record) + if enqueue_record.is_transient(): + self.statistics.transient_record_count += 1 + return True + if enqueue_record.xid_size > 0: + self.txn_map.add(start_journal_file, enqueue_record) + self.statistics.transaction_operation_count += 1 + self.statistics.transaction_record_count += 1 + self.statistics.transaction_enqueue_count += 1 + else: + self.enq_map.add(start_journal_file, enqueue_record, False) + 
start_journal_file.incr_enq_cnt() + self.statistics.enqueue_count += 1 + return True + def _handle_dequeue_record(self, dequeue_record, start_journal_file): + while dequeue_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + dequeue_record.truncated_flag = True + return False + if not dequeue_record.is_valid(start_journal_file): + return False + if dequeue_record.xid_size > 0: + if self.xid_prepared_list is None: # ie this is the TPL + dequeue_record.transaction_prepared_list_flag = True + elif not self.enq_map.contains(dequeue_record.dequeue_record_id): + dequeue_record.warnings.append('NOT IN EMAP') # Only for non-TPL records + self.txn_map.add(start_journal_file, dequeue_record) + self.statistics.transaction_operation_count += 1 + self.statistics.transaction_record_count += 1 + self.statistics.transaction_dequeue_count += 1 + else: + try: + self.enq_map.delete(start_journal_file, dequeue_record)[0].decr_enq_cnt(dequeue_record) + except qlslibs.err.RecordIdNotFoundError: + dequeue_record.warnings.append('NOT IN EMAP') + self.statistics.dequeue_count += 1 + return True + def _handle_transaction_record(self, transaction_record, start_journal_file): + while transaction_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + transaction_record.truncated_flag = True + return False + if not transaction_record.is_valid(start_journal_file): + return False + if transaction_record.magic[-1] == 'a': # Abort + self.statistics.transaction_abort_count += 1 + elif transaction_record.magic[-1] == 'c': # Commit + self.statistics.transaction_commit_count += 1 + else: + raise InvalidRecordTypeError('Unknown transaction record magic \'%s\'' % transaction_record.magic) + if self.txn_map.contains(transaction_record.xid): + self.txn_map.delete(self.current_journal_file, transaction_record) + else: + transaction_record.warnings.append('NOT IN TMAP') +# if transaction_record.magic[-1] == 'c': # 
commits only +# self._txn_obj_list[hdr.xid] = hdr + self.statistics.transaction_record_count += 1 + return True + def _load_data(self, record): + while not record.is_complete: + record.load(self.current_journal_file.file_handle) + +class JournalFile(object): + def __init__(self, file_header): + self.file_header = file_header + self.enq_cnt = 0 + self.deq_cnt = 0 + self.num_filler_records_required = None + def incr_enq_cnt(self): + self.enq_cnt += 1 + def decr_enq_cnt(self, record): + if self.enq_cnt <= self.deq_cnt: + raise qlslibs.err.EnqueueCountUnderflowError(self.file_header, record) + self.deq_cnt += 1 + def get_enq_cnt(self): + return self.enq_cnt - self.deq_cnt + def is_outstanding_enq(self): + return self.enq_cnt > self.deq_cnt + @staticmethod + def report_header(): + print 'file_num enq_cnt p_no efp journal_file' + print '-------- ------- ---- ----- ------------' + def report(self): + comment = '<uninitialized>' if self.file_header.file_num == 0 else '' + file_num_str = '0x%x' % self.file_header.file_num + print '%8s %7d %4d %4dk %s %s' % (file_num_str, self.get_enq_cnt(), self.file_header.partition_num, + self.file_header.efp_data_size_kb, + os.path.basename(self.file_header.file_handle.name), comment) diff --git a/qpid/cpp/management/python/lib/qlslibs/efp.py b/qpid/cpp/management/python/lib/qlslibs/efp.py new file mode 100644 index 0000000000..1c751c3d06 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/efp.py @@ -0,0 +1,327 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.efp + +Contains empty file pool (EFP) classes. +""" + +import os +import os.path +import qlslibs.err +import shutil +import uuid + +class EfpManager(object): + """ + Top level class to analyze the Qpid Linear Store (QLS) directory for the partitions that make up the + Empty File Pool (EFP). + """ + def __init__(self, directory, disk_space_required_kb): + if not os.path.exists(directory): + raise qlslibs.err.InvalidQlsDirectoryNameError(directory) + self.directory = directory + self.disk_space_required_kb = disk_space_required_kb + self.efp_partitions = [] + self.efp_pools = {} + self.total_num_files = 0 + self.total_cum_file_size_kb = 0 + self.current_efp_partition = None + def add_file_pool(self, file_size_kb, num_files): + """ Add an EFP in the specified partition of the specified size containing the specified number of files """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + print 'Adding pool \'%s\' to partition %s' % (dir_name, self.current_efp_partition.partition_number) + self.total_cum_file_size_kb += self.current_efp_partition.create_new_efp(file_size_kb, num_files) + self.total_num_files += num_files + def freshen_file_pool(self, file_size_kb, num_files): + """ Freshen an EFP in the specified partition and of the specified size to the specified number of files """ + if self.current_efp_partition is None: + partition_list = self.efp_partitions + partition_str = 'all partitions' + else: + partition_list = [self.current_efp_partition] + partition_str = 'partition %d' % 
self.current_efp_partition.partition_number + if file_size_kb is None: + pool_str = 'all pools' + else: + pool_str = 'pool \'%s\'' % EmptyFilePool.get_directory_name(int(file_size_kb)) + print 'Freshening %s in %s to %d files' % (pool_str, partition_str, num_files) + for self.current_efp_partition in partition_list: # Partition objects + if file_size_kb is None: + file_size_list = self.current_efp_partition.efp_pools.keys() + else: + file_size_list = ['%sk' % file_size_kb] + for file_size in file_size_list: + efp = self.current_efp_partition.efp_pools[file_size] + num_files_needed = num_files - efp.get_tot_file_count() + if num_files_needed > 0: + self.current_efp_partition.create_new_efp_files(qlslibs.utils.efp_directory_size(file_size), + num_files_needed) + else: + print ' WARNING: Pool %s in partition %s already contains %d files: no action taken' % \ + (self.current_efp_partition.efp_pools[file_size].size_str, + self.current_efp_partition.partition_number, efp.get_num_files()) + def remove_file_pool(self, file_size_kb): + """ Remove an existing EFP from the specified partition and of the specified size """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + print 'Removing pool \'%s\' from partition %s' % (dir_name, self.current_efp_partition.partition_number) + self.efp_partitions.remove(self.current_efp_partition) + shutil.rmtree(os.path.join(self.current_efp_partition.efp_directory, dir_name)) + def report(self): + print 'Empty File Pool (EFP) report' + print '============================' + print 'Found', len(self.efp_partitions), 'partition(s)' + if (len(self.efp_partitions)) > 0: + sorted_efp_partitions = sorted(self.efp_partitions, key=lambda x: x.partition_number) + EfpPartition.print_report_table_header() + for ptn in sorted_efp_partitions: + ptn.print_report_table_line() + print + for ptn in sorted_efp_partitions: + ptn.report() + def run(self, arg_tup): + self._analyze_efp() + if arg_tup is not None: + _, arg_file_size, arg_num_files, 
arg_add, arg_remove, arg_freshen, arg_list = arg_tup + self._check_args(arg_tup) + if arg_add: + self.add_file_pool(int(arg_file_size), int(arg_num_files)) + if arg_remove: + self.remove_file_pool(int(arg_file_size)) + if arg_freshen: + self.freshen_file_pool(arg_file_size, int(arg_num_files)) + if arg_list: + self.report() + def _analyze_efp(self): + for dir_entry in os.listdir(self.directory): + try: + efp_partition = EfpPartition(os.path.join(self.directory, dir_entry), self.disk_space_required_kb) + efp_partition.scan() + self.efp_partitions.append(efp_partition) + for efpl in efp_partition.efp_pools.iterkeys(): + if efpl not in self.efp_pools: + self.efp_pools[efpl] = [] + self.efp_pools[efpl].append(efp_partition.efp_pools[efpl]) + self.total_num_files += efp_partition.tot_file_count + self.total_cum_file_size_kb += efp_partition.tot_file_size_kb + except qlslibs.err.InvalidPartitionDirectoryNameError: + pass + def _check_args(self, arg_tup): + """ Value check of args. The names of partitions and pools are validated against the discovered instances """ + arg_partition, arg_file_size, _, arg_add, arg_remove, arg_freshen, _ = arg_tup + if arg_partition is not None: + try: + if arg_partition[0] == 'p': # string partition name, eg 'p001' + partition_num = int(arg_partition[1:]) + else: # numeric partition, eg '1' + partition_num = int(arg_partition) + found = False + for partition in self.efp_partitions: + if partition.partition_number == partition_num: + self.current_efp_partition = partition + found = True + break + if not found: + raise qlslibs.err.PartitionDoesNotExistError(arg_partition) + except ValueError: + raise qlslibs.err.InvalidPartitionDirectoryNameError(arg_partition) + if self.current_efp_partition is not None: + pool_list = self.current_efp_partition.efp_pools.keys() + efp_directory_name = EmptyFilePool.get_directory_name(int(arg_file_size)) + if arg_add and efp_directory_name in pool_list: + raise 
qlslibs.err.PoolDirectoryAlreadyExistsError(efp_directory_name) + if (arg_remove or arg_freshen) and efp_directory_name not in pool_list: + raise qlslibs.err.PoolDirectoryDoesNotExistError(efp_directory_name) + +class EfpPartition(object): + """ + Class that represents a EFP partition. Each partition contains one or more Empty File Pools (EFPs). + """ + PTN_DIR_PREFIX = 'p' + EFP_DIR_NAME = 'efp' + def __init__(self, directory, disk_space_required_kb): + self.directory = directory + self.partition_number = None + self.efp_pools = {} + self.tot_file_count = 0 + self.tot_file_size_kb = 0 + self._validate_partition_directory(disk_space_required_kb) + def create_new_efp_files(self, file_size_kb, num_files): + """ Create new EFP files in this partition """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + if dir_name in self.efp_pools.keys(): + efp = self.efp_pools[dir_name] + else: + efp = EmptyFilePool(os.path.join(self.directory, EfpPartition.EFP_DIR_NAME), dir_name) + this_tot_file_size_kb = efp.create_new_efp_files(num_files) + self.tot_file_size_kb += this_tot_file_size_kb + self.tot_file_count += num_files + return this_tot_file_size_kb + @staticmethod + def print_report_table_header(): + print 'p_no no_efp tot_files tot_size_kb directory' + print '---- ------ --------- ----------- ---------' + def print_report_table_line(self): + print '%4d %6d %9d %11d %s' % (self.partition_number, len(self.efp_pools), self.tot_file_count, + self.tot_file_size_kb, self.directory) + def report(self): + print 'Partition %s:' % os.path.basename(self.directory) + if len(self.efp_pools) > 0: + EmptyFilePool.print_report_table_header() + for dir_name in self.efp_pools.keys(): + self.efp_pools[dir_name].print_report_table_line() + else: + print '<empty - no EFPs found in this partition>' + print + def scan(self): + if os.path.exists(self.directory): + efp_dir = os.path.join(self.directory, EfpPartition.EFP_DIR_NAME) + for dir_entry in os.listdir(efp_dir): + efp = 
EmptyFilePool(os.path.join(efp_dir, dir_entry), self.partition_number) + efp.scan() + self.tot_file_count += efp.get_tot_file_count() + self.tot_file_size_kb += efp.get_tot_file_size_kb() + self.efp_pools[dir_entry] = efp + def _validate_partition_directory(self, disk_space_required_kb): + if os.path.basename(self.directory)[0] is not EfpPartition.PTN_DIR_PREFIX: + raise qlslibs.err.InvalidPartitionDirectoryNameError(self.directory) + try: + self.partition_number = int(os.path.basename(self.directory)[1:]) + except ValueError: + raise qlslibs.err.InvalidPartitionDirectoryNameError(self.directory) + if not qlslibs.utils.has_write_permission(self.directory): + raise qlslibs.err.WritePermissionError(self.directory) + if disk_space_required_kb is not None: + space_avail = qlslibs.utils.get_avail_disk_space(self.directory) + if space_avail < (disk_space_required_kb * 1024): + raise qlslibs.err.InsufficientSpaceOnDiskError(self.directory, space_avail, + disk_space_required_kb * 1024) + +class EmptyFilePool(object): + """ + Class that represents a single Empty File Pool within a partition. Each EFP contains pre-formatted linear store + journal files (but it may also be empty). 
+ """ + EFP_DIR_SUFFIX = 'k' + EFP_JRNL_EXTENTION = '.jrnl' + EFP_INUSE_DIRNAME = 'in_use' + EFP_RETURNED_DIRNAME = 'returned' + def __init__(self, directory, partition_number): + self.base_dir_name = os.path.basename(directory) + self.directory = directory + self.partition_number = partition_number + self.data_size_kb = None + self.efp_files = [] + self.in_use_files = [] + self.returned_files = [] + self._validate_efp_directory() + def create_new_efp_files(self, num_files): + """ Create one or more new empty journal files of the prescribed size for this EFP """ + this_total_file_size = 0 + for _ in range(num_files): + this_total_file_size += self._create_new_efp_file() + return this_total_file_size + def get_directory(self): + return self.directory + @staticmethod + def get_directory_name(file_size_kb): + """ Static function to create an EFP directory name from the size of the files it contains """ + return '%dk' % file_size_kb + def get_tot_file_count(self): + return len(self.efp_files) + def get_tot_file_size_kb(self): + return self.data_size_kb * len(self.efp_files) + @staticmethod + def print_report_table_header(): + print ' ---------- efp ------------ --------- in_use ---------- -------- returned ---------' + print 'data_size_kb file_count tot_file_size_kb file_count tot_file_size_kb file_count tot_file_size_kb efp_directory' + print '------------ ---------- ---------------- ---------- ---------------- ---------- ---------------- -------------' + def print_report_table_line(self): + print '%12d %10d %16d %10d %16d %10d %16d %s' % (self.data_size_kb, len(self.efp_files), + self.data_size_kb * len(self.efp_files), + len(self.in_use_files), + self.data_size_kb * len(self.in_use_files), + len(self.returned_files), + self.data_size_kb * len(self.returned_files), + self.get_directory()) + def scan(self): + for efp_file in os.listdir(self.directory): + if efp_file == self.EFP_INUSE_DIRNAME: + for in_use_file in os.listdir(os.path.join(self.directory, 
self.EFP_INUSE_DIRNAME)): + self.in_use_files.append(in_use_file) + continue + if efp_file == self.EFP_RETURNED_DIRNAME: + for returned_file in os.listdir(os.path.join(self.directory, self.EFP_RETURNED_DIRNAME)): + self.returned_files.append(returned_file) + continue + if self._validate_efp_file(os.path.join(self.directory, efp_file)): + self.efp_files.append(efp_file) + def _add_efp_file(self, efp_file_name): + """ Add a single journal file of the appropriate size to this EFP. No file size check is made here. """ + self.efp_files.append(efp_file_name) + def _create_new_efp_file(self): + """ Create a single new empty journal file of the prescribed size for this EFP """ + file_name = str(uuid.uuid4()) + EmptyFilePool.EFP_JRNL_EXTENTION + file_header = qlslibs.jrnl.FileHeader(0, qlslibs.jrnl.FileHeader.MAGIC, qlslibs.utils.DEFAULT_RECORD_VERSION, + 0, 0, 0) + file_header.init(None, None, qlslibs.utils.DEFAULT_HEADER_SIZE_SBLKS, self.partition_number, self.data_size_kb, + 0, 0, 0, 0, 0) + efh = file_header.encode() + efh_bytes = len(efh) + file_handle = open(os.path.join(self.directory, file_name), 'wb') + file_handle.write(efh) + file_handle.write('\xff' * (qlslibs.utils.DEFAULT_SBLK_SIZE - efh_bytes)) + file_handle.write('\x00' * (int(self.data_size_kb) * 1024)) + file_handle.close() + fqfn = os.path.join(self.directory, file_name) + self._add_efp_file(fqfn) + return os.path.getsize(fqfn) + def _validate_efp_directory(self): + if self.base_dir_name[-1] is not EmptyFilePool.EFP_DIR_SUFFIX: + raise qlslibs.err.InvalidEfpDirectoryNameError(self.directory) + try: + self.data_size_kb = int(os.path.basename(self.base_dir_name)[:-1]) + except ValueError: + raise qlslibs.err.InvalidEfpDirectoryNameError(self.directory) + def _validate_efp_file(self, efp_file): + file_size = os.path.getsize(efp_file) + expected_file_size = (self.data_size_kb * 1024) + qlslibs.utils.DEFAULT_SBLK_SIZE + if file_size != expected_file_size: + print 'WARNING: File %s not of correct size (size=%d, 
expected=%d): Ignoring' % (efp_file, file_size, + expected_file_size) + return False + file_handle = open(efp_file) + args = qlslibs.utils.load_args(file_handle, qlslibs.jrnl.RecordHeader) + file_hdr = qlslibs.jrnl.FileHeader(*args) + file_hdr.init(file_handle, *qlslibs.utils.load_args(file_handle, qlslibs.jrnl.FileHeader)) + if not file_hdr.is_header_valid(file_hdr): + file_handle.close() + return False + file_hdr.load(file_handle) + file_handle.close() + if not file_hdr.is_valid(True): + return False + return True + + +# ============================================================================= + +if __name__ == "__main__": + print "This is a library, and cannot be executed." diff --git a/qpid/cpp/management/python/lib/qlslibs/err.py b/qpid/cpp/management/python/lib/qlslibs/err.py new file mode 100644 index 0000000000..f47632ce6a --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/err.py @@ -0,0 +1,261 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.err + +Contains error classes. 
+""" + +# --- Parent classes + +class QlsError(Exception): + """Base error class for QLS errors and exceptions""" + def __init__(self): + Exception.__init__(self) + def __str__(self): + return '' + +class QlsRecordError(QlsError): + """Base error class for individual records""" + def __init__(self, file_header, record): + QlsError.__init__(self) + self.file_header = file_header + self.record = record + def get_expected_fro(self): + return self.file_header.first_record_offset + def get_file_number(self): + return self.file_header.file_num + def get_queue_name(self): + return self.file_header.queue_name + def get_record_id(self): + return self.record.record_id + def get_record_offset(self): + return self.record.file_offset + def __str__(self): + return 'queue="%s" file_id=0x%x record_offset=0x%x record_id=0x%x' % \ + (self.file_header.queue_name, self.file_header.file_num, self.record.file_offset, self.record.record_id) + +# --- Error classes + +class AlreadyLockedError(QlsRecordError): + """Transactional record to be locked is already locked""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Transactional operation already locked in TransactionMap: ' + QlsRecordError.__str__(self) + +class DataSizeError(QlsError): + """Error class for Data size mismatch""" + def __init__(self, expected_size, actual_size, data_str): + QlsError.__init__(self) + self.expected_size = expected_size + self.actual_size = actual_size + self.xid_str = data_str + def __str__(self): + return 'Inconsistent data size: expected:%d; actual:%d; data="%s"' % \ + (self.expected_size, self.actual_size, self.data_str) + +class DuplicateRecordIdError(QlsRecordError): + """Duplicate Record Id in Enqueue Map""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Duplicate Record Id in enqueue map: ' + QlsRecordError.__str__(self) + +class 
EnqueueCountUnderflowError(QlsRecordError): + """Attempted to decrement enqueue count past 0""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Enqueue record count underflow: ' + QlsRecordError.__str__(self) + +class ExternalDataError(QlsRecordError): + """Data present in Enqueue record when external data flag is set""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Data present in external data record: ' + QlsRecordError.__str__(self) + +class FirstRecordOffsetMismatchError(QlsRecordError): + """First Record Offset (FRO) does not match file header""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'First record offset mismatch: ' + QlsRecordError.__str__(self) + ' expected_offset=0x%x' % \ + self.file_header.first_record_offset + +class InsufficientSpaceOnDiskError(QlsError): + """Insufficient space on disk""" + def __init__(self, directory, space_avail, space_requried): + QlsError.__init__(self) + self.directory = directory + self.space_avail = space_avail + self.space_required = space_requried + def __str__(self): + return 'Insufficient space on disk: directory=%s; avail_space=%d required_space=%d' % \ + (self.directory, self.space_avail, self.space_required) + +class InvalidClassError(QlsError): + """Invalid class name or type""" + def __init__(self, class_name): + QlsError.__init__(self) + self.class_name = class_name + def __str__(self): + return 'Invalid class name "%s"' % self.class_name + +class InvalidEfpDirectoryNameError(QlsError): + """Invalid EFP directory name - should be NNNNk, where NNNN is a number (of any length)""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid EFP directory name "%s"' % self.directory_name + +#class 
InvalidFileSizeString(QlsError): +# """Invalid file size string""" +# def __init__(self, file_size_string): +# QlsError.__init__(self) +# self.file_size_string = file_size_string +# def __str__(self): +# return 'Invalid file size string "%s"' % self.file_size_string + +class InvalidPartitionDirectoryNameError(QlsError): + """Invalid EFP partition name - should be pNNN, where NNN is a 3-digit partition number""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid partition directory name "%s"' % self.directory_name + +class InvalidQlsDirectoryNameError(QlsError): + """Invalid QLS directory name""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid QLS directory name "%s"' % self.directory_name + +class InvalidRecordTypeError(QlsRecordError): + """Error class for any operation using an invalid record type""" + def __init__(self, file_header, record, error_msg): + QlsRecordError.__init__(self, file_header, record) + self.error_msg = error_msg + def __str__(self): + return 'Invalid record type: ' + QlsRecordError.__str__(self) + ':' + self.error_msg + +class InvalidRecordVersionError(QlsRecordError): + """Invalid record version""" + def __init__(self, file_header, record, expected_version): + QlsRecordError.__init__(self, file_header, record) + self.expected_version = expected_version + def __str__(self): + return 'Invalid record version: queue="%s" ' + QlsRecordError.__str__(self) + \ + ' ver_found=0x%x ver_expected=0x%x' % (self.record_header.version, self.expected_version) + +class NoMoreFilesInJournalError(QlsError): + """Raised when trying to obtain the next file in the journal and there are no more files""" + def __init__(self, queue_name): + QlsError.__init__(self) + self.queue_name = queue_name + def __str__(self): + return 'No more journal files in queue "%s"' % self.queue_name + 
+class NonTransactionalRecordError(QlsRecordError): + """Transactional operation on non-transactional record""" + def __init__(self, file_header, record, operation): + QlsRecordError.__init__(self, file_header, record) + self.operation = operation + def __str__(self): + return 'Transactional operation on non-transactional record: ' + QlsRecordError.__str__() + \ + ' operation=%s' % self.operation + +class PartitionDoesNotExistError(QlsError): + """Partition name does not exist on disk""" + def __init__(self, partition_directory): + QlsError.__init__(self) + self.partition_directory = partition_directory + def __str__(self): + return 'Partition %s does not exist' % self.partition_directory + +class PoolDirectoryAlreadyExistsError(QlsError): + """Pool directory already exists""" + def __init__(self, pool_directory): + QlsError.__init__(self) + self.pool_directory = pool_directory + def __str__(self): + return 'Pool directory %s already exists' % self.pool_directory + +class PoolDirectoryDoesNotExistError(QlsError): + """Pool directory does not exist""" + def __init__(self, pool_directory): + QlsError.__init__(self) + self.pool_directory = pool_directory + def __str__(self): + return 'Pool directory %s does not exist' % self.pool_directory + +class RecordIdNotFoundError(QlsRecordError): + """Record Id not found in enqueue map""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Record Id not found in enqueue map: ' + QlsRecordError.__str__() + +class RecordNotLockedError(QlsRecordError): + """Record in enqueue map is not locked""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Record in enqueue map is not locked: ' + QlsRecordError.__str__() + +class UnexpectedEndOfFileError(QlsError): + """The bytes read from a file is less than that expected""" + def __init__(self, size_read, size_expected, file_offset, 
file_name): + QlsError.__init__(self) + self.size_read = size_read + self.size_expected = size_expected + self.file_offset = file_offset + self.file_name = file_name + def __str__(self): + return 'Tried to read %d at offset %d in file "%s"; only read %d' % \ + (self.size_read, self.file_offset, self.file_name, self.size_expected) + +class WritePermissionError(QlsError): + """No write permission""" + def __init__(self, directory): + QlsError.__init__(self) + self.directory = directory + def __str__(self): + return 'No write permission in directory %s' % self.directory + +class XidSizeError(QlsError): + """Error class for Xid size mismatch""" + def __init__(self, expected_size, actual_size, xid_str): + QlsError.__init__(self) + self.expected_size = expected_size + self.actual_size = actual_size + self.xid_str = xid_str + def __str__(self): + return 'Inconsistent xid size: expected:%d; actual:%d; xid="%s"' % \ + (self.expected_size, self.actual_size, self.xid_str) + +# ============================================================================= + +if __name__ == "__main__": + print "This is a library, and cannot be executed." diff --git a/qpid/cpp/management/python/lib/qlslibs/jrnl.py b/qpid/cpp/management/python/lib/qlslibs/jrnl.py new file mode 100644 index 0000000000..5e65890393 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/jrnl.py @@ -0,0 +1,394 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.jrnl + +Contains journal record classes. +""" + +import qlslibs.err +import qlslibs.utils +import string +import struct +import time + +class RecordHeader(object): + FORMAT = '<4s2H2Q' + def __init__(self, file_offset, magic, version, user_flags, serial, record_id): + self.file_offset = file_offset + self.magic = magic + self.version = version + self.user_flags = user_flags + self.serial = serial + self.record_id = record_id + self.warnings = [] + self.truncated_flag = False + def encode(self): + return struct.pack(RecordHeader.FORMAT, self.magic, self.version, self.user_flags, self.serial, self.record_id) + def load(self, file_handle): + pass + @staticmethod + def discriminate(args): + """Use the last char in the header magic to determine the header type""" + return CLASSES.get(args[1][-1], RecordHeader) + def is_empty(self): + """Return True if this record is empty (ie has a magic of 0x0000""" + return self.magic == '\x00'*4 + def is_header_valid(self, file_header): + """Check that this record is valid""" + if self.is_empty(): + return False + if self.magic[:3] != 'QLS' or self.magic[3] not in ['a', 'c', 'd', 'e', 'f', 'x']: + return False + if self.magic[-1] != 'x': + if self.version != qlslibs.utils.DEFAULT_RECORD_VERSION: + raise qlslibs.err.InvalidRecordVersionError(file_header, self, qlslibs.utils.DEFAULT_RECORD_VERSION) + if self.serial != file_header.serial: + return False + return True + def to_rh_string(self): + """Return string representation of this header""" + if self.is_empty(): + return '0x%08x: <empty>' % 
(self.file_offset) + if self.magic[-1] == 'x': + return '0x%08x: [X]' % (self.file_offset) + if self.magic[-1] in ['a', 'c', 'd', 'e', 'f', 'x']: + return '0x%08x: [%c v=%d f=0x%04x rid=0x%x]' % \ + (self.file_offset, self.magic[-1].upper(), self.version, self.user_flags, self.record_id) + return '0x%08x: <error, unknown magic "%s" (possible overwrite boundary?)>' % (self.file_offset, self.magic) + def _get_warnings(self): + warn_str = '' + for warn in self.warnings: + warn_str += '<%s>' % warn + return warn_str + +class RecordTail(object): + FORMAT = '<4sL2Q' + def __init__(self, file_handle): # TODO - clumsy, only allows reading from disk. Move all disk stuff to laod() + self.file_offset = file_handle.tell() if file_handle is not None else 0 + self.complete = False + self.read_size = struct.calcsize(RecordTail.FORMAT) + self.fbin = file_handle.read(self.read_size) if file_handle is not None else None + self.valid_flag = None + if self.fbin is not None and len(self.fbin) >= self.read_size: + self.complete = True + self.xmagic, self.checksum, self.serial, self.record_id = struct.unpack(RecordTail.FORMAT, self.fbin) + def load(self, file_handle): + """Used to continue load of RecordTail object if it is split between files""" + if not self.is_complete: + self.fbin += file_handle.read(self.read_size - len(self.fbin)) + if (len(self.fbin)) >= self.read_size: + self.complete = True + self.xmagic, self.checksum, self.serial, self.record_id = struct.unpack(RecordTail.FORMAT, self.fbin) + def is_complete(self): + return self.complete + def is_valid(self, record): + if self.valid_flag is None: + if not self.complete: + return False + self.valid_flag = qlslibs.utils.inv_str(self.xmagic) == record.magic and \ + self.serial == record.serial and \ + self.record_id == record.record_id and \ + qlslibs.utils.adler32(record.checksum_encode()) == self.checksum + return self.valid_flag + def to_string(self): + """Return a string representation of the this RecordTail instance""" + if 
self.valid_flag is not None: + if not self.valid_flag: + return '[INVALID RECORD TAIL]' + magic = qlslibs.utils.inv_str(self.xmagic) + magic_char = magic[-1].upper() if magic[-1] in string.printable else '?' + return '[%c cs=0x%08x rid=0x%x]' % (magic_char, self.checksum, self.record_id) + +class FileHeader(RecordHeader): + FORMAT = '<2H4x5QH' + MAGIC = 'QLSf' + def init(self, file_handle, _, file_header_size_sblks, partition_num, efp_data_size_kb, first_record_offset, + timestamp_sec, timestamp_ns, file_num, queue_name_len): + self.file_handle = file_handle + self.file_header_size_sblks = file_header_size_sblks + self.partition_num = partition_num + self.efp_data_size_kb = efp_data_size_kb + self.first_record_offset = first_record_offset + self.timestamp_sec = timestamp_sec + self.timestamp_ns = timestamp_ns + self.file_num = file_num + self.queue_name_len = queue_name_len + self.queue_name = None + def encode(self): + if self.queue_name is None: + return RecordHeader.encode(self) + struct.pack(self.FORMAT, self.file_header_size_sblks, \ + self.partition_num, self.efp_data_size_kb, \ + self.first_record_offset, self.timestamp_sec, \ + self.timestamp_ns, self.file_num, 0) + return RecordHeader.encode(self) + struct.pack(self.FORMAT, self.file_header_size_sblks, self.partition_num, \ + self.efp_data_size_kb, self.first_record_offset, \ + self.timestamp_sec, self.timestamp_ns, self.file_num, \ + self.queue_name_len) + self.queue_name + def get_file_size(self): + """Sum of file header size and data size""" + return (self.file_header_size_sblks * qlslibs.utils.DEFAULT_SBLK_SIZE) + (self.efp_data_size_kb * 1024) + def load(self, file_handle): + self.queue_name = file_handle.read(self.queue_name_len) + def is_end_of_file(self): + return self.file_handle.tell() >= self.get_file_size() + def is_valid(self, is_empty): + if not RecordHeader.is_header_valid(self, self): + return False + if self.file_handle is None or self.file_header_size_sblks == 0 or self.partition_num == 0 
or \ + self.efp_data_size_kb == 0: + return False + if is_empty: + if self.first_record_offset != 0 or self.timestamp_sec != 0 or self.timestamp_ns != 0 or \ + self.file_num != 0 or self.queue_name_len != 0: + return False + else: + if self.first_record_offset == 0 or self.timestamp_sec == 0 or self.timestamp_ns == 0 or \ + self.file_num == 0 or self.queue_name_len == 0: + return False + if self.queue_name is None: + return False + if len(self.queue_name) != self.queue_name_len: + return False + return True + def timestamp_str(self): + """Get the timestamp of this record in string format""" + now = time.gmtime(self.timestamp_sec) + fstr = '%%a %%b %%d %%H:%%M:%%S.%09d %%Y' % (self.timestamp_ns) + return time.strftime(fstr, now) + def to_string(self): + """Return a string representation of the this FileHeader instance""" + return '%s fnum=0x%x fro=0x%08x p=%d s=%dk t=%s %s' % (self.to_rh_string(), self.file_num, + self.first_record_offset, self.partition_num, + self.efp_data_size_kb, self.timestamp_str(), + self._get_warnings()) + +class EnqueueRecord(RecordHeader): + FORMAT = '<2Q' + MAGIC = 'QLSe' + EXTERNAL_FLAG_MASK = 0x20 + TRANSIENT_FLAG_MASK = 0x10 + def init(self, _, xid_size, data_size): + self.xid_size = xid_size + self.data_size = data_size + self.xid = None + self.xid_complete = False + self.data = None + self.data_complete = False + self.record_tail = None + def checksum_encode(self): # encode excluding record tail + cs_bytes = RecordHeader.encode(self) + struct.pack(self.FORMAT, self.xid_size, self.data_size) + if self.xid is not None: + cs_bytes += self.xid + if self.data is not None: + cs_bytes += self.data + return cs_bytes + def is_external(self): + return self.user_flags & EnqueueRecord.EXTERNAL_FLAG_MASK > 0 + def is_transient(self): + return self.user_flags & EnqueueRecord.TRANSIENT_FLAG_MASK > 0 + def is_valid(self, journal_file): + if not RecordHeader.is_header_valid(self, journal_file.file_header): + return False + if not (self.xid_complete 
and self.data_complete): + return False + if self.xid_size > 0 and len(self.xid) != self.xid_size: + return False + if self.data_size > 0 and len(self.data) != self.data_size: + return False + if self.xid_size > 0 or self.data_size > 0: + if self.record_tail is None: + return False + if not self.record_tail.is_valid(self): + return False + return True + def load(self, file_handle): + """Return True when load is incomplete and must be called again with new file handle""" + self.xid, self.xid_complete = qlslibs.utils.load_data(file_handle, self.xid, self.xid_size) + if not self.xid_complete: + return True + if self.is_external(): + self.data_complete = True + else: + self.data, self.data_complete = qlslibs.utils.load_data(file_handle, self.data, self.data_size) + if not self.data_complete: + return True + if self.xid_size > 0 or self.data_size > 0: + if self.record_tail is None: + self.record_tail = RecordTail(file_handle) + elif not self.record_tail.is_complete(): + self.record_tail.load(file_handle) # Continue loading partially loaded tail + if self.record_tail.is_complete(): + self.record_tail.is_valid(self) + else: + return True + return False + def to_string(self, show_xid_flag, show_data_flag, txtest_flag): + """Return a string representation of the this EnqueueRecord instance""" + if self.truncated_flag: + return '%s xid(%d) data(%d) [Truncated, no more files in journal]' % (RecordHeader.__str__(self), + self.xid_size, self.data_size) + if self.record_tail is None: + record_tail_str = '' + else: + record_tail_str = self.record_tail.to_string() + return '%s %s %s %s %s %s' % (self.to_rh_string(), + qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag), + qlslibs.utils.format_data(self.data, self.data_size, show_data_flag, txtest_flag), + record_tail_str, self._print_flags(), self._get_warnings()) + def _print_flags(self): + """Utility function to decode the flags field in the header and print a string representation""" + fstr = '' + if 
self.is_transient(): + fstr = '[TRANSIENT' + if self.is_external(): + if len(fstr) > 0: + fstr += ',EXTERNAL' + else: + fstr = '*EXTERNAL' + if len(fstr) > 0: + fstr += ']' + return fstr + +class DequeueRecord(RecordHeader): + FORMAT = '<2Q' + MAGIC = 'QLSd' + TXN_COMPLETE_COMMIT_FLAG = 0x10 + def init(self, _, dequeue_record_id, xid_size): + self.dequeue_record_id = dequeue_record_id + self.xid_size = xid_size + self.transaction_prepared_list_flag = False + self.xid = None + self.xid_complete = False + self.record_tail = None + def checksum_encode(self): # encode excluding record tail + return RecordHeader.encode(self) + struct.pack(self.FORMAT, self.dequeue_record_id, self.xid_size) + \ + self.xid + def is_transaction_complete_commit(self): + return self.user_flags & DequeueRecord.TXN_COMPLETE_COMMIT_FLAG > 0 + def is_valid(self, journal_file): + if not RecordHeader.is_header_valid(self, journal_file.file_header): + return False + if self.xid_size > 0: + if not self.xid_complete: + return False + if self.xid_size > 0 and len(self.xid) != self.xid_size: + return False + if self.record_tail is None: + return False + if not self.record_tail.is_valid(self): + return False + return True + def load(self, file_handle): + """Return True when load is incomplete and must be called again with new file handle""" + self.xid, self.xid_complete = qlslibs.utils.load_data(file_handle, self.xid, self.xid_size) + if not self.xid_complete: + return True + if self.xid_size > 0: + if self.record_tail is None: + self.record_tail = RecordTail(file_handle) + elif not self.record_tail.is_complete(): + self.record_tail.load(file_handle) + if self.record_tail.is_complete(): + self.record_tail.is_valid(self) + else: + return True + return False + def to_string(self, show_xid_flag, _u1, _u2): + """Return a string representation of the this DequeueRecord instance""" + if self.truncated_flag: + return '%s xid(%d) drid=0x%x [Truncated, no more files in journal]' % (RecordHeader.__str__(self), + 
self.xid_size, + self.dequeue_record_id) + if self.record_tail is None: + record_tail_str = '' + else: + record_tail_str = self.record_tail.to_string() + return '%s drid=0x%x %s %s %s %s' % (self.to_rh_string(), self.dequeue_record_id, + qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag), + record_tail_str, self._print_flags(), self._get_warnings()) + def _print_flags(self): + """Utility function to decode the flags field in the header and print a string representation""" + if self.transaction_prepared_list_flag: + if self.is_transaction_complete_commit(): + return '[COMMIT]' + else: + return '[ABORT]' + return '' + +class TransactionRecord(RecordHeader): + FORMAT = '<Q' + MAGIC_ABORT = 'QLSa' + MAGIC_COMMIT = 'QLSc' + def init(self, _, xid_size): + self.xid_size = xid_size + self.xid = None + self.xid_complete = False + self.record_tail = None + def checksum_encode(self): # encode excluding record tail + return RecordHeader.encode(self) + struct.pack(self.FORMAT, self.xid_size) + self.xid + def is_valid(self, journal_file): + if not RecordHeader.is_header_valid(self, journal_file.file_header): + return False + if not self.xid_complete or len(self.xid) != self.xid_size: + return False + if self.record_tail is None: + return False + if not self.record_tail.is_valid(self): + return False + return True + def load(self, file_handle): + """Return True when load is incomplete and must be called again with new file handle""" + self.xid, self.xid_complete = qlslibs.utils.load_data(file_handle, self.xid, self.xid_size) + if not self.xid_complete: + return True + if self.xid_size > 0: + if self.record_tail is None: + self.record_tail = RecordTail(file_handle) + elif not self.record_tail.is_complete(): + self.record_tail.load(file_handle) + if self.record_tail.is_complete(): + self.record_tail.is_valid(self) + else: + return True + return False + def to_string(self, show_xid_flag, _u1, _u2): + """Return a string representation of the this TransactionRecord 
instance""" + if self.truncated_flag: + return '%s xid(%d) [Truncated, no more files in journal]' % (RecordHeader.__str__(self), self.xid_size) + if self.record_tail is None: + record_tail_str = '' + else: + record_tail_str = self.record_tail.to_string() + return '%s %s %s %s' % (self.to_rh_string(), + qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag), + record_tail_str, self._get_warnings()) + +# ============================================================================= + +CLASSES = { + 'a': TransactionRecord, + 'c': TransactionRecord, + 'd': DequeueRecord, + 'e': EnqueueRecord, +} + +if __name__ == '__main__': + print 'This is a library, and cannot be executed.' diff --git a/qpid/cpp/management/python/lib/qlslibs/utils.py b/qpid/cpp/management/python/lib/qlslibs/utils.py new file mode 100644 index 0000000000..dfa760a839 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/utils.py @@ -0,0 +1,216 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.utils + +Contains helper functions for qpid_qls_analyze. 
+""" + +import os +import qlslibs.jrnl +import stat +import string +import struct +import subprocess +import zlib + +DEFAULT_DBLK_SIZE = 128 +DEFAULT_SBLK_SIZE = 4096 # 32 dblks +DEFAULT_SBLK_SIZE_KB = DEFAULT_SBLK_SIZE / 1024 +DEFAULT_RECORD_VERSION = 2 +DEFAULT_HEADER_SIZE_SBLKS = 1 + +def adler32(data): + """return the adler32 checksum of data""" + return zlib.adler32(data) & 0xffffffff + +def create_record(magic, uflags, journal_file, record_id, dequeue_record_id, xid, data): + """Helper function to construct a record with xid, data (where applicable) and consistent tail with checksum""" + record_class = qlslibs.jrnl.CLASSES.get(magic[-1]) + record = record_class(0, magic, DEFAULT_RECORD_VERSION, uflags, journal_file.file_header.serial, record_id) + xid_length = len(xid) if xid is not None else 0 + if isinstance(record, qlslibs.jrnl.EnqueueRecord): + data_length = len(data) if data is not None else 0 + record.init(None, xid_length, data_length) + elif isinstance(record, qlslibs.jrnl.DequeueRecord): + record.init(None, dequeue_record_id, xid_length) + elif isinstance(record, qlslibs.jrnl.TransactionRecord): + record.init(None, xid_length) + else: + raise qlslibs.err.InvalidClassError(record.__class__.__name__) + if xid is not None: + record.xid = xid + record.xid_complete = True + if data is not None: + record.data = data + record.data_complete = True + record.record_tail = _mk_record_tail(record) + return record + +def efp_directory_size(directory_name): + """"Decode the directory name in the format NNNk to a numeric size, where NNN is a number string""" + try: + if directory_name[-1] == 'k': + return int(directory_name[:-1]) + except ValueError: + pass + return 0 + +def format_data(data, data_size=None, show_data_flag=True, txtest_flag=False): + """Format binary data for printing""" + return _format_binary(data, data_size, show_data_flag, 'data', qlslibs.err.DataSizeError, False, txtest_flag) + +def format_xid(xid, xid_size=None, show_xid_flag=True): + 
"""Format binary XID for printing""" + return _format_binary(xid, xid_size, show_xid_flag, 'xid', qlslibs.err.XidSizeError, True, False) + +def get_avail_disk_space(path): + df_proc = subprocess.Popen(["df", path], stdout=subprocess.PIPE) + output = df_proc.communicate()[0] + return int(output.split('\n')[1].split()[3]) + +def has_write_permission(path): + stat_info = os.stat(path) + return bool(stat_info.st_mode & stat.S_IRGRP) + +def inv_str(in_string): + """Perform a binary 1's compliment (invert all bits) on a binary string""" + istr = '' + for index in range(0, len(in_string)): + istr += chr(~ord(in_string[index]) & 0xff) + return istr + +def load(file_handle, klass): + """Load a record of class klass from a file""" + args = load_args(file_handle, klass) + subclass = klass.discriminate(args) + result = subclass(*args) # create instance of record + if subclass != klass: + result.init(*load_args(file_handle, subclass)) + return result + +def load_args(file_handle, klass): + """Load the arguments from class klass""" + size = struct.calcsize(klass.FORMAT) + foffs = file_handle.tell(), + fbin = file_handle.read(size) + if len(fbin) != size: + raise qlslibs.err.UnexpectedEndOfFileError(len(fbin), size, foffs, file_handle.name) + return foffs + struct.unpack(klass.FORMAT, fbin) + +def load_data(file_handle, element, element_size): + """Read element_size bytes of binary data from file_handle into element""" + if element_size == 0: + return element, True + if element is None: + element = file_handle.read(element_size) + else: + read_size = element_size - len(element) + element += file_handle.read(read_size) + return element, len(element) == element_size + +def skip(file_handle, boundary): + """Read and discard disk bytes until the next multiple of boundary""" + if not file_handle.closed: + file_handle.read(_rem_bytes_in_block(file_handle, boundary)) + +#--- protected functions --- + +def _format_binary(bin_str, bin_size, show_bin_flag, prefix, err_class, hex_num_flag, 
txtest_flag): + """Format binary XID for printing""" + if bin_str is None and bin_size is not None: + if bin_size > 0: + raise err_class(bin_size, len(bin_str), bin_str) + return '' + if bin_size is None: + bin_size = len(bin_str) + elif bin_size != len(bin_str): + raise err_class(bin_size, len(bin_str), bin_str) + out_str = '%s(%d)' % (prefix, bin_size) + if txtest_flag: + out_str += '=\'%s\'' % _txtest_msg_str(bin_str) + elif show_bin_flag: + if _is_printable(bin_str): + binstr = '"%s"' % _split_str(bin_str) + elif hex_num_flag: + binstr = '0x%s' % _str_to_hex_num(bin_str) + else: + binstr = _hex_split_str(bin_str, 50, 10, 10) + out_str += '=\'%s\'' % binstr + return out_str + +def _hex_str(in_str, begin, end): + """Return a binary string as a hex string""" + hstr = '' + for index in range(begin, end): + if _is_printable(in_str[index]): + hstr += in_str[index] + else: + hstr += '\\%02x' % ord(in_str[index]) + return hstr + +def _hex_split_str(in_str, split_size, head_size, tail_size): + """Split a hex string into two parts separated by an ellipsis""" + if len(in_str) <= split_size: + return _hex_str(in_str, 0, len(in_str)) + return _hex_str(in_str, 0, head_size) + ' ... 
' + _hex_str(in_str, len(in_str)-tail_size, len(in_str)) + +def _txtest_msg_str(bin_str): + """Extract the message number used in qpid-txtest""" + msg_index = bin_str.find('msg') + if msg_index >= 0: + end_index = bin_str.find('\x00', msg_index) + assert end_index >= 0 + return bin_str[msg_index:end_index] + return None + +def _is_printable(in_str): + """Return True if in_str in printable; False otherwise.""" + for this_char in in_str: + if this_char not in string.letters and this_char not in string.digits and this_char not in string.punctuation: + return False + return True + +def _mk_record_tail(record): + record_tail = qlslibs.jrnl.RecordTail(None) + record_tail.xmagic = inv_str(record.magic) + record_tail.checksum = adler32(record.checksum_encode()) + record_tail.serial = record.serial + record_tail.record_id = record.record_id + return record_tail + +def _rem_bytes_in_block(file_handle, block_size): + """Return the remaining bytes in a block""" + foffs = file_handle.tell() + return (_size_in_blocks(foffs, block_size) * block_size) - foffs + +def _size_in_blocks(size, block_size): + """Return the size in terms of data blocks""" + return int((size + block_size - 1) / block_size) + +def _split_str(in_str, split_size = 50): + """Split a string into two parts separated by an ellipsis if it is longer than split_size""" + if len(in_str) < split_size: + return in_str + return in_str[:25] + ' ... 
' + in_str[-25:] + +def _str_to_hex_num(in_str): + """Turn a string into a hex number representation, little endian assumed (ie LSB is first, MSB is last)""" + return ''.join(x.encode('hex') for x in reversed(in_str)) diff --git a/qpid/cpp/src/tests/quick_perftest b/qpid/cpp/management/python/lib/qmf/__init__.py index 698af60324..31d5a2ef58 100755..100644 --- a/qpid/cpp/src/tests/quick_perftest +++ b/qpid/cpp/management/python/lib/qmf/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/env bash - # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -18,5 +16,3 @@ # specific language governing permissions and limitations # under the License. # - -exec `dirname $0`/run_test ./qpid-perftest --summary --count 100 diff --git a/qpid/cpp/management/python/lib/qmf/console.py b/qpid/cpp/management/python/lib/qmf/console.py new file mode 100644 index 0000000000..405c5dcb62 --- /dev/null +++ b/qpid/cpp/management/python/lib/qmf/console.py @@ -0,0 +1,4054 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +""" Console API for Qpid Management Framework """ + +import os +import platform +import qpid +import struct +import socket +import re +import sys +from qpid.datatypes import UUID +from qpid.datatypes import timestamp +from qpid.datatypes import datetime +from qpid.exceptions import Closed +from qpid.session import SessionDetached +from qpid.connection import Connection, ConnectionFailed, Timeout +from qpid.datatypes import Message, RangedSet, UUID +from qpid.util import connect, ssl, URL +from qpid.codec010 import StringCodec as Codec +from threading import Lock, Condition, Thread, Semaphore +from Queue import Queue, Empty +from time import time, strftime, gmtime, sleep +from cStringIO import StringIO + +#import qpid.log +#qpid.log.enable(name="qpid.io.cmd", level=qpid.log.DEBUG) + +#=================================================================================================== +# CONSOLE +#=================================================================================================== +class Console: + """ To access the asynchronous operations, a class must be derived from + Console with overrides of any combination of the available methods. """ + + def brokerConnected(self, broker): + """ Invoked when a connection is established to a broker """ + pass + + def brokerConnectionFailed(self, broker): + """ Invoked when a connection to a broker fails """ + pass + + def brokerDisconnected(self, broker): + """ Invoked when the connection to a broker is lost """ + pass + + def newPackage(self, name): + """ Invoked when a QMF package is discovered. """ + pass + + def newClass(self, kind, classKey): + """ Invoked when a new class is discovered. Session.getSchema can be + used to obtain details about the class.""" + pass + + def newAgent(self, agent): + """ Invoked when a QMF agent is discovered. """ + pass + + def delAgent(self, agent): + """ Invoked when a QMF agent disconects. 
""" + pass + + def objectProps(self, broker, record): + """ Invoked when an object is updated. """ + pass + + def objectStats(self, broker, record): + """ Invoked when an object is updated. """ + pass + + def event(self, broker, event): + """ Invoked when an event is raised. """ + pass + + def heartbeat(self, agent, timestamp): + """ Invoked when an agent heartbeat is received. """ + pass + + def brokerInfo(self, broker): + """ Invoked when the connection sequence reaches the point where broker information is available. """ + pass + + def methodResponse(self, broker, seq, response): + """ Invoked when a method response from an asynchronous method call is received. """ + pass + + +#=================================================================================================== +# BrokerURL +#=================================================================================================== +class BrokerURL(URL): + def __init__(self, *args, **kwargs): + URL.__init__(self, *args, **kwargs) + if self.port is None: + if self.scheme == URL.AMQPS: + self.port = 5671 + else: + self.port = 5672 + self.authName = None + self.authPass = None + if self.user: + self.authName = str(self.user) + if self.password: + self.authPass = str(self.password) + + def name(self): + return str(self) + + def match(self, host, port): + return socket.getaddrinfo(self.host, self.port)[0][4] == socket.getaddrinfo(host, port)[0][4] + +#=================================================================================================== +# Object +#=================================================================================================== +class Object(object): + """ + This class defines a 'proxy' object representing a real managed object on an agent. + Actions taken on this proxy are remotely affected on the real managed object. 
+ """ + def __init__(self, agent, schema, codec=None, prop=None, stat=None, v2Map=None, agentName=None, kwargs={}): + self._agent = agent + self._session = None + self._broker = None + if agent: + self._session = agent.session + self._broker = agent.broker + self._schema = schema + self._properties = [] + self._statistics = [] + self._currentTime = None + self._createTime = None + self._deleteTime = 0 + self._objectId = None + if v2Map: + self.v2Init(v2Map, agentName) + return + + if self._agent: + self._currentTime = codec.read_uint64() + self._createTime = codec.read_uint64() + self._deleteTime = codec.read_uint64() + self._objectId = ObjectId(codec) + if codec: + if prop: + notPresent = self._parsePresenceMasks(codec, schema) + for property in schema.getProperties(): + if property.name in notPresent: + self._properties.append((property, None)) + else: + self._properties.append((property, self._session._decodeValue(codec, property.type, self._broker))) + if stat: + for statistic in schema.getStatistics(): + self._statistics.append((statistic, self._session._decodeValue(codec, statistic.type, self._broker))) + else: + for property in schema.getProperties(): + if property.optional: + self._properties.append((property, None)) + else: + self._properties.append((property, self._session._defaultValue(property, self._broker, kwargs))) + for statistic in schema.getStatistics(): + self._statistics.append((statistic, self._session._defaultValue(statistic, self._broker, kwargs))) + + def v2Init(self, omap, agentName): + if omap.__class__ != dict: + raise Exception("QMFv2 object data must be a map/dict") + if '_values' not in omap: + raise Exception("QMFv2 object must have '_values' element") + + values = omap['_values'] + for prop in self._schema.getProperties(): + if prop.name in values: + if prop.type == 10: # Reference + self._properties.append((prop, ObjectId(values[prop.name], agentName=agentName))) + else: + self._properties.append((prop, values[prop.name])) + for 
stat in self._schema.getStatistics(): + if stat.name in values: + self._statistics.append((stat, values[stat.name])) + if '_subtypes' in omap: + self._subtypes = omap['_subtypes'] + if '_object_id' in omap: + self._objectId = ObjectId(omap['_object_id'], agentName=agentName) + else: + self._objectId = None + + self._currentTime = omap.get("_update_ts", 0) + self._createTime = omap.get("_create_ts", 0) + self._deleteTime = omap.get("_delete_ts", 0) + + def getAgent(self): + """ Return the agent from which this object was sent """ + return self._agent + + def getBroker(self): + """ Return the broker from which this object was sent """ + return self._broker + + def getV2RoutingKey(self): + """ Get the QMFv2 routing key to address this object """ + return self._agent.getV2RoutingKey() + + def getObjectId(self): + """ Return the object identifier for this object """ + return self._objectId + + def getClassKey(self): + """ Return the class-key that references the schema describing this object. """ + return self._schema.getKey() + + def getSchema(self): + """ Return the schema that describes this object. """ + return self._schema + + def getMethods(self): + """ Return a list of methods available for this object. """ + return self._schema.getMethods() + + def getTimestamps(self): + """ Return the current, creation, and deletion times for this object. """ + return self._currentTime, self._createTime, self._deleteTime + + def isDeleted(self): + """ Return True iff this object has been deleted. """ + return self._deleteTime != 0 + + def isManaged(self): + """ Return True iff this object is a proxy for a managed object on an agent. """ + return self._objectId and self._agent + + def getIndex(self): + """ Return a string describing this object's primary key. 
""" + if self._objectId.isV2: + return self._objectId.getObject() + result = u"" + for prop, value in self._properties: + if prop.index: + if result != u"": + result += u":" + try: + valstr = unicode(self._session._displayValue(value, prop.type)) + except Exception, e: + valstr = u"<undecodable>" + result += valstr + return result + + def getProperties(self): + """ Return a list of object properties """ + return self._properties + + def getStatistics(self): + """ Return a list of object statistics """ + return self._statistics + + def mergeUpdate(self, newer): + """ Replace properties and/or statistics with a newly received update """ + if not self.isManaged(): + raise Exception("Object is not managed") + if self._objectId != newer._objectId: + raise Exception("Objects with different object-ids") + if len(newer.getProperties()) > 0: + self._properties = newer.getProperties() + if len(newer.getStatistics()) > 0: + self._statistics = newer.getStatistics() + self._currentTime = newer._currentTime + self._deleteTime = newer._deleteTime + + def update(self): + """ Contact the agent and retrieve the lastest property and statistic values for this object. 
""" + if not self.isManaged(): + raise Exception("Object is not managed") + obj = self._agent.getObjects(_objectId=self._objectId) + if obj: + self.mergeUpdate(obj[0]) + else: + raise Exception("Underlying object no longer exists") + + def __repr__(self): + if self.isManaged(): + id = self.getObjectId().__repr__() + else: + id = "unmanaged" + key = self.getClassKey() + return key.getPackageName() + ":" + key.getClassName() +\ + "[" + id + "] " + self.getIndex().encode("utf8") + + def __getattr__(self, name): + for method in self._schema.getMethods(): + if name == method.name: + return lambda *args, **kwargs : self._invoke(name, args, kwargs) + for prop, value in self._properties: + if name == prop.name: + return value + if name == "_" + prop.name + "_" and prop.type == 10: # Dereference references + deref = self._agent.getObjects(_objectId=value) + if len(deref) != 1: + return None + else: + return deref[0] + for stat, value in self._statistics: + if name == stat.name: + return value + + # + # Check to see if the name is in the schema. If so, return None (i.e. 
this is a not-present attribute) + # + for prop in self._schema.getProperties(): + if name == prop.name: + return None + for stat in self._schema.getStatistics(): + if name == stat.name: + return None + raise Exception("Type Object has no attribute '%s'" % name) + + def __setattr__(self, name, value): + if name[0] == '_': + super.__setattr__(self, name, value) + return + + for prop, unusedValue in self._properties: + if name == prop.name: + newprop = (prop, value) + newlist = [] + for old, val in self._properties: + if name == old.name: + newlist.append(newprop) + else: + newlist.append((old, val)) + self._properties = newlist + return + super.__setattr__(self, name, value) + + def _parseDefault(self, typ, val): + try: + if typ in (2, 3, 4): # 16, 32, 64 bit numbers + val = int(val, 0) + elif typ == 11: # bool + val = val.lower() in ("t", "true", "1", "yes", "y") + elif typ == 15: # map + val = eval(val) + except: + pass + return val + + def _handleDefaultArguments(self, method, args, kwargs): + count = len([x for x in method.arguments if x.dir.find("I") != -1]) + for kwarg in kwargs.keys(): + if not [x for x in method.arguments if x.dir.find("I") != -1 and \ + x.name == kwarg]: + del kwargs[kwarg] + + # If there were not enough args supplied, add any defaulted arguments + # from the schema (starting at the end) until we either get enough + # arguments or run out of defaults + while count > len(args) + len(kwargs): + for arg in reversed(method.arguments): + if arg.dir.find("I") != -1 and getattr(arg, "default") is not None and \ + arg.name not in kwargs: + # add missing defaulted value to the kwargs dict + kwargs[arg.name] = self._parseDefault(arg.type, arg.default) + break + else: + # no suitable defaulted args found, end the while loop + break + + return count + + def _sendMethodRequest(self, name, args, kwargs, synchronous=False, timeWait=None): + for method in self._schema.getMethods(): + if name == method.name: + aIdx = 0 + sendCodec = Codec() + seq = 
self._session.seqMgr._reserve((method, synchronous)) + + count = self._handleDefaultArguments(method, args, kwargs) + if count != len(args) + len(kwargs): + raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(args) + len(kwargs))) + + if self._agent.isV2: + # + # Compose and send a QMFv2 method request + # + call = {} + call['_object_id'] = self._objectId.asMap() + call['_method_name'] = name + argMap = {} + for arg in method.arguments: + if arg.dir.find("I") != -1: + # If any kwargs match this schema arg, insert them in the proper place + if arg.name in kwargs: + argMap[arg.name] = kwargs[arg.name] + elif aIdx < len(args): + argMap[arg.name] = args[aIdx] + aIdx += 1 + call['_arguments'] = argMap + + dp = self._broker.amqpSession.delivery_properties() + dp.routing_key = self.getV2RoutingKey() + mp = self._broker.amqpSession.message_properties() + mp.content_type = "amqp/map" + if self._broker.saslUser: + mp.user_id = self._broker.saslUser + mp.correlation_id = str(seq) + mp.app_id = "qmf2" + mp.reply_to = self._broker.amqpSession.reply_to("qmf.default.direct", self._broker.v2_direct_queue) + mp.application_headers = {'qmf.opcode':'_method_request'} + sendCodec.write_map(call) + smsg = Message(dp, mp, sendCodec.encoded) + exchange = "qmf.default.direct" + + else: + # + # Associate this sequence with the agent hosting the object so we can correctly + # route the method-response + # + agent = self._broker.getAgent(self._broker.getBrokerBank(), self._objectId.getAgentBank()) + self._broker._setSequence(seq, agent) + + # + # Compose and send a QMFv1 method request + # + self._broker._setHeader(sendCodec, 'M', seq) + self._objectId.encode(sendCodec) + self._schema.getKey().encode(sendCodec) + sendCodec.write_str8(name) + + for arg in method.arguments: + if arg.dir.find("I") != -1: + self._session._encodeValue(sendCodec, args[aIdx], arg.type) + aIdx += 1 + smsg = self._broker._message(sendCodec.encoded, "agent.%d.%s" % + 
(self._objectId.getBrokerBank(), self._objectId.getAgentBank())) + exchange = "qpid.management" + + if synchronous: + try: + self._broker.cv.acquire() + self._broker.syncInFlight = True + finally: + self._broker.cv.release() + self._broker._send(smsg, exchange) + return seq + return None + + def _invoke(self, name, args, kwargs): + if not self.isManaged(): + raise Exception("Object is not managed") + if "_timeout" in kwargs: + timeout = kwargs["_timeout"] + else: + timeout = self._broker.SYNC_TIME + + if "_async" in kwargs and kwargs["_async"]: + sync = False + if "_timeout" not in kwargs: + timeout = None + else: + sync = True + + # Remove special "meta" kwargs before handing to _sendMethodRequest() to process + if "_timeout" in kwargs: del kwargs["_timeout"] + if "_async" in kwargs: del kwargs["_async"] + + seq = self._sendMethodRequest(name, args, kwargs, sync, timeout) + if seq: + if not sync: + return seq + self._broker.cv.acquire() + try: + starttime = time() + while self._broker.syncInFlight and self._broker.error == None: + self._broker.cv.wait(timeout) + if time() - starttime > timeout: + raise RuntimeError("Timed out waiting for method to respond") + finally: + self._session.seqMgr._release(seq) + self._broker.cv.release() + if self._broker.error != None: + errorText = self._broker.error + self._broker.error = None + raise Exception(errorText) + return self._broker.syncResult + raise Exception("Invalid Method (software defect) [%s]" % name) + + def _encodeUnmanaged(self, codec): + codec.write_uint8(20) + codec.write_str8(self._schema.getKey().getPackageName()) + codec.write_str8(self._schema.getKey().getClassName()) + codec.write_bin128(self._schema.getKey().getHash()) + + # emit presence masks for optional properties + mask = 0 + bit = 0 + for prop, value in self._properties: + if prop.optional: + if bit == 0: + bit = 1 + if value: + mask |= bit + bit = bit << 1 + if bit == 256: + bit = 0 + codec.write_uint8(mask) + mask = 0 + if bit != 0: + 
codec.write_uint8(mask) + + # encode properties + for prop, value in self._properties: + if value != None: + self._session._encodeValue(codec, value, prop.type) + + # encode statistics + for stat, value in self._statistics: + self._session._encodeValue(codec, value, stat.type) + + def _parsePresenceMasks(self, codec, schema): + excludeList = [] + bit = 0 + for property in schema.getProperties(): + if property.optional: + if bit == 0: + mask = codec.read_uint8() + bit = 1 + if (mask & bit) == 0: + excludeList.append(property.name) + bit *= 2 + if bit == 256: + bit = 0 + return excludeList + + +#=================================================================================================== +# Session +#=================================================================================================== +class Session: + """ + An instance of the Session class represents a console session running + against one or more QMF brokers. A single instance of Session is needed + to interact with the management framework as a console. + """ + _CONTEXT_SYNC = 1 + _CONTEXT_STARTUP = 2 + _CONTEXT_MULTIGET = 3 + + DEFAULT_GET_WAIT_TIME = 60 + + ENCODINGS = { + str: 7, + timestamp: 8, + datetime: 8, + int: 9, + long: 9, + float: 13, + UUID: 14, + Object: 20, + list: 21 + } + + + def __init__(self, console=None, rcvObjects=True, rcvEvents=True, rcvHeartbeats=True, + manageConnections=False, userBindings=False): + """ + Initialize a session. If the console argument is provided, the + more advanced asynchronous features are available. If console is + defaulted, the session will operate in a simpler, synchronous manner. + + The rcvObjects, rcvEvents, and rcvHeartbeats arguments are meaningful only if 'console' + is provided. They control whether object updates, events, and agent-heartbeats are + subscribed to. If the console is not interested in receiving one or more of the above, + setting the argument to False will reduce tha bandwidth used by the API. 
+ + If manageConnections is set to True, the Session object will manage connections to + the brokers. This means that if a broker is unreachable, it will retry until a connection + can be established. If a connection is lost, the Session will attempt to reconnect. + + If manageConnections is set to False, the user is responsible for handing failures. In + this case, an unreachable broker will cause addBroker to raise an exception. + + If userBindings is set to False (the default) and rcvObjects is True, the console will + receive data for all object classes. If userBindings is set to True, the user must select + which classes the console shall receive by invoking the bindPackage or bindClass methods. + This allows the console to be configured to receive only information that is relavant to + a particular application. If rcvObjects id False, userBindings has no meaning. + """ + self.console = console + self.brokers = [] + self.schemaCache = SchemaCache() + self.seqMgr = SequenceManager() + self.cv = Condition() + self.syncSequenceList = [] + self.getResult = [] + self.getSelect = [] + self.error = None + self.rcvObjects = rcvObjects + self.rcvEvents = rcvEvents + self.rcvHeartbeats = rcvHeartbeats + self.userBindings = userBindings + if self.console == None: + self.rcvObjects = False + self.rcvEvents = False + self.rcvHeartbeats = False + self.v1BindingKeyList, self.v2BindingKeyList = self._bindingKeys() + self.manageConnections = manageConnections + # callback filters: + self.agent_filter = [] # (vendor, product, instance) || v1-agent-label-str + self.class_filter = [] # (pkg, class) + self.event_filter = [] # (pkg, event) + self.agent_heartbeat_min = 10 # minimum agent heartbeat timeout interval + self.agent_heartbeat_miss = 3 # # of heartbeats to miss before deleting agent + + if self.userBindings and not self.console: + raise Exception("userBindings can't be set unless a console is provided.") + + def close(self): + """ Releases all resources held by the 
session. Must be called by the + application when it is done with the Session object. + """ + self.cv.acquire() + try: + while len(self.brokers): + b = self.brokers.pop() + try: + b._shutdown() + except: + pass + finally: + self.cv.release() + + def _getBrokerForAgentAddr(self, agent_addr): + try: + self.cv.acquire() + key = (1, agent_addr) + for b in self.brokers: + if key in b.agents: + return b + finally: + self.cv.release() + return None + + + def _getAgentForAgentAddr(self, agent_addr): + try: + self.cv.acquire() + key = agent_addr + for b in self.brokers: + if key in b.agents: + return b.agents[key] + finally: + self.cv.release() + return None + + + def __repr__(self): + return "QMF Console Session Manager (brokers: %d)" % len(self.brokers) + + + def addBroker(self, target="localhost", timeout=None, mechanisms=None, sessTimeout=None, **connectArgs): + """ Connect to a Qpid broker. Returns an object of type Broker. + Will raise an exception if the session is not managing the connection and + the connection setup to the broker fails. + """ + if isinstance(target, BrokerURL): + url = target + else: + url = BrokerURL(target) + broker = Broker(self, url.host, url.port, mechanisms, url.authName, url.authPass, + ssl = url.scheme == URL.AMQPS, connTimeout=timeout, sessTimeout=sessTimeout, **connectArgs) + + self.brokers.append(broker) + return broker + + + def delBroker(self, broker): + """ Disconnect from a broker, and deallocate the broker proxy object. The + 'broker' argument is the object returned from the addBroker call. Errors + are ignored. 
+ """ + broker._shutdown() + self.brokers.remove(broker) + del broker + + + def getPackages(self): + """ Get the list of known QMF packages """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getPackages() + + + def getClasses(self, packageName): + """ Get the list of known classes within a QMF package """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getClasses(packageName) + + + def getSchema(self, classKey): + """ Get the schema for a QMF class """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getSchema(classKey) + + + def bindPackage(self, packageName): + """ Filter object and event callbacks to only those elements of the + specified package. Also filters newPackage and newClass callbacks to the + given package. Only valid if userBindings is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvObjects and not self.rcvEvents: + raise Exception("Session needs to be configured to receive events or objects.") + v1keys = ["console.obj.*.*.%s.#" % packageName, "console.event.*.*.%s.#" % packageName] + v2keys = ["agent.ind.data.%s.#" % packageName.replace(".", "_"), + "agent.ind.event.%s.#" % packageName.replace(".", "_"),] + if (packageName, None) not in self.class_filter: + self.class_filter.append((packageName, None)) + if (packageName, None) not in self.event_filter: + self.event_filter.append((packageName, None)) + self.v1BindingKeyList.extend(v1keys) + self.v2BindingKeyList.extend(v2keys) + for broker in self.brokers: + if broker.isConnected(): + for v1key in v1keys: + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + for v2key in v2keys: + # data indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", 
queue=broker.v2_topic_queue_ui, binding_key=v2key) + + + def bindClass(self, pname, cname=None): + """ Filter object callbacks to only those objects of the specified package + and optional class. Will also filter newPackage/newClass callbacks to the + specified package and class. Only valid if userBindings is True and + rcvObjects is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvObjects: + raise Exception("Session needs to be configured with rcvObjects=True.") + if cname is not None: + v1key = "console.obj.*.*.%s.%s.#" % (pname, cname) + v2key = "agent.ind.data.%s.%s.#" % (pname.replace(".", "_"), cname.replace(".", "_")) + else: + v1key = "console.obj.*.*.%s.#" % pname + v2key = "agent.ind.data.%s.#" % pname.replace(".", "_") + self.v1BindingKeyList.append(v1key) + self.v2BindingKeyList.append(v2key) + if (pname, cname) not in self.class_filter: + self.class_filter.append((pname, cname)) + for broker in self.brokers: + if broker.isConnected(): + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + # data indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key) + + + def bindClassKey(self, classKey): + """ Filter object callbacks to only those objects of the specified + class. Will also filter newPackage/newClass callbacks to the specified + package and class. Only valid if userBindings is True and rcvObjects is + True. + """ + pname = classKey.getPackageName() + cname = classKey.getClassName() + self.bindClass(pname, cname) + + def bindEvent(self, pname, ename=None): + """ Filter event callbacks only from a particular class by package and + event name, or all events in a package if ename=None. Will also filter + newPackage/newClass callbacks to the specified package and class. 
Only + valid if userBindings is True and rcvEvents is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvEvents: + raise Exception("Session needs to be configured with rcvEvents=True.") + if ename is not None: + v1key = "console.event.*.*.%s.%s.#" % (pname, ename) + v2key = "agent.ind.event.%s.%s.#" % (pname.replace(".", "_"), ename.replace(".", "_")) + else: + v1key = "console.event.*.*.%s.#" % pname + v2key = "agent.ind.event.%s.#" % pname.replace(".", "_") + self.v1BindingKeyList.append(v1key) + self.v2BindingKeyList.append(v2key) + if (pname, ename) not in self.event_filter: + self.event_filter.append((pname, ename)) + for broker in self.brokers: + if broker.isConnected(): + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + # event indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key) + + def bindEventKey(self, eventKey): + """ Filter event callbacks only from a particular class key. Will also + filter newPackage/newClass callbacks to the specified package and + class. Only valid if userBindings is True and rcvEvents is True. + """ + pname = eventKey.getPackageName() + ename = eventKey.getClassName() + self.bindEvent(pname, ename) + + def bindAgent(self, vendor=None, product=None, instance=None, label=None): + """ Receive heartbeats, newAgent and delAgent callbacks only for those + agent(s) that match the passed identification criteria: + V2 agents: vendor, optionally product and instance strings + V1 agents: the label string. + Only valid if userBindings is True. 
+ """ + if not self.userBindings: + raise Exception("Session not configured for binding specific agents.") + if vendor is None and label is None: + raise Exception("Must specify at least a vendor (V2 agents)" + " or label (V1 agents).") + + if vendor: # V2 agent identification + if product is not None: + v2key = "agent.ind.heartbeat.%s.%s.#" % (vendor.replace(".", "_"), product.replace(".", "_")) + else: + v2key = "agent.ind.heartbeat.%s.#" % vendor.replace(".", "_") + self.v2BindingKeyList.append(v2key) + + # allow wildcards - only add filter if a non-wildcarded component is given + if vendor == "*": + vendor = None + if product == "*": + product = None + if instance == "*": + instance = None + if vendor or product or instance: + if (vendor, product, instance) not in self.agent_filter: + self.agent_filter.append((vendor, product, instance)) + + for broker in self.brokers: + if broker.isConnected(): + if broker.brokerSupportsV2: + # heartbeats should arrive on the heartbeat queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", + queue=broker.v2_topic_queue_hb, + binding_key=v2key) + elif label != "*": # non-wildcard V1 agent label + # V1 format heartbeats do not have any agent identifier in the routing + # key, so we cannot filter them by bindings. 
+ if label not in self.agent_filter: + self.agent_filter.append(label) + + + def getAgents(self, broker=None): + """ Get a list of currently known agents """ + brokerList = [] + if broker == None: + for b in self.brokers: + brokerList.append(b) + else: + brokerList.append(broker) + + for b in brokerList: + b._waitForStable() + agentList = [] + for b in brokerList: + for a in b.getAgents(): + agentList.append(a) + return agentList + + + def makeObject(self, classKey, **kwargs): + """ Create a new, unmanaged object of the schema indicated by classKey """ + schema = self.getSchema(classKey) + if schema == None: + raise Exception("Schema not found for classKey") + return Object(None, schema, None, True, True, kwargs) + + + def getObjects(self, **kwargs): + """ Get a list of objects from QMF agents. + All arguments are passed by name(keyword). + + The class for queried objects may be specified in one of the following ways: + + _schema = <schema> - supply a schema object returned from getSchema. + _key = <key> - supply a classKey from the list returned by getClasses. + _class = <name> - supply a class name as a string. If the class name exists + in multiple packages, a _package argument may also be supplied. + _objectId = <id> - get the object referenced by the object-id + + If objects should be obtained from only one agent, use the following argument. + Otherwise, the query will go to all agents. + + _agent = <agent> - supply an agent from the list returned by getAgents. + + If the get query is to be restricted to one broker (as opposed to all connected brokers), + add the following argument: + + _broker = <broker> - supply a broker as returned by addBroker. + + The default timeout for this synchronous operation is 60 seconds. To change the timeout, + use the following argument: + + _timeout = <time in seconds> + + If additional arguments are supplied, they are used as property selectors. 
For example, + if the argument name="test" is supplied, only objects whose "name" property is "test" + will be returned in the result. + """ + if "_broker" in kwargs: + brokerList = [] + brokerList.append(kwargs["_broker"]) + else: + brokerList = self.brokers + for broker in brokerList: + broker._waitForStable() + if broker.isConnected(): + if "_package" not in kwargs or "_class" not in kwargs or \ + kwargs["_package"] != "org.apache.qpid.broker" or \ + kwargs["_class"] != "agent": + self.getObjects(_package = "org.apache.qpid.broker", _class = "agent", + _agent = broker.getAgent(1,0)) + + agentList = [] + if "_agent" in kwargs: + agent = kwargs["_agent"] + if agent.broker not in brokerList: + raise Exception("Supplied agent is not accessible through the supplied broker") + if agent.broker.isConnected(): + agentList.append(agent) + else: + if "_objectId" in kwargs: + oid = kwargs["_objectId"] + for broker in brokerList: + for agent in broker.getAgents(): + if agent.getBrokerBank() == oid.getBrokerBank() and agent.getAgentBank() == oid.getAgentBank(): + agentList.append(agent) + else: + for broker in brokerList: + for agent in broker.getAgents(): + if agent.broker.isConnected(): + agentList.append(agent) + + if len(agentList) == 0: + return [] + + # + # We now have a list of agents to query, start the queries and gather the results. + # + request = SessionGetRequest(len(agentList)) + for agent in agentList: + agent.getObjects(request, **kwargs) + timeout = 60 + if '_timeout' in kwargs: + timeout = kwargs['_timeout'] + request.wait(timeout) + return request.result + + + def addEventFilter(self, **kwargs): + """Filter unsolicited events based on package and event name. + QMF v2 also can filter on vendor, product, and severity values. 
+ + By default, a console receives unsolicted events by binding to: + + qpid.management/console.event.# (v1) + + qmf.default.topic/agent.ind.event.# (v2) + + A V1 event filter binding uses the pattern: + + qpid.management/console.event.*.*[.<package>[.<event>]].# + + A V2 event filter binding uses the pattern: + + qmf.default.topic/agent.ind.event.<Vendor|*>.<Product|*>.<severity|*>.<package|*>.<event|*>.# + """ + package = kwargs.get("package", "*") + event = kwargs.get("event", "*") + vendor = kwargs.get("vendor", "*") + product = kwargs.get("product", "*") + severity = kwargs.get("severity", "*") + + if package == "*" and event != "*": + raise Exception("'package' parameter required if 'event' parameter" + " supplied") + + # V1 key - can only filter on package (and event) + if package == "*": + key = "console.event.*.*." + str(package) + if event != "*": + key += "." + str(event) + key += ".#" + + if key not in self.v1BindingKeyList: + self.v1BindingKeyList.append(key) + try: + # remove default wildcard binding + self.v1BindingKeyList.remove("console.event.#") + except: + pass + + # V2 key - escape any "." in the filter strings + + key = "agent.ind.event." + str(package).replace(".", "_") \ + + "." + str(event).replace(".", "_") \ + + "." + str(severity).replace(".", "_") \ + + "." + str(vendor).replace(".", "_") \ + + "." + str(product).replace(".", "_") \ + + ".#" + + if key not in self.v2BindingKeyList: + self.v2BindingKeyList.append(key) + try: + # remove default wildcard binding + self.v2BindingKeyList.remove("agent.ind.event.#") + except: + pass + + if package != "*": + if event != "*": + f = (package, event) + else: + f = (package, None) + if f not in self.event_filter: + self.event_filter.append(f) + + + def addAgentFilter(self, vendor, product=None): + """ Deprecate - use bindAgent() instead + """ + self.addHeartbeatFilter(vendor=vendor, product=product) + + def addHeartbeatFilter(self, **kwargs): + """ Deprecate - use bindAgent() instead. 
+ """ + vendor = kwargs.get("vendor") + product = kwargs.get("product") + if vendor is None: + raise Exception("vendor parameter required!") + + # V1 heartbeats do not have any agent identifier - we cannot + # filter them by agent. + + # build the binding key - escape "."s... + key = "agent.ind.heartbeat." + str(vendor).replace(".", "_") + if product is not None: + key += "." + str(product).replace(".", "_") + key += ".#" + + if key not in self.v2BindingKeyList: + self.v2BindingKeyList.append(key) + self.agent_filter.append((vendor, product, None)) + + # be sure we don't ever filter the local broker + local_broker_key = "agent.ind.heartbeat." + "org.apache".replace(".", "_") \ + + "." + "qpidd".replace(".", "_") + ".#" + if local_broker_key not in self.v2BindingKeyList: + self.v2BindingKeyList.append(local_broker_key) + + # remove the wildcard key if present + try: + self.v2BindingKeyList.remove("agent.ind.heartbeat.#") + except: + pass + + def _bindingKeys(self): + v1KeyList = [] + v2KeyList = [] + v1KeyList.append("schema.#") + # note well: any binding that starts with 'agent.ind.heartbeat' will be + # bound to the heartbeat queue, otherwise it will be bound to the + # unsolicited indication queue. See _decOutstanding() for the binding. 
+ if not self.userBindings: + if self.rcvObjects and self.rcvEvents and self.rcvHeartbeats: + v1KeyList.append("console.#") + v2KeyList.append("agent.ind.data.#") + v2KeyList.append("agent.ind.event.#") + v2KeyList.append("agent.ind.heartbeat.#") + else: + # need heartbeats for V2 newAgent()/delAgent() + v2KeyList.append("agent.ind.heartbeat.#") + if self.rcvObjects: + v1KeyList.append("console.obj.#") + v2KeyList.append("agent.ind.data.#") + else: + v1KeyList.append("console.obj.*.*.org.apache.qpid.broker.agent") + if self.rcvEvents: + v1KeyList.append("console.event.#") + v2KeyList.append("agent.ind.event.#") + else: + v1KeyList.append("console.event.*.*.org.apache.qpid.broker.agent") + if self.rcvHeartbeats: + v1KeyList.append("console.heartbeat.#") + else: + # mandatory bindings + v1KeyList.append("console.obj.*.*.org.apache.qpid.broker.agent") + v1KeyList.append("console.event.*.*.org.apache.qpid.broker.agent") + v1KeyList.append("console.heartbeat.#") # no way to turn this on later + v2KeyList.append("agent.ind.heartbeat.org_apache.qpidd.#") + + return (v1KeyList, v2KeyList) + + + def _handleBrokerConnect(self, broker): + if self.console: + for agent in broker.getAgents(): + self._newAgentCallback(agent) + self.console.brokerConnected(broker) + + + def _handleBrokerDisconnect(self, broker): + if self.console: + for agent in broker.getAgents(): + self._delAgentCallback(agent) + self.console.brokerDisconnected(broker) + + + def _handleBrokerResp(self, broker, codec, seq): + broker.brokerId = codec.read_uuid() + if self.console != None: + self.console.brokerInfo(broker) + + # Send a package request + # (effectively inc and dec outstanding by not doing anything) + sendCodec = Codec() + seq = self.seqMgr._reserve(self._CONTEXT_STARTUP) + broker._setHeader(sendCodec, 'P', seq) + smsg = broker._message(sendCodec.encoded) + broker._send(smsg) + + + def _handlePackageInd(self, broker, codec, seq): + pname = str(codec.read_str8()) + notify = 
self.schemaCache.declarePackage(pname) + if notify and self.console != None: + self._newPackageCallback(pname) + + # Send a class request + broker._incOutstanding() + sendCodec = Codec() + seq = self.seqMgr._reserve(self._CONTEXT_STARTUP) + broker._setHeader(sendCodec, 'Q', seq) + sendCodec.write_str8(pname) + smsg = broker._message(sendCodec.encoded) + broker._send(smsg) + + + def _handleCommandComplete(self, broker, codec, seq, agent): + code = codec.read_uint32() + text = codec.read_str8() + context = self.seqMgr._release(seq) + if context == self._CONTEXT_STARTUP: + broker._decOutstanding() + elif context == self._CONTEXT_SYNC and seq == broker.syncSequence: + try: + broker.cv.acquire() + broker.syncInFlight = False + broker.cv.notify() + finally: + broker.cv.release() + elif context == self._CONTEXT_MULTIGET and seq in self.syncSequenceList: + try: + self.cv.acquire() + self.syncSequenceList.remove(seq) + if len(self.syncSequenceList) == 0: + self.cv.notify() + finally: + self.cv.release() + + if agent: + agent._handleV1Completion(seq, code, text) + + + def _handleClassInd(self, broker, codec, seq): + kind = codec.read_uint8() + classKey = ClassKey(codec) + classKey._setType(kind) + schema = self.schemaCache.getSchema(classKey) + + if not schema: + # Send a schema request for the unknown class + broker._incOutstanding() + sendCodec = Codec() + seq = self.seqMgr._reserve(self._CONTEXT_STARTUP) + broker._setHeader(sendCodec, 'S', seq) + classKey.encode(sendCodec) + smsg = broker._message(sendCodec.encoded) + broker._send(smsg) + + + def _handleHeartbeatInd(self, broker, codec, seq, msg): + brokerBank = 1 + agentBank = 0 + dp = msg.get("delivery_properties") + if dp: + key = dp["routing_key"] + if key: + keyElements = key.split(".") + if len(keyElements) == 4: + brokerBank = int(keyElements[2]) + agentBank = int(keyElements[3]) + else: + # If there's no routing key in the delivery properties, + # assume the message is from the broker. 
+ brokerBank = 1 + agentBank = 0 + + agent = broker.getAgent(brokerBank, agentBank) + if self.rcvHeartbeats and self.console != None and agent != None: + timestamp = codec.read_uint64() + self._heartbeatCallback(agent, timestamp) + + + def _handleSchemaResp(self, broker, codec, seq, agent_addr): + kind = codec.read_uint8() + classKey = ClassKey(codec) + classKey._setType(kind) + _class = SchemaClass(kind, classKey, codec, self) + new_pkg, new_cls = self.schemaCache.declareClass(classKey, _class) + ctx = self.seqMgr._release(seq) + if ctx: + broker._decOutstanding() + if self.console != None: + if new_pkg: + self._newPackageCallback(classKey.getPackageName()) + if new_cls: + self._newClassCallback(kind, classKey) + + if agent_addr and (agent_addr.__class__ == str or agent_addr.__class__ == unicode): + agent = self._getAgentForAgentAddr(agent_addr) + if agent: + agent._schemaInfoFromV2Agent() + + + def _v2HandleHeartbeatInd(self, broker, mp, ah, content): + try: + agentName = ah["qmf.agent"] + values = content["_values"] + + if '_timestamp' in values: + timestamp = values["_timestamp"] + else: + timestamp = values['timestamp'] + + if '_heartbeat_interval' in values: + interval = values['_heartbeat_interval'] + else: + interval = values['heartbeat_interval'] + + epoch = 0 + if '_epoch' in values: + epoch = values['_epoch'] + elif 'epoch' in values: + epoch = values['epoch'] + except Exception,e: + return + + if self.agent_filter: + # only allow V2 agents that satisfy the filter + v = agentName.split(":", 2) + if len(v) != 3 or ((v[0], None, None) not in self.agent_filter + and (v[0], v[1], None) not in self.agent_filter + and (v[0], v[1], v[2]) not in self.agent_filter): + return + + ## + ## We already have the "local-broker" agent in our list as ['0']. 
+ ## + if '_vendor' in values and values['_vendor'] == 'apache.org' and \ + '_product' in values and values['_product'] == 'qpidd': + agent = broker.getBrokerAgent() + else: + agent = broker.getAgent(1, agentName) + if agent == None: + agent = Agent(broker, agentName, "QMFv2 Agent", True, interval) + agent.setEpoch(epoch) + broker._addAgent(agentName, agent) + else: + agent.touch() + if self.rcvHeartbeats and self.console and agent: + self._heartbeatCallback(agent, timestamp) + agent.update_schema_timestamp(values.get("_schema_updated", 0)) + + + def _v2HandleAgentLocateRsp(self, broker, mp, ah, content): + self._v2HandleHeartbeatInd(broker, mp, ah, content) + + + def _handleError(self, error): + try: + self.cv.acquire() + if len(self.syncSequenceList) > 0: + self.error = error + self.syncSequenceList = [] + self.cv.notify() + finally: + self.cv.release() + + + def _selectMatch(self, object): + """ Check the object against self.getSelect to check for a match """ + for key, value in self.getSelect: + for prop, propval in object.getProperties(): + if key == prop.name and value != propval: + return False + return True + + + def _decodeValue(self, codec, typecode, broker=None): + """ Decode, from the codec, a value based on its typecode. 
""" + if typecode == 1: data = codec.read_uint8() # U8 + elif typecode == 2: data = codec.read_uint16() # U16 + elif typecode == 3: data = codec.read_uint32() # U32 + elif typecode == 4: data = codec.read_uint64() # U64 + elif typecode == 6: data = codec.read_str8() # SSTR + elif typecode == 7: data = codec.read_str16() # LSTR + elif typecode == 8: data = codec.read_int64() # ABSTIME + elif typecode == 9: data = codec.read_uint64() # DELTATIME + elif typecode == 10: data = ObjectId(codec) # REF + elif typecode == 11: data = codec.read_uint8() != 0 # BOOL + elif typecode == 12: data = codec.read_float() # FLOAT + elif typecode == 13: data = codec.read_double() # DOUBLE + elif typecode == 14: data = codec.read_uuid() # UUID + elif typecode == 16: data = codec.read_int8() # S8 + elif typecode == 17: data = codec.read_int16() # S16 + elif typecode == 18: data = codec.read_int32() # S32 + elif typecode == 19: data = codec.read_int64() # S63 + elif typecode == 15: data = codec.read_map() # FTABLE + elif typecode == 20: # OBJECT + # Peek at the type, and if it is still 20 pull it decode. If + # Not, call back into self. + inner_type_code = codec.read_uint8() + if inner_type_code == 20: + classKey = ClassKey(codec) + schema = self.schemaCache.getSchema(classKey) + if not schema: + return None + data = Object(self, broker, schema, codec, True, True, False) + else: + data = self._decodeValue(codec, inner_type_code, broker) + elif typecode == 21: data = codec.read_list() # List + elif typecode == 22: #Array + #taken from codec10.read_array + sc = Codec(codec.read_vbin32()) + count = sc.read_uint32() + type = sc.read_uint8() + data = [] + while count > 0: + data.append(self._decodeValue(sc,type,broker)) + count -= 1 + else: + raise ValueError("Invalid type code: %d" % typecode) + return data + + + def _encodeValue(self, codec, value, typecode): + """ Encode, into the codec, a value based on its typecode. 
""" + if typecode == 1: codec.write_uint8 (int(value)) # U8 + elif typecode == 2: codec.write_uint16 (int(value)) # U16 + elif typecode == 3: codec.write_uint32 (long(value)) # U32 + elif typecode == 4: codec.write_uint64 (long(value)) # U64 + elif typecode == 6: codec.write_str8 (value) # SSTR + elif typecode == 7: codec.write_str16 (value) # LSTR + elif typecode == 8: codec.write_int64 (long(value)) # ABSTIME + elif typecode == 9: codec.write_uint64 (long(value)) # DELTATIME + elif typecode == 10: value.encode (codec) # REF + elif typecode == 11: codec.write_uint8 (int(value)) # BOOL + elif typecode == 12: codec.write_float (float(value)) # FLOAT + elif typecode == 13: codec.write_double (float(value)) # DOUBLE + elif typecode == 14: codec.write_uuid (value.bytes) # UUID + elif typecode == 16: codec.write_int8 (int(value)) # S8 + elif typecode == 17: codec.write_int16 (int(value)) # S16 + elif typecode == 18: codec.write_int32 (int(value)) # S32 + elif typecode == 19: codec.write_int64 (int(value)) # S64 + elif typecode == 20: value._encodeUnmanaged(codec) # OBJECT + elif typecode == 15: codec.write_map (value) # FTABLE + elif typecode == 21: codec.write_list (value) # List + elif typecode == 22: # Array + sc = Codec() + self._encodeValue(sc, len(value), 3) + if len(value) > 0: + ltype = self.encoding(value[0]) + self._encodeValue(sc,ltype,1) + for o in value: + self._encodeValue(sc, o, ltype) + codec.write_vbin32(sc.encoded) + else: + raise ValueError ("Invalid type code: %d" % typecode) + + + def encoding(self, value): + return self._encoding(value.__class__) + + + def _encoding(self, klass): + if Session.ENCODINGS.has_key(klass): + return self.ENCODINGS[klass] + for base in klass.__bases__: + result = self._encoding(base) + if result != None: + return result + + + def _displayValue(self, value, typecode): + """ """ + if typecode == 1: return unicode(value) + elif typecode == 2: return unicode(value) + elif typecode == 3: return unicode(value) + elif typecode 
== 4: return unicode(value) + elif typecode == 6: return value + elif typecode == 7: return value + elif typecode == 8: return unicode(strftime("%c", gmtime(value / 1000000000))) + elif typecode == 9: return unicode(value) + elif typecode == 10: return unicode(value.__repr__()) + elif typecode == 11: + if value: return u"T" + else: return u"F" + elif typecode == 12: return unicode(value) + elif typecode == 13: return unicode(value) + elif typecode == 14: return unicode(value.__repr__()) + elif typecode == 15: return unicode(value.__repr__()) + elif typecode == 16: return unicode(value) + elif typecode == 17: return unicode(value) + elif typecode == 18: return unicode(value) + elif typecode == 19: return unicode(value) + elif typecode == 20: return unicode(value.__repr__()) + elif typecode == 21: return unicode(value.__repr__()) + elif typecode == 22: return unicode(value.__repr__()) + else: + raise ValueError ("Invalid type code: %d" % typecode) + + + def _defaultValue(self, stype, broker=None, kwargs={}): + """ """ + typecode = stype.type + if typecode == 1: return 0 + elif typecode == 2: return 0 + elif typecode == 3: return 0 + elif typecode == 4: return 0 + elif typecode == 6: return "" + elif typecode == 7: return "" + elif typecode == 8: return 0 + elif typecode == 9: return 0 + elif typecode == 10: return ObjectId(None) + elif typecode == 11: return False + elif typecode == 12: return 0.0 + elif typecode == 13: return 0.0 + elif typecode == 14: return UUID(bytes=[0 for i in range(16)]) + elif typecode == 15: return {} + elif typecode == 16: return 0 + elif typecode == 17: return 0 + elif typecode == 18: return 0 + elif typecode == 19: return 0 + elif typecode == 21: return [] + elif typecode == 22: return [] + elif typecode == 20: + try: + if "classKeys" in kwargs: + keyList = kwargs["classKeys"] + else: + keyList = None + classKey = self._bestClassKey(stype.refPackage, stype.refClass, keyList) + if classKey: + return self.makeObject(classKey, broker, 
kwargs) + except: + pass + return None + else: + raise ValueError ("Invalid type code: %d" % typecode) + + + def _bestClassKey(self, pname, cname, preferredList): + """ """ + if pname == None or cname == None: + if len(preferredList) == 0: + return None + return preferredList[0] + for p in preferredList: + if p.getPackageName() == pname and p.getClassName() == cname: + return p + clist = self.getClasses(pname) + for c in clist: + if c.getClassName() == cname: + return c + return None + + + def _sendMethodRequest(self, broker, schemaKey, objectId, name, argList): + """ This is a legacy function that is used by qpid-tool to invoke methods + using the broker, objectId and schema. + Methods are now invoked on the object itself. + """ + objs = self.getObjects(_objectId=objectId) + if objs: + return objs[0]._sendMethodRequest(name, argList, {}) + return None + + def _newPackageCallback(self, pname): + """ + Invokes the console.newPackage() callback if the callback is present and + the package is not filtered. + """ + if self.console: + if len(self.class_filter) == 0 and len(self.event_filter) == 0: + self.console.newPackage(pname) + else: + for x in self.class_filter: + if x[0] == pname: + self.console.newPackage(pname) + return + + for x in self.event_filter: + if x[0] == pname: + self.console.newPackage(pname) + return + + + def _newClassCallback(self, ctype, ckey): + """ + Invokes the console.newClass() callback if the callback is present and the + class is not filtered. 
+ """ + if self.console: + if ctype == ClassKey.TYPE_DATA: + if (len(self.class_filter) == 0 + or (ckey.getPackageName(), ckey.getClassName()) in self.class_filter): + self.console.newClass(ctype, ckey) + elif ctype == ClassKey.TYPE_EVENT: + if (len(self.event_filter) == 0 + or (ckey.getPackageName(), ckey.getClassName()) in self.event_filter): + self.console.newClass(ctype, ckey) + else: # old class keys did not contain type info, check both filters + if ((len(self.class_filter) == 0 and len(self.event_filter) == 0) + or (ckey.getPackageName(), ckey.getClassName()) in self.class_filter + or (ckey.getPackageName(), ckey.getClassName()) in self.event_filter): + self.console.newClass(ctype, ckey) + + def _agentAllowed(self, agentName, isV2): + """ True if the agent is NOT filtered. + """ + if self.agent_filter: + if isV2: + v = agentName.split(":", 2) + return ((len(v) > 2 and (v[0], v[1], v[2]) in self.agent_filter) + or (len(v) > 1 and (v[0], v[1], None) in self.agent_filter) + or (v and (v[0], None, None) in self.agent_filter)); + else: + return agentName in self.agent_filter + return True + + def _heartbeatCallback(self, agent, timestamp): + """ + Invokes the console.heartbeat() callback if the callback is present and the + agent is not filtered. + """ + if self.console and self.rcvHeartbeats: + if ((agent.isV2 and self._agentAllowed(agent.agentBank, True)) + or ((not agent.isV2) and self._agentAllowed(agent.label, False))): + self.console.heartbeat(agent, timestamp) + + def _newAgentCallback(self, agent): + """ + Invokes the console.newAgent() callback if the callback is present and the + agent is not filtered. + """ + if self.console: + if ((agent.isV2 and self._agentAllowed(agent.agentBank, True)) + or ((not agent.isV2) and self._agentAllowed(agent.label, False))): + self.console.newAgent(agent) + + def _delAgentCallback(self, agent): + """ + Invokes the console.delAgent() callback if the callback is present and the + agent is not filtered. 
+ """ + if self.console: + if ((agent.isV2 and self._agentAllowed(agent.agentBank, True)) + or ((not agent.isV2) and self._agentAllowed(agent.label, False))): + self.console.delAgent(agent) + +#=================================================================================================== +# SessionGetRequest +#=================================================================================================== +class SessionGetRequest(object): + """ + This class is used to track get-object queries at the Session level. + """ + def __init__(self, agentCount): + self.agentCount = agentCount + self.result = [] + self.cv = Condition() + self.waiting = True + + def __call__(self, **kwargs): + """ + Callable entry point for gathering collected objects. + """ + try: + self.cv.acquire() + if 'qmf_object' in kwargs: + self.result.append(kwargs['qmf_object']) + elif 'qmf_complete' in kwargs or 'qmf_exception' in kwargs: + self.agentCount -= 1 + if self.agentCount == 0: + self.waiting = None + self.cv.notify() + finally: + self.cv.release() + + def wait(self, timeout): + starttime = time() + try: + self.cv.acquire() + while self.waiting: + if (time() - starttime) > timeout: + raise Exception("Timed out after %d seconds" % timeout) + self.cv.wait(1) + finally: + self.cv.release() + + +#=================================================================================================== +# SchemaCache +#=================================================================================================== +class SchemaCache(object): + """ + The SchemaCache is a data structure that stores learned schema information. + """ + def __init__(self): + """ + Create a map of schema packages and a lock to protect this data structure. + Note that this lock is at the bottom of any lock hierarchy. If it is held, no other + lock in the system should attempt to be acquired. 
+ """ + self.packages = {} + self.lock = Lock() + + def getPackages(self): + """ Get the list of known QMF packages """ + list = [] + try: + self.lock.acquire() + for package in self.packages: + list.append(package) + finally: + self.lock.release() + return list + + def getClasses(self, packageName): + """ Get the list of known classes within a QMF package """ + list = [] + try: + self.lock.acquire() + if packageName in self.packages: + for pkey in self.packages[packageName]: + if isinstance(self.packages[packageName][pkey], SchemaClass): + list.append(self.packages[packageName][pkey].getKey()) + elif self.packages[packageName][pkey] is not None: + # schema not present yet, but we have schema type + list.append(ClassKey({"_package_name": packageName, + "_class_name": pkey[0], + "_hash": pkey[1], + "_type": self.packages[packageName][pkey]})) + finally: + self.lock.release() + return list + + def getSchema(self, classKey): + """ Get the schema for a QMF class, return None if schema not available """ + pname = classKey.getPackageName() + pkey = classKey.getPackageKey() + try: + self.lock.acquire() + if pname in self.packages: + if (pkey in self.packages[pname] and + isinstance(self.packages[pname][pkey], SchemaClass)): + # hack: value may be schema type info if schema not available + return self.packages[pname][pkey] + finally: + self.lock.release() + return None + + def declarePackage(self, pname): + """ Maybe add a package to the cache. Return True if package was added, None if it pre-existed. """ + try: + self.lock.acquire() + if pname in self.packages: + return None + self.packages[pname] = {} + finally: + self.lock.release() + return True + + def declareClass(self, classKey, classDef=None): + """ Add a class definition to the cache, if supplied. Return a pair + indicating if the package or class is new. 
+ """ + new_package = False + new_class = False + pname = classKey.getPackageName() + pkey = classKey.getPackageKey() + try: + self.lock.acquire() + if pname not in self.packages: + self.packages[pname] = {} + new_package = True + packageMap = self.packages[pname] + if pkey not in packageMap or not isinstance(packageMap[pkey], SchemaClass): + if classDef is not None: + new_class = True + packageMap[pkey] = classDef + elif classKey.getType() is not None: + # hack: don't indicate "new_class" to caller unless the classKey type + # information is present. "new_class" causes the console.newClass() + # callback to be invoked, which -requires- a valid classKey type! + new_class = True + # store the type for the getClasses() method: + packageMap[pkey] = classKey.getType() + + finally: + self.lock.release() + return (new_package, new_class) + + +#=================================================================================================== +# ClassKey +#=================================================================================================== +class ClassKey: + """ A ClassKey uniquely identifies a class from the schema. 
""" + + TYPE_DATA = "_data" + TYPE_EVENT = "_event" + + def __init__(self, constructor): + if constructor.__class__ == str: + # construct from __repr__ string + try: + # supports two formats: + # type present = P:C:T(H) + # no type present = P:C(H) + tmp = constructor.split(":") + if len(tmp) == 3: + self.pname, self.cname, rem = tmp + self.type, hsh = rem.split("(") + else: + self.pname, rem = tmp + self.cname, hsh = rem.split("(") + self.type = None + hsh = hsh.strip(")") + hexValues = hsh.split("-") + h0 = int(hexValues[0], 16) + h1 = int(hexValues[1], 16) + h2 = int(hexValues[2], 16) + h3 = int(hexValues[3], 16) + h4 = int(hexValues[4][0:4], 16) + h5 = int(hexValues[4][4:12], 16) + self.hash = UUID(bytes=struct.pack("!LHHHHL", h0, h1, h2, h3, h4, h5)) + except: + raise Exception("Invalid ClassKey format") + elif constructor.__class__ == dict: + # construct from QMFv2 map + try: + self.pname = constructor['_package_name'] + self.cname = constructor['_class_name'] + self.hash = constructor['_hash'] + self.type = constructor.get('_type') + except: + raise Exception("Invalid ClassKey map format %s" % str(constructor)) + else: + # construct from codec + codec = constructor + self.pname = str(codec.read_str8()) + self.cname = str(codec.read_str8()) + self.hash = UUID(bytes=codec.read_bin128()) + # old V1 codec did not include "type" + self.type = None + + def encode(self, codec): + # old V1 codec did not include "type" + codec.write_str8(self.pname) + codec.write_str8(self.cname) + codec.write_bin128(self.hash.bytes) + + def asMap(self): + m = {'_package_name': self.pname, + '_class_name': self.cname, + '_hash': self.hash} + if self.type is not None: + m['_type'] = self.type + return m + + def getPackageName(self): + return self.pname + + def getClassName(self): + return self.cname + + def getHash(self): + return self.hash + + def getType(self): + return self.type + + def getHashString(self): + return str(self.hash) + + def getPackageKey(self): + return (self.cname, 
self.hash) + + def __repr__(self): + if self.type is None: + return self.pname + ":" + self.cname + "(" + self.getHashString() + ")" + return self.pname + ":" + self.cname + ":" + self.type + "(" + self.getHashString() + ")" + + def _setType(self, _type): + if _type == 2 or _type == ClassKey.TYPE_EVENT: + self.type = ClassKey.TYPE_EVENT + else: + self.type = ClassKey.TYPE_DATA + + def __hash__(self): + ss = self.pname + self.cname + self.getHashString() + return ss.__hash__() + + def __eq__(self, other): + return self.__repr__() == other.__repr__() + +#=================================================================================================== +# SchemaClass +#=================================================================================================== +class SchemaClass: + """ """ + CLASS_KIND_TABLE = 1 + CLASS_KIND_EVENT = 2 + + def __init__(self, kind, key, codec, session): + self.kind = kind + self.classKey = key + self.properties = [] + self.statistics = [] + self.methods = [] + self.arguments = [] + self.session = session + + hasSupertype = 0 #codec.read_uint8() + if self.kind == self.CLASS_KIND_TABLE: + propCount = codec.read_uint16() + statCount = codec.read_uint16() + methodCount = codec.read_uint16() + if hasSupertype == 1: + self.superTypeKey = ClassKey(codec) + else: + self.superTypeKey = None ; + for idx in range(propCount): + self.properties.append(SchemaProperty(codec)) + for idx in range(statCount): + self.statistics.append(SchemaStatistic(codec)) + for idx in range(methodCount): + self.methods.append(SchemaMethod(codec)) + + elif self.kind == self.CLASS_KIND_EVENT: + argCount = codec.read_uint16() + if (hasSupertype): + self.superTypeKey = ClassKey(codec) + else: + self.superTypeKey = None ; + for idx in range(argCount): + self.arguments.append(SchemaArgument(codec, methodArg=False)) + + def __repr__(self): + if self.kind == self.CLASS_KIND_TABLE: + kindStr = "Table" + elif self.kind == self.CLASS_KIND_EVENT: + kindStr = "Event" + 
else: + kindStr = "Unsupported" + result = "%s Class: %s " % (kindStr, self.classKey.__repr__()) + return result + + def getKey(self): + """ Return the class-key for this class. """ + return self.classKey + + def getProperties(self): + """ Return the list of properties for the class. """ + if (self.superTypeKey == None): + return self.properties + else: + return self.properties + self.session.getSchema(self.superTypeKey).getProperties() + + def getStatistics(self): + """ Return the list of statistics for the class. """ + if (self.superTypeKey == None): + return self.statistics + else: + return self.statistics + self.session.getSchema(self.superTypeKey).getStatistics() + + def getMethods(self): + """ Return the list of methods for the class. """ + if (self.superTypeKey == None): + return self.methods + else: + return self.methods + self.session.getSchema(self.superTypeKey).getMethods() + + def getArguments(self): + """ Return the list of events for the class. """ + """ Return the list of methods for the class. 
""" + if (self.superTypeKey == None): + return self.arguments + else: + return self.arguments + self.session.getSchema(self.superTypeKey).getArguments() + + +#=================================================================================================== +# SchemaProperty +#=================================================================================================== +class SchemaProperty: + """ """ + def __init__(self, codec): + map = codec.read_map() + self.name = str(map["name"]) + self.type = map["type"] + self.access = str(map["access"]) + self.index = map["index"] != 0 + self.optional = map["optional"] != 0 + self.refPackage = None + self.refClass = None + self.unit = None + self.min = None + self.max = None + self.maxlen = None + self.desc = None + + for key, value in map.items(): + if key == "unit" : self.unit = value + elif key == "min" : self.min = value + elif key == "max" : self.max = value + elif key == "maxlen" : self.maxlen = value + elif key == "desc" : self.desc = value + elif key == "refPackage" : self.refPackage = value + elif key == "refClass" : self.refClass = value + + def __repr__(self): + return self.name + + +#=================================================================================================== +# SchemaStatistic +#=================================================================================================== +class SchemaStatistic: + """ """ + def __init__(self, codec): + map = codec.read_map() + self.name = str(map["name"]) + self.type = map["type"] + self.unit = None + self.desc = None + + for key, value in map.items(): + if key == "unit" : self.unit = value + elif key == "desc" : self.desc = value + + def __repr__(self): + return self.name + + +#=================================================================================================== +# SchemaMethod +#=================================================================================================== +class SchemaMethod: + """ """ + def __init__(self, 
codec): + map = codec.read_map() + self.name = str(map["name"]) + argCount = map["argCount"] + if "desc" in map: + self.desc = map["desc"] + else: + self.desc = None + self.arguments = [] + + for idx in range(argCount): + self.arguments.append(SchemaArgument(codec, methodArg=True)) + + def __repr__(self): + result = self.name + "(" + first = True + for arg in self.arguments: + if arg.dir.find("I") != -1: + if first: + first = False + else: + result += ", " + result += arg.name + result += ")" + return result + + +#=================================================================================================== +# SchemaArgument +#=================================================================================================== +class SchemaArgument: + """ """ + def __init__(self, codec, methodArg): + map = codec.read_map() + self.name = str(map["name"]) + self.type = map["type"] + if methodArg: + self.dir = str(map["dir"]).upper() + self.unit = None + self.min = None + self.max = None + self.maxlen = None + self.desc = None + self.default = None + self.refPackage = None + self.refClass = None + + for key, value in map.items(): + if key == "unit" : self.unit = value + elif key == "min" : self.min = value + elif key == "max" : self.max = value + elif key == "maxlen" : self.maxlen = value + elif key == "desc" : self.desc = value + elif key == "default" : self.default = value + elif key == "refPackage" : self.refPackage = value + elif key == "refClass" : self.refClass = value + + +#=================================================================================================== +# ObjectId +#=================================================================================================== +class ObjectId: + """ Object that represents QMF object identifiers """ + def __init__(self, constructor, first=0, second=0, agentName=None): + if constructor.__class__ == dict: + self.isV2 = True + self.agentName = agentName + self.agentEpoch = 0 + if '_agent_name' in 
constructor: self.agentName = constructor['_agent_name'] + if '_agent_epoch' in constructor: self.agentEpoch = constructor['_agent_epoch'] + if '_object_name' not in constructor: + raise Exception("QMFv2 OBJECT_ID must have the '_object_name' field.") + self.objectName = constructor['_object_name'] + else: + self.isV2 = None + if not constructor: + first = first + second = second + else: + first = constructor.read_uint64() + second = constructor.read_uint64() + self.agentName = str(first & 0x000000000FFFFFFF) + self.agentEpoch = (first & 0x0FFF000000000000) >> 48 + self.objectName = str(second) + + def _create(cls, agent_name, object_name, epoch=0): + oid = {"_agent_name": agent_name, + "_object_name": object_name, + "_agent_epoch": epoch} + return cls(oid) + create = classmethod(_create) + + def __cmp__(self, other): + if other == None or not isinstance(other, ObjectId) : + return 1 + + if self.objectName < other.objectName: + return -1 + if self.objectName > other.objectName: + return 1 + + if self.agentName < other.agentName: + return -1 + if self.agentName > other.agentName: + return 1 + + if self.agentEpoch < other.agentEpoch: + return -1 + if self.agentEpoch > other.agentEpoch: + return 1 + return 0 + + def __repr__(self): + return "%d-%d-%d-%s-%s" % (self.getFlags(), self.getSequence(), + self.getBrokerBank(), self.getAgentBank(), self.getObject()) + + def index(self): + return self.__repr__() + + def getFlags(self): + return 0 + + def getSequence(self): + return self.agentEpoch + + def getBrokerBank(self): + return 1 + + def getAgentBank(self): + return self.agentName + + def getV2RoutingKey(self): + if self.agentName == '0': + return "broker" + return self.agentName + + def getObject(self): + return self.objectName + + def isDurable(self): + return self.getSequence() == 0 + + def encode(self, codec): + first = (self.agentEpoch << 48) + (1 << 28) + second = 0 + + try: + first += int(self.agentName) + except: + pass + + try: + second = int(self.objectName) + 
except: + pass + + codec.write_uint64(first) + codec.write_uint64(second) + + def asMap(self): + omap = {'_agent_name': self.agentName, '_object_name': self.objectName} + if self.agentEpoch != 0: + omap['_agent_epoch'] = self.agentEpoch + return omap + + def __hash__(self): + return self.__repr__().__hash__() + + def __eq__(self, other): + return self.__repr__().__eq__(other) + + +#=================================================================================================== +# MethodResult +#=================================================================================================== +class MethodResult(object): + """ """ + def __init__(self, status, text, outArgs): + """ """ + self.status = status + self.text = text + self.outArgs = outArgs + + def __getattr__(self, name): + if name in self.outArgs: + return self.outArgs[name] + + def __repr__(self): + return "%s (%d) - %s" % (self.text, self.status, self.outArgs) + + +#=================================================================================================== +# Broker +#=================================================================================================== +class Broker(Thread): + """ This object represents a connection (or potential connection) to a QMF broker. """ + SYNC_TIME = 60 + nextSeq = 1 + + # for connection recovery + DELAY_MIN = 1 + DELAY_MAX = 128 + DELAY_FACTOR = 2 + + class _q_item: + """ Broker-private class to encapsulate data sent to the broker thread + queue. + """ + type_wakeup = 0 + type_v1msg = 1 + type_v2msg = 2 + + def __init__(self, typecode, data): + self.typecode = typecode + self.data = data + + def __init__(self, session, host, port, authMechs, authUser, authPass, + ssl=False, connTimeout=None, sessTimeout=None, **connectArgs): + """ Create a broker proxy and setup a connection to the broker. Will raise + an exception if the connection fails and the session is not configured to + retry connection setup (manageConnections = False). 
+ + Spawns a thread to manage the broker connection. Call _shutdown() to + shutdown the thread when releasing the broker. + """ + Thread.__init__(self) + self.session = session + self.host = host + self.port = port + self.mechanisms = authMechs + self.ssl = ssl + if connTimeout is not None: + connTimeout = float(connTimeout) + self.connTimeout = connTimeout + if sessTimeout is not None: + sessTimeout = float(sessTimeout) + else: + sessTimeout = self.SYNC_TIME + self.sessTimeout = sessTimeout + self.authUser = authUser + self.authPass = authPass + self.saslUser = None + self.cv = Condition() + self.seqToAgentMap = {} + self.error = None + self.conn_exc = None # exception hit by _tryToConnect() + self.brokerId = None + self.connected = False + self.brokerAgent = None + self.brokerSupportsV2 = None + self.rcv_queue = Queue() # for msg received on session + self.conn = None + self.amqpSession = None + self.amqpSessionId = "%s.%d.%d" % (platform.uname()[1], os.getpid(), Broker.nextSeq) + Broker.nextSeq += 1 + self.last_age_check = time() + self.connectArgs = connectArgs + # thread control + self.setDaemon(True) + self.setName("Thread for broker: %s:%d" % (host, port)) + self.canceled = False + self.ready = Semaphore(0) + self.start() + if not self.session.manageConnections: + # wait for connection setup to complete in subthread. + # On failure, propagate exception to caller + self.ready.acquire() + if self.conn_exc: + self._shutdown() # wait for the subthread to clean up... + raise self.conn_exc + # connection up - wait for stable... + try: + self._waitForStable() + agent = self.getBrokerAgent() + if agent: + agent.getObjects(_class="agent") + except: + self._shutdown() # wait for the subthread to clean up... + raise + + + def isConnected(self): + """ Return True if there is an active connection to the broker. """ + return self.connected + + def getError(self): + """ Return the last error message seen while trying to connect to the broker. 
""" + return self.error + + def getBrokerId(self): + """ Get broker's unique identifier (UUID) """ + return self.brokerId + + def getBrokerBank(self): + """ Return the broker-bank value. This is the value that the broker assigns to + objects within its control. This value appears as a field in the ObjectId + of objects created by agents controlled by this broker. """ + return 1 + + def getAgent(self, brokerBank, agentBank): + """ Return the agent object associated with a particular broker and agent bank value.""" + bankKey = str(agentBank) + try: + self.cv.acquire() + if bankKey in self.agents: + return self.agents[bankKey] + finally: + self.cv.release() + return None + + def getBrokerAgent(self): + return self.brokerAgent + + def getSessionId(self): + """ Get the identifier of the AMQP session to the broker """ + return self.amqpSessionId + + def getAgents(self): + """ Get the list of agents reachable via this broker """ + try: + self.cv.acquire() + return self.agents.values() + finally: + self.cv.release() + + def getAmqpSession(self): + """ Get the AMQP session object for this connected broker. 
""" + return self.amqpSession + + def getUrl(self): + """ """ + return BrokerURL(host=self.host, port=self.port) + + def getFullUrl(self, noAuthIfGuestDefault=True): + """ """ + if self.ssl: + scheme = "amqps" + else: + scheme = "amqp" + if self.authUser == "" or \ + (noAuthIfGuestDefault and self.authUser == "guest" and self.authPass == "guest"): + return BrokerURL(scheme=scheme, host=self.host, port=(self.port or 5672)) + else: + return BrokerURL(scheme=scheme, user=self.authUser, password=self.authPass, host=self.host, port=(self.port or 5672)) + + def __repr__(self): + if self.connected: + return "Broker connected at: %s" % self.getUrl() + else: + return "Disconnected Broker" + + def _setSequence(self, sequence, agent): + try: + self.cv.acquire() + self.seqToAgentMap[sequence] = agent + finally: + self.cv.release() + + def _clearSequence(self, sequence): + try: + self.cv.acquire() + self.seqToAgentMap.pop(sequence) + finally: + self.cv.release() + + def _tryToConnect(self): + """ Connect to the broker. Returns True if connection setup completes + successfully, otherwise returns False and sets self.error/self.conn_exc + with error info. Does not raise exceptions. + """ + self.error = None + self.conn_exc = None + try: + try: + self.cv.acquire() + self.agents = {} + finally: + self.cv.release() + + self.topicBound = False + self.syncInFlight = False + self.syncRequest = 0 + self.syncResult = None + self.reqsOutstanding = 1 + + try: + if self.amqpSession: + self.amqpSession.close() + except: + pass + self.amqpSession = None + + try: + if self.conn: + self.conn.close(5) + except: + pass + self.conn = None + + sock = connect(self.host, self.port) + sock.settimeout(5) + oldTimeout = sock.gettimeout() + sock.settimeout(self.connTimeout) + connSock = None + force_blocking = False + if self.ssl: + # Bug (QPID-4337): the "old" implementation of python SSL + # fails if the socket is set to non-blocking (which settimeout() + # may change). 
+ if sys.version_info[:2] < (2, 6): # 2.6+ uses openssl - it's ok + force_blocking = True + sock.setblocking(1) + certfile = None + if 'ssl_certfile' in self.connectArgs: + certfile = self.connectArgs['ssl_certfile'] + keyfile = None + if 'ssl_keyfile' in self.connectArgs: + keyfile = self.connectArgs['ssl_keyfile'] + connSock = ssl(sock, certfile=certfile, keyfile=keyfile) + else: + connSock = sock + if not 'service' in self.connectArgs: + self.connectArgs['service'] = 'qpidd' + self.conn = Connection(connSock, username=self.authUser, password=self.authPass, + mechanism = self.mechanisms, host=self.host, + **self.connectArgs) + def aborted(): + raise Timeout("Waiting for connection to be established with broker") + oldAborted = self.conn.aborted + self.conn.aborted = aborted + self.conn.start() + + # Bug (QPID-4337): don't enable non-blocking (timeouts) for old SSL + if not force_blocking: + sock.settimeout(oldTimeout) + self.conn.aborted = oldAborted + uid = self.conn.user_id + if uid.__class__ == tuple and len(uid) == 2: + self.saslUser = uid[1] + elif type(uid) is str: + self.saslUser = uid; + else: + self.saslUser = None + + # prevent topic queues from filling up (and causing the agents to + # disconnect) by discarding the oldest queued messages when full. 
+ topic_queue_options = {"qpid.policy_type":"ring"} + + self.replyName = "reply-%s" % self.amqpSessionId + self.amqpSession = self.conn.session(self.amqpSessionId) + self.amqpSession.timeout = self.sessTimeout + self.amqpSession.auto_sync = True + self.amqpSession.queue_declare(queue=self.replyName, exclusive=True, auto_delete=True) + self.amqpSession.exchange_bind(exchange="amq.direct", + queue=self.replyName, binding_key=self.replyName) + self.amqpSession.message_subscribe(queue=self.replyName, destination="rdest", + accept_mode=self.amqpSession.accept_mode.none, + acquire_mode=self.amqpSession.acquire_mode.pre_acquired) + self.amqpSession.incoming("rdest").listen(self._v1Cb, self._exceptionCb) + self.amqpSession.message_set_flow_mode(destination="rdest", flow_mode=self.amqpSession.flow_mode.window) + self.amqpSession.message_flow(destination="rdest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL) + self.amqpSession.message_flow(destination="rdest", unit=self.amqpSession.credit_unit.message, value=200) + + self.topicName = "topic-%s" % self.amqpSessionId + self.amqpSession.queue_declare(queue=self.topicName, exclusive=True, + auto_delete=True, + arguments=topic_queue_options) + self.amqpSession.message_subscribe(queue=self.topicName, destination="tdest", + accept_mode=self.amqpSession.accept_mode.none, + acquire_mode=self.amqpSession.acquire_mode.pre_acquired) + self.amqpSession.incoming("tdest").listen(self._v1Cb, self._exceptionCb) + self.amqpSession.message_set_flow_mode(destination="tdest", flow_mode=self.amqpSession.flow_mode.window) + self.amqpSession.message_flow(destination="tdest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL) + self.amqpSession.message_flow(destination="tdest", unit=self.amqpSession.credit_unit.message, value=200) + + ## + ## Check to see if the broker has QMFv2 exchanges configured + ## + direct_result = self.amqpSession.exchange_query("qmf.default.direct") + topic_result = 
self.amqpSession.exchange_query("qmf.default.topic") + self.brokerSupportsV2 = not (direct_result.not_found or topic_result.not_found) + + try: + self.cv.acquire() + self.agents = {} + self.brokerAgent = Agent(self, 0, "BrokerAgent", isV2=self.brokerSupportsV2) + self.agents['0'] = self.brokerAgent + finally: + self.cv.release() + + ## + ## Set up connectivity for QMFv2 + ## + if self.brokerSupportsV2: + # set up 3 queues: + # 1 direct queue - for responses destined to this console. + # 2 topic queues - one for heartbeats (hb), one for unsolicited data + # and event indications (ui). + self.v2_direct_queue = "qmfc-v2-%s" % self.amqpSessionId + self.amqpSession.queue_declare(queue=self.v2_direct_queue, exclusive=True, auto_delete=True) + self.v2_topic_queue_ui = "qmfc-v2-ui-%s" % self.amqpSessionId + self.amqpSession.queue_declare(queue=self.v2_topic_queue_ui, + exclusive=True, auto_delete=True, + arguments=topic_queue_options) + self.v2_topic_queue_hb = "qmfc-v2-hb-%s" % self.amqpSessionId + self.amqpSession.queue_declare(queue=self.v2_topic_queue_hb, + exclusive=True, auto_delete=True, + arguments=topic_queue_options) + + self.amqpSession.exchange_bind(exchange="qmf.default.direct", + queue=self.v2_direct_queue, binding_key=self.v2_direct_queue) + ## Other bindings here... 
+ + self.amqpSession.message_subscribe(queue=self.v2_direct_queue, destination="v2dest", + accept_mode=self.amqpSession.accept_mode.none, + acquire_mode=self.amqpSession.acquire_mode.pre_acquired) + self.amqpSession.incoming("v2dest").listen(self._v2Cb, self._exceptionCb) + self.amqpSession.message_set_flow_mode(destination="v2dest", flow_mode=self.amqpSession.flow_mode.window) + self.amqpSession.message_flow(destination="v2dest", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL) + self.amqpSession.message_flow(destination="v2dest", unit=self.amqpSession.credit_unit.message, value=50) + + self.amqpSession.message_subscribe(queue=self.v2_topic_queue_ui, destination="v2TopicUI", + accept_mode=self.amqpSession.accept_mode.none, + acquire_mode=self.amqpSession.acquire_mode.pre_acquired) + self.amqpSession.incoming("v2TopicUI").listen(self._v2Cb, self._exceptionCb) + self.amqpSession.message_set_flow_mode(destination="v2TopicUI", flow_mode=self.amqpSession.flow_mode.window) + self.amqpSession.message_flow(destination="v2TopicUI", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL) + self.amqpSession.message_flow(destination="v2TopicUI", unit=self.amqpSession.credit_unit.message, value=25) + + + self.amqpSession.message_subscribe(queue=self.v2_topic_queue_hb, destination="v2TopicHB", + accept_mode=self.amqpSession.accept_mode.none, + acquire_mode=self.amqpSession.acquire_mode.pre_acquired) + self.amqpSession.incoming("v2TopicHB").listen(self._v2Cb, self._exceptionCb) + self.amqpSession.message_set_flow_mode(destination="v2TopicHB", flow_mode=self.amqpSession.flow_mode.window) + self.amqpSession.message_flow(destination="v2TopicHB", unit=self.amqpSession.credit_unit.byte, value=0xFFFFFFFFL) + self.amqpSession.message_flow(destination="v2TopicHB", unit=self.amqpSession.credit_unit.message, value=100) + + codec = Codec() + self._setHeader(codec, 'B') + msg = self._message(codec.encoded) + self._send(msg) + + return True # connection complete + + except 
Exception, e: + self.error = "Exception during connection setup: %s - %s" % (e.__class__.__name__, e) + self.conn_exc = e + if self.session.console: + self.session.console.brokerConnectionFailed(self) + return False # connection failed + + def _updateAgent(self, obj): + """ + Just received an object of class "org.apache.qpid.broker:agent", which + represents a V1 agent. Add or update the list of agent proxies. + """ + bankKey = str(obj.agentBank) + agent = None + if obj._deleteTime == 0: + try: + self.cv.acquire() + if bankKey not in self.agents: + # add new agent only if label is not filtered + if len(self.session.agent_filter) == 0 or obj.label in self.session.agent_filter: + agent = Agent(self, obj.agentBank, obj.label) + self.agents[bankKey] = agent + finally: + self.cv.release() + if agent and self.session.console: + self.session._newAgentCallback(agent) + else: + try: + self.cv.acquire() + agent = self.agents.pop(bankKey, None) + if agent: + agent.close() + finally: + self.cv.release() + if agent and self.session.console: + self.session._delAgentCallback(agent) + + def _addAgent(self, name, agent): + try: + self.cv.acquire() + self.agents[name] = agent + finally: + self.cv.release() + if self.session.console: + self.session._newAgentCallback(agent) + + def _ageAgents(self): + if (time() - self.last_age_check) < self.session.agent_heartbeat_min: + # don't age if it's too soon + return + self.cv.acquire() + try: + to_delete = [] + to_notify = [] + for key in self.agents: + if self.agents[key].isOld(): + to_delete.append(key) + for key in to_delete: + agent = self.agents.pop(key) + agent.close() + to_notify.append(agent) + self.last_age_check = time() + finally: + self.cv.release() + if self.session.console: + for agent in to_notify: + self.session._delAgentCallback(agent) + + def _v2SendAgentLocate(self, predicate=[]): + """ + Broadcast an agent-locate request to cause all agents in the domain to tell us who they are. 
+ """ + # @todo: send locate only to those agents in agent_filter? + dp = self.amqpSession.delivery_properties() + dp.routing_key = "console.request.agent_locate" + mp = self.amqpSession.message_properties() + mp.content_type = "amqp/list" + if self.saslUser: + mp.user_id = self.saslUser + mp.app_id = "qmf2" + mp.reply_to = self.amqpSession.reply_to("qmf.default.direct", self.v2_direct_queue) + mp.application_headers = {'qmf.opcode':'_agent_locate_request'} + sendCodec = Codec() + sendCodec.write_list(predicate) + msg = Message(dp, mp, sendCodec.encoded) + self._send(msg, "qmf.default.topic") + + def _setHeader(self, codec, opcode, seq=0): + """ Compose the header of a management message. """ + codec.write_uint8(ord('A')) + codec.write_uint8(ord('M')) + codec.write_uint8(ord('2')) + codec.write_uint8(ord(opcode)) + codec.write_uint32(seq) + + def _checkHeader(self, codec): + """ Check the header of a management message and extract the opcode and class. """ + try: + octet = chr(codec.read_uint8()) + if octet != 'A': + return None, None + octet = chr(codec.read_uint8()) + if octet != 'M': + return None, None + octet = chr(codec.read_uint8()) + if octet != '2': + return None, None + opcode = chr(codec.read_uint8()) + seq = codec.read_uint32() + return opcode, seq + except: + return None, None + + def _message (self, body, routing_key="broker", ttl=None): + dp = self.amqpSession.delivery_properties() + dp.routing_key = routing_key + if ttl: + dp.ttl = ttl + mp = self.amqpSession.message_properties() + mp.content_type = "x-application/qmf" + if self.saslUser: + mp.user_id = self.saslUser + mp.reply_to = self.amqpSession.reply_to("amq.direct", self.replyName) + return Message(dp, mp, body) + + def _send(self, msg, dest="qpid.management"): + self.amqpSession.message_transfer(destination=dest, message=msg) + + def _disconnect(self, err_info=None): + """ Called when the remote broker has disconnected. Re-initializes all + state associated with the broker. 
+ """ + # notify any waiters, and callback + self.cv.acquire() + try: + if err_info is not None: + self.error = err_info + _agents = self.agents + self.agents = {} + for agent in _agents.itervalues(): + agent.close() + self.syncInFlight = False + self.reqsOutstanding = 0 + self.cv.notifyAll() + finally: + self.cv.release() + + if self.session.console: + for agent in _agents.itervalues(): + self.session._delAgentCallback(agent) + + def _shutdown(self, _timeout=10): + """ Disconnect from a broker, and release its resources. Errors are + ignored. + """ + if self.isAlive(): + # kick the thread + self.canceled = True + self.rcv_queue.put(Broker._q_item(Broker._q_item.type_wakeup, None)) + self.join(_timeout) + + # abort any pending transactions and delete agents + self._disconnect("broker shutdown") + + try: + if self.amqpSession: + self.amqpSession.close(); + except: + pass + self.amqpSession = None + try: + if self.conn: + self.conn.close(_timeout) + except: + pass + self.conn = None + self.connected = False + + def _waitForStable(self): + try: + self.cv.acquire() + if not self.connected: + return + if self.reqsOutstanding == 0: + return + self.syncInFlight = True + starttime = time() + while self.reqsOutstanding != 0: + self.cv.wait(self.SYNC_TIME) + if time() - starttime > self.SYNC_TIME: + raise RuntimeError("Timed out waiting for broker to synchronize") + finally: + self.cv.release() + + def _incOutstanding(self): + try: + self.cv.acquire() + self.reqsOutstanding += 1 + finally: + self.cv.release() + + def _decOutstanding(self): + try: + self.cv.acquire() + self.reqsOutstanding -= 1 + if self.reqsOutstanding == 0 and not self.topicBound: + self.topicBound = True + for key in self.session.v1BindingKeyList: + self.amqpSession.exchange_bind(exchange="qpid.management", + queue=self.topicName, binding_key=key) + if self.brokerSupportsV2: + # do not drop heartbeat indications when under load from data + # or event indications. 
Put heartbeats on their own dedicated + # queue. + # + for key in self.session.v2BindingKeyList: + if key.startswith("agent.ind.heartbeat"): + self.amqpSession.exchange_bind(exchange="qmf.default.topic", + queue=self.v2_topic_queue_hb, + binding_key=key) + else: + self.amqpSession.exchange_bind(exchange="qmf.default.topic", + queue=self.v2_topic_queue_ui, + binding_key=key) + # solicit an agent locate now, after we bind to agent.ind.data, + # because the agent locate will cause the agent to publish a + # data indication - and now we're able to receive it! + self._v2SendAgentLocate() + + + if self.reqsOutstanding == 0 and self.syncInFlight: + self.syncInFlight = False + self.cv.notify() + finally: + self.cv.release() + + def _v1Cb(self, msg): + """ Callback from session receive thread for V1 messages + """ + self.rcv_queue.put(Broker._q_item(Broker._q_item.type_v1msg, msg)) + + def _v1Dispatch(self, msg): + try: + self._v1DispatchProtected(msg) + except Exception, e: + print "EXCEPTION in Broker._v1Cb:", e + import traceback + traceback.print_exc() + + def _v1DispatchProtected(self, msg): + """ + This is the general message handler for messages received via the QMFv1 exchanges. 
+ """ + try: + agent = None + agent_addr = None + mp = msg.get("message_properties") + ah = mp.application_headers + if ah and 'qmf.agent' in ah: + agent_addr = ah['qmf.agent'] + + if not agent_addr: + # + # See if we can determine the agent identity from the routing key + # + dp = msg.get("delivery_properties") + rkey = None + if dp and dp.routing_key: + rkey = dp.routing_key + items = rkey.split('.') + if len(items) >= 4: + if items[0] == 'console' and items[3].isdigit(): + agent_addr = str(items[3]) # The QMFv1 Agent Bank + if agent_addr != None and agent_addr in self.agents: + agent = self.agents[agent_addr] + + codec = Codec(msg.body) + alreadyTried = None + while True: + opcode, seq = self._checkHeader(codec) + + if not agent and not alreadyTried: + alreadyTried = True + try: + self.cv.acquire() + if seq in self.seqToAgentMap: + agent = self.seqToAgentMap[seq] + finally: + self.cv.release() + + if opcode == None: break + if opcode == 'b': self.session._handleBrokerResp (self, codec, seq) + elif opcode == 'p': self.session._handlePackageInd (self, codec, seq) + elif opcode == 'q': self.session._handleClassInd (self, codec, seq) + elif opcode == 's': self.session._handleSchemaResp (self, codec, seq, agent_addr) + elif opcode == 'h': self.session._handleHeartbeatInd (self, codec, seq, msg) + elif opcode == 'z': self.session._handleCommandComplete (self, codec, seq, agent) + elif agent: + agent._handleQmfV1Message(opcode, seq, mp, ah, codec) + agent.touch() # mark agent as being alive + + finally: # always ack the message! + try: + # ignore failures as the session may be shutting down... 
+ self.amqpSession.receiver._completed.add(msg.id) + self.amqpSession.channel.session_completed(self.amqpSession.receiver._completed) + except: + pass + + + def _v2Cb(self, msg): + """ Callback from session receive thread for V2 messages + """ + self.rcv_queue.put(Broker._q_item(Broker._q_item.type_v2msg, msg)) + + def _v2Dispatch(self, msg): + try: + self._v2DispatchProtected(msg) + except Exception, e: + print "EXCEPTION in Broker._v2Cb:", e + import traceback + traceback.print_exc() + + def _v2DispatchProtected(self, msg): + """ + This is the general message handler for messages received via QMFv2 exchanges. + """ + try: + mp = msg.get("message_properties") + ah = mp["application_headers"] + codec = Codec(msg.body) + + if 'qmf.opcode' in ah: + opcode = ah['qmf.opcode'] + if mp.content_type == "amqp/list": + try: + content = codec.read_list() + if not content: + content = [] + except: + # malformed list - ignore + content = None + elif mp.content_type == "amqp/map": + try: + content = codec.read_map() + if not content: + content = {} + except: + # malformed map - ignore + content = None + else: + content = None + + if content != None: + ## + ## Directly handle agent heartbeats and agent locate responses as these are broker-scope (they are + ## used to maintain the broker's list of agent proxies. + ## + if opcode == '_agent_heartbeat_indication': self.session._v2HandleHeartbeatInd(self, mp, ah, content) + elif opcode == '_agent_locate_response': self.session._v2HandleAgentLocateRsp(self, mp, ah, content) + else: + ## + ## All other opcodes are agent-scope and are forwarded to the agent proxy representing the sender + ## of the message. 
+ ## + # the broker's agent is mapped to index ['0'] + agentName = ah['qmf.agent'] + v = agentName.split(":") + if agentName == 'broker' or (len(v) >= 2 and v[0] == 'apache.org' + and v[1] == 'qpidd'): + agentName = '0' + if agentName in self.agents: + agent = self.agents[agentName] + agent._handleQmfV2Message(opcode, mp, ah, content) + agent.touch() + + finally: # always ack the message! + try: + # ignore failures as the session may be shutting down... + self.amqpSession.receiver._completed.add(msg.id) + self.amqpSession.channel.session_completed(self.amqpSession.receiver._completed) + except: + pass + + def _exceptionCb(self, data): + """ Exception notification callback from session receive thread. + """ + self.cv.acquire() + try: + self.connected = False + self.error = "exception received from messaging layer: %s" % str(data) + finally: + self.cv.release() + self.rcv_queue.put(Broker._q_item(Broker._q_item.type_wakeup, None)) + + def run(self): + """ Main body of the running thread. """ + + # First, attempt a connection. In the unmanaged case, + # failure to connect needs to cause the Broker() + # constructor to raise an exception. + delay = self.DELAY_MIN + while not self.canceled: + if self._tryToConnect(): # connection up + break + # unmanaged connection - fail & wake up constructor + if not self.session.manageConnections: + self.ready.release() + return + # managed connection - try again + count = 0 + while not self.canceled and count < delay: + sleep(1) + count += 1 + if delay < self.DELAY_MAX: + delay *= self.DELAY_FACTOR + + if self.canceled: + self.ready.release() + return + + # connection successful! 
+ self.cv.acquire() + try: + self.connected = True + finally: + self.cv.release() + + self.session._handleBrokerConnect(self) + self.ready.release() + + while not self.canceled: + + try: + item = self.rcv_queue.get(timeout=self.session.agent_heartbeat_min) + except Empty: + item = None + + while not self.canceled and item is not None: + + if not self.connected: + # connection failure + while item: + # drain the queue + try: + item = self.rcv_queue.get(block=False) + except Empty: + item = None + break + + self._disconnect() # clean up any pending agents + self.session._handleError(self.error) + self.session._handleBrokerDisconnect(self) + + if not self.session.manageConnections: + return # do not attempt recovery + + # retry connection setup + delay = self.DELAY_MIN + while not self.canceled: + if self._tryToConnect(): + break + # managed connection - try again + count = 0 + while not self.canceled and count < delay: + sleep(1) + count += 1 + if delay < self.DELAY_MAX: + delay *= self.DELAY_FACTOR + + if self.canceled: + return + + # connection successful! + self.cv.acquire() + try: + self.connected = True + finally: + self.cv.release() + + self.session._handleBrokerConnect(self) + + elif item.typecode == Broker._q_item.type_v1msg: + self._v1Dispatch(item.data) + elif item.typecode == Broker._q_item.type_v2msg: + self._v2Dispatch(item.data) + + try: + item = self.rcv_queue.get(block=False) + except Empty: + item = None + + # queue drained, age the agents... 
+ if not self.canceled: + self._ageAgents() + +#=================================================================================================== +# Agent +#=================================================================================================== +class Agent: + """ + This class represents a proxy for a remote agent being managed + """ + def __init__(self, broker, agentBank, label, isV2=False, interval=0): + self.broker = broker + self.session = broker.session + self.schemaCache = self.session.schemaCache + self.brokerBank = broker.getBrokerBank() + self.agentBank = str(agentBank) + self.label = label + self.isV2 = isV2 + self.heartbeatInterval = 0 + if interval: + if interval < self.session.agent_heartbeat_min: + self.heartbeatInterval = self.session.agent_heartbeat_min + else: + self.heartbeatInterval = interval + self.lock = Lock() + self.seqMgr = self.session.seqMgr + self.contextMap = {} + self.unsolicitedContext = RequestContext(self, self) + self.lastSeenTime = time() + self.closed = None + self.epoch = 0 + self.schema_timestamp = None + + + def _checkClosed(self): + if self.closed: + raise Exception("Agent is disconnected") + + + def __call__(self, **kwargs): + """ + This is the handler for unsolicited stuff received from the agent + """ + if 'qmf_object' in kwargs: + if self.session.console: + obj = kwargs['qmf_object'] + if self.session.class_filter and obj.getClassKey(): + # slow path: check classKey against event_filter + pname = obj.getClassKey().getPackageName() + cname = obj.getClassKey().getClassName() + if ((pname, cname) not in self.session.class_filter + and (pname, None) not in self.session.class_filter): + return + if obj.getProperties(): + self.session.console.objectProps(self.broker, obj) + if obj.getStatistics(): + # QMFv2 objects may also contain statistic updates + self.session.console.objectStats(self.broker, obj) + elif 'qmf_object_stats' in kwargs: + if self.session.console: + obj = kwargs['qmf_object_stats'] + if 
len(self.session.class_filter) == 0: + self.session.console.objectStats(self.broker, obj) + elif obj.getClassKey(): + # slow path: check classKey against event_filter + pname = obj.getClassKey().getPackageName() + cname = obj.getClassKey().getClassName() + if ((pname, cname) in self.session.class_filter + or (pname, None) in self.session.class_filter): + self.session.console.objectStats(self.broker, obj) + elif 'qmf_event' in kwargs: + if self.session.console: + event = kwargs['qmf_event'] + if len(self.session.event_filter) == 0: + self.session.console.event(self.broker, event) + elif event.classKey: + # slow path: check classKey against event_filter + pname = event.classKey.getPackageName() + ename = event.classKey.getClassName() + if ((pname, ename) in self.session.event_filter + or (pname, None) in self.session.event_filter): + self.session.console.event(self.broker, event) + elif 'qmf_schema_id' in kwargs: + ckey = kwargs['qmf_schema_id'] + new_pkg, new_cls = self.session.schemaCache.declareClass(ckey) + if self.session.console: + if new_pkg: + self.session._newPackageCallback(ckey.getPackageName()) + if new_cls: + # translate V2's string based type value to legacy + # integer value for backward compatibility + cls_type = ckey.getType() + if str(cls_type) == ckey.TYPE_DATA: + cls_type = 1 + elif str(cls_type) == ckey.TYPE_EVENT: + cls_type = 2 + self.session._newClassCallback(cls_type, ckey) + + def touch(self): + if self.heartbeatInterval: + self.lastSeenTime = time() + + + def setEpoch(self, epoch): + self.epoch = epoch + + def update_schema_timestamp(self, timestamp): + """ Check the latest schema timestamp from the agent V2 heartbeat. Issue a + query for all packages & classes should the timestamp change. 
+ """ + self.lock.acquire() + try: + if self.schema_timestamp == timestamp: + return + self.schema_timestamp = timestamp + + context = RequestContext(self, self) + sequence = self.seqMgr._reserve(context) + + self.contextMap[sequence] = context + context.setSequence(sequence) + + finally: + self.lock.release() + + self._v2SendSchemaIdQuery(sequence, {}) + + + def epochMismatch(self, epoch): + if epoch == 0 or self.epoch == 0: + return None + if epoch == self.epoch: + return None + return True + + + def isOld(self): + if self.heartbeatInterval == 0: + return None + if time() - self.lastSeenTime > (self.session.agent_heartbeat_miss * self.heartbeatInterval): + return True + return None + + + def close(self): + self.closed = True + copy = {} + try: + self.lock.acquire() + for seq in self.contextMap: + copy[seq] = self.contextMap[seq] + finally: + self.lock.release() + + for seq in copy: + context = copy[seq] + context.cancel("Agent disconnected") + self.seqMgr._release(seq) + + + def __repr__(self): + if self.isV2: + ver = "v2" + else: + ver = "v1" + return "Agent(%s) at bank %d.%s (%s)" % (ver, self.brokerBank, self.agentBank, self.label) + + + def getBroker(self): + return self.broker + + + def getBrokerBank(self): + return self.brokerBank + + + def getAgentBank(self): + return self.agentBank + + + def getV2RoutingKey(self): + if self.agentBank == '0': + return 'broker' + return self.agentBank + + + def getObjects(self, notifiable=None, **kwargs): + """ Get a list of objects from QMF agents. + All arguments are passed by name(keyword). + + If 'notifiable' is None (default), this call will block until completion or timeout. + If supplied, notifiable is assumed to be a callable object that will be called when the + list of queried objects arrives. The single argument to the call shall be a list of + the returned objects. 
+ + The class for queried objects may be specified in one of the following ways: + + _schema = <schema> - supply a schema object returned from getSchema. + _key = <key> - supply a classKey from the list returned by getClasses. + _class = <name> - supply a class name as a string. If the class name exists + in multiple packages, a _package argument may also be supplied. + _objectId = <id> - get the object referenced by the object-id + + The default timeout for this synchronous operation is 60 seconds. To change the timeout, + use the following argument: + + _timeout = <time in seconds> + + If additional arguments are supplied, they are used as property selectors. For example, + if the argument name="test" is supplied, only objects whose "name" property is "test" + will be returned in the result. + """ + self._checkClosed() + if notifiable: + if not callable(notifiable): + raise Exception("notifiable object must be callable") + + # + # Isolate the selectors from the kwargs + # + selectors = {} + for key in kwargs: + value = kwargs[key] + if key[0] != '_': + selectors[key] = value + + # + # Allocate a context to track this asynchronous request. + # + context = RequestContext(self, notifiable, selectors) + sequence = self.seqMgr._reserve(context) + try: + self.lock.acquire() + self.contextMap[sequence] = context + context.setSequence(sequence) + finally: + self.lock.release() + + # + # Compose and send the query message to the agent using the appropriate protocol for the + # agent's QMF version. + # + if self.isV2: + self._v2SendGetQuery(sequence, kwargs) + else: + self.broker._setSequence(sequence, self) + self._v1SendGetQuery(sequence, kwargs) + + # + # If this is a synchronous call, block and wait for completion. 
+ # + if not notifiable: + timeout = 60 + if '_timeout' in kwargs: + timeout = kwargs['_timeout'] + context.waitForSignal(timeout) + if context.exception: + raise Exception(context.exception) + result = context.queryResults + return result + + + def _clearContext(self, sequence): + try: + self.lock.acquire() + try: + self.contextMap.pop(sequence) + self.seqMgr._release(sequence) + except KeyError: + pass # @todo - shouldn't happen, log a warning. + finally: + self.lock.release() + + + def _schemaInfoFromV2Agent(self): + """ + We have just received new schema information from this agent. Check to see if there's + more work that can now be done. + """ + try: + self.lock.acquire() + copy_of_map = {} + for item in self.contextMap: + copy_of_map[item] = self.contextMap[item] + finally: + self.lock.release() + + self.unsolicitedContext.reprocess() + for context in copy_of_map: + copy_of_map[context].reprocess() + + + def _handleV1Completion(self, sequence, code, text): + """ + Called if one of this agent's V1 commands completed + """ + context = None + try: + self.lock.acquire() + if sequence in self.contextMap: + context = self.contextMap[sequence] + finally: + self.lock.release() + + if context: + if code != 0: + ex = "Error %d: %s" % (code, text) + context.setException(ex) + context.signal() + self.broker._clearSequence(sequence) + + + def _v1HandleMethodResp(self, codec, seq): + """ + Handle a QMFv1 method response + """ + code = codec.read_uint32() + text = codec.read_str16() + outArgs = {} + self.broker._clearSequence(seq) + pair = self.seqMgr._release(seq) + if pair == None: + return + method, synchronous = pair + if code == 0: + for arg in method.arguments: + if arg.dir.find("O") != -1: + outArgs[arg.name] = self.session._decodeValue(codec, arg.type, self.broker) + result = MethodResult(code, text, outArgs) + if synchronous: + try: + self.broker.cv.acquire() + self.broker.syncResult = result + self.broker.syncInFlight = False + self.broker.cv.notify() + finally: 
+ self.broker.cv.release() + else: + if self.session.console: + self.session.console.methodResponse(self.broker, seq, result) + + + def _v1HandleEventInd(self, codec, seq): + """ + Handle a QMFv1 event indication + """ + event = Event(self, codec) + self.unsolicitedContext.doEvent(event) + + + def _v1HandleContentInd(self, codec, sequence, prop=False, stat=False): + """ + Handle a QMFv1 content indication + """ + classKey = ClassKey(codec) + schema = self.schemaCache.getSchema(classKey) + if not schema: + return + + obj = Object(self, schema, codec, prop, stat) + if classKey.getPackageName() == "org.apache.qpid.broker" and classKey.getClassName() == "agent" and prop: + self.broker._updateAgent(obj) + + context = self.unsolicitedContext + try: + self.lock.acquire() + if sequence in self.contextMap: + context = self.contextMap[sequence] + finally: + self.lock.release() + + context.addV1QueryResult(obj, prop, stat) + + + def _v2HandleDataInd(self, mp, ah, content): + """ + Handle a QMFv2 data indication from the agent. Note: called from context + of the Broker thread. 
+ """ + if content.__class__ != list: + return + + if mp.correlation_id: + try: + self.lock.acquire() + sequence = int(mp.correlation_id) + if sequence not in self.contextMap: + return + context = self.contextMap[sequence] + finally: + self.lock.release() + else: + context = self.unsolicitedContext + + kind = "_data" + if "qmf.content" in ah: + kind = ah["qmf.content"] + if kind == "_data": + for omap in content: + context.addV2QueryResult(omap) + context.processV2Data() + if 'partial' not in ah: + context.signal() + + elif kind == "_event": + for omap in content: + event = Event(self, v2Map=omap) + if event.classKey is None or event.schema: + # schema optional or present + context.doEvent(event) + else: + # schema not optional and not present + if context.addPendingEvent(event): + self._v2SendSchemaRequest(event.classKey) + + elif kind == "_schema_id": + for sid in content: + try: + ckey = ClassKey(sid) + except: + # @todo: log error + ckey = None + if ckey is not None: + # @todo: for now, the application cannot directly send a query for + # _schema_id. This request _must_ have been initiated by the framework + # in order to update the schema cache. 
+ context.notifiable(qmf_schema_id=ckey) + + + def _v2HandleMethodResp(self, mp, ah, content): + """ + Handle a QMFv2 method response from the agent + """ + context = None + sequence = None + if mp.correlation_id: + try: + self.lock.acquire() + seq = int(mp.correlation_id) + finally: + self.lock.release() + else: + return + + pair = self.seqMgr._release(seq) + if pair == None: + return + method, synchronous = pair + + result = MethodResult(0, 'OK', content['_arguments']) + if synchronous: + try: + self.broker.cv.acquire() + self.broker.syncResult = result + self.broker.syncInFlight = False + self.broker.cv.notify() + finally: + self.broker.cv.release() + else: + if self.session.console: + self.session.console.methodResponse(self.broker, seq, result) + + def _v2HandleException(self, mp, ah, content): + """ + Handle a QMFv2 exception + """ + context = None + if mp.correlation_id: + try: + self.lock.acquire() + seq = int(mp.correlation_id) + finally: + self.lock.release() + else: + return + + values = {} + if '_values' in content: + values = content['_values'] + + code = 7 + text = "error" + if 'error_code' in values: + code = values['error_code'] + if 'error_text' in values: + text = values['error_text'] + + pair = self.seqMgr._release(seq) + if pair == None: + return + + if pair.__class__ == RequestContext: + pair.cancel(text) + return + + method, synchronous = pair + + result = MethodResult(code, text, {}) + if synchronous: + try: + self.broker.cv.acquire() + self.broker.syncResult = result + self.broker.syncInFlight = False + self.broker.cv.notify() + finally: + self.broker.cv.release() + else: + if self.session.console: + self.session.console.methodResponse(self.broker, seq, result) + + + def _v1SendGetQuery(self, sequence, kwargs): + """ + Send a get query to a QMFv1 agent. 
+ """ + # + # Build the query map + # + query = {} + if '_class' in kwargs: + query['_class'] = kwargs['_class'] + if '_package' in kwargs: + query['_package'] = kwargs['_package'] + elif '_key' in kwargs: + key = kwargs['_key'] + query['_class'] = key.getClassName() + query['_package'] = key.getPackageName() + elif '_objectId' in kwargs: + query['_objectid'] = kwargs['_objectId'].__repr__() + + # + # Construct and transmit the message + # + sendCodec = Codec() + self.broker._setHeader(sendCodec, 'G', sequence) + sendCodec.write_map(query) + smsg = self.broker._message(sendCodec.encoded, "agent.%d.%s" % (self.brokerBank, self.agentBank)) + self.broker._send(smsg) + + + def _v2SendQuery(self, query, sequence): + """ + Given a query map, construct and send a V2 Query message. + """ + dp = self.broker.amqpSession.delivery_properties() + dp.routing_key = self.getV2RoutingKey() + mp = self.broker.amqpSession.message_properties() + mp.content_type = "amqp/map" + if self.broker.saslUser: + mp.user_id = self.broker.saslUser + mp.correlation_id = str(sequence) + mp.app_id = "qmf2" + mp.reply_to = self.broker.amqpSession.reply_to("qmf.default.direct", self.broker.v2_direct_queue) + mp.application_headers = {'qmf.opcode':'_query_request'} + sendCodec = Codec() + sendCodec.write_map(query) + msg = Message(dp, mp, sendCodec.encoded) + self.broker._send(msg, "qmf.default.direct") + + + def _v2SendGetQuery(self, sequence, kwargs): + """ + Send a get query to a QMFv2 agent. 
+ """ + # + # Build the query map + # + query = {'_what': 'OBJECT'} + if '_class' in kwargs: + schemaMap = {'_class_name': kwargs['_class']} + if '_package' in kwargs: + schemaMap['_package_name'] = kwargs['_package'] + query['_schema_id'] = schemaMap + elif '_key' in kwargs: + query['_schema_id'] = kwargs['_key'].asMap() + elif '_objectId' in kwargs: + query['_object_id'] = kwargs['_objectId'].asMap() + + self._v2SendQuery(query, sequence) + + + def _v2SendSchemaIdQuery(self, sequence, kwargs): + """ + Send a query for all schema ids to a QMFv2 agent. + """ + # + # Build the query map + # + query = {'_what': 'SCHEMA_ID'} + # @todo - predicate support. For now, return all known schema ids. + + self._v2SendQuery(query, sequence) + + + def _v2SendSchemaRequest(self, schemaId): + """ + Send a query to an agent to request details on a particular schema class. + IMPORTANT: This function currently sends a QMFv1 schema-request to the address of + the agent. The agent will send its response to amq.direct/<our-key>. + Eventually, this will be converted to a proper QMFv2 schema query. + """ + sendCodec = Codec() + seq = self.seqMgr._reserve(None) + self.broker._setHeader(sendCodec, 'S', seq) + schemaId.encode(sendCodec) + smsg = self.broker._message(sendCodec.encoded, self.agentBank) + self.broker._send(smsg, "qmf.default.direct") + + + def _handleQmfV1Message(self, opcode, seq, mp, ah, codec): + """ + Process QMFv1 messages arriving from an agent. Note well: this method is + called from the context of the Broker thread. + """ + if opcode == 'm': self._v1HandleMethodResp(codec, seq) + elif opcode == 'e': self._v1HandleEventInd(codec, seq) + elif opcode == 'c': self._v1HandleContentInd(codec, seq, prop=True) + elif opcode == 'i': self._v1HandleContentInd(codec, seq, stat=True) + elif opcode == 'g': self._v1HandleContentInd(codec, seq, prop=True, stat=True) + + + def _handleQmfV2Message(self, opcode, mp, ah, content): + """ + Process QMFv2 messages arriving from an agent. 
Note well: this method is + called from the context of the Broker thread. + """ + if opcode == '_data_indication': self._v2HandleDataInd(mp, ah, content) + elif opcode == '_query_response': self._v2HandleDataInd(mp, ah, content) + elif opcode == '_method_response': self._v2HandleMethodResp(mp, ah, content) + elif opcode == '_exception': self._v2HandleException(mp, ah, content) + + +#=================================================================================================== +# RequestContext +#=================================================================================================== +class RequestContext(object): + """ + This class tracks an asynchronous request sent to an agent. + TODO: Add logic for client-side selection and filtering deleted objects from get-queries + """ + def __init__(self, agent, notifiable, selectors={}): + self.sequence = None + self.agent = agent + self.schemaCache = self.agent.schemaCache + self.notifiable = notifiable + self.selectors = selectors + self.startTime = time() + self.rawQueryResults = [] + self.queryResults = [] + self.pendingEvents = {} + self.exception = None + self.waitingForSchema = None + self.pendingSignal = None + self.cv = Condition() + self.blocked = notifiable == None + + + def setSequence(self, sequence): + self.sequence = sequence + + + def addV1QueryResult(self, data, has_props, has_stats): + values = {} + if has_props: + for prop, val in data.getProperties(): + values[prop.name] = val + if has_stats: + for stat, val in data.getStatistics(): + values[stat.name] = val + for key in values: + val = values[key] + if key in self.selectors and val != self.selectors[key]: + return + + if self.notifiable: + if has_props: + self.notifiable(qmf_object=data) + if has_stats: + self.notifiable(qmf_object_stats=data) + else: + self.queryResults.append(data) + + + def addV2QueryResult(self, data): + values = data['_values'] + for key in values: + val = values[key] + if key in self.selectors: + sel_val = 
self.selectors[key] + if sel_val.__class__ == ObjectId: + val = ObjectId(val, agentName=self.agent.getAgentBank()) + if val != sel_val: + return + self.rawQueryResults.append(data) + + def addPendingEvent(self, event): + """ Stores a received event that is pending a schema. Returns True if this + event is the first instance of a given schema identifier. + """ + self.cv.acquire() + try: + if event.classKey in self.pendingEvents: + self.pendingEvents[event.classKey].append((event, time())) + return False + self.pendingEvents[event.classKey] = [(event, time())] + return True + finally: + self.cv.release() + + def processPendingEvents(self): + """ Walk the pending events looking for schemas that are now + available. Remove any events that now have schema, and process them. + """ + keysToDelete = [] + events = [] + self.cv.acquire() + try: + for key in self.pendingEvents.iterkeys(): + schema = self.schemaCache.getSchema(key) + if schema: + keysToDelete.append(key) + for item in self.pendingEvents[key]: + # item is (timestamp, event-obj) tuple. + # hack: I have no idea what a valid lifetime for an event + # should be. 60 seconds??? 
+ if (time() - item[1]) < 60: + item[0].schema = schema + events.append(item[0]) + for key in keysToDelete: + self.pendingEvents.pop(key) + finally: + self.cv.release() + for event in events: + self.doEvent(event) + + def doEvent(self, data): + if self.notifiable: + self.notifiable(qmf_event=data) + + + def setException(self, ex): + self.exception = ex + + + def getAge(self): + return time() - self.startTime + + + def cancel(self, exception): + self.setException(exception) + try: + self.cv.acquire() + self.blocked = None + self.waitingForSchema = None + self.cv.notify() + finally: + self.cv.release() + self._complete() + + + def waitForSignal(self, timeout): + try: + self.cv.acquire() + while self.blocked: + if (time() - self.startTime) > timeout: + self.exception = "Request timed out after %d seconds" % timeout + return + self.cv.wait(1) + finally: + self.cv.release() + + + def signal(self): + try: + self.cv.acquire() + if self.waitingForSchema: + self.pendingSignal = True + return + else: + self.blocked = None + self.cv.notify() + finally: + self.cv.release() + self._complete() + + + def _complete(self): + if self.notifiable: + if self.exception: + self.notifiable(qmf_exception=self.exception) + else: + self.notifiable(qmf_complete=True) + + if self.sequence: + self.agent._clearContext(self.sequence) + + + def processV2Data(self): + """ + Attempt to make progress on the entries in the raw_query_results queue. If an entry has a schema + that is in our schema cache, process it. Otherwise, send a request for the schema information + to the agent that manages the object. 
+ """ + schemaId = None + queryResults = [] + try: + self.cv.acquire() + if self.waitingForSchema: + return + while (not self.waitingForSchema) and len(self.rawQueryResults) > 0: + head = self.rawQueryResults[0] + schemaId = self._getSchemaIdforV2ObjectLH(head) + schema = self.schemaCache.getSchema(schemaId) + if schema: + obj = Object(self.agent, schema, v2Map=head, agentName=self.agent.agentBank) + queryResults.append(obj) + self.rawQueryResults.pop(0) + else: + self.waitingForSchema = True + finally: + self.cv.release() + + if self.waitingForSchema: + self.agent._v2SendSchemaRequest(schemaId) + + for result in queryResults: + key = result.getClassKey() + if key.getPackageName() == "org.apache.qpid.broker" and key.getClassName() == "agent": + self.agent.broker._updateAgent(result) + if self.notifiable: + self.notifiable(qmf_object=result) + else: + self.queryResults.append(result) + + complete = None + try: + self.cv.acquire() + if not self.waitingForSchema and self.pendingSignal: + self.blocked = None + self.cv.notify() + complete = True + finally: + self.cv.release() + + if complete: + self._complete() + + + def reprocess(self): + """ + New schema information has been added to the schema-cache. Clear our 'waiting' status + and see if we can make more progress on any pending inbound events/objects. + """ + try: + self.cv.acquire() + self.waitingForSchema = None + finally: + self.cv.release() + self.processV2Data() + self.processPendingEvents() + + def _getSchemaIdforV2ObjectLH(self, data): + """ + Given a data map, extract the schema-identifier. 
+ """ + if data.__class__ != dict: + return None + if '_schema_id' in data: + return ClassKey(data['_schema_id']) + return None + + +#=================================================================================================== +# Event +#=================================================================================================== +class Event: + """ """ + def __init__(self, agent, codec=None, v2Map=None): + self.agent = agent + self.session = agent.session + self.broker = agent.broker + + if isinstance(v2Map,dict): + self.isV2 = True + self.classKey = None + self.schema = None + try: + self.arguments = v2Map["_values"] + self.timestamp = long(v2Map["_timestamp"]) + self.severity = v2Map["_severity"] + if "_schema_id" in v2Map: + self.classKey = ClassKey(v2Map["_schema_id"]) + self.classKey._setType(ClassKey.TYPE_EVENT) + except: + raise Exception("Invalid event object: %s " % str(v2Map)) + if self.classKey is not None: + self.schema = self.session.schemaCache.getSchema(self.classKey) + + elif codec is not None: + self.isV2 = None + self.classKey = ClassKey(codec) + self.classKey._setType(ClassKey.TYPE_EVENT) + self.timestamp = codec.read_int64() + self.severity = codec.read_uint8() + self.arguments = {} + self.schema = self.session.schemaCache.getSchema(self.classKey) + if not self.schema: + return + for arg in self.schema.arguments: + self.arguments[arg.name] = self.session._decodeValue(codec, arg.type, + self.broker) + else: + raise Exception("No constructor for event object.") + + + def __repr__(self): + if self.schema == None: + return "<uninterpretable>" + out = strftime("%c", gmtime(self.timestamp / 1000000000)) + out += " " + self._sevName() + " " + self.classKey.getPackageName() + ":" + self.classKey.getClassName() + out += " broker=" + str(self.broker.getUrl()) + for arg in self.schema.arguments: + disp = self.session._displayValue(self.arguments[arg.name], arg.type).encode("utf8") + if " " in disp: + disp = "\"" + disp + "\"" + out += " " + 
arg.name + "=" + disp + return out + + def _sevName(self): + if self.severity == 0 : return "EMER " + if self.severity == 1 : return "ALERT" + if self.severity == 2 : return "CRIT " + if self.severity == 3 : return "ERROR" + if self.severity == 4 : return "WARN " + if self.severity == 5 : return "NOTIC" + if self.severity == 6 : return "INFO " + if self.severity == 7 : return "DEBUG" + return "INV-%d" % self.severity + + def getClassKey(self): + return self.classKey + + def getArguments(self): + return self.arguments + + def getTimestamp(self): + return self.timestamp + + def getSchema(self): + return self.schema + + +#=================================================================================================== +# SequenceManager +#=================================================================================================== +class SequenceManager: + """ Manage sequence numbers for asynchronous method calls """ + def __init__(self): + self.lock = Lock() + self.sequence = long(time()) # pseudo-randomize the start + self.pending = {} + + def _reserve(self, data): + """ Reserve a unique sequence number """ + try: + self.lock.acquire() + result = self.sequence + self.sequence = self.sequence + 1 + self.pending[result] = data + finally: + self.lock.release() + return result + + def _release(self, seq): + """ Release a reserved sequence number """ + data = None + try: + self.lock.acquire() + if seq in self.pending: + data = self.pending[seq] + del self.pending[seq] + finally: + self.lock.release() + return data + + +#=================================================================================================== +# DebugConsole +#=================================================================================================== +class DebugConsole(Console): + """ """ + def brokerConnected(self, broker): + print "brokerConnected:", broker + + def brokerConnectionFailed(self, broker): + print "brokerConnectionFailed:", broker + + def brokerDisconnected(self, 
broker): + print "brokerDisconnected:", broker + + def newPackage(self, name): + print "newPackage:", name + + def newClass(self, kind, classKey): + print "newClass:", kind, classKey + + def newAgent(self, agent): + print "newAgent:", agent + + def delAgent(self, agent): + print "delAgent:", agent + + def objectProps(self, broker, record): + print "objectProps:", record + + def objectStats(self, broker, record): + print "objectStats:", record + + def event(self, broker, event): + print "event:", event + + def heartbeat(self, agent, timestamp): + print "heartbeat:", agent + + def brokerInfo(self, broker): + print "brokerInfo:", broker + diff --git a/qpid/cpp/management/python/lib/qpidstore/__init__.py b/qpid/cpp/management/python/lib/qpidstore/__init__.py new file mode 100644 index 0000000000..d8a500d9d8 --- /dev/null +++ b/qpid/cpp/management/python/lib/qpidstore/__init__.py @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + diff --git a/qpid/cpp/management/python/lib/qpidstore/janal.py b/qpid/cpp/management/python/lib/qpidstore/janal.py new file mode 100644 index 0000000000..1a892aca60 --- /dev/null +++ b/qpid/cpp/management/python/lib/qpidstore/janal.py @@ -0,0 +1,617 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import jerr, jrnl +import os.path, sys + + +#== class EnqMap ============================================================== + +class EnqMap(object): + """Class for maintaining a map of enqueued records, indexing the rid against hdr, fid and transaction lock""" + + def __init__(self): + """Constructor""" + self.__map = {} + + def __str__(self): + """Print the contents of the map""" + return self.report(True, True) + + def add(self, fid, hdr, lock = False): + """Add a new record into the map""" + if hdr.rid in self.__map: + raise jerr.DuplicateRidError(hdr.rid) + self.__map[hdr.rid] = [fid, hdr, lock] + + def contains(self, rid): + """Return True if the map contains the given rid""" + return rid in self.__map + + def delete(self, rid): + """Delete the rid and its associated data from the map""" + if rid in self.__map: + if self.get_lock(rid): + raise jerr.DeleteLockedRecordError(rid) + del self.__map[rid] + else: + raise jerr.JWarning("ERROR: Deleting non-existent rid from EnqMap: rid=0x%x" % rid) + + def get(self, rid): + """Return a list [fid, hdr, lock] for the given rid""" + if self.contains(rid): + return self.__map[rid] + return None + + def get_fid(self, rid): + """Return the fid for the given rid""" + if self.contains(rid): + return self.__map[rid][0] + return None + + def get_hdr(self, rid): + """Return the header record for the given rid""" + if self.contains(rid): + return self.__map[rid][1] + return None + + def get_lock(self, rid): + """Return the transaction lock value for the given rid""" + if self.contains(rid): + return self.__map[rid][2] + return None + + def get_rec_list(self): + """Return a list of tuples (fid, hdr, lock) for all entries in the map""" + return self.__map.values() + + def lock(self, rid): + """Set the transaction lock for a given rid to True""" + if rid in self.__map: + if not self.__map[rid][2]: # locked + self.__map[rid][2] = True + else: + raise jerr.AlreadyLockedError(rid) + else: + raise jerr.JWarning("ERROR: Locking 
non-existent rid in EnqMap: rid=0x%x" % rid) + + def report(self, show_stats, show_records): + """Return a string containing a text report for all records in the map""" + if len(self.__map) == 0: + return "No enqueued records found." + rstr = "%d enqueued records found" % len(self.__map) + if show_records: + rstr += ":" + rid_list = self.__map.keys() + rid_list.sort() + for rid in rid_list: + if self.__map[rid][2]: + lock_str = " [LOCKED]" + else: + lock_str = "" + rstr += "\n lfid=%d %s %s" % (rec[0], rec[1], lock_str) + else: + rstr += "." + return rstr + + def rids(self): + """Return a list of rids in the map""" + return self.__map.keys() + + def size(self): + """Return the number of entries in the map""" + return len(self.__map) + + def unlock(self, rid): + """Set the transaction lock for a given rid to False""" + if rid in self.__map: + if self.__map[rid][2]: + self.__map[rid][2] = False + else: + raise jerr.NotLockedError(rid) + else: + raise jerr.NonExistentRecordError("unlock", rid) + + +#== class TxnMap ============================================================== + +class TxnMap(object): + """Transaction map, which maps xids to a list of outstanding actions""" + + def __init__(self, emap): + """Constructor, requires an existing EnqMap instance""" + self.__emap = emap + self.__map = {} + + def __str__(self): + """Print the contents of the map""" + return self.report(True, True) + + def add(self, fid, hdr): + """Add a new transactional record into the map""" + if isinstance(hdr, jrnl.DeqRec): + try: + self.__emap.lock(hdr.deq_rid) + except jerr.JWarning: + # Not in emap, look for rid in tmap + l = self.find_rid(hdr.deq_rid, hdr.xid) + if l != None: + if l[2]: + raise jerr.AlreadyLockedError(hdr.deq_rid) + l[2] = True + if hdr.xid in self.__map: + self.__map[hdr.xid].append([fid, hdr, False]) # append to existing list + else: + self.__map[hdr.xid] = [[fid, hdr, False]] # create new list + + def contains(self, xid): + """Return True if the xid exists in the 
map; False otherwise""" + return xid in self.__map + + def delete(self, hdr): + """Remove a transaction record from the map using either a commit or abort header""" + if hdr.magic[-1] == "c": + return self._commit(hdr.xid) + if hdr.magic[-1] == "a": + self._abort(hdr.xid) + else: + raise jerr.InvalidRecordTypeError("delete from TxnMap", hdr.magic, hdr.rid) + + def find_rid(self, rid, xid_hint = None): + """ Search for and return map list with supplied rid. If xid_hint is supplied, try that xid first""" + if xid_hint != None and self.contains(xid_hint): + for l in self.__map[xid_hint]: + if l[1].rid == rid: + return l + for xid in self.__map.iterkeys(): + if xid_hint == None or xid != xid_hint: + for l in self.__map[xid]: + if l[1].rid == rid: + return l + + def get(self, xid): + """Return a list of operations for the given xid""" + if self.contains(xid): + return self.__map[xid] + + def report(self, show_stats, show_records): + """Return a string containing a text report for all records in the map""" + if len(self.__map) == 0: + return "No outstanding transactions found." + rstr = "%d outstanding transactions found" % len(self.__map) + if show_records: + rstr += ":" + for xid, tup in self.__map.iteritems(): + rstr += "\n xid=%s:" % jrnl.Utils.format_xid(xid) + for i in tup: + rstr += "\n %s" % str(i[1]) + else: + rstr += "." 
+ return rstr + + def size(self): + """Return the number of xids in the map""" + return len(self.__map) + + def xids(self): + """Return a list of xids in the map""" + return self.__map.keys() + + def _abort(self, xid): + """Perform an abort operation for the given xid record""" + for _, hdr, _ in self.__map[xid]: + if isinstance(hdr, jrnl.DeqRec): + try: + self.__emap.unlock(hdr.deq_rid) + except jerr.NonExistentRecordError, err: # Not in emap, look in current transaction op list (TPL) + found_rid = False + for _, hdr1, _ in self.__map[xid]: + if isinstance(hdr1, jrnl.EnqRec) and hdr1.rid == hdr.deq_rid: + found_rid = True + break + if not found_rid: # Not found in current transaction op list, re-throw error + raise err + del self.__map[xid] + + def _commit(self, xid): + """Perform a commit operation for the given xid record""" + mismatch_list = [] + for fid, hdr, lock in self.__map[xid]: + if isinstance(hdr, jrnl.EnqRec): + self.__emap.add(fid, hdr, lock) # Transfer enq to emap + else: + if self.__emap.contains(hdr.deq_rid): + self.__emap.unlock(hdr.deq_rid) + self.__emap.delete(hdr.deq_rid) + else: + mismatch_list.append("0x%x" % hdr.deq_rid) + del self.__map[xid] + return mismatch_list + +#== class JrnlAnalyzer ======================================================== + +class JrnlAnalyzer(object): + """ + This class analyzes a set of journal files and determines which is the last to be written + (the newest file), and hence which should be the first to be read for recovery (the oldest + file). + + The analysis is performed on construction; the contents of the JrnlInfo object passed provide + the recovery details. 
+ """ + + def __init__(self, jinf): + """Constructor""" + self.__oldest = None + self.__jinf = jinf + self.__flist = self._analyze() + + def __str__(self): + """String representation of this JrnlAnalyzer instance, will print out results of analysis.""" + ostr = "Journal files analyzed in directory %s (* = earliest full):\n" % self.__jinf.get_current_dir() + if self.is_empty(): + ostr += " <All journal files are empty>\n" + else: + for tup in self.__flist: + tmp = " " + if tup[0] == self.__oldest[0]: + tmp = "*" + ostr += " %s %s: owi=%-5s rid=0x%x, fro=0x%x ts=%s\n" % (tmp, os.path.basename(tup[1]), tup[2], + tup[3], tup[4], tup[5]) + for i in range(self.__flist[-1][0] + 1, self.__jinf.get_num_jrnl_files()): + ostr += " %s.%04x.jdat: <empty>\n" % (self.__jinf.get_jrnl_base_name(), i) + return ostr + + # Analysis + + def get_oldest_file(self): + """Return a tuple (ordnum, jfn, owi, rid, fro, timestamp) for the oldest data file found in the journal""" + return self.__oldest + + def get_oldest_file_index(self): + """Return the ordinal number of the oldest data file found in the journal""" + if self.is_empty(): + return None + return self.__oldest[0] + + def is_empty(self): + """Return true if the analysis found that the journal file has never been written to""" + return len(self.__flist) == 0 + + def _analyze(self): + """Perform the journal file analysis by reading and comparing the file headers of each journal data file""" + owi_found = False + flist = [] + for i in range(0, self.__jinf.get_num_jrnl_files()): + jfn = os.path.join(self.__jinf.get_current_dir(), "%s.%04x.jdat" % (self.__jinf.get_jrnl_base_name(), i)) + fhandle = open(jfn) + fhdr = jrnl.Utils.load(fhandle, jrnl.Hdr) + if fhdr.empty(): + break + this_tup = (i, jfn, fhdr.owi(), fhdr.rid, fhdr.fro, fhdr.timestamp_str()) + flist.append(this_tup) + if i == 0: + init_owi = fhdr.owi() + self.__oldest = this_tup + elif fhdr.owi() != init_owi and not owi_found: + self.__oldest = this_tup + owi_found = True + 
return flist + + +#== class JrnlReader ==================================================== + +class JrnlReader(object): + """ + This class contains an Enqueue Map (emap), a transaction map (tmap) and a transaction + object list (txn_obj_list) which are populated by reading the journals from the oldest + to the newest and analyzing each record. The JrnlInfo and JrnlAnalyzer + objects supplied on construction provide the information used for the recovery. + + The analysis is performed on construction. + """ + + def __init__(self, jinfo, jra, qflag = False, rflag = False, vflag = False): + """Constructor, which reads all """ + self._jinfo = jinfo + self._jra = jra + self._qflag = qflag + self._rflag = rflag + self._vflag = vflag + + # test callback functions for CSV tests + self._csv_store_chk = None + self._csv_start_cb = None + self._csv_enq_cb = None + self._csv_deq_cb = None + self._csv_txn_cb = None + self._csv_end_cb = None + + self._emap = EnqMap() + self._tmap = TxnMap(self._emap) + self._txn_obj_list = {} + + self._file = None + self._file_hdr = None + self._file_num = None + self._first_rec_flag = None + self._fro = None + self._last_file_flag = None + self._start_file_num = None + self._file_hdr_owi = None + self._warning = [] + + self._abort_cnt = 0 + self._commit_cnt = 0 + self._msg_cnt = 0 + self._rec_cnt = 0 + self._txn_msg_cnt = 0 + + def __str__(self): + """Print out all the undequeued records""" + return self.report(True, self._rflag) + + def emap(self): + """Get the enqueue map""" + return self._emap + + def get_abort_cnt(self): + """Get the cumulative number of transactional aborts found""" + return self._abort_cnt + + def get_commit_cnt(self): + """Get the cumulative number of transactional commits found""" + return self._commit_cnt + + def get_msg_cnt(self): + """Get the cumulative number of messages found""" + return self._msg_cnt + + def get_rec_cnt(self): + """Get the cumulative number of journal records (including fillers) found""" + return 
self._rec_cnt + + def is_last_file(self): + """Return True if the last file is being read""" + return self._last_file_flag + + def report(self, show_stats = True, show_records = False): + """Return a string containing a report on the file analysis""" + rstr = self._emap.report(show_stats, show_records) + "\n" + self._tmap.report(show_stats, show_records) + #TODO - print size analysis here - ie how full, sparse, est. space remaining before enq threshold + return rstr + + def run(self): + """Perform the read of the journal""" + if self._csv_start_cb != None and self._csv_start_cb(self._csv_store_chk): + return + if self._jra.is_empty(): + return + stop = self._advance_jrnl_file(*self._jra.get_oldest_file()) + while not stop and not self._get_next_record(): + pass + if self._csv_end_cb != None and self._csv_end_cb(self._csv_store_chk): + return + if not self._qflag: + print + + def set_callbacks(self, csv_store_chk, csv_start_cb = None, csv_enq_cb = None, csv_deq_cb = None, csv_txn_cb = None, + csv_end_cb = None): + """Set callbacks for checks to be made at various points while reading the journal""" + self._csv_store_chk = csv_store_chk + self._csv_start_cb = csv_start_cb + self._csv_enq_cb = csv_enq_cb + self._csv_deq_cb = csv_deq_cb + self._csv_txn_cb = csv_txn_cb + self._csv_end_cb = csv_end_cb + + def tmap(self): + """Return the transaction map""" + return self._tmap + + def get_txn_msg_cnt(self): + """Get the cumulative transactional message count""" + return self._txn_msg_cnt + + def txn_obj_list(self): + """Get a cumulative list of transaction objects (commits and aborts)""" + return self._txn_obj_list + + def _advance_jrnl_file(self, *oldest_file_info): + """Rotate to using the next journal file. 
Return False if the operation was successful, True if there are no
        more files to read."""
        fro_seek_flag = False
        if len(oldest_file_info) > 0:
            # Resume from the oldest file: jump straight to its first record offset (fro).
            self._start_file_num = self._file_num = oldest_file_info[0]
            self._fro = oldest_file_info[4]
            fro_seek_flag = True # jump to fro to start reading
        if not self._qflag and not self._rflag:
            if self._vflag:
                print "Recovering journals..."
            else:
                print "Recovering journals",
        if self._file != None and self._is_file_full():
            self._file.close()
            self._file_num = self._incr_file_num()
            if self._file_num == self._start_file_num:
                # Wrapped all the way around - nothing left to read.
                return True
            if self._start_file_num == 0:
                self._last_file_flag = self._file_num == self._jinfo.get_num_jrnl_files() - 1
            else:
                self._last_file_flag = self._file_num == self._start_file_num - 1
        if self._file_num < 0 or self._file_num >= self._jinfo.get_num_jrnl_files():
            raise jerr.BadFileNumberError(self._file_num)
        jfn = os.path.join(self._jinfo.get_current_dir(), "%s.%04x.jdat" %
                           (self._jinfo.get_jrnl_base_name(), self._file_num))
        self._file = open(jfn)
        self._file_hdr = jrnl.Utils.load(self._file, jrnl.Hdr)
        if fro_seek_flag and self._file.tell() != self._fro:
            self._file.seek(self._fro)
        self._first_rec_flag = True
        if not self._qflag:
            if self._rflag:
                print jfn, ": ", self._file_hdr
            elif self._vflag:
                print "* Reading %s" % jfn
            else:
                print ".",
                sys.stdout.flush()
        return False

    def _check_owi(self, hdr):
        """Return True if the header's owi indicator matches that of the file header record; False otherwise.
        This can indicate whether the last record in a file has been read and now older records which have not yet
        been overwritten are now being read."""
        return self._file_hdr_owi == hdr.owi()

    def _is_file_full(self):
        """Return True if the current file is full (no more write space); false otherwise"""
        return self._file.tell() >= self._jinfo.get_jrnl_file_size_bytes()

    def _get_next_record(self):
        """Get the next record in the file for analysis.  Returns True when reading must stop."""
        if self._is_file_full():
            if self._advance_jrnl_file():
                return True
        try:
            hdr = jrnl.Utils.load(self._file, jrnl.Hdr)
        except:
            # NOTE(review): bare except deliberately treats any read/parse failure as
            # end-of-analysis rather than an error - confirm this is intended.
            return True
        if hdr.empty():
            return True
        if hdr.check():
            return True
        self._rec_cnt += 1
        self._file_hdr_owi = self._file_hdr.owi()
        if self._first_rec_flag:
            # The first record read must sit exactly at the file header's fro.
            if self._file_hdr.fro != hdr.foffs:
                raise jerr.FirstRecordOffsetMismatch(self._file_hdr.fro, hdr.foffs)
            else:
                if self._rflag:
                    print " * fro ok: 0x%x" % self._file_hdr.fro
                self._first_rec_flag = False
        stop = False
        if isinstance(hdr, jrnl.EnqRec):
            stop = self._handle_enq_rec(hdr)
        elif isinstance(hdr, jrnl.DeqRec):
            stop = self._handle_deq_rec(hdr)
        elif isinstance(hdr, jrnl.TxnRec):
            stop = self._handle_txn_rec(hdr)
        wstr = ""
        for warn in self._warning:
            wstr += " (%s)" % warn
        if self._rflag:
            print " > %s %s" % (hdr, wstr)
        self._warning = []
        return stop

    def _handle_deq_rec(self, hdr):
        """Process a dequeue ("RHMd") record.  Returns True when reading must stop."""
        if self._load_rec(hdr):
            return True

        # Check OWI flag
        if not self._check_owi(hdr):
            self._warning.append("WARNING: OWI mismatch - could be overwrite boundary.")
            return True
        # Test hook
        if self._csv_deq_cb != None and self._csv_deq_cb(self._csv_store_chk, hdr):
            return True

        try:
            if hdr.xid == None:
                # Non-transactional dequeue: remove the matching enqueue directly.
                self._emap.delete(hdr.deq_rid)
            else:
                # Transactional dequeue: defer until the transaction commits/aborts.
                self._tmap.add(self._file_hdr.fid, hdr)
        except jerr.JWarning, warn:
            self._warning.append(str(warn))
        return False

    def _handle_enq_rec(self, hdr):
        """Process an enqueue ("RHMe") record.  Returns True when reading must stop."""
        if self._load_rec(hdr):
            return True

        # Check extern flag: an external record must carry no inline message data.
        if hdr.extern and hdr.data != None:
            raise jerr.ExternFlagDataError(hdr)
        # Check OWI flag
        if not self._check_owi(hdr):
            self._warning.append("WARNING: OWI mismatch - could be overwrite boundary.")
            return True
        # Test hook
        if self._csv_enq_cb != None and self._csv_enq_cb(self._csv_store_chk, hdr):
            return True

        if hdr.xid == None:
            self._emap.add(self._file_hdr.fid, hdr)
        else:
            self._txn_msg_cnt += 1
            self._tmap.add(self._file_hdr.fid, hdr)
        self._msg_cnt += 1
        return False

    def _handle_txn_rec(self, hdr):
        """Process a transaction ("RHMa" or "RHMc") record.  Returns True when reading must stop."""
        if self._load_rec(hdr):
            return True

        # Check OWI flag
        if not self._check_owi(hdr):
            self._warning.append("WARNING: OWI mismatch - could be overwrite boundary.")
            return True
        # Test hook
        if self._csv_txn_cb != None and self._csv_txn_cb(self._csv_store_chk, hdr):
            return True

        # Magic's last char distinguishes abort ("a") from commit ("c").
        if hdr.magic[-1] == "a":
            self._abort_cnt += 1
        else:
            self._commit_cnt += 1

        if self._tmap.contains(hdr.xid):
            mismatched_rids = self._tmap.delete(hdr)
            if mismatched_rids != None and len(mismatched_rids) > 0:
                self._warning.append("WARNING: transactional dequeues not found in enqueue map; rids=%s" %
                                     mismatched_rids)
        else:
            self._warning.append("WARNING: %s not found in transaction map" % jrnl.Utils.format_xid(hdr.xid))
        if hdr.magic[-1] == "c": # commits only
            self._txn_obj_list[hdr.xid] = hdr
        return False

    def _incr_file_num(self):
        """Increment the number of files read with wraparound (ie after file n-1, go to 0)"""
        self._file_num += 1
        if self._file_num >= self._jinfo.get_num_jrnl_files():
            self._file_num = 0
        return self._file_num

    def _load_rec(self, hdr):
        """Load a single record for the given header.  There may be arbitrarily large xids and data components,
        which may span journal files; advance the file as needed until the record is complete.  Returns True
        when reading must stop (no more files)."""
        while not hdr.complete():
            if self._advance_jrnl_file():
                return True
            hdr.load(self._file)
        return False

# =============================================================================

if __name__ == "__main__":
    print "This is a library, and cannot be executed."
diff --git a/qpid/cpp/management/python/lib/qpidstore/jerr.py b/qpid/cpp/management/python/lib/qpidstore/jerr.py
new file mode 100644
index 0000000000..448f881ce3
--- /dev/null
+++ b/qpid/cpp/management/python/lib/qpidstore/jerr.py
@@ -0,0 +1,219 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

# == Warnings =================================================================

class JWarning(Exception):
    """Class to convey a warning"""
    def __init__(self, err):
        """Constructor"""
        Exception.__init__(self, err)

# == Errors ===================================================================

class AllJrnlFilesEmptyCsvError(Exception):
    """All journal files are empty (never been written)"""
    def __init__(self, tnum, exp_num_msgs):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] All journal files are empty, but test expects %d msg(s)."
% (tnum, exp_num_msgs))

class AlreadyLockedError(Exception):
    """Error class for trying to lock a record that is already locked"""
    def __init__(self, rid):
        """Constructor"""
        Exception.__init__(self, "Locking record which is already locked in EnqMap: rid=0x%x" % rid)

class BadFileNumberError(Exception):
    """Error class for incorrect or unexpected file number"""
    def __init__(self, file_num):
        """Constructor"""
        Exception.__init__(self, "Bad file number %d" % file_num)

class DataSizeError(Exception):
    """Error class for data size mismatch"""
    def __init__(self, exp_size, act_size, data_str):
        """Constructor"""
        Exception.__init__(self, "Inconsistent data size: expected:%d; actual:%d; data=\"%s\"" %
                           (exp_size, act_size, data_str))

class DeleteLockedRecordError(Exception):
    """Error class for deleting a locked record from the enqueue map"""
    def __init__(self, rid):
        """Constructor"""
        # NOTE(review): rid is formatted with %s after a "0x" prefix here and in
        # DequeueNonExistentEnqueueError, unlike the %x used elsewhere - confirm intended.
        Exception.__init__(self, "Deleting locked record from EnqMap: rid=0x%s" % rid)

class DequeueNonExistentEnqueueError(Exception):
    """Error class for attempting to dequeue a non-existent enqueue record (rid)"""
    def __init__(self, deq_rid):
        """Constructor"""
        Exception.__init__(self, "Dequeuing non-existent enqueue record: rid=0x%s" % deq_rid)

class DuplicateRidError(Exception):
    """Error class for placing duplicate rid into enqueue map"""
    def __init__(self, rid):
        """Constructor"""
        Exception.__init__(self, "Adding duplicate record to EnqMap: rid=0x%x" % rid)

class EndianMismatchError(Exception):
    """Error class for a mismatched record header endian flag"""
    def __init__(self, exp_endianness):
        """Constructor"""
        # endian_str() returns a 2-tuple which fills both %s slots.
        Exception.__init__(self, "Endian mismatch: expected %s, but current record is %s" %
                           self.endian_str(exp_endianness))
    # @staticmethod decorator avoided for RHEL4 (python 2.3.x) compatibility;
    # the older staticmethod() call form is used below instead.
    #@staticmethod
    def endian_str(endianness):
        """Return a string tuple for the endianness error message"""
        if endianness:
            return "big", "little"
        return "little", "big"
    endian_str = staticmethod(endian_str)

class ExternFlagDataError(Exception):
    """Error class for the extern flag being set and the internal size > 0"""
    def __init__(self, hdr):
        """Constructor"""
        Exception.__init__(self, "Message data found (msg size > 0) on record with external flag set: hdr=%s" % hdr)

class ExternFlagCsvError(Exception):
    """External flag mismatch between record and CSV test file"""
    def __init__(self, tnum, exp_extern_flag):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] External flag mismatch: expected %s" % (tnum, exp_extern_flag))

class ExternFlagWithDataCsvError(Exception):
    """External flag set and Message data found"""
    def __init__(self, tnum):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] Message data found on record with external flag set" % tnum)

class FillExceedsFileSizeError(Exception):
    """Internal error from a fill operation which will exceed the specified file size"""
    def __init__(self, cur_size, file_size):
        """Constructor"""
        Exception.__init__(self, "Filling to size %d > max file size %d" % (cur_size, file_size))

class FillSizeError(Exception):
    """Internal error from a fill operation that did not match the calculated end point in the file"""
    def __init__(self, cur_posn, exp_posn):
        """Constructor"""
        Exception.__init__(self, "Filled to size %d > expected file posn %d" % (cur_posn, exp_posn))

class FirstRecordOffsetMismatch(Exception):
    """Error class for file header fro mismatch with actual record"""
    def __init__(self, fro, actual_offs):
        """Constructor"""
        Exception.__init__(self, "File header first record offset mismatch: fro=0x%x; actual offs=0x%x" %
                           (fro, actual_offs))

class InvalidHeaderVersionError(Exception):
    """Error class for invalid record header version"""
    def __init__(self, exp_ver, act_ver):
        """Constructor"""
        Exception.__init__(self, "Invalid header version: expected:%d, actual:%d." % (exp_ver, act_ver))

class InvalidRecordTypeError(Exception):
    """Error class for any operation using an invalid record type"""
    def __init__(self, operation, magic, rid):
        """Constructor"""
        Exception.__init__(self, "Invalid record type for operation: operation=%s record magic=%s, rid=0x%x" %
                           (operation, magic, rid))

class InvalidRecordTailError(Exception):
    """Error class for invalid record tail"""
    def __init__(self, magic_err, rid_err, rec):
        """Constructor"""
        Exception.__init__(self, " > %s *INVALID TAIL RECORD (%s)*" % (rec, self.tail_err_str(magic_err, rid_err)))
    # @staticmethod decorator avoided for RHEL4 (python 2.3.x) compatibility.
    #@staticmethod
    def tail_err_str(magic_err, rid_err):
        """Return a string indicating the tail record error(s)"""
        estr = ""
        if magic_err:
            estr = "magic bad"
            if rid_err:
                estr += ", "
        if rid_err:
            estr += "rid mismatch"
        return estr
    tail_err_str = staticmethod(tail_err_str)

class NonExistentRecordError(Exception):
    """Error class for any operation on an non-existent record"""
    def __init__(self, operation, rid):
        """Constructor"""
        Exception.__init__(self, "Operation on non-existent record: operation=%s; rid=0x%x" % (operation, rid))

class NotLockedError(Exception):
    """Error class for unlocking a record which is not locked in the first place"""
    def __init__(self, rid):
        """Constructor"""
        Exception.__init__(self, "Unlocking record which is not locked in EnqMap: rid=0x%x" % rid)

class JournalSpaceExceededError(Exception):
    """Error class for when journal space of resized journal is too small to contain the transferred records"""
    def __init__(self):
        """Constructor"""
        Exception.__init__(self, "Ran out of journal space while writing records")

class MessageLengthCsvError(Exception):
    """Message length mismatch between record and CSV test file"""
    def __init__(self, tnum, exp_msg_len, actual_msg_len):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] Message length mismatch: expected %d; found %d" %
                           (tnum, exp_msg_len, actual_msg_len))

class NumMsgsCsvError(Exception):
    """Number of messages found mismatched with CSV file"""
    def __init__(self, tnum, exp_num_msgs, actual_num_msgs):
        """Constructor"""
        # NOTE(review): "[CSV %s]" uses %s for tnum while sibling classes use %d - works
        # for ints either way, but inconsistent.
        Exception.__init__(self, "[CSV %s] Incorrect number of messages: expected %d, found %d" %
                           (tnum, exp_num_msgs, actual_num_msgs))

class TransactionCsvError(Exception):
    """Transaction mismatch between record and CSV file"""
    def __init__(self, tnum, exp_transactional):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] Transaction mismatch: expected %s" % (tnum, exp_transactional))

class UnexpectedEndOfFileError(Exception):
    """Error class for unexpected end-of-file during reading"""
    def __init__(self, exp_size, curr_offs):
        """Constructor"""
        Exception.__init__(self, "Unexpected end-of-file: expected file size:%d; current offset:%d" %
                           (exp_size, curr_offs))

class XidLengthCsvError(Exception):
    """Message Xid length mismatch between record and CSV file"""
    def __init__(self, tnum, exp_xid_len, actual_msg_len):
        """Constructor"""
        Exception.__init__(self, "[CSV %d] Message XID mismatch: expected %d; found %d" %
                           (tnum, exp_xid_len, actual_msg_len))

class XidSizeError(Exception):
    """Error class for Xid size mismatch"""
    def __init__(self, exp_size, act_size, xid_str):
        """Constructor"""
        Exception.__init__(self, "Inconsistent xid size: expected:%d; actual:%d; xid=\"%s\"" %
                           (exp_size, act_size, xid_str))

# =============================================================================

if __name__ == "__main__":
    print "This is a library, and cannot be executed."

diff --git a/qpid/cpp/management/python/lib/qpidstore/jrnl.py b/qpid/cpp/management/python/lib/qpidstore/jrnl.py
new file mode 100644
index 0000000000..7c4d6de4a9
--- /dev/null
+++ b/qpid/cpp/management/python/lib/qpidstore/jrnl.py
@@ -0,0 +1,794 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.
See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import jerr
import os.path, sys, xml.parsers.expat
from struct import pack, unpack, calcsize
from time import gmtime, strftime

# TODO: Get rid of these! Use jinf instance instead
DBLK_SIZE = 128
SBLK_SIZE = 4 * DBLK_SIZE

# TODO - this is messy - find a better way to handle this
# This is a global, but is set directly by the calling program
JRNL_FILE_SIZE = None

#== class Utils ======================================================================

class Utils(object):
    """Class containing utility functions for dealing with the journal"""

    # Characters treated as printable when rendering record payloads.
    __printchars = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{\|}~ "

    # The @staticmethod declarations are not supported in RHEL4 (python 2.3.x)
    # When RHEL4 support ends, restore these declarations and remove the older
    # staticmethod() declaration.

    #@staticmethod
    def format_data(dsize, data):
        """Format binary data for printing.  Raises DataSizeError if dsize disagrees with len(data)."""
        if data == None:
            return ""
        if Utils._is_printable(data):
            datastr = Utils._split_str(data)
        else:
            datastr = Utils._hex_split_str(data)
        if dsize != len(data):
            raise jerr.DataSizeError(dsize, len(data), datastr)
        return "data(%d)=\"%s\" " % (dsize, datastr)
    format_data = staticmethod(format_data)

    #@staticmethod
    def format_xid(xid, xidsize=None):
        """Format binary XID for printing.  Raises XidSizeError if xidsize disagrees with len(xid)."""
        if xid == None and xidsize != None:
            if xidsize > 0:
                raise jerr.XidSizeError(xidsize, 0, None)
            return ""
        if Utils._is_printable(xid):
            xidstr = Utils._split_str(xid)
        else:
            xidstr = Utils._hex_split_str(xid)
        if xidsize == None:
            xidsize = len(xid)
        elif xidsize != len(xid):
            raise jerr.XidSizeError(xidsize, len(xid), xidstr)
        return "xid(%d)=\"%s\" " % (xidsize, xidstr)
    format_xid = staticmethod(format_xid)

    #@staticmethod
    def inv_str(string):
        """Perform a binary 1's compliment (invert all bits) on a binary string"""
        istr = ""
        for index in range(0, len(string)):
            istr += chr(~ord(string[index]) & 0xff)
        return istr
    inv_str = staticmethod(inv_str)

    #@staticmethod
    def load(fhandle, klass):
        """Load a record of class klass from a file.  The header magic discriminates the
        concrete record subclass; any subclass-specific fields are then read and the file
        position is advanced past the record boundary."""
        args = Utils._load_args(fhandle, klass)
        subclass = klass.discriminate(args)
        result = subclass(*args) # create instance of record
        if subclass != klass:
            result.init(fhandle, *Utils._load_args(fhandle, subclass))
        result.skip(fhandle)
        return result
    load = staticmethod(load)

    #@staticmethod
    def load_file_data(fhandle, size, data):
        """Load the data portion of a message from file.  Returns (data, complete); complete is
        False when the read stopped at the journal file boundary (JRNL_FILE_SIZE) and must be
        resumed in the next file."""
        if size == 0:
            return (data, True)
        if data == None:
            loaded = 0
        else:
            loaded = len(data)
        foverflow = fhandle.tell() + size - loaded > JRNL_FILE_SIZE
        if foverflow:
            rsize = JRNL_FILE_SIZE - fhandle.tell()
        else:
            rsize = size - loaded
        fbin = fhandle.read(rsize)
        if data == None:
            data = unpack("%ds" % (rsize), fbin)[0]
        else:
            data = data + unpack("%ds" % (rsize), fbin)[0]
        return (data, not foverflow)
    load_file_data = staticmethod(load_file_data)

    #@staticmethod
    def rem_bytes_in_blk(fhandle, blk_size):
        """Return the remaining bytes in a block"""
        foffs = fhandle.tell()
        return Utils.size_in_bytes_to_blk(foffs, blk_size) - foffs
    rem_bytes_in_blk = staticmethod(rem_bytes_in_blk)

    #@staticmethod
    def size_in_blks(size, blk_size):
        """Return the size in terms of data blocks (rounded up)"""
        return int((size + blk_size - 1) / blk_size)
    size_in_blks = staticmethod(size_in_blks)

    #@staticmethod
    def size_in_bytes_to_blk(size, blk_size):
        """Return size rounded up to the next block boundary, in bytes"""
        return Utils.size_in_blks(size, blk_size) * blk_size
    size_in_bytes_to_blk = staticmethod(size_in_bytes_to_blk)

    #@staticmethod
    def _hex_split_str(in_str, split_size = 50):
        """Split a hex string into two parts separated by an ellipsis"""
        if len(in_str) <= split_size:
            return Utils._hex_str(in_str, 0, len(in_str))
#        if len(in_str) > split_size + 25:
#            return Utils._hex_str(in_str, 0, 10) + " ... " + Utils._hex_str(in_str, 55, 65) + " ... " + \
#                   Utils._hex_str(in_str, len(in_str)-10, len(in_str))
        return Utils._hex_str(in_str, 0, 10) + " ... " + Utils._hex_str(in_str, len(in_str)-10, len(in_str))
    _hex_split_str = staticmethod(_hex_split_str)

    #@staticmethod
    def _hex_str(in_str, begin, end):
        """Return a binary string as a hex string (printable chars pass through unescaped)"""
        hstr = ""
        for index in range(begin, end):
            if Utils._is_printable(in_str[index]):
                hstr += in_str[index]
            else:
                hstr += "\\%02x" % ord(in_str[index])
        return hstr
    _hex_str = staticmethod(_hex_str)

    #@staticmethod
    def _is_printable(in_str):
        """Return True if in_str in printable; False otherwise."""
        return in_str.strip(Utils.__printchars) == ""
    _is_printable = staticmethod(_is_printable)

    #@staticmethod
    def _load_args(fhandle, klass):
        """Load the constructor arguments for class klass from the file.  Returns a tuple
        (file_offset, field1, field2, ...)."""
        size = calcsize(klass.FORMAT)
        # Trailing comma is intentional: makes a 1-tuple so the unpacked fields can be
        # concatenated onto it below.
        foffs = fhandle.tell(),
        fbin = fhandle.read(size)
        if len(fbin) != size:
            raise jerr.UnexpectedEndOfFileError(size, len(fbin))
        return foffs + unpack(klass.FORMAT, fbin)
    _load_args = staticmethod(_load_args)

    #@staticmethod
    def _split_str(in_str, split_size = 50):
        """Split a string into two parts separated by an ellipsis if it is longer than split_size"""
        if len(in_str) < split_size:
            return in_str
        return in_str[:25] + " ... " + in_str[-25:]
    _split_str = staticmethod(_split_str)


#== class Hdr =================================================================

class Hdr:
    """Class representing the journal header records"""

    FORMAT = "=4sBBHQ"
    HDR_VER = 1
    OWI_MASK = 0x01
    BIG_ENDIAN = sys.byteorder == "big"
    REC_BOUNDARY = DBLK_SIZE

    def __init__(self, foffs, magic, ver, endn, flags, rid):
        """Constructor"""
#        Sizeable.__init__(self)
        self.foffs = foffs
        self.magic = magic
        self.ver = ver
        self.endn = endn
        self.flags = flags
        self.rid = long(rid)

    def __str__(self):
        """Return string representation of this header"""
        if self.empty():
            return "0x%08x: <empty>" % (self.foffs)
        if self.magic[-1] == "x":
            return "0x%08x: [\"%s\"]" % (self.foffs, self.magic)
        # NOTE(review): "x" in the list below is unreachable - it is consumed by the
        # branch above.
        if self.magic[-1] in ["a", "c", "d", "e", "f", "x"]:
            return "0x%08x: [\"%s\" v=%d e=%d f=0x%04x rid=0x%x]" % (self.foffs, self.magic, self.ver, self.endn,
                                                                     self.flags, self.rid)
        return "0x%08x: <error, unknown magic \"%s\" (possible overwrite boundary?)>" % (self.foffs, self.magic)

    #@staticmethod
    def discriminate(args):
        """Use the last char in the header magic to determine the header type"""
        return _CLASSES.get(args[1][-1], Hdr)
    discriminate = staticmethod(discriminate)

    def empty(self):
        """Return True if this record is empty (ie has a magic of 0x0000)"""
        return self.magic == "\x00"*4

    def encode(self):
        """Encode the header into a binary string"""
        return pack(Hdr.FORMAT, self.magic, self.ver, self.endn, self.flags, self.rid)

    def owi(self):
        """Return the OWI (overwrite indicator) for this header"""
        return self.flags & self.OWI_MASK != 0

    def skip(self, fhandle):
        """Read and discard the remainder of this record up to the next record boundary"""
        fhandle.read(Utils.rem_bytes_in_blk(fhandle, self.REC_BOUNDARY))

    def check(self):
        """Check that this record is valid.  Returns True when the magic is unrecognized;
        raises on version/endian mismatch for recognized records."""
        if self.empty() or self.magic[:3] != "RHM" or self.magic[3] not in ["a", "c", "d", "e", "f", "x"]:
            return True
        if
self.magic[-1] != "x":
            if self.ver != self.HDR_VER:
                raise jerr.InvalidHeaderVersionError(self.HDR_VER, self.ver)
            if bool(self.endn) != self.BIG_ENDIAN:
                raise jerr.EndianMismatchError(self.BIG_ENDIAN)
        return False


#== class FileHdr =============================================================

class FileHdr(Hdr):
    """Class for file headers, found at the beginning of journal files"""

    FORMAT = "=2H4x3Q"
    REC_BOUNDARY = SBLK_SIZE

    def __str__(self):
        """Return a string representation of the this FileHdr instance"""
        return "%s fid=%d lid=%d fro=0x%08x t=%s" % (Hdr.__str__(self), self.fid, self.lid, self.fro,
                                                     self.timestamp_str())

    def encode(self):
        """Encode this class into a binary string"""
        return Hdr.encode(self) + pack(FileHdr.FORMAT, self.fid, self.lid, self.fro, self.time_sec, self.time_ns)

    def init(self, fhandle, foffs, fid, lid, fro, time_sec, time_ns):
        """Initialize this instance to known values"""
        self.fid = fid
        self.lid = lid
        self.fro = fro
        self.time_sec = time_sec
        self.time_ns = time_ns

    def timestamp(self):
        """Get the timestamp of this record as a tuple (secs, nsecs)"""
        return (self.time_sec, self.time_ns)

    def timestamp_str(self):
        """Get the timestamp of this record in string format"""
        time = gmtime(self.time_sec)
        fstr = "%%a %%b %%d %%H:%%M:%%S.%09d %%Y" % (self.time_ns)
        return strftime(fstr, time)


#== class DeqRec ==============================================================

class DeqRec(Hdr):
    """Class for a dequeue record"""

    FORMAT = "=QQ"

    def __str__(self):
        """Return a string representation of the this DeqRec instance"""
        return "%s %sdrid=0x%x" % (Hdr.__str__(self), Utils.format_xid(self.xid, self.xidsize), self.deq_rid)

    def init(self, fhandle, foffs, deq_rid, xidsize):
        """Initialize this instance to known values"""
        self.deq_rid = deq_rid
        self.xidsize = xidsize
        self.xid = None
        self.deq_tail = None
        self.xid_complete = False
        self.tail_complete = False
        self.tail_bin = None
        self.tail_offs = 0
        self.load(fhandle)

    def encode(self):
        """Encode this class into a binary string"""
        buf = Hdr.encode(self) + pack(DeqRec.FORMAT, self.deq_rid, self.xidsize)
        if self.xidsize > 0:
            fmt = "%ds" % (self.xidsize)
            buf += pack(fmt, self.xid)
            buf += self.deq_tail.encode()
        return buf

    def load(self, fhandle):
        """Load the remainder of this record (after the header has been loaded).  Records
        with an xid carry a tail whose inverted magic and rid must match the header."""
        if self.xidsize == 0:
            self.xid_complete = True
            self.tail_complete = True
        else:
            if not self.xid_complete:
                (self.xid, self.xid_complete) = Utils.load_file_data(fhandle, self.xidsize, self.xid)
            if self.xid_complete and not self.tail_complete:
                ret = Utils.load_file_data(fhandle, calcsize(RecTail.FORMAT), self.tail_bin)
                self.tail_bin = ret[0]
                if ret[1]:
                    self.deq_tail = RecTail(self.tail_offs, *unpack(RecTail.FORMAT, self.tail_bin))
                    magic_err = self.deq_tail.magic_inv != Utils.inv_str(self.magic)
                    rid_err = self.deq_tail.rid != self.rid
                    if magic_err or rid_err:
                        raise jerr.InvalidRecordTailError(magic_err, rid_err, self)
                    self.skip(fhandle)
                self.tail_complete = ret[1]
        return self.complete()

    def complete(self):
        """Returns True if the entire record is loaded, False otherwise"""
        return self.xid_complete and self.tail_complete


#== class TxnRec ==============================================================

class TxnRec(Hdr):
    """Class for a transaction commit/abort record"""

    FORMAT = "=Q"

    def __str__(self):
        """Return a string representation of the this TxnRec instance"""
        return "%s %s" % (Hdr.__str__(self), Utils.format_xid(self.xid, self.xidsize))

    def init(self, fhandle, foffs, xidsize):
        """Initialize this instance to known values"""
        self.xidsize = xidsize
        self.xid = None
        self.tx_tail = None
        self.xid_complete = False
        self.tail_complete = False
        self.tail_bin = None
        self.tail_offs = 0
        self.load(fhandle)

    def encode(self):
        """Encode this class into a binary string"""
        return Hdr.encode(self) + pack(TxnRec.FORMAT, self.xidsize) + pack("%ds" % self.xidsize, self.xid) + \
               self.tx_tail.encode()

    def load(self, fhandle):
        """Load the remainder of this record (after the header has been loaded)."""
        if not self.xid_complete:
            ret = Utils.load_file_data(fhandle, self.xidsize, self.xid)
            self.xid = ret[0]
            self.xid_complete = ret[1]
        if self.xid_complete and not self.tail_complete:
            ret = Utils.load_file_data(fhandle, calcsize(RecTail.FORMAT), self.tail_bin)
            self.tail_bin = ret[0]
            if ret[1]:
                self.tx_tail = RecTail(self.tail_offs, *unpack(RecTail.FORMAT, self.tail_bin))
                magic_err = self.tx_tail.magic_inv != Utils.inv_str(self.magic)
                rid_err = self.tx_tail.rid != self.rid
                if magic_err or rid_err:
                    raise jerr.InvalidRecordTailError(magic_err, rid_err, self)
                self.skip(fhandle)
            self.tail_complete = ret[1]
        return self.complete()

    def complete(self):
        """Returns True if the entire record is loaded, False otherwise"""
        return self.xid_complete and self.tail_complete


#== class EnqRec ==============================================================

class EnqRec(Hdr):
    """Class for a enqueue record"""

    FORMAT = "=QQ"
    TRANSIENT_MASK = 0x10
    EXTERN_MASK = 0x20

    def __str__(self):
        """Return a string representation of the this EnqRec instance"""
        return "%s %s%s %s %s" % (Hdr.__str__(self), Utils.format_xid(self.xid, self.xidsize),
                                  Utils.format_data(self.dsize, self.data), self.enq_tail, self.print_flags())

    def encode(self):
        """Encode this class into a binary string"""
        buf = Hdr.encode(self) + pack(EnqRec.FORMAT, self.xidsize, self.dsize)
        if self.xidsize > 0:
            buf += pack("%ds" % self.xidsize, self.xid)
        if self.dsize > 0:
            buf += pack("%ds" % self.dsize, self.data)
        if self.xidsize > 0 or self.dsize > 0:
            buf += self.enq_tail.encode()
        return buf

    def init(self, fhandle, foffs, xidsize, dsize):
        """Initialize this instance to known values"""
        self.xidsize = xidsize
        self.dsize = dsize
        # Flag bits decoded from the common header flags field.
        self.transient = self.flags & self.TRANSIENT_MASK > 0
        self.extern = self.flags & self.EXTERN_MASK > 0
        self.xid = None
        self.data = None
        self.enq_tail = None
        self.xid_complete = False
        self.data_complete = False
        self.tail_complete = False
        self.tail_bin = None
        self.tail_offs = 0
        self.load(fhandle)

    def load(self, fhandle):
        """Load the remainder of this record (after the header has been loaded).  External
        records carry no inline data, so data is considered complete immediately."""
        if not self.xid_complete:
            ret = Utils.load_file_data(fhandle, self.xidsize, self.xid)
            self.xid = ret[0]
            self.xid_complete = ret[1]
        if self.xid_complete and not self.data_complete:
            if self.extern:
                self.data_complete = True
            else:
                ret = Utils.load_file_data(fhandle, self.dsize, self.data)
                self.data = ret[0]
                self.data_complete = ret[1]
        if self.data_complete and not self.tail_complete:
            ret = Utils.load_file_data(fhandle, calcsize(RecTail.FORMAT), self.tail_bin)
            self.tail_bin = ret[0]
            if ret[1]:
                self.enq_tail = RecTail(self.tail_offs, *unpack(RecTail.FORMAT, self.tail_bin))
                magic_err = self.enq_tail.magic_inv != Utils.inv_str(self.magic)
                rid_err = self.enq_tail.rid != self.rid
                if magic_err or rid_err:
                    raise jerr.InvalidRecordTailError(magic_err, rid_err, self)
                self.skip(fhandle)
            self.tail_complete = ret[1]
        return self.complete()

    def complete(self):
        """Returns True if the entire record is loaded, False otherwise"""
        return self.xid_complete and self.data_complete and self.tail_complete

    def print_flags(self):
        """Utility function to decode the flags field in the header and print a string representation"""
        fstr = ""
        if self.transient:
            fstr = "*TRANSIENT"
        if self.extern:
            if len(fstr) > 0:
                fstr += ",EXTERNAL"
            else:
                fstr = "*EXTERNAL"
        if len(fstr) > 0:
            fstr += "*"
        return fstr


#== class RecTail =============================================================

class RecTail:
    """Class for a record tail - for all records where either an XID or data
separate the header from the end of the
    record"""

    FORMAT = "=4sQ"

    def __init__(self, foffs, magic_inv, rid):
        """Initialize this instance to known values"""
        self.foffs = foffs
        self.magic_inv = magic_inv
        self.rid = long(rid)

    def __str__(self):
        """Return a string representation of the this RecTail instance"""
        magic = Utils.inv_str(self.magic_inv)
        return "[\"%s\" rid=0x%x]" % (magic, self.rid)

    def encode(self):
        """Encode this class into a binary string"""
        return pack(RecTail.FORMAT, self.magic_inv, self.rid)


#== class JrnlInfo ============================================================

class JrnlInfo(object):
    """
    This object reads and writes journal information files (<basename>.jinf). Methods are provided
    to read a file, query its properties and reset just those properties necessary for normalizing
    and resizing a journal.

    Normalizing: resetting the directory and/or base filename to different values. This is necessary
    if a set of journal files is copied from one location to another before being restored, as the
    value of the path in the file no longer matches the actual path.

    Resizing: If the journal geometry parameters (size and number of journal files) changes, then the
    .jinf file must reflect these changes, as this file is the source of information for journal
    recovery.

    NOTE: Data size vs File size: There are methods which return the data size and file size of the
    journal files.

    +-------------+--------------------/ /----------+
    | File header |           File data             |
    +-------------+--------------------/ /----------+
    |             |                                 |
    |             |<---------- Data size ---------->|
    |<------------------ File Size ---------------->|

    Data size: The size of the data content of the journal, ie that part which stores the data records.

    File size: The actual disk size of the journal including data and the file header which precedes the
    data.

    The file header is fixed to 1 sblk, so file size = jrnl size + sblk size.
    """

    def __init__(self, jdir, bfn = "JournalData"):
        """Constructor.  Reads <jdir>/<bfn>.jinf immediately."""
        self.__jdir = jdir
        self.__bfn = bfn
        self.__jinf_dict = {}
        self._read_jinf()

    def __str__(self):
        """Create a string containing all of the journal info contained in the jinf file"""
        ostr = "Journal info file %s:\n" % os.path.join(self.__jdir, "%s.jinf" % self.__bfn)
        # NOTE: iteritems() is Python 2 only.
        for key, val in self.__jinf_dict.iteritems():
            ostr += "  %s = %s\n" % (key, val)
        return ostr

    def normalize(self, jdir = None, bfn = None):
        """Normalize the directory (ie reset the directory path to match the actual current location) for this
        jinf file"""
        if jdir == None:
            self.__jinf_dict["directory"] = self.__jdir
        else:
            self.__jdir = jdir
            self.__jinf_dict["directory"] = jdir
        if bfn != None:
            self.__bfn = bfn
            self.__jinf_dict["base_filename"] = bfn

    def resize(self, num_jrnl_files = None, jrnl_file_size = None):
        """Reset the journal size information to allow for resizing the journal"""
        if num_jrnl_files != None:
            self.__jinf_dict["number_jrnl_files"] = num_jrnl_files
        if jrnl_file_size != None:
            # NOTE(review): a value stored under "jrnl_file_size_sblks" is computed as
            # jrnl_file_size * dblk-size-in-bytes; this implies jrnl_file_size is expected
            # in a unit where that product yields softblocks - confirm against callers.
            self.__jinf_dict["jrnl_file_size_sblks"] = jrnl_file_size * self.get_jrnl_dblk_size_bytes()

    def write(self, jdir = None, bfn = None):
        """Write the .jinf file, creating the journal directory if necessary"""
        self.normalize(jdir, bfn)
        if not os.path.exists(self.get_jrnl_dir()):
            os.makedirs(self.get_jrnl_dir())
        fhandle = open(os.path.join(self.get_jrnl_dir(), "%s.jinf" % self.get_jrnl_base_name()), "w")
        fhandle.write("<?xml version=\"1.0\" ?>\n")
        fhandle.write("<jrnl>\n")
        fhandle.write(" <journal_version value=\"%d\" />\n" % self.get_jrnl_version())
        fhandle.write(" <journal_id>\n")
        fhandle.write(" <id_string value=\"%s\" />\n" % self.get_jrnl_id())
        fhandle.write(" <directory value=\"%s\" />\n" % self.get_jrnl_dir())
        fhandle.write(" <base_filename value=\"%s\" />\n" % self.get_jrnl_base_name())
        fhandle.write(" </journal_id>\n")
        fhandle.write(" <creation_time>\n")
        fhandle.write(" <seconds value=\"%d\" />\n" % self.get_creation_time()[0])
        fhandle.write(" <nanoseconds value=\"%d\" />\n" % self.get_creation_time()[1])
        fhandle.write(" <string value=\"%s\" />\n" % self.get_creation_time_str())
        fhandle.write(" </creation_time>\n")
        fhandle.write(" <journal_file_geometry>\n")
        fhandle.write(" <number_jrnl_files value=\"%d\" />\n" % self.get_num_jrnl_files())
        fhandle.write(" <auto_expand value=\"%s\" />\n" % str.lower(str(self.get_auto_expand())))
        fhandle.write(" <jrnl_file_size_sblks value=\"%d\" />\n" % self.get_jrnl_data_size_sblks())
        fhandle.write(" <JRNL_SBLK_SIZE value=\"%d\" />\n" % self.get_jrnl_sblk_size_dblks())
        fhandle.write(" <JRNL_DBLK_SIZE value=\"%d\" />\n" % self.get_jrnl_dblk_size_bytes())
        fhandle.write(" </journal_file_geometry>\n")
        fhandle.write(" <cache_geometry>\n")
        fhandle.write(" <wcache_pgsize_sblks value=\"%d\" />\n" % self.get_wr_buf_pg_size_sblks())
        fhandle.write(" <wcache_num_pages value=\"%d\" />\n" % self.get_num_wr_buf_pgs())
        fhandle.write(" <JRNL_RMGR_PAGE_SIZE value=\"%d\" />\n" % self.get_rd_buf_pg_size_sblks())
        fhandle.write(" <JRNL_RMGR_PAGES value=\"%d\" />\n" % self.get_num_rd_buf_pgs())
        fhandle.write(" </cache_geometry>\n")
        fhandle.write("</jrnl>\n")
        fhandle.close()

    # Journal ID

    def get_jrnl_version(self):
        """Get the journal version"""
        return self.__jinf_dict["journal_version"]

    def get_jrnl_id(self):
        """Get the journal id"""
        return self.__jinf_dict["id_string"]

    def get_current_dir(self):
        """Get the current directory of the store (as opposed to that value saved in the .jinf file)"""
        return self.__jdir

    def get_jrnl_dir(self):
        """Get the journal directory stored in the .jinf file"""
        return self.__jinf_dict["directory"]

    def get_jrnl_base_name(self):
        """Get the base filename - that string used to name the journal files <basefilename>-nnnn.jdat and
        <basefilename>.jinf"""
        return self.__jinf_dict["base_filename"]

    # Journal creation time

    def get_creation_time(self):
        """Get journal creation time as a tuple (secs, nsecs)"""
        return (self.__jinf_dict["seconds"], self.__jinf_dict["nanoseconds"])

    def get_creation_time_str(self):
        """Get journal creation time as a string"""
        return self.__jinf_dict["string"]

    # --- Files and geometry ---

    def get_num_jrnl_files(self):
        """Get number of data files in the journal"""
        return self.__jinf_dict["number_jrnl_files"]

    def get_auto_expand(self):
        """Return True if auto-expand is enabled; False otherwise"""
        return self.__jinf_dict["auto_expand"]

    def get_jrnl_sblk_size_dblks(self):
        """Get the journal softblock size in dblks"""
        return self.__jinf_dict["JRNL_SBLK_SIZE"]

    def get_jrnl_sblk_size_bytes(self):
        """Get the journal softblock size in bytes"""
        return self.get_jrnl_sblk_size_dblks() * self.get_jrnl_dblk_size_bytes()

    def get_jrnl_dblk_size_bytes(self):
        """Get the journal datablock size in bytes"""
        return self.__jinf_dict["JRNL_DBLK_SIZE"]

    def get_jrnl_data_size_sblks(self):
        """Get the data capacity (excluding the file headers) for one journal file in softblocks"""
        return self.__jinf_dict["jrnl_file_size_sblks"]

    def get_jrnl_data_size_dblks(self):
        """Get the data capacity (excluding the file headers) for one journal file in datablocks"""
        return self.get_jrnl_data_size_sblks() * self.get_jrnl_sblk_size_dblks()

    def get_jrnl_data_size_bytes(self):
        """Get the data capacity (excluding the file headers) for one journal file in bytes"""
        return self.get_jrnl_data_size_dblks() * self.get_jrnl_dblk_size_bytes()

    def get_jrnl_file_size_sblks(self):
        """Get the size of one journal file on disk (including the file headers) in softblocks"""
        # The file header occupies exactly one softblock.
        return self.get_jrnl_data_size_sblks() + 1

    def get_jrnl_file_size_dblks(self):
        """Get the size of one journal file on disk (including the file headers) in datablocks"""
        return
self.get_jrnl_file_size_sblks() * self.get_jrnl_sblk_size_dblks() + + def get_jrnl_file_size_bytes(self): + """Get the size of one journal file on disk (including the file headers) in bytes""" + return self.get_jrnl_file_size_dblks() * self.get_jrnl_dblk_size_bytes() + + def get_tot_jrnl_data_size_sblks(self): + """Get the size of the entire jouranl's data capacity (excluding the file headers) for all files together in + softblocks""" + return self.get_num_jrnl_files() * self.get_jrnl_data_size_bytes() + + def get_tot_jrnl_data_size_dblks(self): + """Get the size of the entire jouranl's data capacity (excluding the file headers) for all files together in + datablocks""" + return self.get_num_jrnl_files() * self.get_jrnl_data_size_dblks() + + def get_tot_jrnl_data_size_bytes(self): + """Get the size of the entire jouranl's data capacity (excluding the file headers) for all files together in + bytes""" + return self.get_num_jrnl_files() * self.get_jrnl_data_size_bytes() + + # Read and write buffers + + def get_wr_buf_pg_size_sblks(self): + """Get the size of the write buffer pages in softblocks""" + return self.__jinf_dict["wcache_pgsize_sblks"] + + def get_wr_buf_pg_size_dblks(self): + """Get the size of the write buffer pages in datablocks""" + return self.get_wr_buf_pg_size_sblks() * self.get_jrnl_sblk_size_dblks() + + def get_wr_buf_pg_size_bytes(self): + """Get the size of the write buffer pages in bytes""" + return self.get_wr_buf_pg_size_dblks() * self.get_jrnl_dblk_size_bytes() + + def get_num_wr_buf_pgs(self): + """Get the number of write buffer pages""" + return self.__jinf_dict["wcache_num_pages"] + + def get_rd_buf_pg_size_sblks(self): + """Get the size of the read buffer pages in softblocks""" + return self.__jinf_dict["JRNL_RMGR_PAGE_SIZE"] + + def get_rd_buf_pg_size_dblks(self): + """Get the size of the read buffer pages in datablocks""" + return self.get_rd_buf_pg_size_sblks * self.get_jrnl_sblk_size_dblks() + + def get_rd_buf_pg_size_bytes(self): + 
"""Get the size of the read buffer pages in bytes""" + return self.get_rd_buf_pg_size_dblks * self.get_jrnl_dblk_size_bytes() + + def get_num_rd_buf_pgs(self): + """Get the number of read buffer pages""" + return self.__jinf_dict["JRNL_RMGR_PAGES"] + + def _read_jinf(self): + """Read and initialize this instance from an existing jinf file located at the directory named in the + constructor - called by the constructor""" + fhandle = open(os.path.join(self.__jdir, "%s.jinf" % self.__bfn), "r") + parser = xml.parsers.expat.ParserCreate() + parser.StartElementHandler = self._handle_xml_start_elt + parser.CharacterDataHandler = self._handle_xml_char_data + parser.EndElementHandler = self._handle_xml_end_elt + parser.ParseFile(fhandle) + fhandle.close() + + def _handle_xml_start_elt(self, name, attrs): + """Callback for handling XML start elements. Used by the XML parser.""" + # bool values + if name == "auto_expand": + self.__jinf_dict[name] = attrs["value"] == "true" + # long values + elif name == "seconds" or \ + name == "nanoseconds": + self.__jinf_dict[name] = long(attrs["value"]) + # int values + elif name == "journal_version" or \ + name == "number_jrnl_files" or \ + name == "jrnl_file_size_sblks" or \ + name == "JRNL_SBLK_SIZE" or \ + name == "JRNL_DBLK_SIZE" or \ + name == "wcache_pgsize_sblks" or \ + name == "wcache_num_pages" or \ + name == "JRNL_RMGR_PAGE_SIZE" or \ + name == "JRNL_RMGR_PAGES": + self.__jinf_dict[name] = int(attrs["value"]) + # strings + elif "value" in attrs: + self.__jinf_dict[name] = attrs["value"] + + def _handle_xml_char_data(self, data): + """Callback for handling character data (ie within <elt>...</elt>). The jinf file does not use this in its + data. Used by the XML parser.""" + pass + + def _handle_xml_end_elt(self, name): + """Callback for handling XML end elements. 
Used by XML parser.""" + pass + + +#============================================================================== + +_CLASSES = { + "a": TxnRec, + "c": TxnRec, + "d": DeqRec, + "e": EnqRec, + "f": FileHdr +} + +if __name__ == "__main__": + print "This is a library, and cannot be executed." diff --git a/qpid/cpp/src/tests/topic_perftest b/qpid/cpp/management/python/lib/qpidtoollibs/__init__.py index 04e1cdcffb..2815bac22f 100755..100644 --- a/qpid/cpp/src/tests/topic_perftest +++ b/qpid/cpp/management/python/lib/qpidtoollibs/__init__.py @@ -1,5 +1,3 @@ -#!/usr/bin/env bash - # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -19,4 +17,6 @@ # under the License. # -exec `dirname $0`/run_perftest 10000 --mode topic --qt 16 +from qpidtoollibs.broker import * +from qpidtoollibs.disp import * + diff --git a/qpid/cpp/management/python/lib/qpidtoollibs/broker.py b/qpid/cpp/management/python/lib/qpidtoollibs/broker.py new file mode 100644 index 0000000000..fca6680067 --- /dev/null +++ b/qpid/cpp/management/python/lib/qpidtoollibs/broker.py @@ -0,0 +1,486 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import sys +from qpidtoollibs.disp import TimeLong +try: + from uuid import uuid4 +except ImportError: + from qpid.datatypes import uuid4 + +class BrokerAgent(object): + """ + Proxy for a manageable Qpid Broker - Invoke with an opened qpid.messaging.Connection + or qpid_messaging.Connection + """ + def __init__(self, conn): + # Use the Message class from the same module as conn which could be qpid.messaging + # or qpid_messaging + self.message_class = sys.modules[conn.__class__.__module__].Message + self.conn = conn + self.sess = self.conn.session() + self.reply_to = "qmf.default.topic/direct.%s;{node:{type:topic}}" % str(uuid4()) + self.reply_rx = self.sess.receiver(self.reply_to) + self.reply_rx.capacity = 10 + self.tx = self.sess.sender("qmf.default.direct/broker") + self.next_correlator = 1 + + def close(self): + """ + Close the proxy session. This will not affect the connection used in creating the object. + """ + self.sess.close() + + def _method(self, method, arguments=None, addr="org.apache.qpid.broker:broker:amqp-broker", timeout=10): + props = {'method' : 'request', + 'qmf.opcode' : '_method_request', + 'x-amqp-0-10.app-id' : 'qmf2'} + correlator = str(self.next_correlator) + self.next_correlator += 1 + + content = {'_object_id' : {'_object_name' : addr}, + '_method_name' : method, + '_arguments' : arguments or {}} + + message = self.message_class( + content, reply_to=self.reply_to, correlation_id=correlator, + properties=props, subject="broker") + self.tx.send(message) + response = self.reply_rx.fetch(timeout) + self.sess.acknowledge() + if response.properties['qmf.opcode'] == '_exception': + raise Exception("Exception from Agent: %r" % response.content['_values']) + if response.properties['qmf.opcode'] != '_method_response': + raise Exception("bad response: %r" % response.properties) + return response.content['_arguments'] + + def _sendRequest(self, opcode, content): + props = {'method' : 'request', + 'qmf.opcode' : opcode, + 'x-amqp-0-10.app-id' 
: 'qmf2'} + correlator = str(self.next_correlator) + self.next_correlator += 1 + message = self.message_class( + content, reply_to=self.reply_to, correlation_id=correlator, + properties=props, subject="broker") + self.tx.send(message) + return correlator + + def _doClassQuery(self, class_name): + query = {'_what' : 'OBJECT', + '_schema_id' : {'_class_name' : class_name}} + correlator = self._sendRequest('_query_request', query) + response = self.reply_rx.fetch(10) + if response.properties['qmf.opcode'] != '_query_response': + raise Exception("bad response") + items = [] + done = False + while not done: + for item in response.content: + items.append(item) + if 'partial' in response.properties: + response = self.reply_rx.fetch(10) + else: + done = True + self.sess.acknowledge() + return items + + def _doNameQuery(self, object_id): + query = {'_what' : 'OBJECT', '_object_id' : {'_object_name' : object_id}} + correlator = self._sendRequest('_query_request', query) + response = self.reply_rx.fetch(10) + if response.properties['qmf.opcode'] != '_query_response': + raise Exception("bad response") + items = [] + done = False + while not done: + for item in response.content: + items.append(item) + if 'partial' in response.properties: + response = self.reply_rx.fetch(10) + else: + done = True + self.sess.acknowledge() + if len(items) == 1: + return items[0] + return None + + def _getAllBrokerObjects(self, cls): + items = self._doClassQuery(cls.__name__.lower()) + objs = [] + for item in items: + objs.append(cls(self, item)) + return objs + + def _getBrokerObject(self, cls, oid): + obj = self._doNameQuery(oid) + if obj: + return cls(self, obj) + return None + + def _getSingleObject(self, cls): + # + # getAllBrokerObjects is used instead of getBrokerObject(Broker, 'amqp-broker') because + # of a bug that used to be in the broker whereby by-name queries did not return the + # object timestamps. 
+ # + objects = self._getAllBrokerObjects(cls) + if objects: return objects[0] + return None + + def getBroker(self): + """ + Get the Broker object that contains broker-scope statistics and operations. + """ + return self._getSingleObject(Broker) + + + def getCluster(self): + return self._getSingleObject(Cluster) + + def getHaBroker(self): + return self._getSingleObject(HaBroker) + + def getAllConnections(self): + return self._getAllBrokerObjects(Connection) + + def getConnection(self, oid): + return self._getBrokerObject(Connection, "org.apache.qpid.broker:connection:%s" % oid) + + def getAllSessions(self): + return self._getAllBrokerObjects(Session) + + def getSession(self, oid): + return self._getBrokerObject(Session, "org.apache.qpid.broker:session:%s" % oid) + + def getAllSubscriptions(self): + return self._getAllBrokerObjects(Subscription) + + def getSubscription(self, oid): + return self._getBrokerObject(Subscription, "org.apache.qpid.broker:subscription:%s" % oid) + + def getAllExchanges(self): + return self._getAllBrokerObjects(Exchange) + + def getExchange(self, name): + return self._getBrokerObject(Exchange, "org.apache.qpid.broker:exchange:%s" % name) + + def getAllQueues(self): + return self._getAllBrokerObjects(Queue) + + def getQueue(self, name): + return self._getBrokerObject(Queue, "org.apache.qpid.broker:queue:%s" % name) + + def getAllBindings(self): + return self._getAllBrokerObjects(Binding) + + def getAllLinks(self): + return self._getAllBrokerObjects(Link) + + def getAcl(self): + return self._getSingleObject(Acl) + + def getMemory(self): + return self._getSingleObject(Memory) + + def echo(self, sequence = 1, body = "Body"): + """Request a response to test the path to the management broker""" + args = {'sequence' : sequence, 'body' : body} + return self._method('echo', args) + + def connect(self, host, port, durable, authMechanism, username, password, transport): + """Establish a connection to another broker""" + pass + + def 
queueMoveMessages(self, srcQueue, destQueue, qty): + """Move messages from one queue to another""" + self._method("queueMoveMessages", {'srcQueue':srcQueue,'destQueue':destQueue,'qty':qty}) + + def queueRedirect(self, sourceQueue, targetQueue): + """Enable/disable delivery redirect for indicated queues""" + self._method("queueRedirect", {'sourceQueue':sourceQueue,'targetQueue':targetQueue}) + + def setLogLevel(self, level): + """Set the log level""" + self._method("setLogLevel", {'level':level}) + + def getLogLevel(self): + """Get the log level""" + return self._method('getLogLevel') + + def setTimestampConfig(self, receive): + """Set the message timestamping configuration""" + self._method("setTimestampConfig", {'receive':receive}) + + def getTimestampConfig(self): + """Get the message timestamping configuration""" + return self._method('getTimestampConfig') + + def setLogHiresTimestamp(self, logHires): + """Set the high resolution timestamp in logs""" + self._method("setLogHiresTimestamp", {'logHires':logHires}) + + def getLogHiresTimestamp(self): + """Get the high resolution timestamp in logs""" + return self._method('getLogHiresTimestamp') + + def addExchange(self, exchange_type, name, options={}, **kwargs): + properties = {} + properties['exchange-type'] = exchange_type + for k,v in options.items(): + properties[k] = v + for k,v in kwargs.items(): + properties[k] = v + args = {'type': 'exchange', + 'name': name, + 'properties': properties, + 'strict': True} + self._method('create', args) + + def delExchange(self, name): + args = {'type': 'exchange', 'name': name} + self._method('delete', args) + + def addQueue(self, name, options={}, **kwargs): + properties = options + for k,v in kwargs.items(): + properties[k] = v + args = {'type': 'queue', + 'name': name, + 'properties': properties, + 'strict': True} + self._method('create', args) + + def delQueue(self, name, if_empty=True, if_unused=True): + options = {'if_empty': if_empty, + 'if_unused': if_unused} + + 
args = {'type': 'queue', + 'name': name, + 'options': options} + self._method('delete', args) + + def bind(self, exchange, queue, key="", options={}, **kwargs): + properties = options + for k,v in kwargs.items(): + properties[k] = v + args = {'type': 'binding', + 'name': "%s/%s/%s" % (exchange, queue, key), + 'properties': properties, + 'strict': True} + self._method('create', args) + + def unbind(self, exchange, queue, key, **kwargs): + args = {'type': 'binding', + 'name': "%s/%s/%s" % (exchange, queue, key), + 'strict': True} + self._method('delete', args) + + def reloadAclFile(self): + self._method('reloadACLFile', {}, "org.apache.qpid.acl:acl:org.apache.qpid.broker:broker:amqp-broker") + + def acl_lookup(self, userName, action, aclObj, aclObjName, propMap): + args = {'userId': userName, + 'action': action, + 'object': aclObj, + 'objectName': aclObjName, + 'propertyMap': propMap} + return self._method('Lookup', args, "org.apache.qpid.acl:acl:org.apache.qpid.broker:broker:amqp-broker") + + def acl_lookupPublish(self, userName, exchange, key): + args = {'userId': userName, + 'exchangeName': exchange, + 'routingKey': key} + return self._method('LookupPublish', args, "org.apache.qpid.acl:acl:org.apache.qpid.broker:broker:amqp-broker") + + def Redirect(self, sourceQueue, targetQueue): + args = {'sourceQueue': sourceQueue, + 'targetQueue': targetQueue} + return self._method('queueRedirect', args, "org.apache.qpid.broker:broker:amqp-broker") + + def create(self, _type, name, properties={}, strict=False): + """Create an object of the specified type""" + args = {'type': _type, + 'name': name, + 'properties': properties, + 'strict': strict} + return self._method('create', args) + + def delete(self, _type, name, options): + """Delete an object of the specified type""" + args = {'type': _type, + 'name': name, + 'options': options} + return self._method('delete', args) + + def list(self, _type): + """List objects of the specified type""" + return [i["_values"] for i in 
self._doClassQuery(_type.lower())] + + def query(self, _type, oid): + """Query the current state of an object""" + return self._getBrokerObject(self, _type, oid) + + +class EventHelper(object): + def eventAddress(self, pkg='*', cls='*', sev='*'): + return "qmf.default.topic/agent.ind.event.%s.%s.%s.#" % (pkg.replace('.', '_'), cls, sev) + + def event(self, msg): + return BrokerEvent(msg) + + +class BrokerEvent(object): + def __init__(self, msg): + self.msg = msg + self.content = msg.content[0] + self.values = self.content['_values'] + self.schema_id = self.content['_schema_id'] + self.name = "%s:%s" % (self.schema_id['_package_name'], self.schema_id['_class_name']) + + def __repr__(self): + rep = "%s %s" % (TimeLong(self.getTimestamp()), self.name) + for k,v in self.values.items(): + rep = rep + " %s=%s" % (k, v) + return rep + + def __getattr__(self, key): + if key not in self.values: + return None + value = self.values[key] + return value + + def getAttributes(self): + return self.values + + def getTimestamp(self): + return self.content['_timestamp'] + + +class BrokerObject(object): + def __init__(self, broker, content): + self.broker = broker + self.content = content + self.values = content['_values'] + + def __getattr__(self, key): + if key not in self.values: + return None + value = self.values[key] + if value.__class__ == dict and '_object_name' in value: + full_name = value['_object_name'] + colon = full_name.find(':') + if colon > 0: + full_name = full_name[colon+1:] + colon = full_name.find(':') + if colon > 0: + return full_name[colon+1:] + return value + + def getObjectId(self): + return self.content['_object_id']['_object_name'] + + def getAttributes(self): + return self.values + + def getCreateTime(self): + return self.content['_create_ts'] + + def getDeleteTime(self): + return self.content['_delete_ts'] + + def getUpdateTime(self): + return self.content['_update_ts'] + + def update(self): + """ + Reload the property values from the agent. 
+ """ + refreshed = self.broker._getBrokerObject(self.__class__, self.getObjectId()) + if refreshed: + self.content = refreshed.content + self.values = self.content['_values'] + else: + raise Exception("No longer exists on the broker") + +class Broker(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Cluster(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class HaBroker(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Memory(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Connection(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + + def close(self): + self.broker._method("close", {}, "org.apache.qpid.broker:connection:%s" % self.address) + +class Session(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Subscription(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + + def __repr__(self): + return "subscription name undefined" + +class Exchange(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Binding(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + + def __repr__(self): + return "Binding key: %s" % self.values['bindingKey'] + +class Queue(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + + def purge(self, request): + """Discard all or some messages on a queue""" + self.broker._method("purge", {'request':request}, "org.apache.qpid.broker:queue:%s" % self.name) + + def reroute(self, request, useAltExchange, exchange, filter={}): + """Remove all or some messages on this queue and route them to an exchange""" + 
self.broker._method("reroute", {'request':request,'useAltExchange':useAltExchange,'exchange':exchange,'filter':filter}, + "org.apache.qpid.broker:queue:%s" % self.name) + +class Link(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) + +class Acl(BrokerObject): + def __init__(self, broker, values): + BrokerObject.__init__(self, broker, values) diff --git a/qpid/cpp/management/python/lib/qpidtoollibs/config.py b/qpid/cpp/management/python/lib/qpidtoollibs/config.py new file mode 100644 index 0000000000..9168215ac3 --- /dev/null +++ b/qpid/cpp/management/python/lib/qpidtoollibs/config.py @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +"""Utilities for managing configuration files""" +import os + +QPID_ENV_PREFIX="QPID_" + +def parse_qpidd_conf(config_file): + """Parse a qpidd.conf configuration file into a dictionary""" + f = open(config_file) + try: + clean = filter(None, [line.split("#")[0].strip() for line in f]) # Strip comments and blanks + def item(line): return [x.strip() for x in line.split("=")] + config = dict(item(line) for line in clean if "=" in line) + finally: f.close() + def name(env_name): return env_name[len(QPID_ENV_PREFIX):].lower() + env = dict((name(i[0]), i[1]) for i in os.environ.iteritems() if i[0].startswith(QPID_ENV_PREFIX)) + config.update(env) # Environment takes precedence + return config diff --git a/qpid/cpp/management/python/lib/qpidtoollibs/disp.py b/qpid/cpp/management/python/lib/qpidtoollibs/disp.py new file mode 100644 index 0000000000..1b7419ba2c --- /dev/null +++ b/qpid/cpp/management/python/lib/qpidtoollibs/disp.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from time import strftime, gmtime + +def YN(val): + if val: + return 'Y' + return 'N' + +def Commas(value): + sval = str(value) + result = "" + while True: + if len(sval) == 0: + return result + left = sval[:-3] + right = sval[-3:] + result = right + result + if len(left) > 0: + result = ',' + result + sval = left + +def TimeLong(value): + return strftime("%c", gmtime(value / 1000000000)) + +def TimeShort(value): + return strftime("%X", gmtime(value / 1000000000)) + + +class Header: + """ """ + NONE = 1 + KMG = 2 + YN = 3 + Y = 4 + TIME_LONG = 5 + TIME_SHORT = 6 + DURATION = 7 + COMMAS = 8 + + def __init__(self, text, format=NONE): + self.text = text + self.format = format + + def __repr__(self): + return self.text + + def __str__(self): + return self.text + + def formatted(self, value): + try: + if value == None: + return '' + if self.format == Header.NONE: + return value + if self.format == Header.KMG: + return self.num(value) + if self.format == Header.YN: + if value: + return 'Y' + return 'N' + if self.format == Header.Y: + if value: + return 'Y' + return '' + if self.format == Header.TIME_LONG: + return TimeLong(value) + if self.format == Header.TIME_SHORT: + return TimeShort(value) + if self.format == Header.DURATION: + if value < 0: value = 0 + sec = value / 1000000000 + min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % (sec % 60) + return result + if self.format == Header.COMMAS: + return Commas(value) + except: + return "?" + + def numCell(self, value, tag): + fp = float(value) / 1000. 
+ if fp < 10.0: + return "%1.2f%c" % (fp, tag) + if fp < 100.0: + return "%2.1f%c" % (fp, tag) + return "%4d%c" % (value / 1000, tag) + + def num(self, value): + if value < 1000: + return "%4d" % value + if value < 1000000: + return self.numCell(value, 'k') + value /= 1000 + if value < 1000000: + return self.numCell(value, 'm') + value /= 1000 + return self.numCell(value, 'g') + + +class Display: + """ Display formatting for QPID Management CLI """ + + def __init__(self, spacing=2, prefix=" "): + self.tableSpacing = spacing + self.tablePrefix = prefix + self.timestampFormat = "%X" + + def formattedTable(self, title, heads, rows): + fRows = [] + for row in rows: + fRow = [] + col = 0 + for cell in row: + fRow.append(heads[col].formatted(cell)) + col += 1 + fRows.append(fRow) + headtext = [] + for head in heads: + headtext.append(head.text) + self.table(title, headtext, fRows) + + def table(self, title, heads, rows): + """ Print a table with autosized columns """ + + # Pad the rows to the number of heads + for row in rows: + diff = len(heads) - len(row) + for idx in range(diff): + row.append("") + + print title + if len (rows) == 0: + return + colWidth = [] + col = 0 + line = self.tablePrefix + for head in heads: + width = len (head) + for row in rows: + text = row[col] + if text.__class__ == str: + text = text.decode('utf-8') + cellWidth = len(unicode(text)) + if cellWidth > width: + width = cellWidth + colWidth.append (width + self.tableSpacing) + line = line + head + if col < len (heads) - 1: + for i in range (colWidth[col] - len (head)): + line = line + " " + col = col + 1 + print line + line = self.tablePrefix + for width in colWidth: + line = line + "=" * width + line = line[:255] + print line + + for row in rows: + line = self.tablePrefix + col = 0 + for width in colWidth: + text = row[col] + if text.__class__ == str: + text = text.decode('utf-8') + line = line + unicode(text) + if col < len (heads) - 1: + for i in range (width - len(unicode(text))): + line = 
line + " " + col = col + 1 + print line + + def do_setTimeFormat (self, fmt): + """ Select timestamp format """ + if fmt == "long": + self.timestampFormat = "%c" + elif fmt == "short": + self.timestampFormat = "%X" + + def timestamp (self, nsec): + """ Format a nanosecond-since-the-epoch timestamp for printing """ + return strftime (self.timestampFormat, gmtime (nsec / 1000000000)) + + def duration(self, nsec): + if nsec < 0: nsec = 0 + sec = nsec / 1000000000 + min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % (sec % 60) + return result + +class Sortable: + """ """ + def __init__(self, row, sortIndex): + self.row = row + self.sortIndex = sortIndex + if sortIndex >= len(row): + raise Exception("sort index exceeds row boundary") + + def __cmp__(self, other): + return cmp(self.row[self.sortIndex], other.row[self.sortIndex]) + + def getRow(self): + return self.row + +class Sorter: + """ """ + def __init__(self, heads, rows, sortCol, limit=0, inc=True): + col = 0 + for head in heads: + if head.text == sortCol: + break + col += 1 + if col == len(heads): + raise Exception("sortCol '%s', not found in headers" % sortCol) + + list = [] + for row in rows: + list.append(Sortable(row, col)) + list.sort() + if not inc: + list.reverse() + count = 0 + self.sorted = [] + for row in list: + self.sorted.append(row.getRow()) + count += 1 + if count == limit: + break + + def getSorted(self): + return self.sorted diff --git a/qpid/cpp/management/python/setup.py b/qpid/cpp/management/python/setup.py new file mode 100755 index 0000000000..8eb26c3129 --- /dev/null +++ b/qpid/cpp/management/python/setup.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import platform + +from distutils.core import setup + +pypi_long_description = """ +# Python libraries for the Apache Qpid C++ broker + +## qmf + +The Qpid Management Framework (QMF). + +## qpidtoollibs + +A high-level BrokerAgent object for managing the C++ broker using QMF. + +This library depends on the qpid.messaging python client to send AMQP +messages containing QMF commands to the broker. 
+""" + +scripts = [ + "bin/qpid-config", + "bin/qpid-ha", + "bin/qpid-printevents", + "bin/qpid-queue-stats", + "bin/qpid-route", + "bin/qpid-stat", + "bin/qpid-tool", +] + +if platform.system() == "Windows": + scripts.append("bin/qpid-config.bat") + scripts.append("bin/qpid-ha.bat") + scripts.append("bin/qpid-printevents.bat") + scripts.append("bin/qpid-queue-stats.bat") + scripts.append("bin/qpid-route.bat") + scripts.append("bin/qpid-stat.bat") + scripts.append("bin/qpid-tool.bat") + +setup(name="qpid-tools", + version="0.35", + author="Apache Qpid", + author_email="users@qpid.apache.org", + package_dir={'' : 'lib'}, + packages=["qpidtoollibs", "qmf"], + scripts=scripts, + data_files=[("libexec", ["bin/qpid-qls-analyze"]), + ("share/qpid-tools/python/qlslibs", + ["lib/qlslibs/__init__.py", + "lib/qlslibs/analyze.py", + "lib/qlslibs/efp.py", + "lib/qlslibs/err.py", + "lib/qlslibs/jrnl.py", + "lib/qlslibs/utils.py"])], + url="http://qpid.apache.org/", + license="Apache Software License", + description="Python libraries for the Apache Qpid C++ broker", + long_description=pypi_long_description, + install_requires=["qpid-python >= 0.26",]) diff --git a/qpid/cpp/management/ruby/.gitignore b/qpid/cpp/management/ruby/.gitignore new file mode 100644 index 0000000000..c89dc10304 --- /dev/null +++ b/qpid/cpp/management/ruby/.gitignore @@ -0,0 +1,23 @@ +# +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# +*.gem +.bundle +pkg/* diff --git a/qpid/cpp/management/ruby/.rspec b/qpid/cpp/management/ruby/.rspec new file mode 100644 index 0000000000..4e1e0d2f72 --- /dev/null +++ b/qpid/cpp/management/ruby/.rspec @@ -0,0 +1 @@ +--color diff --git a/qpid/cpp/management/ruby/Gemfile b/qpid/cpp/management/ruby/Gemfile new file mode 100644 index 0000000000..1bd80c10c1 --- /dev/null +++ b/qpid/cpp/management/ruby/Gemfile @@ -0,0 +1,30 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +source "http://rubygems.org" + +# Specify your gem's dependencies in qpid_config.gemspec +gemspec + +# development deps +gem 'rspec' +gem 'pry' +gem 'pry-stack_explorer' +gem 'pry-debugger' +gem 'yard' diff --git a/qpid/cpp/management/ruby/Gemfile.lock b/qpid/cpp/management/ruby/Gemfile.lock new file mode 100644 index 0000000000..bf35564e90 --- /dev/null +++ b/qpid/cpp/management/ruby/Gemfile.lock @@ -0,0 +1,55 @@ +PATH + remote: . + specs: + qpid_management (1.0) + qpid_messaging + +GEM + remote: http://rubygems.org/ + specs: + binding_of_caller (0.7.1) + debug_inspector (>= 0.0.1) + coderay (1.0.9) + columnize (0.3.6) + debug_inspector (0.0.2) + debugger (1.4.0) + columnize (>= 0.3.1) + debugger-linecache (~> 1.1.1) + debugger-ruby_core_source (~> 1.2.0) + debugger-linecache (1.1.2) + debugger-ruby_core_source (>= 1.1.1) + debugger-ruby_core_source (1.2.0) + diff-lcs (1.2.1) + method_source (0.8.1) + pry (0.9.12) + coderay (~> 1.0.5) + method_source (~> 0.8) + slop (~> 3.4) + pry-debugger (0.2.2) + debugger (~> 1.3) + pry (~> 0.9.10) + pry-stack_explorer (0.4.9) + binding_of_caller (>= 0.7) + pry (~> 0.9.11) + qpid_messaging (0.20.2) + rspec (2.13.0) + rspec-core (~> 2.13.0) + rspec-expectations (~> 2.13.0) + rspec-mocks (~> 2.13.0) + rspec-core (2.13.0) + rspec-expectations (2.13.0) + diff-lcs (>= 1.1.3, < 2.0) + rspec-mocks (2.13.0) + slop (3.4.3) + yard (0.8.5.2) + +PLATFORMS + ruby + +DEPENDENCIES + pry + pry-debugger + pry-stack_explorer + qpid_management! + rspec + yard diff --git a/qpid/cpp/management/ruby/Rakefile b/qpid/cpp/management/ruby/Rakefile new file mode 100644 index 0000000000..7f295eda5c --- /dev/null +++ b/qpid/cpp/management/ruby/Rakefile @@ -0,0 +1,27 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +require "bundler/gem_tasks" +require 'rspec/core/rake_task' + +require 'rake/clean' +CLOBBER.include('pkg') + +RSpec::Core::RakeTask.new('spec') + diff --git a/qpid/cpp/management/ruby/lib/qpid_management.rb b/qpid/cpp/management/ruby/lib/qpid_management.rb new file mode 100644 index 0000000000..0529710693 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management.rb @@ -0,0 +1,81 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +require 'qpid_management/broker_agent' +require 'qpid_management/broker_object' +require 'qpid_management/acl' +require 'qpid_management/binding' +require 'qpid_management/bridge' +require 'qpid_management/broker' +require 'qpid_management/cluster' +require 'qpid_management/connection' +require 'qpid_management/errors' +require 'qpid_management/exchange' +require 'qpid_management/ha_broker' +require 'qpid_management/link' +require 'qpid_management/memory' +require 'qpid_management/queue' +require 'qpid_management/session' +require 'qpid_management/subscription' + +module Qpid + # The Qpid Management framework is a management framework for Qpid brokers + # that uses QMF2. + # + # ==== Example Usage + # + # Here is a simple example. It TODO. + # + # require 'rubygems' + # require 'qpid_messaging' + # require 'qpid_management' + # + # # create a connection and open it + # conn = Qpid::Messaging::Connection.new(:url => "broker.myqpiddomain.com") + # conn.open() + # + # # create a broker agent + # agent = Qpid::Management::BrokerAgent.new(conn) + # + # # get a reference to the broker + # broker = agent.broker + # + # # print out all exchange names + # puts broker.exchanges.map(&:name) + # + # # print out info about a single exchange + # amq_direct = broker.exchange('amq.direct') + # puts amq_direct + # puts amq_direct.msgDrops + # + # # create an exchange + # broker.add_exchange('topic', 'myexchange') + # + # # print out all queue names + # puts broker.queues.map(&:name) + # + # # create a queue + # broker.add_queue('myqueue') + # + # # print out info about a single queue + # myqueue = broker.queue('myqueue') + # puts myqueue.msgDepth + module Management + end +end diff --git a/qpid/cpp/docs/design/CONTENTS b/qpid/cpp/management/ruby/lib/qpid_management/acl.rb index cc3b868e0e..589b11fa59 100644 --- a/qpid/cpp/docs/design/CONTENTS +++ b/qpid/cpp/management/ruby/lib/qpid_management/acl.rb @@ -17,15 +17,22 @@ # under the License. 
# -This directory contains documentation about the C++ source -that is expressed in formats that does not fit comfortably -within C++ source files. - -As with all documentation, including comments, it may become -outmoded with respect to the code. - -If you find external code doco useful in your work -- if it -helps you save some time -- please return some of that time -in the form of effort to keep the documentation updated. - - +module Qpid + module Management + # Representation of the access control list (ACL) for the broker. Properties + # include: + # - aclDenyCount + # - brokerRef + # - connectionDenyCount + # - enforcingAcl + # - lastAclLoad + # - maxConnectionsPerIp + # - maxConnectionsPerUser + # - maxQueuesPerUser + # - policyFile + # - queueQuotaDenyCount + # - transferAcl + class Acl < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/binding.rb b/qpid/cpp/management/ruby/lib/qpid_management/binding.rb new file mode 100644 index 0000000000..cc46d84eeb --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/binding.rb @@ -0,0 +1,31 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a binding in the broker. 
Properties include: + # - arguments + # - bindingKey + # - exchangeRef + # - msgMatched + # - queueRef + class Binding < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/bridge.rb b/qpid/cpp/management/ruby/lib/qpid_management/bridge.rb new file mode 100644 index 0000000000..cece0ba2ed --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/bridge.rb @@ -0,0 +1,39 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a bridge to another broker. Properties include: + # - channelId + # - dest + # - durable + # - dynamic + # - excludes + # - key + # - linkRef + # - name + # - src + # - srcIsLocal + # - srcIsQueue + # - sync + # - tag + class Bridge < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/broker.rb b/qpid/cpp/management/ruby/lib/qpid_management/broker.rb new file mode 100644 index 0000000000..31171bdf35 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/broker.rb @@ -0,0 +1,278 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of the broker. Properties include: + # - abandoned + # - abandonedViaAlt + # - acquires + # - byteDepth + # - byteFtdDepth + # - byteFtdDequeues + # - byteFtdEnqueues + # - bytePersistDequeues + # - bytePersistEnqueues + # - byteTotalDequeues + # - byteTotalEnqueues + # - byteTxnDequeues + # - byteTxnEnqueues + # - connBacklog + # - dataDir + # - discardsLvq + # - discardsNoRoute + # - discardsOverflow + # - discardsPurge + # - discardsRing + # - discardsSubscriber + # - discardsTtl + # - maxConns + # - mgmtPubInterval + # - mgmtPublish + # - msgDepth + # - msgFtdDepth + # - msgFtdDequeues + # - msgFtdEnqueues + # - msgPersistDequeues + # - msgPersistEnqueues + # - msgTotalDequeues + # - msgTotalEnqueues + # - msgTxnDequeues + # - msgTxnEnqueues + # - name + # - port + # - queueCount + # - releases + # - reroutes + # - stagingThreshold + # - systemRef + # - uptime + # - version + # - workerThreads + class Broker < BrokerObject + # Adds methods for the specified collections to be able to access all instances + # of a given collection, as well as a single instance by oid. 
+ # + # == Example + # <tt>has_many :queues</tt> which will add: + # * <tt>#queues</tt> to retrieve all queues + # * <tt>#queue(oid)</tt> to retrieve a queue by oid (note, this is the short form of the object id, e.g. "myqueue" for a queue instead of "org.apache.qpid.broker:queue:myqueue" + # + # @param collections one or more symbols for the collections of objects a broker manages + def self.has_many(*collections) + [*collections].each do |collection| + singular_form = collection.to_s[0..-2] + capitalized_type = singular_form.gsub(/^\w/) { $&.upcase } + + define_method(collection) do + @agent.find_all_by_class(Qpid::Management::const_get(capitalized_type)) + end + + define_method(singular_form) do |oid| + @agent.find_by_object_id(Qpid::Management::const_get(capitalized_type), "org.apache.qpid.broker:#{singular_form}:#{oid}") + end + end + end + + # Adds method for the specified types to be able to access the singular + # instance of a given type. + # + # == Example + # <tt>has_one :acl</tt> which will add: + # * <tt>#acl</tt> to retrieve the Acl data for the Broker + # + # @param types one or more symbols for the singular objects a broker manages + def self.has_one(*types) + [*types].each do |type| + capitalized_type = type.to_s.gsub(/^\w/) { $&.upcase } + + define_method("#{type}") do + @agent.find_first_by_class(Qpid::Management::const_get(capitalized_type)) + end + end + end + + has_many :connections, :sessions, :subscriptions, :exchanges, :queues, :bindings, :links, :bridges + has_one :acl, :memory + + # Adds an exchange to the broker + # @param [String] type exchange type (fanout, direct, topic, headers, xml) + # @param [String] name exchange name + # @param [Hash] options exchange creation options + def add_exchange(type, name, options={}) + create_broker_object('exchange', name, options.merge!({'exchange-type' => type})) + end + + # Deletes an exchange from the broekr + # @param [String] name exchange name + def delete_exchange(name) + 
invoke_method('delete', {'type' => 'exchange', 'name' => name}) + end + + # Adds a queue to the broker + # @param [String] name queue name + # @param [Hash] options queue creation options + def add_queue(name, options={}) + create_broker_object('queue', name, options) + end + + # Deletes a queue from the broker + # @param [String] name queue name + def delete_queue(name) + invoke_method('delete', {'type' => 'queue', 'name' => name}) + end + + # Adds a binding from an exchange to a queue + # @param [String] exchange exchange name + # @param [String] queue queue name + # @param [String] key binding key + # @param [Hash] options binding creation options + def add_binding(exchange, queue, key="", options={}) + create_broker_object('binding', "#{exchange}/#{queue}/#{key}", options) + end + + # Deletes a binding from an exchange to a queue + # @param [String] exchange exchange name + # @param [String] queue queue name + # @param [String] key binding key + def delete_binding(exchange, queue, key="") + invoke_method('delete', {'type' => 'binding', 'name' => "#{exchange}/#{queue}/#{key}"}) + end + + # Adds a link to a remote broker + # @param [String] name link name + # @param [String] host remote broker host name or IP address + # @param [Fixnum] port remote broker port + # @param [String] transport transport mechanism used to connect to the remote broker + # @param [Boolean] durable should this link be persistent + # @param [String] auth_mechanism authentication mechanism to use + # @param [String] username user name to authenticate with the remote broker + # @param [String] password password for the user name + def add_link(name, host, port, transport='tcp', durable=false, auth_mechanism="", username="", password="") + options = { + 'host' => host, + 'port' => port, + 'transport' => transport, + 'durable' => durable, + 'authMechanism' => auth_mechanism, + 'username' => username, + 'password' => password + } + + create_broker_object('link', name, options) + end + + # 
Deletes a link to a remote broker + # @param [String] name link name + def delete_link(name) + invoke_method('delete', {'type' => 'link', 'name' => name}) + end + + # Adds a queue route + # @param [String] name the name of the bridge to create + # @param [Hash] options options for the queue route + # @option options [String] :link the name of the link to use (required) + # @option options [String] :queue the name of the source queue from which messages are pulled (required) + # @option options [String] :exchange the name of the destination exchange to which messages are sent (required) + # @option options [Fixnum] :sync the number of messages to send before issuing an explicit session sync (required) + def add_queue_route(name, options={}) + validate_options(options, [:link, :queue, :exchange, :sync]) + + properties = { + 'link' => options[:link], + 'src' => options[:queue], + 'dest' => options[:exchange], + 'srcIsQueue' => true, + 'sync' => options[:sync] + } + + create_broker_object('bridge', name, properties) + end + + # Adds an exchange route + # @param [String] name the name of the bridge to create + # @param [Hash] options options for the exchange route + # @option options [String] :link the name of the link to use (required) + # @option options [String] :exchange the name of the exchange to use (required) + # @option options [String] :key routing key to federate (required) + # @option options [Fixnum] :sync the number of messages to send before issuing an explicit session sync (required) + # @option options [String] :bridge_queue name of the queue to use as a bridge queue (optional) + def add_exchange_route(name, options={}) + validate_options(options, [:link, :exchange, :key, :sync]) + + properties = { + 'link' => options[:link], + 'src' => options[:exchange], + 'dest' => options[:exchange], + 'key' => options[:key], + 'sync' => options[:sync] + } + + properties['queue'] = options[:bridge_queue] if options.has_key?(:bridge_queue) + + 
create_broker_object('bridge', name, properties) + end + + # Adds a dynamic route + # @param [String] name the name of the bridge to create + # @param [Hash] options options for the dynamic route + # @option options [String] :link the name of the link to use (required) + # @option options [String] :exchange the name of the exchange to use (required) + # @option options [Fixnum] :sync the number of messages to send before issuing an explicit session sync (required) + # @option options [String] :bridge_queue name of the queue to use as a bridge queue (optional) + def add_dynamic_route(name, options={}) + validate_options(options, [:link, :exchange, :sync]) + + properties = { + 'link' => options[:link], + 'src' => options[:exchange], + 'dest' => options[:exchange], + 'dynamic' => true, + 'sync' => options[:sync] + } + + properties['queue'] = options[:bridge_queue] if options.has_key?(:bridge_queue) + + create_broker_object('bridge', name, properties) + end + + # Deletes a bridge (route) + # @param [String] name bridge name + def delete_bridge(name) + invoke_method('delete', {'type' => 'bridge', 'name' => name}) + end + + private + + def create_broker_object(type, name, options) + invoke_method('create', {'type' => type, + 'name' => name, + 'properties' => options, + 'strict' => true}) + end + + def validate_options(options, required) + required.each do |req| + raise "Option :#{req.to_s} is required" unless options.has_key?(req) + end + end + + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/broker_agent.rb b/qpid/cpp/management/ruby/lib/qpid_management/broker_agent.rb new file mode 100644 index 0000000000..800dcf26dc --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/broker_agent.rb @@ -0,0 +1,173 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# for simplistic UUID - may want to consider something better in the future +require 'securerandom' + +# Ruby 1.8 doesn't include SecureRandom#uuid, so let's add it if it's missing +unless SecureRandom.respond_to? :uuid + module SecureRandom + def self.uuid + ary = self.random_bytes(16).unpack("NnnnnN") + ary[2] = (ary[2] & 0x0fff) | 0x4000 + ary[3] = (ary[3] & 0x3fff) | 0x8000 + "%08x-%04x-%04x-%04x-%04x%08x" % ary + end + end +end + +module Qpid + module Management + # This is the primary class that interacts with a Qpid messaging broker for + # querying information from the broker and for configuring it. + class BrokerAgent + # Creates a new BrokerAgent instance. A new Qpid::Messaging::Session, + # Qpid::Messaging::Receiver, and Qpid::Messaging::Sender will be created + # so this instance of the BrokerAgent may send requests to the broker + # and receive replies back. + # @param [Qpid::Messaging::Connection] connection a valid, opened connection + def initialize(connection) + @connection = connection + @session = @connection.create_session() + @reply_to = "qmf.default.topic/direct.#{SecureRandom.uuid}; {node: {type:topic}, link:{x-declare:{auto-delete:True,exclusive:True}}}" + @reply_rx = @session.create_receiver(@reply_to) + @reply_rx.capacity = 10 + @tx = @session.create_sender("qmf.default.direct/broker") + end + + # Closes the Qpid::Messaging::Session for this BrokerAgent. 
+ def close() + @session.close() + end + + # Queries the broker for the Broker QMF object. + # @return [Broker] the broker QMF object + def broker() + find_first_by_class(Broker) + end + + # Queries the broker for the Cluster QMF object. + # @return [Cluster] the cluster QMF object + def cluster + find_first_by_class(Cluster) + end + + # Queries the broker for the HaBroker QMF object. + # @return [HaBroker] the HA broker QMF object + def ha_broker + find_first_by_class(HaBroker) + end + + # Invokes a method on a target object. + # @param [String] method the name of the method to invoke + # @param [Hash] args the arguments to pass to the method + # @param [String] addr the full id of the target object + # @param [Fixnum] timeout the amount of time to wait for the broker to respond to the method invocation + def invoke_method(method, args, addr="org.apache.qpid.broker:broker:amqp-broker", timeout=10) + content = {'_object_id' => {'_object_name' => addr}, + '_method_name' => method, + '_arguments' => args} + + message = Qpid::Messaging::Message.new() + message.content = content + message.reply_to = @reply_to + message['method'] = 'request' + message['qmf.opcode'] = '_method_request' + message['x-amqp-0-10.app-id'] = 'qmf2' + message.subject = 'broker' + + @tx.send(message) + + response = @reply_rx.fetch(Qpid::Messaging::Duration.new(timeout * 1000)) + @session.acknowledge() + + raise "Exception from Agent: #{response.content['_values']}" if response.properties['qmf.opcode'] == '_exception' + raise "Bad response: #{response.properties}" if response.properties['qmf.opcode'] != '_method_response' + + return response.content['_arguments'] + end + + def send_query(query) + message = Qpid::Messaging::Message.new() + message.content = query + message.reply_to = @reply_to + message['method'] = 'request' + message['qmf.opcode'] = '_query_request' + message['x-amqp-0-10.app-id'] = 'qmf2' + message.subject = 'broker' + + @tx.send(message) + + response = 
@reply_rx.fetch(Qpid::Messaging::Duration.new(10*1000)) + @session.acknowledge() + + raise 'Bad response' if response.properties['qmf.opcode'] != '_query_response' + + items = response.content + + while response.properties.has_key?('partial') + response = @reply_rx.fetch(Qpid::Messaging::Duration.new(10*1000)) + items += response.content + @session.acknowledge() + end + + return items + end + + def find_all_by_class(clazz) + query = { + '_what' => 'OBJECT', + '_schema_id' => { + '_class_name' => BrokerObject.qmf_class(clazz) + } + } + + items = send_query(query) + + [].tap do |objs| + for item in items + objs << clazz.new(self, item) + end + end + end + + def find_first_by_class(clazz) + objects = find_all_by_class(clazz) + return objects[0] if objects.size > 0 + return nil + end + + def find_by_object_id(clazz, oid) + query = { + '_what' => 'OBJECT', + '_object_id' => { + '_object_name' => oid + } + } + + results = send_query(query) + + return clazz.new(self, results[0]) if results.count == 1 and not results[0].nil? + + # return nil if not found + return nil + end + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/broker_object.rb b/qpid/cpp/management/ruby/lib/qpid_management/broker_object.rb new file mode 100644 index 0000000000..fbbe5ff6e2 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/broker_object.rb @@ -0,0 +1,126 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of an object in the broker retrieved via QMF + class BrokerObject + attr_reader :content + + # Creates a new BrokerObject + # @param [BrokerAgent] agent the agent used to query the data from the broker + # @param [Hash] content the raw QMF response data from the broker + def initialize(agent, content) + @agent = agent + @content = content + @values = content['_values'] + end + + # Refreshes the information associated with this instance by requerying the broker + # @raise [ObjectDeletedError] if the object has been deleted + def refresh! + refreshed = @agent.named_object(self.class, id) + if refreshed + @content = refreshed.content + @values = @content['_values'] + else + raise ObjectDeletedError + end + end + + # Returns the full object id + # @return [String] the full object id + def id + @content['_object_id']['_object_name'] + end + + # Helper method to convert a Class to its QMF name counterpart. For + # example, QpidConfig::Connection will be converted to connection. + # @param [Class] clazz the Class to convert + # @return [String] the converted QMF name counterpart for this Class + def self.qmf_class(clazz) + clazz.name.split(/::/).last.downcase + end + + # Returns the short object id, i.e. 
without the leading org.apache.qpid.broker:<class name>: + # @return [String] the short object id + def short_id + clazz = BrokerObject.qmf_class(self.class) + if id =~ /org.apache.qpid.broker:#{clazz}:(.*)/ + return $1; + end + return nil + end + + # Returns the time at which this object was created + # @return [Time] the time at which this object was created + def created_at + Time.at(content['_create_ts'] / 1000000000.0) + end + + # Returns the time at which this object was deleted. Only ever applies to + # BrokerObject instances created from a QMF event. + # @return [Time] the time at which this object was deleted + def deleted_at + Time.at(content['_delete_ts'] / 1000000000.0) + end + + # Returns the time at which this object was last updated + # @return [Time] the time at which this object was last updated + def updated_at + Time.at(content['_update_ts'] / 1000000000.0) + end + + # Exposes data from the QMF response + # @param [String] key the key to look up a value, e.g. msgDepth for a queue + # @return the value associated with the key, or nil if not found + def [](key) + return nil unless @values.has_key?(key) + value = @values[key] + if value.is_a?(Hash) and value.has_key?('_object_name') + full_name = value['_object_name'] + colon = full_name.index(':') + unless colon.nil? + full_name = full_name[colon+1..-1] + colon = full_name.index(':') + return full_name[colon+1..-1] unless colon.nil? + end + end + + return value + end + + # Exposes data from the QMF response via methods, e.g. queue.msgDepth + def method_missing(method, *args, &block) + key = method.to_s + return self[key] if args.empty? and not self[key].nil? 
+ super + end + + def to_s + @values.to_s + end + + # Invokes a QMF method + # @see BrokerAgent#invoke_method + def invoke_method(*args) + @agent.invoke_method(*args) + end + end + end +end diff --git a/qpid/cpp/src/tests/fanout_perftest b/qpid/cpp/management/ruby/lib/qpid_management/cluster.rb index 168994d372..4fa7f146a7 100755..100644 --- a/qpid/cpp/src/tests/fanout_perftest +++ b/qpid/cpp/management/ruby/lib/qpid_management/cluster.rb @@ -1,5 +1,3 @@ -#!/usr/bin/env bash - # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -8,9 +6,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -19,4 +17,10 @@ # under the License. # -exec `dirname $0`/run_perftest 10000 --mode fanout --npubs 16 --nsubs 16 --size 64 +module Qpid + module Management + # Representation of a cluster + class Cluster < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/connection.rb b/qpid/cpp/management/ruby/lib/qpid_management/connection.rb new file mode 100644 index 0000000000..153ccee684 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/connection.rb @@ -0,0 +1,51 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a client connection. Properties include: + # - SystemConnection + # - address + # - authIdentity + # - bytesFromClient + # - bytesToClient + # - closing + # - federationLink + # - framesFromClient + # - framesToClient + # - incoming + # - msgsFromClient + # - msgsToClient + # - remoteParentPid + # - remotePid + # - remoteProcessName + # - remoteProperties + # - saslMechanism + # - saslSsf + # - shadow + # - userProxyAuth + # - vhostRef + class Connection < BrokerObject + # Closes this connection to the broker + def close + invoke_method('close', {}, "org.apache.qpid.broker:connection:#{address}") + end + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/errors.rb b/qpid/cpp/management/ruby/lib/qpid_management/errors.rb new file mode 100644 index 0000000000..b922cda680 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/errors.rb @@ -0,0 +1,28 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + class ObjectNotFoundError < RuntimeError + end + + class ObjectDeletedError < RuntimeError + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/exchange.rb b/qpid/cpp/management/ruby/lib/qpid_management/exchange.rb new file mode 100644 index 0000000000..5a3223aba6 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/exchange.rb @@ -0,0 +1,44 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of an exchange. 
Properties include: + # - arguments + # - autoDelete + # - bindingCount + # - bindingCountHigh + # - bindingCountLow + # - byteDrops + # - byteReceives + # - byteRoutes + # - durable + # - msgDrops + # - msgReceives + # - msgRoutes + # - name + # - producerCount + # - producerCountHigh + # - producerCountLow + # - type + # - vhostRef + class Exchange < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/ha_broker.rb b/qpid/cpp/management/ruby/lib/qpid_management/ha_broker.rb new file mode 100644 index 0000000000..1ac9ea7a17 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/ha_broker.rb @@ -0,0 +1,26 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of an HA broker + class HaBroker < BrokerObject + end + end +end diff --git a/qpid/cpp/src/tests/install_env.sh.in b/qpid/cpp/management/ruby/lib/qpid_management/link.rb index d29a23930d..455b26a440 100644 --- a/qpid/cpp/src/tests/install_env.sh.in +++ b/qpid/cpp/management/ruby/lib/qpid_management/link.rb @@ -17,10 +17,19 @@ # under the License. 
# -absdir() { echo `cd $1 && pwd`; } - -prefix=`absdir @prefix@` -export QPID_INSTALL_PREFIX=$prefix -export PATH=$prefix/bin:$prefix/sbin:$prefix/libexec/qpid/tests:$PATH -export LD_LIBRARY_PATH=$prefix/lib:$LD_LIBRARY_PATH -export PYTHONPATH=$prefix/lib/python2.6/site-packages:$PYTHONPATH +module Qpid + module Management + # Representation of a link to a remote broker. Properties include: + # - connectionRef + # - durable + # - host + # - lastError + # - name + # - port + # - state + # - transport + # - vhostRef + class Link < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/memory.rb b/qpid/cpp/management/ruby/lib/qpid_management/memory.rb new file mode 100644 index 0000000000..39dd803c9a --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/memory.rb @@ -0,0 +1,34 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of the broker's memory. 
Properties include: + # - malloc_arena + # - malloc_fordblks + # - malloc_hblkhd + # - malloc_hblks + # - malloc_keepcost + # - malloc_ordblks + # - malloc_uordblks + # - name + class Memory < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/queue.rb b/qpid/cpp/management/ruby/lib/qpid_management/queue.rb new file mode 100644 index 0000000000..c4fae3a53e --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/queue.rb @@ -0,0 +1,97 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a queue. 
Properties include: + # - acquires + # - arguments + # - autoDelete + # - bindingCount + # - bindingCountHigh + # - bindingCountLow + # - byteDepth + # - byteFtdDepth + # - byteFtdDequeues + # - byteFtdEnqueues + # - bytePersistDequeues + # - bytePersistEnqueues + # - byteTotalDequeues + # - byteTotalEnqueues + # - byteTxnDequeues + # - byteTxnEnqueues + # - consumerCount + # - consumerCountHigh + # - consumerCountLow + # - discardsLvq + # - discardsOverflow + # - discardsPurge + # - discardsRing + # - discardsSubscriber + # - discardsTtl + # - durable + # - exclusive + # - flowStopped + # - flowStoppedCount + # - messageLatencyAvg + # - messageLatencyCount + # - messageLatencyMax + # - messageLatencyMin + # - msgDepth + # - msgFtdDepth + # - msgFtdDequeues + # - msgFtdEnqueues + # - msgPersistDequeues + # - msgPersistEnqueues + # - msgTotalDequeues + # - msgTotalEnqueues + # - msgTxnDequeues + # - msgTxnEnqueues + # - name + # - releases + # - reroutes + # - unackedMessages + # - unackedMessagesHigh + # - unackedMessagesLow + # - vhostRef + class Queue < BrokerObject + # Purges (removes) messages from this queue + # @param [Fixnum] message_count number of messages to remove from the queue, or 0 for all messages + # @param [Hash] filter an optional filter to use when removing messages + def purge(message_count, filter={}) + invoke_method('purge', {'request' => message_count, 'filter' => filter}, "org.apache.qpid.broker:queue:#{name}") + end + + # Reroutes messages from this queue to an exchange, either the queue's + # alternate exchange, or the specified exchange + # @param [Fixnum] message_count number of messages to reroute from the queue, or 0 for all messages + # @param [Boolean] use_alternate_exchange whether to use the queue's alternate exchange as the destination + # @param [String] exchange name of destination exchange + # @param [Hash] filter an optional filter to use when rerouting messages + def reroute(message_count, use_alternate_exchange, exchange, 
filter) + args = {'request' => message_count, + 'useAltExchange' => use_alternate_exchange, + 'exchange' => exchange, + 'filter' => filter} + + invoke_method('reroute', args, "org.apache.qpid.broker:queue:#{name}") + end + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/session.rb b/qpid/cpp/management/ruby/lib/qpid_management/session.rb new file mode 100644 index 0000000000..b4fcc7da59 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/session.rb @@ -0,0 +1,38 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a session to the broker. 
Properties include: + # - TxnCommits + # - TxnCount + # - TxnRejects + # - TxnStarts + # - attached + # - channelId + # - clientCredit + # - connectionRef + # - detachedLifespan + # - name + # - unackedMessages + # - vhostRef + class Session < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/lib/qpid_management/subscription.rb b/qpid/cpp/management/ruby/lib/qpid_management/subscription.rb new file mode 100644 index 0000000000..fcff2831c0 --- /dev/null +++ b/qpid/cpp/management/ruby/lib/qpid_management/subscription.rb @@ -0,0 +1,35 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +module Qpid + module Management + # Representation of a subscription. 
Properties include: + # - acknowledged + # - arguments + # - browsing + # - creditMode + # - delivered + # - exclusive + # - name + # - queueRef + # - sessionRef + class Subscription < BrokerObject + end + end +end diff --git a/qpid/cpp/management/ruby/qpid_management.gemspec b/qpid/cpp/management/ruby/qpid_management.gemspec new file mode 100644 index 0000000000..a6cc1b828e --- /dev/null +++ b/qpid/cpp/management/ruby/qpid_management.gemspec @@ -0,0 +1,36 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# -*- encoding: utf-8 -*- +$:.push File.expand_path("../lib", __FILE__) + +Gem::Specification.new do |s| + s.name = "qpid_management" + s.version = "1.0" + s.authors = ["Apache Qpid Project"] + s.email = ["dev@qpid.apache.org"] + s.homepage = "http://qpid.apache.org" + s.summary = %q{Qpid management library} + s.description = %q{Qpid management library} + s.files = `git ls-files`.split("\n") + s.test_files = `git ls-files -- {test,spec,features}/*`.split("\n") + s.executables = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) } + s.require_paths = ["lib"] + s.add_runtime_dependency 'qpid_messaging' +end diff --git a/qpid/cpp/management/ruby/spec/broker_agent_spec.rb b/qpid/cpp/management/ruby/spec/broker_agent_spec.rb new file mode 100644 index 0000000000..7ffbf842e2 --- /dev/null +++ b/qpid/cpp/management/ruby/spec/broker_agent_spec.rb @@ -0,0 +1,43 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +require 'spec_helper' + +describe Qpid::Management::BrokerAgent do + before(:each) do + @broker_port = `qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @connection = Qpid::Messaging::Connection.new(url:"localhost:#{@broker_port}") + @connection.open() + @agent = Qpid::Management::BrokerAgent.new(@connection) + end + + after(:each) do + @agent.close() + @connection.close() + `qpidd -q --port #{@broker_port}` + end + + describe '#broker' do + let(:broker) { @agent.broker } + + it 'returns the broker' do + broker.class.should == Qpid::Management::Broker + end + end +end diff --git a/qpid/cpp/management/ruby/spec/broker_spec.rb b/qpid/cpp/management/ruby/spec/broker_spec.rb new file mode 100644 index 0000000000..6d6e1106a4 --- /dev/null +++ b/qpid/cpp/management/ruby/spec/broker_spec.rb @@ -0,0 +1,373 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +require 'spec_helper' + +describe Qpid::Management::Broker do + before(:each) do + @broker_port = `qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @connection = Qpid::Messaging::Connection.new(url:"localhost:#{@broker_port}") + @connection.open() + @agent = Qpid::Management::BrokerAgent.new(@connection) + @broker = @agent.broker + end + + after(:each) do + @agent.close() + @connection.close() + `qpidd --quit --port #{@broker_port}` + end + + def setup_queue_route + @other_port = `qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @broker.add_link('link1', 'localhost', @other_port) + @broker.add_queue('queue') + @broker.add_queue_route('qr1', + link: 'link1', + queue: 'queue', + exchange: 'amq.direct', + sync: 2) + end + + %w(connection session subscription exchange queue binding link bridge).each do |type| + describe "##{type}s" do + before(:each) do + setup_queue_route if %w(link bridge).include?(type) + end + + after(:each) do + if %w(link bridge).include?(type) + `qpidd --quit --port #{@other_port}` + end + end + + let(:collection) { @broker.send("#{type}s") } + + it "returns at least 1 #{type}" do + if type == 'subscription' + session = @connection.create_session + receiver = session.create_receiver("amq.direct/temp") + end + collection.count.should be > 0 + end + end + + describe "##{type}" do + before(:each) do + setup_queue_route if %w(link bridge).include?(type) + end + + after(:each) do + if %w(link bridge).include?(type) + `qpidd --quit --port #{@other_port}` + end + end + + let(:object) { @broker.send("#{type}s")[0] } + + it "returns a #{type} by oid" do + if type == 'subscription' + session = @connection.create_session + receiver = session.create_receiver("amq.direct/temp") + end + @broker.send(type, object.short_id).id.should == object.id + end + end + end + + describe '#add_exchange' do + %w(fanout direct topic headers).each do |type| + context "when adding a #{type} exchange" do + let(:exchange_name) 
{ "#{type}1" } + before(:each) do + @before_creation = Time.now + @broker.add_exchange(type, exchange_name, {'qpid.replicate' => 'none'}) + end + + subject { @broker.exchange(exchange_name) } + its(:short_id) { should == exchange_name } + its(:type) { should == type } + its(:created_at) { should be > @before_creation } + it 'has the correct arguments' do + subject.arguments.should == {'qpid.replicate' => 'none'} + end + end + end + end + + describe "#delete_exchange" do + before(:each) do + @before_creation = Time.now + @broker.add_exchange('fanout', 'fanout_to_delete') + end + + let(:exchange) { @broker.exchange('fanout_to_delete') } + + context "with a valid exchange name" do + it "deletes the exchange" do + @broker.delete_exchange(exchange.short_id) + expect { exchange.refresh! }.to raise_error + end + end + + context "with an invalid exchange name" do + it "raises a not-found exception" do + expect { @broker.delete_exchange("badname") }.to raise_error(/not-found.*badname/) + end + end + end + + describe "#add_queue" do + before(:each) do + @before_creation = Time.now + @queue_name = 'myqueue' + @broker.add_queue(@queue_name, {'qpid.replicate' => 'none'}) + end + + subject { @broker.queue(@queue_name) } + its(:short_id) { should == @queue_name } + its(:created_at) { should be > @before_creation } + it 'has the correct arguments' do + subject.arguments.should == {'qpid.replicate' => 'none'} + end + end + + describe "#delete_queue" do + before(:each) do + @before_creation = Time.now + @broker.add_queue('queue_to_delete') + end + + let(:queue) { @broker.queue('queue_to_delete') } + + context "with a valid queue name" do + it "deletes the queue" do + @broker.delete_queue(queue.short_id) + expect { queue.refresh! 
}.to raise_error + end + end + + context "with an invalid name" do + it "raises a not-found exception" do + expect { @broker.delete_queue("badname") }.to raise_error(/not-found.*badname/) + end + end + end + + describe "#add_binding" do + before(:each) do + @broker.add_queue('queue') + end + + it "creates a binding for a fanout exchange" do + @broker.add_exchange('fanout', 'fanout') + @broker.add_binding('fanout', 'queue') + expect { @broker.binding('org.apache.qpid.broker:exchange:fanout,org.apache.qpid.broker:queue:queue,') }.to_not raise_error + end + + it "creates a binding for a direct exchange" do + @broker.add_exchange('direct', 'direct') + @broker.add_binding('direct', 'queue', 'mykey') + expect { @broker.binding('org.apache.qpid.broker:exchange:direct,org.apache.qpid.broker:queue:queue,mykey') }.to_not raise_error + end + + it "creates a binding for a topic exchange" do + @broker.add_exchange('topic', 'topic') + @broker.add_binding('topic', 'queue', 'us.#') + expect { @broker.binding('org.apache.qpid.broker:exchange:topic,org.apache.qpid.broker:queue:queue,us.#') }.to_not raise_error + end + end + + describe "#delete_binding" do + it "deletes an existing binding" do + @broker.add_queue('queue') + @broker.add_exchange('fanout', 'fanout') + @broker.add_binding('fanout', 'queue') + expect { @broker.delete_binding('fanout', 'queue') }.to_not raise_error + end + end + + describe "#add_link" do + before(:each) do + @other_port = `/usr/sbin/qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + end + + after(:each) do + `/usr/sbin/qpidd -q --port #{@other_port}` + end + + it "adds a link" do + @broker.add_link('link1', 'localhost', @other_port) + @broker.links.count.should == 1 + end + end + + describe "#delete_link" do + before(:each) do + @other_port = `/usr/sbin/qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @broker.add_link('link1', 'localhost', @other_port) + end + + after(:each) do + `/usr/sbin/qpidd -q --port 
#{@other_port}` + end + + it "deletes a link" do + @broker.delete_link('link1') + @broker.links.count.should == 0 + end + end + + describe "#add_queue_route" do + context "with missing options" do + [:link, :queue, :exchange, :sync].each do |opt| + opts = {link: 'l', queue: 'q', exchange: 'e', sync:2} + opts.delete(opt) + it "raises an error when :#{opt} is missing" do + expect { @broker.add_queue_route('name', opts) }.to raise_error(/Option :#{opt} is required/) + end + end + end + + context "with all required options" do + before(:each) do + @other_port = `/usr/sbin/qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @broker.add_link('link1', 'localhost', @other_port) + @broker.add_queue('queue') + @broker.add_queue_route('qr1', + link: 'link1', + queue: 'queue', + exchange: 'amq.direct', + sync: 2) + end + + after(:each) do + `/usr/sbin/qpidd -q --port #{@other_port}` + end + + it "adds a queue route" do + @broker.bridges.count.should == 1 + end + + subject { @broker.bridges[0] } + its(:dest) { should == 'amq.direct' } + its(:durable) { should == false } + its(:dynamic) { should == false } + its(:excludes) { should == "" } + its(:key) { should == "" } + its(:name) { should == "qr1" } + its(:src) { should == "queue" } + its(:srcIsLocal) { should == false } + its(:srcIsQueue) { should == true } + its(:sync) { should == 2 } + its(:tag) { should == "" } + end + end + + describe "#add_exchange_route" do + context "with missing options" do + [:link, :exchange, :key, :sync].each do |opt| + opts = {link: 'l', exchange: 'e', key:'rk', sync:2} + opts.delete(opt) + it "raises an error when :#{opt} is missing" do + expect { @broker.add_exchange_route('name', opts) }.to raise_error(/Option :#{opt} is required/) + end + end + end + + context "with all required options" do + before(:each) do + @other_port = `/usr/sbin/qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @broker.add_link('link1', 'localhost', @other_port) + 
@broker.add_queue('queue') + @broker.add_exchange_route('er1', + link: 'link1', + exchange: 'amq.direct', + key: 'foo', + sync: 2) + end + + after(:each) do + `/usr/sbin/qpidd -q --port #{@other_port}` + end + + it "adds an exchange route" do + @broker.bridges.count.should == 1 + end + + subject { @broker.bridges[0] } + its(:dest) { should == 'amq.direct' } + its(:durable) { should == false } + its(:dynamic) { should == false } + its(:excludes) { should == "" } + its(:key) { should == "foo" } + its(:name) { should == "er1" } + its(:src) { should == "amq.direct" } + its(:srcIsLocal) { should == false } + its(:srcIsQueue) { should == false } + its(:sync) { should == 2 } + its(:tag) { should == "" } + end + end + + describe "#add_dynamic_route" do + context "with missing options" do + [:link, :exchange, :sync].each do |opt| + opts = {link: 'l', exchange: 'e', sync:2} + opts.delete(opt) + it "raises an error when :#{opt} is missing" do + expect { @broker.add_dynamic_route('name', opts) }.to raise_error(/Option :#{opt} is required/) + end + end + end + + context "with all required options" do + before(:each) do + @other_port = `/usr/sbin/qpidd --no-data-dir --auth=no --no-module-dir --daemon --port 0`.chop + @broker.add_link('link1', 'localhost', @other_port) + @broker.add_queue('queue') + @broker.add_dynamic_route('dr1', + link: 'link1', + exchange: 'amq.direct', + sync: 2) + end + + after(:each) do + `/usr/sbin/qpidd -q --port #{@other_port}` + end + + it "adds an exchange route" do + @broker.bridges.count.should == 1 + end + + subject { @broker.bridges[0] } + its(:dest) { should == 'amq.direct' } + its(:durable) { should == false } + its(:dynamic) { should == true } + its(:excludes) { should == "" } + its(:key) { should == "" } + its(:name) { should == "dr1" } + its(:src) { should == "amq.direct" } + its(:srcIsLocal) { should == false } + its(:srcIsQueue) { should == false } + its(:sync) { should == 2 } + its(:tag) { should == "" } + end + end +end diff --git 
a/qpid/cpp/management/ruby/spec/spec_helper.rb b/qpid/cpp/management/ruby/spec/spec_helper.rb new file mode 100644 index 0000000000..f552c55888 --- /dev/null +++ b/qpid/cpp/management/ruby/spec/spec_helper.rb @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +require 'qpid_messaging' +require 'qpid_management' diff --git a/qpid/cpp/src/CMakeLists.txt b/qpid/cpp/src/CMakeLists.txt index 45f5987a6c..be10be9b85 100644 --- a/qpid/cpp/src/CMakeLists.txt +++ b/qpid/cpp/src/CMakeLists.txt @@ -502,30 +502,23 @@ if (BUILD_XML) endif (BUILD_XML) -# Build the ACL plugin -set (acl_default ON) - -option(BUILD_ACL "Build ACL enforcement broker plugin" ${acl_default}) - -if (BUILD_ACL) - set (acl_SOURCES - qpid/acl/Acl.cpp - qpid/acl/Acl.h - qpid/acl/AclConnectionCounter.cpp - qpid/acl/AclConnectionCounter.h - qpid/acl/AclData.cpp - qpid/acl/AclData.h - qpid/acl/AclLexer.cpp - qpid/acl/AclLexer.h - qpid/acl/AclPlugin.cpp - qpid/acl/AclReader.cpp - qpid/acl/AclReader.h - qpid/acl/AclResourceCounter.cpp - qpid/acl/AclResourceCounter.h - qpid/acl/AclValidator.cpp - qpid/acl/AclValidator.h - ) -endif (BUILD_ACL) +set (acl_SOURCES + qpid/acl/Acl.cpp + qpid/acl/Acl.h + qpid/acl/AclConnectionCounter.cpp + qpid/acl/AclConnectionCounter.h + qpid/acl/AclData.cpp + qpid/acl/AclData.h + qpid/acl/AclLexer.cpp + qpid/acl/AclLexer.h + qpid/acl/AclPlugin.cpp + qpid/acl/AclReader.cpp + qpid/acl/AclReader.h + qpid/acl/AclResourceCounter.cpp + qpid/acl/AclResourceCounter.h + qpid/acl/AclValidator.cpp + qpid/acl/AclValidator.h + ) set (ha_default ON) diff --git a/qpid/cpp/src/tests/CMakeLists.txt b/qpid/cpp/src/tests/CMakeLists.txt index d2e6c7dd13..efc3176174 100644 --- a/qpid/cpp/src/tests/CMakeLists.txt +++ b/qpid/cpp/src/tests/CMakeLists.txt @@ -25,21 +25,22 @@ if (QPID_LINK_BOOST_DYNAMIC) add_definitions(-DBOOST_TEST_DYN_LINK) endif (QPID_LINK_BOOST_DYNAMIC) -include_directories( ${CMAKE_CURRENT_SOURCE_DIR} ) - -# Using the Boost DLLs triggers warning 4275 on Visual Studio -# (non dll-interface class used as base for dll-interface class). -# This is ok, so suppress the warning. 
-# Also, boost lengthy names trigger warning 4503, decorated name length exceeded -# and using getenv() triggers insecure CRT warnings which we can silence in the -# test environment. +include_directories(${CMAKE_CURRENT_SOURCE_DIR}) + +# Using the Boost DLLs triggers warning 4275 on Visual Studio (non +# dll-interface class used as base for dll-interface class). This is +# ok, so suppress the warning. +# +# Also, boost lengthy names trigger warning 4503, decorated name +# length exceeded and using getenv() triggers insecure CRT warnings +# which we can silence in the test environment. if (MSVC) - add_definitions( /wd4275 /wd4503 /D_CRT_SECURE_NO_WARNINGS) + add_definitions(/wd4275 /wd4503 /D_CRT_SECURE_NO_WARNINGS) endif (MSVC) # If we're using GCC allow variadic macros (even though they're c99 not c++01) if (CMAKE_COMPILER_IS_GNUCXX) - add_definitions(-Wno-variadic-macros) + add_definitions(-Wno-variadic-macros) endif (CMAKE_COMPILER_IS_GNUCXX) # Windows uses some process-startup calls to ensure that errors, etc. don't @@ -48,95 +49,91 @@ endif (CMAKE_COMPILER_IS_GNUCXX) # instead of windows. If you want to remove this code, build without the # QPID_WINDOWS_DEFAULT_TEST_OUTPUTS ON. 
if (CMAKE_SYSTEM_NAME STREQUAL Windows) - option(QPID_WINDOWS_DEFAULT_TEST_OUTPUTS "Use default error-handling on Windows tests" OFF) - if (NOT QPID_WINDOWS_DEFAULT_TEST_OUTPUTS) - set(platform_test_additions windows/DisableWin32ErrorWindows.cpp) - endif (NOT QPID_WINDOWS_DEFAULT_TEST_OUTPUTS) + option(QPID_WINDOWS_DEFAULT_TEST_OUTPUTS "Use default error-handling on Windows tests" OFF) + if (NOT QPID_WINDOWS_DEFAULT_TEST_OUTPUTS) + set(platform_test_additions windows/DisableWin32ErrorWindows.cpp) + endif (NOT QPID_WINDOWS_DEFAULT_TEST_OUTPUTS) endif (CMAKE_SYSTEM_NAME STREQUAL Windows) # Some generally useful utilities that just happen to be built in the test area -add_executable (qpid-receive qpid-receive.cpp Statistics.cpp ${platform_test_additions}) -target_link_libraries (qpid-receive qpidmessaging qpidtypes qpidcommon) +add_executable(qpid-receive qpid-receive.cpp Statistics.cpp ${platform_test_additions}) +target_link_libraries(qpid-receive qpidmessaging qpidtypes qpidcommon) -add_executable (qpid-send qpid-send.cpp Statistics.cpp ${platform_test_additions}) -target_link_libraries (qpid-send qpidmessaging qpidtypes qpidcommon) +add_executable(qpid-send qpid-send.cpp Statistics.cpp ${platform_test_additions}) +target_link_libraries(qpid-send qpidmessaging qpidtypes qpidcommon) -install (TARGETS - qpid-receive qpid-send - RUNTIME DESTINATION ${QPID_INSTALL_BINDIR}) +install(TARGETS qpid-receive qpid-send RUNTIME DESTINATION ${QPID_INSTALL_BINDIR}) -add_executable (qpid-perftest qpid-perftest.cpp ${platform_test_additions}) -target_link_libraries (qpid-perftest qpidclient qpidcommon ${Boost_PROGRAM_OPTIONS_LIBRARY}) +add_executable(qpid-perftest qpid-perftest.cpp ${platform_test_additions}) +target_link_libraries(qpid-perftest qpidclient qpidcommon ${Boost_PROGRAM_OPTIONS_LIBRARY}) -add_executable (qpid-latency-test qpid-latency-test.cpp ${platform_test_additions}) -target_link_libraries (qpid-latency-test qpidclient qpidcommon) +add_executable(qpid-latency-test 
qpid-latency-test.cpp ${platform_test_additions}) +target_link_libraries(qpid-latency-test qpidclient qpidcommon) -add_executable (qpid-client-test qpid-client-test.cpp ${platform_test_additions}) -target_link_libraries (qpid-client-test qpidclient qpidcommon) +add_executable(qpid-client-test qpid-client-test.cpp ${platform_test_additions}) +target_link_libraries(qpid-client-test qpidclient qpidcommon) -add_executable (qpid-ping qpid-ping.cpp ${platform_test_additions}) -target_link_libraries (qpid-ping qpidmessaging qpidtypes qpidcommon) +add_executable(qpid-ping qpid-ping.cpp ${platform_test_additions}) +target_link_libraries(qpid-ping qpidmessaging qpidtypes qpidcommon) -add_executable (qpid-topic-listener qpid-topic-listener.cpp ${platform_test_additions}) -target_link_libraries (qpid-topic-listener qpidclient qpidcommon) +add_executable(qpid-topic-listener qpid-topic-listener.cpp ${platform_test_additions}) +target_link_libraries(qpid-topic-listener qpidclient qpidcommon) -add_executable (qpid-topic-publisher qpid-topic-publisher.cpp ${platform_test_additions}) -target_link_libraries (qpid-topic-publisher qpidclient qpidcommon) +add_executable(qpid-topic-publisher qpid-topic-publisher.cpp ${platform_test_additions}) +target_link_libraries(qpid-topic-publisher qpidclient qpidcommon) -add_executable (receiver receiver.cpp ${platform_test_additions}) -target_link_libraries (receiver qpidclient qpidcommon) +add_executable(receiver receiver.cpp ${platform_test_additions}) +target_link_libraries(receiver qpidclient qpidcommon) # This is bizarre - using both messaging and client libraries -add_executable (sender sender.cpp Statistics.cpp ${platform_test_additions}) -target_link_libraries (sender qpidmessaging qpidtypes qpidclient qpidcommon) +add_executable(sender sender.cpp Statistics.cpp ${platform_test_additions}) +target_link_libraries(sender qpidmessaging qpidtypes qpidclient qpidcommon) -add_executable (qpid-txtest qpid-txtest.cpp ${platform_test_additions}) 
-target_link_libraries (qpid-txtest qpidclient qpidcommon qpidtypes) -#qpid_txtest_SOURCES=qpid-txtest.cpp TestOptions.h ConnectionOptions.h +add_executable(qpid-txtest qpid-txtest.cpp ${platform_test_additions}) +target_link_libraries(qpid-txtest qpidclient qpidcommon qpidtypes) -add_executable (qpid-txtest2 qpid-txtest2.cpp ${platform_test_additions}) -target_link_libraries (qpid-txtest2 qpidmessaging qpidtypes qpidcommon) +add_executable(qpid-txtest2 qpid-txtest2.cpp ${platform_test_additions}) +target_link_libraries(qpid-txtest2 qpidmessaging qpidtypes qpidcommon) -install (TARGETS - qpid-perftest qpid-latency-test qpid-client-test - qpid-ping - qpid-topic-listener qpid-topic-publisher receiver sender - qpid-txtest qpid-txtest2 - RUNTIME DESTINATION ${QPID_INSTALL_TESTDIR}) +install(TARGETS + qpid-perftest qpid-latency-test qpid-client-test + qpid-ping + qpid-topic-listener qpid-topic-publisher receiver sender + qpid-txtest qpid-txtest2 + RUNTIME DESTINATION ${QPID_INSTALL_TESTDIR}) # Only build test code if testing is turned on if (BUILD_TESTING) -# Create the environment scripts for tests -set (abs_srcdir ${CMAKE_CURRENT_SOURCE_DIR}) -set (abs_builddir ${CMAKE_CURRENT_BINARY_DIR}) -set (abs_top_srcdir ${CMAKE_SOURCE_DIR}) -set (abs_top_builddir ${CMAKE_BINARY_DIR}) -set (builddir_lib_suffix "") - if (CMAKE_SYSTEM_NAME STREQUAL Windows) - configure_file (${CMAKE_CURRENT_SOURCE_DIR}/test_env.ps1.in - ${CMAKE_CURRENT_BINARY_DIR}/test_env.ps1 @ONLY) -else (CMAKE_SYSTEM_NAME STREQUAL Windows) - configure_file (${CMAKE_CURRENT_SOURCE_DIR}/test_env.sh.in - ${CMAKE_CURRENT_BINARY_DIR}/test_env.sh @ONLY) -endif (CMAKE_SYSTEM_NAME STREQUAL Windows) - -# Copy qpidd-p0 script to build directory so tests can find it. 
-configure_file (${CMAKE_CURRENT_SOURCE_DIR}/qpidd-p0 ${CMAKE_CURRENT_BINARY_DIR} COPYONLY) + # Windows + + set(ENV{OUTDIR} ${EXECUTABLE_OUTPUT_PATH}) -if (CMAKE_SYSTEM_NAME STREQUAL Windows) - set (ENV{OUTDIR} ${EXECUTABLE_OUTPUT_PATH}) - set (test_script_suffix ".ps1") - set (shell "powershell") + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/env.ps1.in + ${CMAKE_CURRENT_BINARY_DIR}/env.ps1 @ONLY) +else (CMAKE_SYSTEM_NAME STREQUAL Windows) + # Posix + + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/env.sh.in + ${CMAKE_CURRENT_BINARY_DIR}/env.sh @ONLY) endif (CMAKE_SYSTEM_NAME STREQUAL Windows) -set(test_wrap ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_test${test_script_suffix} -buildDir ${CMAKE_BINARY_DIR}) -set(python_wrap ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_test${test_script_suffix} -buildDir ${CMAKE_BINARY_DIR} -python) - +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/check_dependencies.py.in + ${CMAKE_CURRENT_BINARY_DIR}/check_dependencies.py @ONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/env.py.in + ${CMAKE_CURRENT_BINARY_DIR}/env.py @ONLY) + +file(COPY . + DESTINATION . + PATTERN "*.cmake" EXCLUDE + PATTERN "*.in" EXCLUDE + PATTERN "*.c" EXCLUDE + PATTERN "*.h" EXCLUDE + PATTERN "*.cpp" EXCLUDE) + if (BUILD_TESTING_UNITTESTS) -# # Unit test program # # Unit tests are built as a single program to reduce valgrind overhead @@ -144,8 +141,8 @@ if (BUILD_TESTING_UNITTESTS) # ccmake and set unit_tests_to_build to the set you want to build. 
# Like this to work with cmake 2.4 on Unix -set (qpid_test_boost_libs - ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SYSTEM_LIBRARY}) +set(qpid_test_boost_libs + ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SYSTEM_LIBRARY}) set(all_unit_tests AccumulatedAckTest @@ -200,21 +197,16 @@ set(all_unit_tests Url Uuid Variant - ${xml_tests} - ) - -set(unit_tests_to_build - "" - CACHE STRING "Which unit tests to build" - ) + ${xml_tests}) +set(unit_tests_to_build "" CACHE STRING "Which unit tests to build") mark_as_advanced(unit_tests_to_build) # If no unit_test specifically set then use all unit tests if (unit_tests_to_build) -set(actual_unit_tests ${unit_tests_to_build}) + set(actual_unit_tests ${unit_tests_to_build}) else() -set(actual_unit_tests ${all_unit_tests}) + set(actual_unit_tests ${all_unit_tests}) endif() add_executable (unit_test unit_test @@ -224,148 +216,115 @@ target_link_libraries (unit_test qpidmessaging qpidtypes qpidbroker qpidclient qpidcommon) set_target_properties (unit_test PROPERTIES COMPILE_DEFINITIONS _IN_QPID_BROKER) -add_test (NAME unit_test COMMAND ${test_wrap} -boostTest -- $<TARGET_FILE:unit_test>) - endif (BUILD_TESTING_UNITTESTS) -add_library (shlibtest MODULE shlibtest.cpp) +add_library(shlibtest MODULE shlibtest.cpp) if (BUILD_SASL) - add_custom_command( - OUTPUT sasl_config/qpidd.conf sasl_config/qpidd.sasldb - COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/sasl_test_setup.sh) + add_custom_command(OUTPUT sasl_config/qpidd.conf sasl_config/qpidd.sasldb + COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/sasl_test_setup.sh) - add_custom_target( - sasl_config ALL - DEPENDS sasl_config/qpidd.conf sasl_config/qpidd.sasldb) + add_custom_target(sasl_config ALL + DEPENDS sasl_config/qpidd.conf sasl_config/qpidd.sasldb) endif (BUILD_SASL) -# # Other test programs -# -add_executable (echotest echotest.cpp ${platform_test_additions}) -target_link_libraries (echotest qpidclient qpidcommon) -add_executable (publish publish.cpp ${platform_test_additions}) 
-target_link_libraries (publish qpidclient qpidcommon) +add_executable(echotest echotest.cpp ${platform_test_additions}) +target_link_libraries(echotest qpidclient qpidcommon) -add_executable (consume consume.cpp ${platform_test_additions}) -target_link_libraries (consume qpidclient qpidcommon) +add_executable(publish publish.cpp ${platform_test_additions}) +target_link_libraries(publish qpidclient qpidcommon) -add_executable (header_test header_test.cpp ${platform_test_additions}) -target_link_libraries (header_test qpidclient qpidcommon) +add_executable(consume consume.cpp ${platform_test_additions}) +target_link_libraries(consume qpidclient qpidcommon) -add_executable (declare_queues declare_queues.cpp ${platform_test_additions}) -target_link_libraries (declare_queues qpidclient qpidcommon) +add_executable(header_test header_test.cpp ${platform_test_additions}) +target_link_libraries(header_test qpidclient qpidcommon) -add_executable (replaying_sender replaying_sender.cpp ${platform_test_additions}) -target_link_libraries (replaying_sender qpidclient qpidcommon) +add_executable(declare_queues declare_queues.cpp ${platform_test_additions}) +target_link_libraries(declare_queues qpidclient qpidcommon) -add_executable (resuming_receiver resuming_receiver.cpp ${platform_test_additions}) -target_link_libraries (resuming_receiver qpidclient qpidcommon) +add_executable(replaying_sender replaying_sender.cpp ${platform_test_additions}) +target_link_libraries(replaying_sender qpidclient qpidcommon) -add_executable (txshift txshift.cpp ${platform_test_additions}) -target_link_libraries (txshift qpidclient qpidcommon) +add_executable(resuming_receiver resuming_receiver.cpp ${platform_test_additions}) +target_link_libraries(resuming_receiver qpidclient qpidcommon) -add_executable (txjob txjob.cpp ${platform_test_additions}) -target_link_libraries (txjob qpidclient qpidcommon) +add_executable(txshift txshift.cpp ${platform_test_additions}) +target_link_libraries(txshift 
qpidclient qpidcommon) -add_executable (datagen datagen.cpp ${platform_test_additions}) -target_link_libraries (datagen qpidclient qpidcommon) +add_executable(txjob txjob.cpp ${platform_test_additions}) +target_link_libraries(txjob qpidclient qpidcommon) -add_executable (msg_group_test msg_group_test.cpp ${platform_test_additions}) -target_link_libraries (msg_group_test qpidmessaging qpidtypes qpidcommon) +add_executable(datagen datagen.cpp ${platform_test_additions}) +target_link_libraries(datagen qpidclient qpidcommon) -add_executable (ha_test_max_queues ha_test_max_queues.cpp ${platform_test_additions}) -target_link_libraries (ha_test_max_queues qpidclient qpidcommon) +add_executable(msg_group_test msg_group_test.cpp ${platform_test_additions}) +target_link_libraries(msg_group_test qpidmessaging qpidtypes qpidcommon) -if (BUILD_SASL) - add_executable (sasl_version sasl_version.cpp ${platform_test_additions}) -endif (BUILD_SASL) +add_executable(ha_test_max_queues ha_test_max_queues.cpp ${platform_test_additions}) +target_link_libraries(ha_test_max_queues qpidclient qpidcommon) + +add_library(test_store MODULE test_store.cpp) +target_link_libraries(test_store qpidbroker qpidcommon) +set_target_properties(test_store PROPERTIES PREFIX "" COMPILE_DEFINITIONS _IN_QPID_BROKER) -set (python_src ${CMAKE_SOURCE_DIR}/../python) -if (EXISTS ${python_src}) - set (python_bld ${CMAKE_CURRENT_BINARY_DIR}/python) - # This will not pick up added or deleted python files - # In that case you need to rerun CMake - file(GLOB_RECURSE python_files ${python_src}/*.py) - - add_custom_command( - OUTPUT ${python_bld} - DEPENDS ${python_files} - COMMAND ${PYTHON_EXECUTABLE} - setup.py - build --build-base=${python_bld}/build - install --prefix=${python_bld} --install-lib=${python_bld} --install-scripts=${python_bld}/commands - WORKING_DIRECTORY ${python_src} - ) - - add_custom_target( - python_bld ALL - DEPENDS ${python_bld} - ) -endif (EXISTS ${python_src}) +add_library(dlclose_noop MODULE 
dlclose_noop.c) if (BUILD_SASL) - add_test (NAME sasl_fed COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_fed${test_script_suffix}) - add_test (NAME sasl_fed_ex_dynamic COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_fed_ex${test_script_suffix} dynamic) - add_test (NAME sasl_fed_ex_link COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_fed_ex${test_script_suffix} link) - add_test (NAME sasl_fed_ex_queue COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_fed_ex${test_script_suffix} queue) - add_test (NAME sasl_fed_ex_route COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_fed_ex${test_script_suffix} route) - add_test (NAME sasl_no_dir COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/sasl_no_dir${test_script_suffix}) - if (BUILD_SSL) - add_test(NAME ssl_test COMMAND ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/ssl_test${test_script_suffix}) - endif (BUILD_SSL) + add_executable(sasl_version sasl_version.cpp ${platform_test_additions}) endif (BUILD_SASL) -add_test (NAME qpid-client-test COMMAND ${test_wrap} -startBroker -- $<TARGET_FILE:qpid-client-test>) -add_test (NAME quick_perftest COMMAND ${test_wrap} -startBroker -- $<TARGET_FILE:qpid-perftest> --summary --count 100) -add_test (NAME quick_topictest COMMAND ${test_wrap} -startBroker -- ${CMAKE_CURRENT_SOURCE_DIR}/quick_topictest${test_script_suffix}) -add_test (NAME quick_txtest COMMAND ${test_wrap} -startBroker -- $<TARGET_FILE:qpid-txtest> --queues 4 --tx-count 10 --quiet) -add_test (NAME quick_txtest2 COMMAND ${test_wrap} -startBroker -- $<TARGET_FILE:qpid-txtest2> --queues 4 --tx-count 10 --quiet) -add_test (NAME msg_group_tests COMMAND ${test_wrap} -startBroker -- ${CMAKE_CURRENT_SOURCE_DIR}/run_msg_group_tests${test_script_suffix}) -add_test (NAME run_header_test COMMAND ${test_wrap} -startBroker -- ${CMAKE_CURRENT_SOURCE_DIR}/run_header_test${test_script_suffix}) -add_test (NAME python_tests COMMAND ${test_wrap} -startBroker -- 
${CMAKE_CURRENT_SOURCE_DIR}/python_tests${test_script_suffix}) -if (NOT CMAKE_SYSTEM_NAME STREQUAL Windows) - # paged queue not yet implemented for windows - add_test (NAME paged_queue_tests COMMAND ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_paged_queue_tests${test_script_suffix}) -endif (NOT CMAKE_SYSTEM_NAME STREQUAL Windows) -if (BUILD_AMQP) - add_test (interop_tests ${python_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/interop_tests.py) -endif (BUILD_AMQP) +# Cross-platform tests + +add_test(NAME unit_tests COMMAND ${PYTHON_EXECUTABLE} run_unit_tests) + +add_test(NAME acl_tests COMMAND ${PYTHON_EXECUTABLE} run_acl_tests) +add_test(NAME cli_tests COMMAND ${PYTHON_EXECUTABLE} run_cli_tests) +add_test(NAME client_tests COMMAND ${PYTHON_EXECUTABLE} run_client_tests) +add_test(NAME federation_tests COMMAND ${PYTHON_EXECUTABLE} run_federation_tests) +add_test(NAME flow_control_tests COMMAND ${PYTHON_EXECUTABLE} run_flow_control_tests) +add_test(NAME ha_tests COMMAND ${PYTHON_EXECUTABLE} run_ha_tests) +add_test(NAME msg_group_tests COMMAND ${PYTHON_EXECUTABLE} run_msg_group_tests) +add_test(NAME performance_tests COMMAND ${PYTHON_EXECUTABLE} run_performance_tests) +add_test(NAME python_tests COMMAND ${PYTHON_EXECUTABLE} run_python_tests) +add_test(NAME queue_redirect_tests COMMAND ${PYTHON_EXECUTABLE} run_queue_redirect_tests) +add_test(NAME qmf_tests COMMAND ${PYTHON_EXECUTABLE} run_qmf_tests) +add_test(NAME transaction_tests COMMAND ${PYTHON_EXECUTABLE} run_transaction_tests) -add_test (ha_tests ${python_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/ha_tests.py) -add_test (qpidd_qmfv2_tests ${python_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/qpidd_qmfv2_tests.py) if (BUILD_AMQP) - add_test (interlink_tests ${python_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/interlink_tests.py) - add_test (idle_timeout_tests ${python_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/idle_timeout_tests.py) + add_test(NAME idle_timeout_tests COMMAND ${PYTHON_EXECUTABLE} run_idle_timeout_tests) + add_test(NAME interlink_tests 
COMMAND ${PYTHON_EXECUTABLE} run_interlink_tests) + add_test(NAME interop_tests COMMAND ${PYTHON_EXECUTABLE} run_interop_tests) endif (BUILD_AMQP) -add_test (swig_python_tests ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/swig_python_tests${test_script_suffix}) -add_test (ipv6_test ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/ipv6_test${test_script_suffix}) -add_test (federation_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_tests${test_script_suffix}) -add_test (federation_sys_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_sys_tests${test_script_suffix}) -add_test (queue_flow_limit_tests - ${test_wrap} - -startBroker -brokerOptions "--default-flow-stop-threshold=80 --default-flow-resume-threshold=70" - -- ${CMAKE_CURRENT_SOURCE_DIR}/run_queue_flow_limit_tests${test_script_suffix}) -if (BUILD_ACL) - add_test (acl_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_acl_tests${test_script_suffix}) -endif (BUILD_ACL) -add_test (cli_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_cli_tests${test_script_suffix}) -add_test (dynamic_log_level_test ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/dynamic_log_level_test${test_script_suffix}) -add_test (dynamic_log_hires_timestamp ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/dynamic_log_hires_timestamp${test_script_suffix}) -if (BUILD_MSSQL) - add_test (store_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_store_tests${test_script_suffix} MSSQL) -endif (BUILD_MSSQL) -if (BUILD_MSCLFS) - add_test (store_tests_clfs ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_store_tests${test_script_suffix} MSSQL-CLFS) -endif (BUILD_MSCLFS) -add_test (queue_redirect ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_queue_redirect${test_script_suffix}) -add_library(test_store MODULE test_store.cpp) -target_link_libraries (test_store qpidbroker qpidcommon) -set_target_properties (test_store PROPERTIES PREFIX "" COMPILE_DEFINITIONS _IN_QPID_BROKER) +if (CMAKE_SYSTEM_NAME STREQUAL Windows) + # Windows-only tests + + if (BUILD_MSSQL) + add_test(NAME store_tests 
COMMAND powershell run_store_tests.ps1 MSSQL) + endif (BUILD_MSSQL) + + if (BUILD_MSCLFS) + add_test(NAME store_tests_clfs COMMAND powershell run_store_tests.ps1 MSSQL-CLFS) + endif (BUILD_MSCLFS) +else (CMAKE_SYSTEM_NAME STREQUAL Windows) + # Posix-only tests -add_library (dlclose_noop MODULE dlclose_noop.c) + add_test(NAME logging_tests COMMAND run_logging_tests) # Pretty simple to convert + add_test(NAME ipv6_tests COMMAND run_ipv6_tests) # Also pretty simple to convert + add_test(NAME paged_queue_tests COMMAND run_paged_queue_tests) + add_test(NAME ring_queue_tests COMMAND run_ring_queue_tests) + add_test(NAME topic_tests COMMAND run_topic_tests) + + if (BUILD_SASL) + add_test(NAME sasl_tests COMMAND run_sasl_tests) + + if (BUILD_SSL) + add_test(NAME ssl_tests COMMAND run_ssl_tests) + endif (BUILD_SSL) + endif (BUILD_SASL) +endif (CMAKE_SYSTEM_NAME STREQUAL Windows) endif (BUILD_TESTING) diff --git a/qpid/cpp/src/tests/README.txt b/qpid/cpp/src/tests/README.txt index 8eaa5bbd25..50bd181ab0 100644 --- a/qpid/cpp/src/tests/README.txt +++ b/qpid/cpp/src/tests/README.txt @@ -8,21 +8,15 @@ developers can run tests selectively as explained below. Unit tests use the boost test framework, and are compiled to the programd unit_test -There are several options to control how test results are displayed, see - http://www.boost.org/doc/libs/1_35_0/libs/test/doc/components/utf/parameters/index.html +There are several options to control how test results are displayed. See +http://www.boost.org/doc/libs/1_35_0/libs/test/doc/components/utf/parameters/index.html. == System Tests == -System tests are executables or scripts. You can run executable tests directly -as well as via "make test" or "ctest". Some tests require environment settings -which are set by src/tests/test_env.sh on Unix or by src/tests/test_env.ps1 on -Windows. - -./python_tests: runs ../python/run_tests. This is the main set of -system tests for the broker. 
- -Other C++ client test executables and scripts under client/test are -system tests for the client. +System tests are executables or scripts. You can run executable tests +directly as well as via "make test" or "ctest". Some tests require +environment settings which are set by src/tests/env.sh on Unix or by +src/tests/env.ps1 on Windows. == Running selected tests == @@ -34,4 +28,3 @@ directly gives you additional options, e.g. This runs tests with names matching the regular expression <regexp> and will print the full output of the tests rather than just listing which tests pass or fail. - diff --git a/qpid/cpp/src/tests/ais_test.cpp b/qpid/cpp/src/tests/ais_test.cpp deleted file mode 100644 index 00c61242e4..0000000000 --- a/qpid/cpp/src/tests/ais_test.cpp +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * Copyright (c) 2006 The Apache Software Foundation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Defines test_main function to link with actual unit test code. -#define BOOST_AUTO_TEST_MAIN // Boost 1.33 -#define BOOST_TEST_MAIN -#include "unit_test.h" - diff --git a/qpid/cpp/src/tests/allhosts b/qpid/cpp/src/tests/allhosts deleted file mode 100755 index 07bc04fff5..0000000000 --- a/qpid/cpp/src/tests/allhosts +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -usage() { - echo "Usage: $0 [options] command. -Run a command on each host in \$HOSTS. -Options: - -l USER passed to ssh - run as USER. - -t passed to ssh - create a terminal. - -b run in background, wait for commands to complete. - -d run in background, don't wait for commands to complete. - -s SECONDS sleep between starting commands. - -q don't print banner lines for each host. - -o SUFFIX log output of each command to <host>.SUFFIX - -X passed to ssh - forward X connection. -" - exit 1 -} - -while getopts "tl:bs:dqo:X" opt; do - case $opt in - l) SSHOPTS="-l$OPTARG $SSHOPTS" ;; - t) SSHOPTS="-t $SSHOPTS" ;; - b) BACKGROUND=1 ;; - d) BACKGROUND=1; DISOWN=1 ;; - s) SLEEP="sleep $OPTARG" ;; - q) NOBANNER=1 ;; - o) SUFFIX=$OPTARG ;; - X) SSHOPTS="-X $SSHOPTS" ;; - *) usage;; - esac -done -shift `expr $OPTIND - 1` -test "$*" || usage; - -OK_FILE=`mktemp` # Will be deleted if anything goes wrong. -trap "rm -f $OK_FILE" EXIT - -do_ssh() { - h=$1; shift - if test $SUFFIX ; then ssh $SSHOPTS $h "$@" &> $h.$SUFFIX - else ssh $SSHOPTS $h "$@"; fi || rm -rf $OK_FILE; -} - -for h in $HOSTS ; do - test "$NOBANNER" || echo "== ssh $SSHOPTS $h $@ ==" - if [ "$BACKGROUND" = 1 ]; then - do_ssh $h "$@" & - CHILDREN="$! 
$CHILDREN" - else - do_ssh $h "$@" - fi - $SLEEP -done - -if [ "$DISOWN" = 1 ]; then - for c in $CHILDREN; do disown $c; done -else - wait -fi - -test -f $OK_FILE diff --git a/qpid/cpp/src/tests/brokertest.py b/qpid/cpp/src/tests/brokertest.py index 6fae88092b..8c32faad0c 100644 --- a/qpid/cpp/src/tests/brokertest.py +++ b/qpid/cpp/src/tests/brokertest.py @@ -354,7 +354,7 @@ class Broker(Popen): if (self.test.protocol and qm == qpid_messaging): kwargs.setdefault("protocol", self.test.protocol) return connection_class.establish(self.host_port(), timeout=timeout, **kwargs) - + @property def agent(self, **kwargs): """Return a BrokerAgent for this broker""" @@ -477,14 +477,14 @@ class BrokerTest(TestCase): TestCase.__init__(self, *args, **kwargs) # Environment settings. - qpidd_exec = os.path.abspath(checkenv("QPIDD_EXEC")) + qpidd_exec = "qpidd" ha_lib = os.getenv("HA_LIB") xml_lib = os.getenv("XML_LIB") amqp_lib = os.getenv("AMQP_LIB") - qpid_config_exec = os.getenv("QPID_CONFIG_EXEC") - qpid_route_exec = os.getenv("QPID_ROUTE_EXEC") - receiver_exec = os.getenv("RECEIVER_EXEC") - sender_exec = os.getenv("SENDER_EXEC") + qpid_config_exec = "qpid-config" + qpid_route_exec = "qpid-route" + receiver_exec = "receiver" + sender_exec = "sender" sql_store_lib = os.getenv("STORE_SQL_LIB") sql_clfs_store_lib = os.getenv("STORE_SQL_CLFS_LIB") sql_catalog = os.getenv("STORE_CATALOG") @@ -505,19 +505,7 @@ class BrokerTest(TestCase): PN_TX_VERSION = (0, 9) amqp_tx_supported = PN_VERSION >= PN_TX_VERSION - - @classmethod - def amqp_tx_warning(cls): - if not cls.amqp_tx_supported: - if cls.PN_VERSION == (0, 0): - print "WARNING: Cannot test transactions over AMQP 1.0, proton not on path so version could not be determined" - elif cls.PN_VERSION == (0, 7): - print "WARNING: Cannot test transactions over AMQP 1.0, proton version is 0.7 or less, %s.%s required" % cls.PN_TX_VERSION - else: - print "WARNING: Cannot test transactions over AMQP 1.0, proton version %s.%s < %s.%s" % 
(cls.PN_VERSION + cls.PN_TX_VERSION) - return False - return True - + def configure(self, config): self.config=config def setUp(self): diff --git a/qpid/cpp/src/tests/check_dependencies.py.in b/qpid/cpp/src/tests/check_dependencies.py.in new file mode 100644 index 0000000000..4a8a544026 --- /dev/null +++ b/qpid/cpp/src/tests/check_dependencies.py.in @@ -0,0 +1,53 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import sys + +def _message(error): + return """ +=============================================================================== +Error! {} + +The tests require Qpid Python, version 1.33 or greater. 
Make sure +Qpid Python is installed and available on the Python path: + + - Linux distributions: Install 'python-qpid' + - Other: Install qpid-python from source and update PYTHONPATH + +See the Qpid website for more information: + + - Qpid downloads: http://qpid.apache.org/download.html + - Qpid packages: http://qpid.apache.org/packages.html +=============================================================================== +""".format(error) + +sys.path.insert(0, os.path.join("@CMAKE_SOURCE_DIR@", "management", "python", "lib")) + +try: + import qpid +except ImportError: + exit(_message("Can't find Python 'qpid' module")) + +try: + import qpid_tests +except ImportError: + exit(_message("Can't find Python 'qpid_tests' module")) diff --git a/qpid/cpp/src/tests/cli_tests.py b/qpid/cpp/src/tests/cli_tests.py index eee9bc648c..ae0f32d4d1 100755 --- a/qpid/cpp/src/tests/cli_tests.py +++ b/qpid/cpp/src/tests/cli_tests.py @@ -295,7 +295,8 @@ class CliTests(TestBase010): self.helper_create_queue(qname) # now bind the queue to the xchg - foo = self.qpid_config_command("-f test.xquery bind " + xchgname + " " + qname) + xquery_file = self.defines["xquery-file"] + foo = self.qpid_config_command("-f " + xquery_file + " bind " + xchgname + " " + qname) # print foo ret = os.system(foo) self.assertEqual(ret, 0) @@ -468,10 +469,14 @@ class CliTests(TestBase010): return self.cli_dir() + "/qpid-config -b localhost:%d" % self.broker.port + " " + arg def qpid_config_api(self, arg = ""): - script = import_script(checkenv("QPID_CONFIG_EXEC")) + path = os.path.join(os.getenv("SOURCE_DIR"), "management", "python", + "bin", "qpid-config") + script = import_script(path) broker = ["-b", "localhost:"+str(self.broker.port)] return script.main(broker + arg.split()) def qpid_route_api(self, arg = ""): - script = import_script(checkenv("QPID_ROUTE_EXEC")) + path = os.path.join(os.getenv("SOURCE_DIR"), "management", "python", + "bin", "qpid-route") + script = import_script(path) return 
script.main(arg.split()) diff --git a/qpid/cpp/src/tests/common.py b/qpid/cpp/src/tests/common.py new file mode 100644 index 0000000000..992bc60fb8 --- /dev/null +++ b/qpid/cpp/src/tests/common.py @@ -0,0 +1,297 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from __future__ import print_function + +from env import * + +import atexit as _atexit +import os as _os +import platform as _platform +import re as _re +import signal as _signal +import subprocess as _subprocess +import shutil as _shutil +import time as _time +import uuid as _uuid + +WINDOWS = _platform.system() == "Windows" + +def _unique_id(): + return str(_uuid.uuid4())[:4] + +def make_work_dir(): + prog = file_name(ARGS[0]) + name = "{}_{}".format(prog, _unique_id()) + + return make_dir(join(BUILD_DIR, name)) + +WORK_DIR = make_work_dir() + +notice("Created work dir '{}'", WORK_DIR) + +def _init_valgrind_command(command): + if VALGRIND is None: + return command, None + + log_file = join(WORK_DIR, "valgrind_{}.log".format(_unique_id())) + suppressions_file = join(BUILD_DIR, "src", "tests", ".valgrind.supp") + + valgrind_command = [ + VALGRIND, + "--leak-check=full --num-callers=25 --error-exitcode=100", + "--log-file={}".format(log_file), + "--suppressions={}".format(suppressions_file), + "--", + command, + ] + + return " ".join(valgrind_command), log_file + +def call_with_valgrind(command, *args, **kwargs): + command, valgrind_log_file = _init_valgrind_command(command) + + try: + call(command, *args, **kwargs) + except _subprocess.CalledProcessError as e: + if e.returncode == 100: + error("Valgrind reported errors") + print(read(valgrind_log_file)) + + raise + +def call_for_output_with_valgrind(command, *args, **kwargs): + command, valgrind_log_file = _init_valgrind_command(command) + + try: + return call_for_output(command, *args, **kwargs) + except _subprocess.CalledProcessError as e: + if e.returncode == 100: + error("Valgrind reported errors") + print(read(valgrind_log_file)) + + raise + +_brokers = list() +_brokers_by_port = dict() +_broker_port_expr = _re.compile(r"Listening on TCP/TCP6 port ([0-9]+)") +_broker_config_file = join(BUILD_DIR, "src", "tests", "qpidd-empty.conf") + +class _Broker(object): + def __init__(self, dir): + self.dir = dir + + 
self.command_file = join(self.dir, "command") + self.log_file = join(self.dir, "log") + self.data_dir = join(self.dir, "data") + + self.port = None + self.proc = None + self.command = None + self.valgrind_log_file = None + + def __repr__(self): + args = self.port, self.proc.pid, self.proc.returncode + return "Broker(port={}, pid={}, exit={})".format(*args) + + def start(self, args): + make_dir(self.dir) + + command = [ + "qpidd", + "--port 0", + "--interface localhost", + "--no-module-dir", + "--log-enable info+", + "--log-source yes", + "--log-to-stderr no", + "--log-to-file {}".format(self.log_file), + "--config {}".format(_broker_config_file), + "--data-dir {}".format(self.data_dir), + ] + + if WINDOWS: + command += [ + "--ssl-cert-store-location LocalMachine", + "--ssl-cert-name localhost", + "--ssl-port 0", + ] + + command += [x for x in args if x is not None] + command = " ".join(command) + command, valgrind_log_file = _init_valgrind_command(command) + + self.command = command + self.valgrind_log_file = valgrind_log_file + + notice("Calling '{}'", self.command) + write(self.command_file, self.command) + + self.proc = _subprocess.Popen(self.command, shell=True, + stdout=_subprocess.PIPE) + self.port = self._wait_for_port() + + assert self.command is not None + assert self.proc is not None + assert self.port is not None + assert self.port not in _brokers_by_port, self.port + + _brokers.append(self) + _brokers_by_port[self.port] = self + + notice("Started {}", self) + + def _wait_for_port(self): + port = None + + while port is None: + _time.sleep(0.4) + port = self._scan_port() + + return port + + def _scan_port(self): + if not exists(self.log_file): + return + + match = _re.search(_broker_port_expr, read(self.log_file)) + + if match: + return match.group(1) + + def stop(self): + if self.proc.poll() is not None: + return + + notice("Stopping {}", self) + + if WINDOWS: + call("taskkill /f /t /pid {}", self.proc.pid) + else: + self.proc.terminate() + + 
self.proc.wait() + + def check(self): + if WINDOWS: + # Taskkilled windows processes always return 1, so exit + # codes don't mean anything there + return 0 + + notice("Checking {}", self) + + if self.proc.returncode == 0: + return 0 + + error("{} exited with code {}", self, self.proc.returncode) + + if self.proc.returncode == 100: + print("Valgrind reported errors:") + print(read(self.valgrind_log_file)) + else: + print("Last 100 lines of broker log:") + print(tail(self.log_file, 100)) + + flush() + + error("{} exited with code {}", self, self.proc.returncode) + + return self.proc.returncode + +def start_broker(dir, *args, **kwargs): + if not is_absolute(dir): + dir = join(WORK_DIR, dir) + + auth_disabled = kwargs.get("auth_disabled", True) + + if auth_disabled: + args = list(args) + args.append("--auth no") + + broker = _Broker(dir) + broker.start(args) + + return broker.port + +def stop_broker(port): + broker = _brokers_by_port[port] + broker.stop() + +def check_broker(port): + broker = _brokers_by_port[port] + + if broker.check() != 0: + exit("Broker failure") + +def check_results(): + for broker in _brokers: + broker.stop() + + errors = False + + for broker in _brokers: + code = broker.check() + + if code == 0: + continue + + errors = True + + if errors: + exit("Broker failure") + + remove(WORK_DIR) + + notice("Tests completed without error") + +def _exit_handler(): + if exists(WORK_DIR): + notice("Output saved in work dir '{}'", WORK_DIR) + + for broker in _brokers: + broker.stop() + +_atexit.register(_exit_handler) + +def configure_broker(broker_port, *args): + command = [ + "qpid-config", + "--broker localhost:{}".format(broker_port), + ] + + command += [x for x in args if x is not None] + + call(" ".join(command)) + +def run_broker_tests(broker_port, *args): + command = [ + "qpid-python-test", + "--broker localhost:{}".format(broker_port), + "--time", + ] + + command += [x for x in args if x is not None] + + call(" ".join(command)) + +def 
connect_brokers(*args): + command = ["qpid-route"] + command += [x for x in args if x is not None] + + call(" ".join(command)) diff --git a/qpid/cpp/src/tests/dynamic_log_hires_timestamp b/qpid/cpp/src/tests/dynamic_log_hires_timestamp index 75034f9902..606286d9c3 100755 --- a/qpid/cpp/src/tests/dynamic_log_hires_timestamp +++ b/qpid/cpp/src/tests/dynamic_log_hires_timestamp @@ -20,14 +20,14 @@ # # Run a simple test to verify dynamic log highres timestamp changes -source ./test_env.sh -test -d $PYTHON_DIR || { echo "Skipping python tests, no python dir."; exit 0; } + +source ./env.sh LOG_FILE=hires_test.log trap cleanup EXIT cleanup() { - test -n "$PORT" && $QPIDD_EXEC --no-module-dir --quit --port $PORT + test -n "$PORT" && qpidd --no-module-dir --quit --port $PORT } error() { @@ -36,16 +36,16 @@ error() { } rm -rf $LOG_FILE -PORT=$($QPIDD_EXEC --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $LOG_FILE) || error "Could not start broker" +PORT=$(qpidd --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $LOG_FILE) || error "Could not start broker" -echo Broker for log highres timestamp test started on $PORT, pid is $($QPIDD_EXEC --no-module-dir --check --port $PORT) +echo Broker for log highres timestamp test started on $PORT, pid is $(qpidd --no-module-dir --check --port $PORT) -$srcdir/qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=1 body=LOWRES > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='true' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=2 body=HI_RES > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='false' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=3 body=LOWRES > /dev/null +qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=1 body=LOWRES > /dev/null 
+qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='true' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=2 body=HI_RES > /dev/null +qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='false' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=3 body=LOWRES > /dev/null # Expect 3 log entries with 'echo' in them if [[ $(grep echo $LOG_FILE | wc -l) -ne 3 ]]; then diff --git a/qpid/cpp/src/tests/dynamic_log_level_test b/qpid/cpp/src/tests/dynamic_log_level_test index f8fd7a8dd8..0ea40d11b6 100755 --- a/qpid/cpp/src/tests/dynamic_log_level_test +++ b/qpid/cpp/src/tests/dynamic_log_level_test @@ -20,14 +20,14 @@ # # Run a simple test to verify dynamic log level changes -source ./test_env.sh -test -d $PYTHON_DIR || { echo "Skipping python tests, no python dir."; exit 0; } + +source ./env.sh LOG_FILE=log_test.log trap cleanup EXIT cleanup() { - test -n "$PORT" && $QPIDD_EXEC --no-module-dir --quit --port $PORT + test -n "$PORT" && qpidd --no-module-dir --quit --port $PORT } error() { @@ -43,30 +43,30 @@ checklog() { } rm -rf $LOG_FILE -PORT=$($QPIDD_EXEC --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $LOG_FILE) || error "Could not start broker" +PORT=$(qpidd --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $LOG_FILE) || error "Could not start broker" -echo Broker for log level test started on $PORT, pid is $($QPIDD_EXEC --no-module-dir --check --port $PORT) +echo Broker for log level test started on $PORT, pid is $(qpidd --no-module-dir --check --port $PORT) # Set level to notice+ and send an echo request # The 'echo' in the log is hidden since it is at debug level. 
-$srcdir/qpid-ctrl -b localhost:$PORT setLogLevel level='notice+' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=1 body=HIDDEN > /dev/null +qpid-ctrl -b localhost:$PORT setLogLevel level='notice+' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=1 body=HIDDEN > /dev/null checklog 0 "Step 1 Expected no echo log entries" # Next, enable all Broker logs at debug and higher levels and send another echo # This 'echo' should be in the log. -$srcdir/qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=2 body=VISIBLE > /dev/null +qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=2 body=VISIBLE > /dev/null checklog 1 "Step 2 Expected one echo log entry" # Now turn on Broker debug messages but specifically disable ManagementMethod logs # The 'echo' should be hidden. -$srcdir/qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker !debug+:broker::Broker::ManagementMethod' > /dev/null -$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=3 body=HIDDEN > /dev/null +qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker !debug+:broker::Broker::ManagementMethod' > /dev/null +qpid-ctrl -b localhost:$PORT echo sequence=3 body=HIDDEN > /dev/null checklog 1 "Step 3 Expected one echo log entry" # Verify that the management get returns what was just set -$srcdir/qpid-ctrl -b localhost:$PORT getLogLevel > dynamic_log_level.tmp +qpid-ctrl -b localhost:$PORT getLogLevel > dynamic_log_level.tmp if [[ $(grep 'level=debug+:Broker,!debug+:broker::Broker::ManagementMethod' dynamic_log_level.tmp | wc -l) -ne 1 ]]; then error "Step 4 getLogLevel returned unexpected value: " `cat dynamic_log_level.tmp` fi @@ -76,10 +76,10 @@ cleanup # Start another broker with --log-disable settings and make sure the management string receives them rm -rf $LOG_FILE -PORT=$($QPIDD_EXEC --auth=no --no-module-dir --daemon --port=0 --interface 
127.0.0.1 --log-to-file $LOG_FILE --log-enable debug:foo --log-disable debug:bar) || error "Could not start broker" -echo Broker for log level test started on $PORT, pid is $($QPIDD_EXEC --no-module-dir --check --port $PORT) +PORT=$(qpidd --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $LOG_FILE --log-enable debug:foo --log-disable debug:bar) || error "Could not start broker" +echo Broker for log level test started on $PORT, pid is $(qpidd --no-module-dir --check --port $PORT) -$srcdir/qpid-ctrl -b localhost:$PORT getLogLevel > dynamic_log_level.tmp +qpid-ctrl -b localhost:$PORT getLogLevel > dynamic_log_level.tmp if [[ $(grep 'level=debug:foo,!debug:bar' dynamic_log_level.tmp | wc -l) -ne 1 ]]; then error "Step 5 getLogLevel returned unexpected value: " `cat dynamic_log_level.tmp` fi diff --git a/qpid/cpp/src/tests/test_env.ps1.in b/qpid/cpp/src/tests/env.ps1.in index 94834a4b5e..94834a4b5e 100644 --- a/qpid/cpp/src/tests/test_env.ps1.in +++ b/qpid/cpp/src/tests/env.ps1.in diff --git a/qpid/cpp/src/tests/env.py.in b/qpid/cpp/src/tests/env.py.in new file mode 100644 index 0000000000..d76e17f573 --- /dev/null +++ b/qpid/cpp/src/tests/env.py.in @@ -0,0 +1,100 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from __future__ import print_function + +from plano import * + +def _export(name, value): + globals()[name] = value + + if value is not None: + ENV[name] = value + +def _export_module(name, path): + _export(name, None) + + if exists(path): + _export(name, path) + +# Variables substituted by cmake + +_export("BUILD_DIR", normalize_path("@CMAKE_BINARY_DIR@")) +_export("SOURCE_DIR", normalize_path("@CMAKE_SOURCE_DIR@")) +_export("SASLPASSWD2", normalize_path("@SASLPASSWD2_EXECUTABLE@")) +_export("PYTHON", normalize_path("@PYTHON_EXECUTABLE@")) +_export("VALGRIND", None) + +if "@ENABLE_VALGRIND@" == "ON": + _export("VALGRIND", normalize_path("@VALGRIND_EXECUTABLE@")) + +# Python path + +_python_path = [ + join(BUILD_DIR, "src", "tests"), + join(BUILD_DIR, "bindings", "qpid", "python"), + join(BUILD_DIR, "management", "python", "lib"), +] + +if "PYTHONPATH" in ENV: + _python_path.extend(ENV["PYTHONPATH"].split(PATH_VAR_SEP)) + +ENV["PYTHONPATH"] = PATH_VAR_SEP.join(_python_path) + +# Path + +_path = [ + join(BUILD_DIR, "src"), + join(BUILD_DIR, "src", "RelWithDebInfo"), + join(BUILD_DIR, "src", "tests"), + join(BUILD_DIR, "src", "tests", "RelWithDebInfo"), + join(BUILD_DIR, "management", "python", "bin"), +] + +if "PATH" in ENV: + _path.extend(ENV["PATH"].split(PATH_VAR_SEP)) + +ENV["PATH"] = PATH_VAR_SEP.join(_path) + +# Modules + +_export_module("HA_LIB", join(BUILD_DIR, "src", "ha.so")) +_export_module("XML_LIB", join(BUILD_DIR, "src", "xml.so")) +_export_module("AMQP_LIB", join(BUILD_DIR, "src", "amqp.so")) +_export_module("TEST_STORE_LIB", join(BUILD_DIR, "src", "tests", "test_store.so")) +_export_module("STORE_LIB", join(BUILD_DIR, "src", "linearstore.so")) + +if STORE_LIB is None: + _export_module("STORE_LIB", join(BUILD_DIR, "src", "legacystore.so")) + +# Summarize + +print("PWD: {}".format(current_dir())) +print("SOURCE_DIR: {}".format(SOURCE_DIR)) 
+print("BUILD_DIR: {}".format(BUILD_DIR)) + +print("PATH:") + +for item in _path: + print(" {}".format(item)) + +print("PYTHONPATH:") + +for item in _python_path: + print(" {}".format(item)) diff --git a/qpid/cpp/src/tests/env.sh.in b/qpid/cpp/src/tests/env.sh.in new file mode 100644 index 0000000000..ee5b858b94 --- /dev/null +++ b/qpid/cpp/src/tests/env.sh.in @@ -0,0 +1,74 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# Environment variables substituted by cmake + +export BUILD_DIR=@CMAKE_BINARY_DIR@ +export SOURCE_DIR=@CMAKE_SOURCE_DIR@ + +export SASLPASSWD2=@SASLPASSWD2_EXECUTABLE@ +export PYTHON=@PYTHON_EXECUTABLE@ + +if [[ "@ENABLE_VALGRIND@" == "ON" ]]; then + export VALGRIND=@VALGRIND_EXECUTABLE@ +fi + +# Python path + +export PYTHONPATH=$BUILD_DIR/src/tests:$BUILD_DIR/bindings/qpid/python:$BUILD_DIR/management/python/lib:${PYTHONPATH-} + +# Path + +export PATH=$BUILD_DIR/src:$BUILD_DIR/src/tests:$BUILD_DIR/management/python/bin:${PATH-} + +# Modules + +function export_module { + [[ -f $BUILD_DIR/src/$2 ]] && export $1=$BUILD_DIR/src/$2 +} + +export_module HA_LIB ha.so +export_module XML_LIB xml.so +export_module AMQP_LIB amqp.so + +[[ ${STORE_LIB-} ]] || export_module STORE_LIB linearstore.so +[[ ${STORE_LIB-} ]] || export_module STORE_LIB legacystore.so + +export TEST_STORE_LIB=$BUILD_DIR/src/tests/test_store.so + +# Qpidd options - Eliminate + +export QPID_NO_MODULE_DIR=1 # Don't accidentally load installed modules +export QPID_DATA_DIR= # Disable persistence +export QPID_CONFIG=$SOURCE_DIR/src/tests/qpidd-empty.conf + +# Options for boost test framework + +[[ ${BOOST_TEST_SHOW_PROGRESS-} ]] || export BOOST_TEST_SHOW_PROGRESS=yes +[[ ${BOOST_TEST_CATCH_SYSTEM_ERRORS-} ]] || export BOOST_TEST_CATCH_SYSTEM_ERRORS=no + +echo "PWD: $PWD" +echo "SOURCE_DIR: ${SOURCE_DIR}" +echo "BUILD_DIR: ${BUILD_DIR}" + +echo "PATH:" +echo -n " "; echo $PATH | sed 's/:/\n /g' + +echo "PYTHONPATH:" +echo -n " "; echo $PYTHONPATH | sed 's/:/\n /g' diff --git a/qpid/cpp/src/tests/federated_topic_test b/qpid/cpp/src/tests/federated_topic_test index 2d31f9af5a..b8356b4b9d 100755 --- a/qpid/cpp/src/tests/federated_topic_test +++ b/qpid/cpp/src/tests/federated_topic_test @@ -42,12 +42,12 @@ while getopts "s:m:b:" opt ; do esac done -source ./test_env.sh +source ./env.sh trap stop_brokers EXIT start_broker() { - $QPIDD_EXEC --daemon --port 0 --interface 127.0.0.1 --no-module-dir 
--no-data-dir --auth no > qpidd.port + qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --no-data-dir --auth no > qpidd.port } start_brokers() { @@ -61,7 +61,7 @@ start_brokers() { stop_brokers() { for p in $PORT_A $PORT_B $PORT_C; do - $QPIDD_EXEC --no-module-dir -q --port $p + qpidd --no-module-dir -q --port $p done } @@ -75,11 +75,11 @@ subscribe() { echo Subscriber $1 connecting on $MY_PORT LOG="subscriber_$1.log" - ./qpid-topic-listener -p $MY_PORT > $LOG 2>&1 && rm -f $LOG + qpid-topic-listener -p $MY_PORT > $LOG 2>&1 && rm -f $LOG } publish() { - ./qpid-topic-publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS -p $PORT_A + qpid-topic-publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS -p $PORT_A } setup_routes() { @@ -89,40 +89,39 @@ setup_routes() { if (($VERBOSE)); then echo "Establishing routes for topic..." fi - $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_A amq.topic topic_control B B - $QPID_ROUTE_EXEC route add $BROKER_C $BROKER_B amq.topic topic_control C C + qpid-route route add $BROKER_B $BROKER_A amq.topic topic_control B B + qpid-route route add $BROKER_C $BROKER_B amq.topic topic_control C C if (($VERBOSE)); then echo "linked A->B->C" fi - $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_C amq.topic topic_control B B - $QPID_ROUTE_EXEC route add $BROKER_A $BROKER_B amq.topic topic_control A A + qpid-route route add $BROKER_B $BROKER_C amq.topic topic_control B B + qpid-route route add $BROKER_A $BROKER_B amq.topic topic_control A A if (($VERBOSE)); then echo "linked C->B->A" echo "Establishing routes for response queue..." 
fi - $QPID_ROUTE_EXEC route add $BROKER_B $BROKER_C amq.direct response B B - $QPID_ROUTE_EXEC route add $BROKER_A $BROKER_B amq.direct response A A + qpid-route route add $BROKER_B $BROKER_C amq.direct response B B + qpid-route route add $BROKER_A $BROKER_B amq.direct response A A if (($VERBOSE)); then echo "linked C->B->A" for b in $BROKER_A $BROKER_B $BROKER_C; do echo "Routes for $b" - $QPID_ROUTE_EXEC route list $b + qpid-route route list $b done fi } -if test -d ${PYTHON_DIR} ; then - start_brokers - if (($VERBOSE)); then - echo "Running federated topic test against brokers on ports $PORT_A $PORT_B $PORT_C" - fi +start_brokers - for ((i=$SUBSCRIBERS ; i--; )); do - subscribe $i & - done +if (($VERBOSE)); then + echo "Running federated topic test against brokers on ports $PORT_A $PORT_B $PORT_C" +fi - setup_routes +for ((i=$SUBSCRIBERS ; i--; )); do + subscribe $i & +done - publish || exit 1 -fi +setup_routes + +publish || exit 1 diff --git a/qpid/cpp/src/tests/ha_test.py b/qpid/cpp/src/tests/ha_test.py index ace225a509..3659185140 100755 --- a/qpid/cpp/src/tests/ha_test.py +++ b/qpid/cpp/src/tests/ha_test.py @@ -160,9 +160,8 @@ acl allow all all @property def qpid_ha_script(self): if not hasattr(self, "_qpid_ha_script"): - qpid_ha_exec = os.getenv("QPID_HA_EXEC") - if not qpid_ha_exec or not os.path.isfile(qpid_ha_exec): - raise Skipped("qpid-ha not available") + qpid_ha_exec = os.path.join(os.getenv("SOURCE_DIR"), "management", + "python", "bin", "qpid-ha") self._qpid_ha_script = import_script(qpid_ha_exec) return self._qpid_ha_script @@ -225,11 +224,8 @@ acl allow all all assert retry(lambda: agent.getQueue(queue) is None, timeout=timeout), "%s: queue %s still present"%(msg,queue) def qpid_config(self, args): - qpid_config_exec = os.getenv("QPID_CONFIG_EXEC") - if not qpid_config_exec or not os.path.isfile(qpid_config_exec): - raise Skipped("qpid-config not available") assert subprocess.call( - [qpid_config_exec, "--broker", self.host_port()]+args, 
stdout=1, stderr=subprocess.STDOUT + ["qpid-config", "--broker", self.host_port()]+args, stdout=1, stderr=subprocess.STDOUT ) == 0, "qpid-config failed" def config_replicate(self, from_broker, queue): diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py index 0efb8182ec..fdcb314751 100755 --- a/qpid/cpp/src/tests/ha_tests.py +++ b/qpid/cpp/src/tests/ha_tests.py @@ -1450,15 +1450,9 @@ class TransactionTests(HaBrokerTest): "*.tx.*"], stdout=None, stderr=None).assert_exit_ok() if __name__ == "__main__": - qpid_ha_exec = os.getenv("QPID_HA_EXEC") - if qpid_ha_exec and os.path.isfile(qpid_ha_exec): - BrokerTest.amqp_tx_warning() - outdir = "ha_tests.tmp" - shutil.rmtree(outdir, True) - os.execvp("qpid-python-test", - ["qpid-python-test", "-m", "ha_tests", "-DOUTDIR=%s"%outdir] - + sys.argv[1:]) - else: - print "Skipping ha_tests, qpid-ha not available" - + outdir = "ha_tests.tmp" + shutil.rmtree(outdir, True) + os.execvp("qpid-python-test", + ["qpid-python-test", "-m", "ha_tests", "-DOUTDIR=%s"%outdir] + + sys.argv[1:]) diff --git a/qpid/cpp/src/tests/interlink_tests.py b/qpid/cpp/src/tests/interlink_tests.py index 3eec2422f1..8833f06af5 100755 --- a/qpid/cpp/src/tests/interlink_tests.py +++ b/qpid/cpp/src/tests/interlink_tests.py @@ -72,12 +72,7 @@ class AmqpBrokerTest(BrokerTest): return self.popen(cmd, stdout=PIPE) def ready_receiver(self, config): - # NOTE: some tests core dump when run with SWIG binding over proton - # version<=0.6. This is fixed on proton 0.7. 
- def use_native(): - pv=os.environ.get("QPID_PROTON_VERSION") - return pv and [int(n) for n in pv.split(".")] <= [0,6] - s = self.broker.connect(native=use_native()).session() + s = self.broker.connect().session() r = s.receiver("readyq; {create:always}") cmd = ["qpid-receive", "--broker", config.url, diff --git a/qpid/cpp/src/tests/interop_tests.py b/qpid/cpp/src/tests/interop_tests.py index f76b9f634b..31182f324a 100755 --- a/qpid/cpp/src/tests/interop_tests.py +++ b/qpid/cpp/src/tests/interop_tests.py @@ -192,10 +192,7 @@ class CppTxTest(InteropTest): if __name__ == "__main__": - if not BrokerTest.amqp_tx_supported: - BrokerTest.amqp_tx_warning() - print "Skipping interop_tests" - sys.exit(0) + from env import * outdir = "interop_tests.tmp" shutil.rmtree(outdir, True) cmd = ["qpid-python-test", "-m", "interop_tests", "-DOUTDIR=%s"%outdir] + sys.argv[1:] diff --git a/qpid/cpp/src/tests/legacystore/CMakeLists.txt b/qpid/cpp/src/tests/legacystore/CMakeLists.txt index 9f6f6b7171..20a242b415 100644 --- a/qpid/cpp/src/tests/legacystore/CMakeLists.txt +++ b/qpid/cpp/src/tests/legacystore/CMakeLists.txt @@ -28,8 +28,6 @@ endif (QPID_LINK_BOOST_DYNAMIC) include_directories( ${CMAKE_CURRENT_SOURCE_DIR} ) -set(test_wrap ${shell} ${CMAKE_SOURCE_DIR}/src/tests/run_test${test_script_suffix} -buildDir=${CMAKE_BINARY_DIR}) - if (BUILD_TESTING_UNITTESTS) # Like this to work with cmake 2.4 on Unix @@ -53,7 +51,7 @@ target_link_libraries (${testname} if ("${ARGV1}" STREQUAL "LONG") set_target_properties(${testname} PROPERTIES COMPILE_DEFINITIONS LONG_TEST) endif () -add_test (NAME ${testname} COMMAND ${test_wrap} -boostTest -- $<TARGET_FILE:${testname}>) +add_test (NAME ${testname} COMMAND ${CMAKE_BINARY_DIR}/src/tests/run.sh $<TARGET_FILE:${testname}>) unset (testname) ENDMACRO (define_journal_test) @@ -100,7 +98,7 @@ target_link_libraries (jtt__ut ${Boost_PROGRAM_OPTIONS_LIBRARY} ${clock_gettime_LIB} legacystore_shared) -add_test(journal_jtt_ut ${test_wrap} -boostTest 
-workingDir=${CMAKE_CURRENT_SOURCE_DIR}/jrnl/jtt -- ${CMAKE_CURRENT_BINARY_DIR}/jtt__ut) +add_test(journal_jtt_ut ${CMAKE_BINARY_DIR}/src/tests/run.sh ${CMAKE_CURRENT_BINARY_DIR}/jtt__ut) endif (BUILD_TESTING_UNITTESTS) @@ -127,6 +125,6 @@ target_link_libraries (jtt add_test(journal_jtt ${CMAKE_CURRENT_BINARY_DIR}/jtt -c ${CMAKE_CURRENT_SOURCE_DIR}/jrnl/jtt/jtt.csv) -add_test (legacystore_python_tests ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/run_python_tests${test_script_suffix}) +add_test(legacystore_python_tests ${PYTHON_EXECUTABLE} run_python_tests) endif (BUILD_LEGACYSTORE AND BUILD_TESTING) diff --git a/qpid/cpp/src/tests/legacystore/federation/federation_tests_env.sh b/qpid/cpp/src/tests/legacystore/federation/federation_tests_env.sh index bf75056444..be4504f3bf 100755 --- a/qpid/cpp/src/tests/legacystore/federation/federation_tests_env.sh +++ b/qpid/cpp/src/tests/legacystore/federation/federation_tests_env.sh @@ -142,7 +142,7 @@ func_set_env () if test -z ${QPID_BLD}; then QPID_BLD="${QPID_DIR}/cpp" fi - source $QPID_BLD/src/tests/test_env.sh + source $QPID_BLD/src/tests/env.sh # CPP_CLUSTER_EXEC="${QPID_BLD}/src/tests/cluster_test" # PYTHON_CLUSTER_EXEC="${QPID_DIR}/cpp/src/tests/$PYTHON_TESTNAME" FEDERATION_SYS_TESTS_FAIL="${QPID_DIR}/cpp/src/tests/federation_sys_tests.fail" diff --git a/qpid/cpp/src/tests/legacystore/run_python_tests b/qpid/cpp/src/tests/legacystore/run_python_tests index c1d04a28a1..d87c72e966 100755 --- a/qpid/cpp/src/tests/legacystore/run_python_tests +++ b/qpid/cpp/src/tests/legacystore/run_python_tests @@ -1,4 +1,5 @@ -#!/usr/bin/env bash +#!/usr/bin/env python + # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -18,26 +19,29 @@ # under the License. 
# -source $QPID_TEST_COMMON +import os +import sys -ensure_python_tests +# Put the python test library on the path so we can get our +# environment -#Add our directory to the python path -export PYTHONPATH=$srcdir/legacystore:$PYTHONPATH +file_path = os.path.abspath(__file__) +store_tests_dir = os.path.split(file_path)[0] +tests_dir = os.path.split(store_tests_dir)[0] -MODULENAME=python_tests +sys.path.insert(0, tests_dir) -echo "Running Python tests in module ${MODULENAME}..." +from common import * -QPID_PORT=${QPID_PORT:-5672} -FAILING=${FAILING:-/dev/null} -PYTHON_TESTS=${PYTHON_TESTS:-$*} +# Add our directory to the python path -OUTDIR=${MODULENAME}.tmp -rm -rf $OUTDIR +ENV["PYTHONPATH"] = "{}:{}".format(store_tests_dir, ENV["PYTHONPATH"]) # To debug a test, add the following options to the end of the following line: # -v DEBUG -c qpid.messaging.io.ops [*.testName] -${QPID_PYTHON_TEST} -m ${MODULENAME} -I $FAILING -DOUTDIR=$OUTDIR \ - $PYTHON_TEST || exit 1 +port = start_broker("broker", "--load-module {}".format(STORE_LIB)) + +run_broker_tests(port, "-m python_tests", "-DOUTDIR={}".format(WORK_DIR)) + +check_results() diff --git a/qpid/cpp/src/tests/linearstore/CMakeLists.txt b/qpid/cpp/src/tests/linearstore/CMakeLists.txt index bf6c164818..760c54f3b1 100644 --- a/qpid/cpp/src/tests/linearstore/CMakeLists.txt +++ b/qpid/cpp/src/tests/linearstore/CMakeLists.txt @@ -17,13 +17,10 @@ # under the License. 
# -if(BUILD_LINEARSTORE AND BUILD_TESTING) +if (BUILD_LINEARSTORE AND BUILD_TESTING) message(STATUS "Building linearstore tests") -set(test_wrap ${shell} ${CMAKE_SOURCE_DIR}/src/tests/run_test${test_script_suffix} -buildDir=${CMAKE_BINARY_DIR}) - -add_test (linearstore_python_tests ${test_wrap} -- ${CMAKE_CURRENT_SOURCE_DIR}/run_python_tests${test_script_suffix}) +add_test(linearstore_python_tests ${PYTHON_EXECUTABLE} run_python_tests) endif (BUILD_LINEARSTORE AND BUILD_TESTING) - diff --git a/qpid/cpp/src/tests/linearstore/run_python_tests b/qpid/cpp/src/tests/linearstore/run_python_tests index 4ff212a71c..d87c72e966 100755 --- a/qpid/cpp/src/tests/linearstore/run_python_tests +++ b/qpid/cpp/src/tests/linearstore/run_python_tests @@ -1,4 +1,5 @@ -#!/usr/bin/env bash +#!/usr/bin/env python + # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -18,25 +19,29 @@ # under the License. # -source ${QPID_TEST_COMMON} +import os +import sys -ensure_python_tests +# Put the python test library on the path so we can get our +# environment -#Add our directory to the python path -export PYTHONPATH=$srcdir/linearstore:${PYTHONPATH} +file_path = os.path.abspath(__file__) +store_tests_dir = os.path.split(file_path)[0] +tests_dir = os.path.split(store_tests_dir)[0] -MODULENAME=python_tests +sys.path.insert(0, tests_dir) -echo "Running Python tests in module ${MODULENAME}..." 
+from common import * -QPID_PORT=${QPID_PORT:-5672} -FAILING=${FAILING:-/dev/null} -PYTHON_TESTS=${PYTHON_TESTS:-$*} +# Add our directory to the python path -OUTDIR=${MODULENAME}.tmp -rm -rf ${OUTDIR} +ENV["PYTHONPATH"] = "{}:{}".format(store_tests_dir, ENV["PYTHONPATH"]) # To debug a test, add the following options to the end of the following line: # -v DEBUG -c qpid.messaging.io.ops [*.testName] -${QPID_PYTHON_TEST} -m ${MODULENAME} -I ${FAILING} -DOUTDIR=${OUTDIR} ${PYTHON_TEST} || exit 1 +port = start_broker("broker", "--load-module {}".format(STORE_LIB)) + +run_broker_tests(port, "-m python_tests", "-DOUTDIR={}".format(WORK_DIR)) + +check_results() diff --git a/qpid/cpp/src/tests/plano.py b/qpid/cpp/src/tests/plano.py new file mode 100644 index 0000000000..74a0f6d0b3 --- /dev/null +++ b/qpid/cpp/src/tests/plano.py @@ -0,0 +1,543 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from __future__ import print_function + +import atexit as _atexit +import codecs as _codecs +import fnmatch as _fnmatch +import getpass as _getpass +import os as _os +import random as _random +import re as _re +import shutil as _shutil +import subprocess as _subprocess +import sys as _sys +import tarfile as _tarfile +import tempfile as _tempfile +import traceback as _traceback + +# See documentation at http://www.ssorj.net/projects/plano.html + +def fail(message, *args): + error(message, *args) + + if isinstance(message, BaseException): + raise message + + raise Exception(message) + +def error(message, *args): + _print_message("Error", message, args, _sys.stderr) + +def warn(message, *args): + _print_message("Warn", message, args, _sys.stderr) + +def notice(message, *args): + _print_message(None, message, args, _sys.stdout) + +def debug(message, *args): + _print_message("Debug", message, args, _sys.stdout) + +def exit(message=None, *args): + if message is None: + _sys.exit() + + _print_message("Error", message, args, _sys.stderr) + + _sys.exit(1) + +def _print_message(category, message, args, file): + message = _format_message(category, message, args) + + print(message, file=file) + file.flush() + +def _format_message(category, message, args): + if isinstance(message, BaseException): + message = str(message) + + if message == "": + message = message.__class__.__name__ + + if category: + message = "{}: {}".format(category, message) + + if args: + message = message.format(*args) + + script = split(_sys.argv[0])[1] + message = "{}: {}".format(script, message) + + return message + +def flush(): + _sys.stdout.flush() + _sys.stderr.flush() + +absolute_path = _os.path.abspath +normalize_path = _os.path.normpath +exists = _os.path.exists +is_absolute = _os.path.isabs +is_dir = _os.path.isdir +is_file = _os.path.isfile +is_link = _os.path.islink + +join = _os.path.join +split = _os.path.split +split_extension = _os.path.splitext + +LINE_SEP = _os.linesep +PATH_SEP = 
_os.sep +PATH_VAR_SEP = _os.pathsep +ENV = _os.environ +ARGS = _sys.argv + +current_dir = _os.getcwd + +def home_dir(user=""): + return _os.path.expanduser("~{}".format(user)) + +def parent_dir(path): + path = normalize_path(path) + parent, child = split(path) + + return parent + +def file_name(file): + file = normalize_path(file) + dir, name = split(file) + + return name + +def name_stem(file): + name = file_name(file) + + if name.endswith(".tar.gz"): + name = name[:-3] + + stem, ext = split_extension(name) + + return stem + +def name_extension(file): + name = file_name(file) + stem, ext = split_extension(name) + + return ext + +def read(file): + with _codecs.open(file, encoding="utf-8", mode="r") as f: + return f.read() + +def write(file, string): + with _codecs.open(file, encoding="utf-8", mode="w") as f: + f.write(string) + + return file + +def append(file, string): + with _codecs.open(file, encoding="utf-8", mode="a") as f: + f.write(string) + + return file + +def prepend(file, string): + orig = read(file) + prepended = string + orig + + return write(file, prepended) + +def touch(file): + return append(file, "") + +def tail(file, n): + return "".join(tail_lines(file, n)) + +def read_lines(file): + with _codecs.open(file, encoding="utf-8", mode="r") as f: + return f.readlines() + +def write_lines(file, lines): + with _codecs.open(file, encoding="utf-8", mode="r") as f: + f.writelines(lines) + + return file + +def append_lines(file, lines): + with _codecs.open(file, encoding="utf-8", mode="a") as f: + f.writelines(string) + + return file + +def prepend_lines(file, lines): + orig_lines = read_lines(file) + + with _codecs.open(file, encoding="utf-8", mode="w") as f: + f.writelines(lines) + f.writelines(orig_lines) + + return file + +# Derived from http://stackoverflow.com/questions/136168/get-last-n-lines-of-a-file-with-python-similar-to-tail +def tail_lines(file, n): + assert n >= 0 + + with _codecs.open(file, encoding="utf-8", mode="r") as f: + pos = n + 1 + 
lines = list() + + while len(lines) <= n: + try: + f.seek(-pos, 2) + except IOError: + f.seek(0) + break + finally: + lines = f.readlines() + + pos *= 2 + + return lines[-n:] + +_temp_dir = _tempfile.mkdtemp(prefix="plano.") + +def _get_temp_file(key): + assert not key.startswith("_") + + return join(_temp_dir, "_file_{}".format(key)) + +def _remove_temp_dir(): + _shutil.rmtree(_temp_dir, ignore_errors=True) + +_atexit.register(_remove_temp_dir) + +def read_temp(key): + file = _get_temp_file(key) + return read(file) + +def write_temp(key, string): + file = _get_temp_file(key) + return write(file, string) + +def append_temp(key, string): + file = _get_temp_file(key) + return append(file, string) + +def prepend_temp(key, string): + file = _get_temp_file(key) + return prepend(file, string) + +def make_temp(key): + return append_temp(key, "") + +def open_temp(key, mode="r"): + file = _get_temp_file(key) + return _codecs.open(file, encoding="utf-8", mode=mode) + +# This one is deleted on process exit +def make_temp_dir(): + return _tempfile.mkdtemp(prefix="_dir_", dir=_temp_dir) + +# This one sticks around +def make_user_temp_dir(): + temp_dir = _tempfile.gettempdir() + user = _getpass.getuser() + user_temp_dir = join(temp_dir, user) + + return make_dir(user_temp_dir) + +def copy(from_path, to_path): + notice("Copying '{}' to '{}'", from_path, to_path) + + to_dir = parent_dir(to_path) + + if to_dir: + make_dir(to_dir) + + if is_dir(from_path): + _copytree(from_path, to_path, symlinks=True) + else: + _shutil.copy(from_path, to_path) + + return to_path + +def move(from_path, to_path): + notice("Moving '{}' to '{}'", from_path, to_path) + + _shutil.move(from_path, to_path) + + return to_path + +def rename(path, expr, replacement): + path = normalize_path(path) + parent_dir, name = split(path) + to_name = string_replace(name, expr, replacement) + to_path = join(parent_dir, to_name) + + notice("Renaming '{}' to '{}'", path, to_path) + + move(path, to_path) + + return to_path 
+ +def remove(path): + notice("Removing '{}'", path) + + if not exists(path): + return + + if is_dir(path): + _shutil.rmtree(path, ignore_errors=True) + else: + _os.remove(path) + + return path + +def make_link(source_path, link_file): + if exists(link_file): + assert read_link(link_file) == source_path + return + + _os.symlink(source_path, link_file) + + return link_file + +def read_link(file): + return _os.readlink(file) + +def find(dir, *patterns): + matched_paths = set() + + if not patterns: + patterns = ("*",) + + for root, dirs, files in _os.walk(dir): + for pattern in patterns: + matched_dirs = _fnmatch.filter(dirs, pattern) + matched_files = _fnmatch.filter(files, pattern) + + matched_paths.update([join(root, x) for x in matched_dirs]) + matched_paths.update([join(root, x) for x in matched_files]) + + return sorted(matched_paths) + +def find_any_one(dir, *patterns): + paths = find(dir, *patterns) + + if len(paths) == 0: + return + + return paths[0] + +def find_only_one(dir, *patterns): + paths = find(dir, *patterns) + + if len(paths) == 0: + return + + assert len(paths) == 1 + + return paths[0] + +# find_via_expr? 
+ +def string_replace(string, expr, replacement, count=0): + return _re.sub(expr, replacement, string, count) + +def make_dir(dir): + if not exists(dir): + _os.makedirs(dir) + + return dir + +# Returns the current working directory so you can change it back +def change_dir(dir): + notice("Changing directory to '{}'", dir) + + cwd = current_dir() + _os.chdir(dir) + return cwd + +def list_dir(dir, *patterns): + assert is_dir(dir) + + names = _os.listdir(dir) + + if not patterns: + return sorted(names) + + matched_names = set() + + for pattern in patterns: + matched_names.update(_fnmatch.filter(names, pattern)) + + return sorted(matched_names) + +class working_dir(object): + def __init__(self, dir): + self.dir = dir + self.prev_dir = None + + def __enter__(self): + self.prev_dir = change_dir(self.dir) + return self.dir + + def __exit__(self, type, value, traceback): + change_dir(self.prev_dir) + +def _init_call(command, args, kwargs): + if args: + command = command.format(*args) + + if "shell" not in kwargs: + kwargs["shell"] = True + + notice("Calling '{}'", command) + + return command, kwargs + +def call(command, *args, **kwargs): + command, args = _init_call(command, args, kwargs) + _subprocess.check_call(command, **kwargs) + +def call_for_output(command, *args, **kwargs): + command, args = _init_call(command, args, kwargs) + return _subprocess.check_output(command, **kwargs) + +def make_archive(input_dir, output_dir, archive_stem): + temp_dir = make_temp_dir() + temp_input_dir = join(temp_dir, archive_stem) + + copy(input_dir, temp_input_dir) + make_dir(output_dir) + + output_file = "{}.tar.gz".format(join(output_dir, archive_stem)) + output_file = absolute_path(output_file) + + with working_dir(temp_dir): + call("tar -czf {} {}", output_file, archive_stem) + + return output_file + +def extract_archive(archive_file, output_dir): + assert is_file(archive_file) + + if not exists(output_dir): + make_dir(output_dir) + + archive_file = absolute_path(archive_file) + + 
with working_dir(output_dir): + call("tar -xf {}", archive_file) + + return output_dir + +def rename_archive(archive_file, new_archive_stem): + assert is_file(archive_file) + + if name_stem(archive_file) == new_archive_stem: + return + + temp_dir = make_temp_dir() + + extract_archive(archive_file, temp_dir) + + input_name = list_dir(temp_dir)[0] + input_dir = join(temp_dir, input_name) + output_file = make_archive(input_dir, temp_dir, new_archive_stem) + output_name = file_name(output_file) + archive_dir = parent_dir(archive_file) + new_archive_file = join(archive_dir, output_name) + + move(output_file, new_archive_file) + remove(archive_file) + + return new_archive_file + +def random_port(min=49152, max=65535): + return _random.randint(min, max) + +# Modified copytree impl that allows for already existing destination +# dirs +def _copytree(src, dst, symlinks=False, ignore=None): + """Recursively copy a directory tree using copy2(). + + If exception(s) occur, an Error is raised with a list of reasons. + + If the optional symlinks flag is true, symbolic links in the + source tree result in symbolic links in the destination tree; if + it is false, the contents of the files pointed to by symbolic + links are copied. + + The optional ignore argument is a callable. If given, it + is called with the `src` parameter, which is the directory + being visited by copytree(), and `names` which is the list of + `src` contents, as returned by os.listdir(): + + callable(src, names) -> ignored_names + + Since copytree() is called recursively, the callable will be + called once for each directory that is copied. It returns a + list of names relative to the `src` directory that should + not be copied. + + XXX Consider this example code rather than the ultimate tool. 
+ + """ + names = _os.listdir(src) + if ignore is not None: + ignored_names = ignore(src, names) + else: + ignored_names = set() + + if not exists(dst): + _os.makedirs(dst) + errors = [] + for name in names: + if name in ignored_names: + continue + srcname = _os.path.join(src, name) + dstname = _os.path.join(dst, name) + try: + if symlinks and _os.path.islink(srcname): + linkto = _os.readlink(srcname) + _os.symlink(linkto, dstname) + elif _os.path.isdir(srcname): + _copytree(srcname, dstname, symlinks, ignore) + else: + # Will raise a SpecialFileError for unsupported file types + _shutil.copy2(srcname, dstname) + # catch the Error from the recursive copytree so that we can + # continue with other files + except _shutil.Error as err: + errors.extend(err.args[0]) + except EnvironmentError as why: + errors.append((srcname, dstname, str(why))) + try: + _shutil.copystat(src, dst) + except OSError as why: + if _shutil.WindowsError is not None and isinstance \ + (why, _shutil.WindowsError): + # Copying file access times may fail on Windows + pass + else: + errors.append((src, dst, str(why))) + if errors: + raise _shutil.Error(errors) diff --git a/qpid/cpp/src/tests/python_tests b/qpid/cpp/src/tests/python_tests deleted file mode 100755 index a36839a43c..0000000000 --- a/qpid/cpp/src/tests/python_tests +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the python tests. -source $QPID_TEST_COMMON -ensure_python_tests -QPID_PORT=${QPID_PORT:-5672} -PYTHON_TESTS=${PYTHON_TESTS:-$*} -FAILING=${FAILING:-/dev/null} - -if [ ! -d $QPID_TESTS ]; then - echo "SKIPPED python tests: test code not found" - exit 0 -fi - -python $QPID_PYTHON_TEST -m qpid_tests.broker_0_10 -m qpid.tests -b localhost:$QPID_PORT -I $FAILING $PYTHON_TESTS || exit 1 diff --git a/qpid/cpp/src/tests/python_tests.ps1 b/qpid/cpp/src/tests/python_tests.ps1 deleted file mode 100644 index f7caa8f75a..0000000000 --- a/qpid/cpp/src/tests/python_tests.ps1 +++ /dev/null @@ -1,42 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -# Run the python tests; intended to be run by run_test.ps1 which sets up -# QPID_PORT -$srcdir = Split-Path $myInvocation.InvocationName -$PYTHON_DIR = "$srcdir\..\..\..\python" -if (!(Test-Path $PYTHON_DIR -pathType Container)) { - "Skipping python tests as python libs not found" - exit 1 -} - -. .\test_env.ps1 - -if (Test-Path env:FAILING) { - $fails = "-I $env:FAILING" -} -if (Test-Path env:PYTHON_TESTS) { - $tests = "$env:PYTHON_TESTS" -} -else { - $tests = "$args" -} - -python $PYTHON_DIR/qpid-python-test -m qpid_tests.broker_0_10 -m qpid.tests -b localhost:$env:QPID_PORT $fails $tests -exit $LASTEXITCODE diff --git a/qpid/cpp/src/tests/qpid-build-rinstall b/qpid/cpp/src/tests/qpid-build-rinstall deleted file mode 100755 index beff7dffba..0000000000 --- a/qpid/cpp/src/tests/qpid-build-rinstall +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under onemake -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run "make install"" locally then copy the install tree to each of $HOSTS -# Must be run in a configured qpid build directory. -# -test -f config.status || { echo "Not in a configured build directory."; usage; } -. 
src/tests/install_env.sh -set -ex -make && make -j1 install -rsynchosts $QPID_INSTALL_PREFIX diff --git a/qpid/cpp/src/tests/quick_topictest.ps1 b/qpid/cpp/src/tests/quick_topictest.ps1 deleted file mode 100644 index 8f5b2caff7..0000000000 --- a/qpid/cpp/src/tests/quick_topictest.ps1 +++ /dev/null @@ -1,30 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Quick and quiet topic test for make check. -[string]$me = $myInvocation.InvocationName -$srcdir = Split-Path $me -Invoke-Expression "$srcdir\topictest.ps1 -subscribers 2 -messages 2 -batches 1" > topictest.log 2>&1 -if (!$?) { - "$me FAILED:" - cat topictest.log - exit 1 -} -Remove-Item topictest.log -exit 0 diff --git a/qpid/cpp/src/tests/rsynchosts b/qpid/cpp/src/tests/rsynchosts deleted file mode 100755 index 10e1081f76..0000000000 --- a/qpid/cpp/src/tests/rsynchosts +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash -# -# Licensed to the Apache Software Foundation (ASF) under onemake -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -absdir() { echo `cd $1 && pwd`; } - -abspath() { - if test -d "$1"; then absdir "$1"; - else echo $(absdir $(dirname "$1"))/$(basename "$1") - fi -} - -usage() { - echo "Usage: $(basename $0) [-l user] file [file...] -Synchronize the contents of each file or directory to the same absolute path on -each host in \$HOSTS. -" - exit 1 -} - -while getopts "l:" opt; do - case $opt in - l) RSYNC_USER="$OPTARG@" ;; - *) usage ;; - esac -done -shift `expr $OPTIND - 1` - -test "$*" || usage - -for f in $*; do FILES="$FILES $(abspath $f)" || exit 1; done - -OK_FILE=`mktemp` # Will be deleted if anything goes wrong. -trap "rm -f $OK_FILE" EXIT - -for h in $HOSTS; do - rsync -vaRO --delete $FILES $RSYNC_USER$h:/ || { echo "rsync to $h failed"; rm -f $OK_FILE; } & -done -wait -test -f $OK_FILE - diff --git a/qpid/cpp/src/tests/run.py b/qpid/cpp/src/tests/run.py new file mode 100755 index 0000000000..9b74ba4d30 --- /dev/null +++ b/qpid/cpp/src/tests/run.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +from common import * + +call(" ".join(ARGS[1:])) + diff --git a/qpid/cpp/src/tests/run_acl_tests b/qpid/cpp/src/tests/run_acl_tests index 4bb9e7aa5d..831fc7fbc7 100755 --- a/qpid/cpp/src/tests/run_acl_tests +++ b/qpid/cpp/src/tests/run_acl_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,148 +19,52 @@ # under the License. 
# -# Run the acl tests. $srcdir is set by the Makefile. -source ./test_env.sh -DATA_DIR=`pwd`/data_dir -DATA_DIRI=`pwd`/data_diri -DATA_DIRU=`pwd`/data_diru -DATA_DIRQ=`pwd`/data_dirq +from common import * -trap stop_brokers INT TERM QUIT +policy_file = join(BUILD_DIR, "src", "tests", "policy.acl") +broker_args = "--acl-file {}".format(policy_file) -start_brokers() { - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIR --acl-file policy.acl --auth no --log-enable trace+:acl --log-to-file local.log > qpidd.port - LOCAL_PORT=`cat qpidd.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRI --acl-file policy.acl --auth no --connection-limit-per-ip 2 --log-to-file locali.log > qpiddi.port - LOCAL_PORTI=`cat qpiddi.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRU --acl-file policy.acl --auth no --connection-limit-per-user 2 --log-to-file localu.log > qpiddu.port - LOCAL_PORTU=`cat qpiddu.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRQ --acl-file policy.acl --auth no --max-queues-per-user 2 --log-to-file localq.log > qpiddq.port - LOCAL_PORTQ=`cat qpiddq.port` -} +broker_port = start_broker("broker", broker_args, "--log-enable debug+:acl") +broker_i_port = start_broker("broker_i", broker_args, "--connection-limit-per-ip 2") +broker_u_port = start_broker("broker_u", broker_args, "--connection-limit-per-user 2") +broker_q_port = start_broker("broker_q", broker_args, "--max-queues-per-user 2") -start_noacl_noauth_brokers() { - ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIR --auth no --log-to-file local.log > qpidd.port - LOCAL_PORT=`cat qpidd.port` - ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRI --auth no --log-to-file locali.log > qpiddi.port - LOCAL_PORTI=`cat qpiddi.port` - ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRU --auth no --log-to-file 
localu.log > qpiddu.port - LOCAL_PORTU=`cat qpiddu.port` - ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRQ --auth no --log-to-file localq.log > qpiddq.port - LOCAL_PORTQ=`cat qpiddq.port` -} +run_broker_tests(broker_port, "-m acl", + "-Dport-i={}".format(broker_i_port), + "-Dport-u={}".format(broker_u_port), + "-Dport-q={}".format(broker_q_port), + "-Dpolicy-file={}".format(policy_file)) -start_noacl_auth_brokers() { - sasl_config_file=$builddir/sasl_config - if [ ! -f $sasl_config_file ] ; then - echo Creating sasl database - . $srcdir/sasl_test_setup.sh - fi - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIR --auth yes --sasl-config=$sasl_config_file --log-to-file local.log > qpidd.port - LOCAL_PORT=`cat qpidd.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRI --auth yes --sasl-config=$sasl_config_file --log-to-file locali.log > qpiddi.port - LOCAL_PORTI=`cat qpiddi.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRU --auth yes --sasl-config=$sasl_config_file --log-to-file localu.log > qpiddu.port - LOCAL_PORTU=`cat qpiddu.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --data-dir $DATA_DIRQ --auth yes --sasl-config=$sasl_config_file --log-to-file localq.log > qpiddq.port - LOCAL_PORTQ=`cat qpiddq.port` -} +# Test interaction of authentication and link creation -stop_brokers() { - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORT - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTI - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTU - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTQ -} +broker_1_port = start_broker("broker_1") +broker_2_port = start_broker("broker_2") -delete_directories() { - rm -rf $DATA_DIR - rm -rf $DATA_DIRI - rm -rf $DATA_DIRU - rm -rf $DATA_DIRQ -} +configure_broker(broker_1_port, "add exchange topic fed.topic") +configure_broker(broker_2_port, "add exchange topic 
fed.topic") -delete_logfiles() { - rm -rf local.log - rm -rf locali.log - rm -rf localu.log - rm -rf localq.log -} +connect_brokers("dynamic add", + "localhost:{}".format(broker_1_port), + "localhost:{}".format(broker_2_port), + "fed.topic") -create_directories() { - mkdir -p $DATA_DIR - mkdir -p $DATA_DIRI - mkdir -p $DATA_DIRU - mkdir -p $DATA_DIRQ -} +sasl_config_dir = join(BUILD_DIR, "src", "tests", "sasl_config") +broker_args = "--auth yes --sasl-config {}".format(sasl_config_dir) -populate_directories() { - cp $srcdir/policy.acl $DATA_DIR - cp $srcdir/policy.acl $DATA_DIRI - cp $srcdir/policy.acl $DATA_DIRU - cp $srcdir/policy.acl $DATA_DIRQ -} +broker_3_port = start_broker("broker_3", broker_args, auth_disabled=False) +broker_4_port = start_broker("broker_4", broker_args, auth_disabled=False) -test_loading_acl_from_absolute_path(){ - POLICY_FILE=$srcdir/policy.acl - rm -f temp.log - PORT=`../qpidd --daemon --port 0 --interface 127.0.0.1 --no-module-dir --no-data-dir --auth no --acl-file $POLICY_FILE -t --log-to-file temp.log 2>/dev/null` - ACL_FILE=`grep "notice ACL: Read file" temp.log | sed 's/^.*Read file //'` - $QPIDD_EXEC --no-module-dir -q --port $PORT - if test "$ACL_FILE" != "\"$POLICY_FILE\""; then - echo "unable to load policy file from an absolute path"; - return 1; - fi - rm temp.log -} +configure_broker(broker_3_port, "add exchange topic fed.topic") +configure_broker(broker_4_port, "add exchange topic fed.topic") -test_noacl_deny_create_link() { - delete_logfiles - start_noacl_noauth_brokers - echo "Running no-acl, no-auth tests using brokers on ports $LOCAL_PORT, $LOCAL_PORTI, $LOCAL_PORTU, and $LOCAL_PORTQ" - $QPID_CONFIG_EXEC -a localhost:$LOCAL_PORT add exchange topic fed.topic - $QPID_CONFIG_EXEC -a localhost:$LOCAL_PORTI add exchange topic fed.topic - $QPID_ROUTE_EXEC dynamic add localhost:$LOCAL_PORT localhost:$LOCAL_PORTI fed.topic 2>/dev/null - sleep 2 - stop_brokers - grep -q "must specify ACL create link rules" local.log - if [ $? 
-eq 0 ] - then - echo "Test fail - Broker with auth=no should have allowed link creation"; - return 1; - fi +try: + connect_brokers("dynamic add", + "localhost:{}".format(broker_3_port), + "localhost:{}".format(broker_4_port), + "fed.topic") - delete_logfiles - start_noacl_auth_brokers - echo "Running no-acl, auth tests using brokers on ports $LOCAL_PORT, $LOCAL_PORTI, $LOCAL_PORTU, and $LOCAL_PORTQ" - $QPID_CONFIG_EXEC -a localhost:$LOCAL_PORT add exchange topic fed.topic - $QPID_CONFIG_EXEC -a localhost:$LOCAL_PORTI add exchange topic fed.topic - $QPID_ROUTE_EXEC dynamic add localhost:$LOCAL_PORT localhost:$LOCAL_PORTI fed.topic 2>/dev/null - sleep 2 - stop_brokers - grep -q "must specify ACL create link rules" local.log - if [ $? -ne 0 ] - then - echo "Test fail - Broker with no ACL and --auth=yes file did not deny link creation"; - return 1; - fi -} - -if test -d ${PYTHON_DIR} ; then - # run acl.py test file - delete_directories - create_directories - populate_directories - delete_logfiles - start_brokers - echo "Running acl tests using brokers on ports $LOCAL_PORT, $LOCAL_PORTI, $LOCAL_PORTU, and $LOCAL_PORTQ" - $QPID_PYTHON_TEST -b localhost:$LOCAL_PORT -m acl -Dport-i=$LOCAL_PORTI -Dport-u=$LOCAL_PORTU -Dport-q=$LOCAL_PORTQ || EXITCODE=1 - stop_brokers || EXITCODE=1 - # - test_loading_acl_from_absolute_path || EXITCODE=1 - # - test_noacl_deny_create_link || EXITCODE=1 - delete_directories - exit $EXITCODE -fi + fail("Broker with no ACLs but auth enabled did not deny link creation") +except: + pass +check_results() diff --git a/qpid/cpp/src/tests/run_acl_tests.ps1 b/qpid/cpp/src/tests/run_acl_tests.ps1 deleted file mode 100644 index 8279d87e54..0000000000 --- a/qpid/cpp/src/tests/run_acl_tests.ps1 +++ /dev/null @@ -1,99 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the acl tests. - -$srcdir = Split-Path $myInvocation.InvocationName -. .\test_env.ps1 -if (!(Test-Path $PYTHON_DIR -pathType Container)) { - "Skipping acl tests as python libs not found" - exit 1 -} - -$Global:BROKER_EXE = "" - -Function start_broker($acl_options) -{ - # Test runs from the tests directory but the broker executable is one level - # up, and most likely in a subdirectory from there based on what build type. - # Look around for it before trying to start it. - . $srcdir\find_prog.ps1 ..\qpidd.exe - if (!(Test-Path $prog)) { - "Cannot locate qpidd.exe" - exit 1 - } - $Global:BROKER_EXE = $prog - if (Test-Path qpidd.port) { - Remove-Item qpidd.port - } - $cmdline = "$prog --auth=no --no-module-dir --port=0 --log-to-file qpidd.log $acl_options | foreach { set-content qpidd.port `$_ }" - $cmdblock = $executioncontext.invokecommand.NewScriptBlock($cmdline) - . 
$srcdir\background.ps1 $cmdblock - # Wait for the broker to start - $wait_time = 0 - while (!(Test-Path qpidd.port) -and ($wait_time -lt 30)) { - Start-Sleep 2 - $wait_time += 2 - } - if (!(Test-Path qpidd.port)) { - "Timeout waiting for broker to start" - exit 1 - } - set-item -path env:BROKER_PORT -value (get-content -path qpidd.port -totalcount 1) -} - -Function stop_broker -{ - "Stopping $Global:BROKER_EXE" - Invoke-Expression "$Global:BROKER_EXE --no-module-dir -q --port $env:BROKER_PORT" | Write-Output - Remove-Item qpidd.port -} - -$DATA_DIR = [IO.Directory]::GetCurrentDirectory() + "\data_dir" -Remove-Item $DATA_DIR -recurse -New-Item $DATA_DIR -type directory -Copy-Item $srcdir\policy.acl $DATA_DIR -start_broker("--data-dir $DATA_DIR --acl-file policy.acl") -"Running acl tests using broker on port $env:BROKER_PORT" -Invoke-Expression "python $PYTHON_DIR/qpid-python-test -m acl -b localhost:$env:BROKER_PORT" | Out-Default -$RETCODE=$LASTEXITCODE -stop_broker - -# Now try reading the acl file from an absolute path. 
-Remove-Item qpidd.log -$policy_full_path = "$srcdir\policy.acl" -start_broker("--no-data-dir --acl-file $policy_full_path") -#test_loading_acl_from_absolute_path(){ -# POLICY_FILE=$srcdir/policy.acl -# rm -f temp.log -# PORT=`../qpidd --daemon --port 0 --no-module-dir --no-data-dir --auth no --load-module $ACL_LIB --acl-file $POLICY_FILE -t --log-to-file temp.log 2>/dev/null` -# ACL_FILE=`grep "notice Read ACL file" temp.log | sed 's/^.*Read ACL file //'` -# $QPIDD_EXEC --no-module-dir -q --port $PORT -# if test "$ACL_FILE" != "\"$POLICY_FILE\""; then -# echo "unable to load policy file from an absolute path"; -# return 1; -# fi -# rm temp.log -#} -# -# test_loading_acl_from_absolute_path || EXITCODE=1 -# rm -rf $DATA_DIR -# exit $EXITCODE -stop_broker -exit $RETCODE diff --git a/qpid/cpp/src/tests/run_cli_tests b/qpid/cpp/src/tests/run_cli_tests index 1db99001a4..54517e0ef0 100755 --- a/qpid/cpp/src/tests/run_cli_tests +++ b/qpid/cpp/src/tests/run_cli_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,63 +19,45 @@ # under the License. # -# Run the cli-utility tests. +# Run the cli-utility tests -source ./test_env.sh -CLI_DIR=$PYTHON_COMMANDS +from common import * -trap stop_brokers INT TERM QUIT +cli_dir = join(SOURCE_DIR, "management", "python", "bin") -# helper function to create test.xquery in the current directory, so -# that the python test program can find it. yes, it leaves a turd. 
-create_test_xquery() { - cat <<EOF > ./test.xquery - let \$w := ./weather - return \$w/station = 'Raleigh-Durham International Airport (KRDU)' - and \$w/temperature_f > 50 - and \$w/temperature_f - \$w/dewpoint > 5 - and \$w/wind_speed_mph > 7 - and \$w/wind_speed_mph < 20 -EOF -} +xquery = """ +let $w := ./weather +return $w/station = 'Raleigh-Durham International Airport (KRDU)' + and $w/temperature_f > 50 + and $w/temperature_f - $w/dewpoint > 5 + and $w/wind_speed_mph > 7 + and $w/wind_speed_mph < 20 +""" +xquery_file = write(join(WORK_DIR, "xquery_file"), xquery) -start_brokers() { - # if the xml lib is present, use it. if not, disable any tests which - # look like they're xml related. - # if we start supporting xml on windows, it will need something similar - # here - if [ -f ../xml.so ] ; then - xargs="--load-module ../xml.so" - if [ ! -f test.xquery ] ; then - create_test_xquery - fi - targs="" - else - echo "Ignoring XML tests" - xargs="" - targs="--ignore=*xml*" - fi +# If the xml lib is present, use it. if not, disable any tests which +# look like they're xml related. +# +# If we start supporting xml on windows, it will need something +# similar here. 
+ +if XML_LIB is not None: + broker_args = "--load-module {}".format(XML_LIB) + test_args = "" +else: + notice("Ignoring XML tests") - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-data-dir --no-module-dir --mgmt-publish no --auth no $xargs > qpidd.port - LOCAL_PORT=`cat qpidd.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-data-dir --no-module-dir --mgmt-publish no --auth no $xargs > qpidd.port - REMOTE_PORT=`cat qpidd.port` -} + broker_args = "" + test_args = "--ignore=*xml*" -stop_brokers() { - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORT - $QPIDD_EXEC --no-module-dir -q --port $REMOTE_PORT -} +local_port = start_broker("local", broker_args) +remote_port = start_broker("remote", broker_args) -if test -d ${PYTHON_DIR} ; then - start_brokers - echo "Running CLI tests using brokers on ports $LOCAL_PORT $REMOTE_PORT" - PYTHON_TESTS=${PYTHON_TESTS:-$*} - $QPID_PYTHON_TEST -m cli_tests -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dcli-dir=$CLI_DIR $targs $PYTHON_TESTS $@ - RETCODE=$? - stop_brokers - if test x$RETCODE != x0; then - echo "FAIL CLI tests"; exit 1; - fi -fi +run_broker_tests(local_port, + "-m cli_tests", + "-Dremote-port={}".format(remote_port), + "-Dcli-dir={}".format(cli_dir), + "-Dxquery-file={}".format(xquery_file), + test_args) +check_results() diff --git a/qpid/cpp/src/tests/run_client_tests b/qpid/cpp/src/tests/run_client_tests new file mode 100755 index 0000000000..76c46ef949 --- /dev/null +++ b/qpid/cpp/src/tests/run_client_tests @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from common import * + +port = start_broker("broker") + +ENV["QPID_PORT"] = str(port) + +call_with_valgrind("qpid-client-test --verbose") + +check_results() diff --git a/qpid/cpp/src/tests/run_federation_sys_tests b/qpid/cpp/src/tests/run_federation_sys_tests deleted file mode 100755 index f5f1ae44d3..0000000000 --- a/qpid/cpp/src/tests/run_federation_sys_tests +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the federation system tests. 
- -source ./test_env.sh - -MODULENAME=federation_sys - -# Test for long test -if [[ "$1" == "LONG_TEST" ]]; then - USE_LONG_TEST=1 - shift # get rid of this param so it is not treated as a test name -fi - -trap stop_brokers INT TERM QUIT - -SKIPTESTS="-i federation_sys.E_* -i federation_sys.F_* -i federation_sys.G_* -i federation_sys.H_*" -if [ -z ${USE_LONG_TEST} ]; then - SKIPTESTS="-i federation_sys.A_Long* -i federation_sys.B_Long* ${SKIPTESTS}" -fi -echo "WARNING: Tests using persistence will be ignored." -SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*" - -start_brokers() { - start_broker() { - ${QPIDD_EXEC} --daemon --port 0 --interface 127.0.0.1 --auth no --no-data-dir $1 > qpidd.port - PORT=`cat qpidd.port` - eval "$2=${PORT}" - } - start_broker "" LOCAL_PORT - start_broker "" REMOTE_PORT - rm qpidd.port -} - -stop_brokers() { - ${QPIDD_EXEC} -q --port ${LOCAL_PORT} - ${QPIDD_EXEC} -q --port ${REMOTE_PORT} -} - -if test -d ${PYTHON_DIR} ; then - start_brokers - echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)" - if [ -z ${USE_LONG_TEST} ]; then - echo "NOTE: To run a full set of federation system tests, use \"make check-long\". To test with persistence, run the store version of this script." - fi - ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} $@ - RETCODE=$? - stop_brokers - if test x${RETCODE} != x0; then - echo "FAIL federation tests"; exit 1; - fi -fi diff --git a/qpid/cpp/src/tests/run_federation_tests b/qpid/cpp/src/tests/run_federation_tests index 8cadd3702f..381195af4e 100755 --- a/qpid/cpp/src/tests/run_federation_tests +++ b/qpid/cpp/src/tests/run_federation_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,43 +19,42 @@ # under the License. # -# Run the federation tests. 
- -source ./test_env.sh -#set -x -trap stop_brokers INT TERM QUIT - -if [ -f ../xml.so ] ; then - MODULES="--load-module xml" # Load the XML exchange and run XML exchange federation tests - SKIPTESTS= -else - MODULES="--no-module-dir" - SKIPTESTS='-i *_xml' # note: single quotes prevent expansion of * -fi - -QPIDD_CMD="../qpidd --daemon --port 0 --interface 127.0.0.1 --no-data-dir $MODULES --auth no --log-enable=info+ --log-enable=debug+:Bridge --log-to-file" -start_brokers() { - rm -f fed_local.log fed_remote.log fed_b1.log fed_b2.log - LOCAL_PORT=$($QPIDD_CMD fed_local.log --federation-tag LOCAL) - REMOTE_PORT=$($QPIDD_CMD fed_remote.log --federation-tag REMOTE) - REMOTE_B1=$($QPIDD_CMD fed_b1.log --federation-tag B1) - REMOTE_B2=$($QPIDD_CMD fed_b2.log --federation-tag B2) -} - -stop_brokers() { - $QPIDD_EXEC $MODULES -q --port $LOCAL_PORT - $QPIDD_EXEC $MODULES -q --port $REMOTE_PORT - $QPIDD_EXEC $MODULES -q --port $REMOTE_B1 - $QPIDD_EXEC $MODULES -q --port $REMOTE_B2 -} - -if test -d ${PYTHON_DIR} ; then - start_brokers - echo "Running federation tests using brokers on ports $LOCAL_PORT $REMOTE_PORT $REMOTE_B1 $REMOTE_B2" - $QPID_PYTHON_TEST -m federation ${SKIPTESTS} -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT -Dextra-brokers="$REMOTE_B1 $REMOTE_B2" $@ - RETCODE=$? 
- stop_brokers - if test x$RETCODE != x0; then - echo "FAIL federation tests"; exit 1; - fi -fi +from common import * + +common_args = "--log-enable=debug+:Bridge" +module_args = None +test_args = "-i *_xml" + +sys_test_args = [ + "-i federation_sys.A_Long*", + "-i federation_sys.B_Long*", + "-i federation_sys.C_*", + "-i federation_sys.D_*", + "-i federation_sys.E_*", + "-i federation_sys.F_*", + "-i federation_sys.G_*", + "-i federation_sys.H_*", +] + +if XML_LIB is not None: + module_args = "--load-module {}".format(XML_LIB) + test_args = None + +local_port = start_broker("local", common_args, module_args, "--federation-tag LOCAL") +remote_port = start_broker("remote", common_args, module_args, "--federation-tag REMOTE") +remote_b1 = start_broker("remote_b1", common_args, module_args, "--federation-tag REMOTE_B1") +remote_b2 = start_broker("remote_b2", common_args, module_args, "--federation-tag REMOTE_B2") + +run_broker_tests(local_port, "-m federation", + "-Dremote-port={}".format(remote_port), + "-Dextra-brokers='{} {}'".format(remote_b1, remote_b2), + test_args) + +run_broker_tests(local_port, "-m headers_federation", "-Dremote-port={}".format(remote_port)) + +run_broker_tests(remote_port, "-m federation_sys", + "-Dlocal-port={}".format(local_port), + "-Dremote-port={}".format(remote_port), + *sys_test_args) + +check_results() diff --git a/qpid/cpp/src/tests/run_federation_tests.ps1 b/qpid/cpp/src/tests/run_federation_tests.ps1 deleted file mode 100644 index 803b3eef6f..0000000000 --- a/qpid/cpp/src/tests/run_federation_tests.ps1 +++ /dev/null @@ -1,83 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the federation tests. - -$srcdir = Split-Path $myInvocation.InvocationName -$PYTHON_DIR = "$srcdir\..\..\..\python" -if (!(Test-Path $PYTHON_DIR -pathType Container)) { - "Skipping federation tests as python libs not found" - exit 1 -} - -. .\test_env.ps1 - -# Test runs from the tests directory but the broker executable is one level -# up, and most likely in a subdirectory from there based on what build type. -# Look around for it before trying to start it. -$subs = "Debug","Release","MinSizeRel","RelWithDebInfo" -foreach ($sub in $subs) { - $prog = "..\$sub\qpidd.exe" - if (Test-Path $prog) { - break - } -} -if (!(Test-Path $prog)) { - "Cannot locate qpidd.exe" - exit 1 -} -$cmdline = "$prog --auth=no --no-module-dir --no-data-dir --port=0 --ssl-port=0 --log-to-file qpidd.log $args | foreach { set-content qpidd.port `$_ }" -$cmdblock = $executioncontext.invokecommand.NewScriptBlock($cmdline) - -function start_brokers { - # Start 2 brokers, saving the port numbers in LOCAL_PORT, REMOTE_PORT. - . $srcdir\background.ps1 $cmdblock - while (!(Test-Path qpidd.port)) { - Start-Sleep 2 - } - set-item -path env:LOCAL_PORT -value (get-content -path qpidd.port -totalcount 1) - Remove-Item qpidd.port - . 
$srcdir\background.ps1 $cmdblock - while (!(Test-Path qpidd.port)) { - Start-Sleep 2 - } - set-item -path env:REMOTE_PORT -value (get-content -path qpidd.port -totalcount 1) -} - -function stop_brokers { - Invoke-Expression "$prog -q --port $env:LOCAL_PORT" | Out-Default - Invoke-Expression "$prog -q --port $env:REMOTE_PORT" | Out-Default -} - -trap { - &stop_brokers - break -} - -&start_brokers -"Running federation tests using brokers on ports $env:LOCAL_PORT $env:REMOTE_PORT" -$env:PYTHONPATH="$srcdir;$PYTHON_DIR;$PYTHON_TEST_DIR;$env:PYTHONPATH;$QMF_LIB" -$tests = "*" -Invoke-Expression "python $PYTHON_DIR/qpid-python-test -m federation -b localhost:$env:LOCAL_PORT -Dremote-port=$env:REMOTE_PORT $tests" | Out-Default -$RETCODE=$LASTEXITCODE -&stop_brokers -if ($RETCODE -ne 0) { - "FAIL federation tests" - exit 1 -} diff --git a/qpid/cpp/src/tests/run_flow_control_tests b/qpid/cpp/src/tests/run_flow_control_tests new file mode 100755 index 0000000000..5158fb2c47 --- /dev/null +++ b/qpid/cpp/src/tests/run_flow_control_tests @@ -0,0 +1,28 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from common import * + +port = start_broker("broker", "--default-flow-stop-threshold=80 --default-flow-resume-threshold=70") + +run_broker_tests(port, "-m queue_flow_limit_tests") + +check_results() diff --git a/qpid/cpp/src/tests/run_ha_tests b/qpid/cpp/src/tests/run_ha_tests index bb60bea076..159f258c34 100755 --- a/qpid/cpp/src/tests/run_ha_tests +++ b/qpid/cpp/src/tests/run_ha_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -8,9 +8,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -19,11 +19,19 @@ # under the License. # +from common import * + +# ENV["QPID_USE_SWIG_CLIENT"] = "1" # XXX is this necessary? + +# XXX These tests hang, and I can't figure out why +# -i *tx_block_threads -# Make sure the python tools are available. They will be if we are building in -# a checkoug, they may not be in a distribution. 
-test -d $PYTHON_COMMANDS -a -x $PYTHON_COMMANDS/qpid-ha -a -x $PYTHON_COMMANDS/qpid-config || { echo "Skipping HA tests, qpid-ha or qpid-config not available."; exit 0; } +# ENV["QPID_NO_MODULE_DIR"] = "1" # Don't accidentally load installed modules +# ENV["QPID_DATA_DIR"] = "" +# ENV["QPID_CONFIG"] = join(BUILD_DIR, "src", "tests", "qpidd-empty.conf") +# ENV["QPID_PORT"] = "" +# ENV["QPID_LOG_TO_FILE"] = join(BUILD_DIR, "src", "tests", "gah.log") -srcdir=`dirname $0` -$srcdir/ha_tests.py +call("qpid-python-test -m ha_tests -DOUTDIR={}", WORK_DIR) +check_results() diff --git a/qpid/cpp/src/tests/run_header_test.ps1 b/qpid/cpp/src/tests/run_header_test.ps1 deleted file mode 100644 index 344fac9cf9..0000000000 --- a/qpid/cpp/src/tests/run_header_test.ps1 +++ /dev/null @@ -1,48 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Simple test of encode/decode of a double in application headers -# TODO: this should be expanded to cover a wider set of types and go -# in both directions - -$srcdir = Split-Path $myInvocation.InvocationName -$PYTHON_DIR = "$srcdir\..\..\..\python" -if (!(Test-Path $PYTHON_DIR -pathType Container)) { - "Skipping header test as python libs not found" - exit 0 -} - -. 
.\test_env.ps1 - -if (Test-Path qpidd.port) { - set-item -path env:QPID_PORT -value (get-content -path qpidd.port -totalcount 1) -} - -# Test runs from the tests directory but the test executables are in a -# subdirectory based on the build type. Look around for it before trying -# to start it. -. $srcdir\find_prog.ps1 .\header_test.exe -if (!(Test-Path $prog)) { - "Cannot locate header_test.exe" - exit 1 -} - -Invoke-Expression "$prog -p $env:QPID_PORT" | Write-Output -Invoke-Expression "python $srcdir/header_test.py localhost $env:QPID_PORT" | Write-Output -exit $LASTEXITCODE diff --git a/qpid/cpp/src/tests/run_headers_federation_tests b/qpid/cpp/src/tests/run_headers_federation_tests deleted file mode 100644 index afbbf144ee..0000000000 --- a/qpid/cpp/src/tests/run_headers_federation_tests +++ /dev/null @@ -1,49 +0,0 @@ -#!/bin/sh - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the federation tests for the Headers Exchange. 
- -source ./test_env.sh - -trap stop_brokers INT TERM QUIT - -start_brokers() { - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-data-dir --no-module-dir --auth no > qpidd.port - LOCAL_PORT=`cat qpidd.port` - ../qpidd --daemon --port 0 --interface 127.0.0.1 --no-data-dir --no-module-dir --auth no > qpidd.port - REMOTE_PORT=`cat qpidd.port` -} - -stop_brokers() { - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORT - $QPIDD_EXEC --no-module-dir -q --port $REMOTE_PORT -} - -if test -d ${PYTHON_DIR} ; then - start_brokers - echo "Running HeadersExchange federation tests using brokers on ports $LOCAL_PORT $REMOTE_PORT" - $QPID_PYTHON_TEST -m headers_federation -b localhost:$LOCAL_PORT -Dremote-port=$REMOTE_PORT $@ - RETCODE=$? - stop_brokers - if test x$RETCODE != x0; then - echo "FAIL federation tests"; exit 1; - fi -fi diff --git a/qpid/cpp/src/tests/run_idle_timeout_tests b/qpid/cpp/src/tests/run_idle_timeout_tests new file mode 100755 index 0000000000..79de1c9f22 --- /dev/null +++ b/qpid/cpp/src/tests/run_idle_timeout_tests @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from common import * + +call("qpid-python-test -m idle_timeout_tests -DOUTDIR={}", WORK_DIR) + +check_results() diff --git a/qpid/cpp/src/tests/run_interlink_tests b/qpid/cpp/src/tests/run_interlink_tests index 71482fa7fd..2505d1e1f8 100755 --- a/qpid/cpp/src/tests/run_interlink_tests +++ b/qpid/cpp/src/tests/run_interlink_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,8 +19,8 @@ # under the License. # -test -e "$AMQP_LIB" || { echo "Skipping AMQP 1.0 based tests; AMQP 1.0 support not available."; exit 0; } +from common import * -srcdir=`dirname $0` -$srcdir/interlink_tests.py +call("qpid-python-test -m interlink_tests -DOUTDIR={}", WORK_DIR) +check_results() diff --git a/qpid/cpp/src/tests/quick_topictest b/qpid/cpp/src/tests/run_interop_tests index e44ec0f477..8cc918bba8 100755 --- a/qpid/cpp/src/tests/quick_topictest +++ b/qpid/cpp/src/tests/run_interop_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -8,9 +8,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -19,12 +19,12 @@ # under the License. # +from common import * + +port = start_broker("broker", "--load-module {}".format(AMQP_LIB)) + +ENV["QPID_INTEROP_URL"] = "localhost:{}".format(port) + +call("qpid-python-test -m interop_tests -DOUTDIR={}", WORK_DIR) -# Quick and quiet topic test for make check. 
-test -z "$srcdir" && srcdir=`dirname $0` -$srcdir/topictest -s2 -m2 -b1 > topictest.log 2>&1 || { - echo $0 FAILED: - cat topictest.log - exit 1 -} -rm topictest.log +check_results() diff --git a/qpid/cpp/src/tests/ipv6_test b/qpid/cpp/src/tests/run_ipv6_tests index 4ac5f95fba..55212257a9 100755 --- a/qpid/cpp/src/tests/ipv6_test +++ b/qpid/cpp/src/tests/run_ipv6_tests @@ -19,23 +19,22 @@ # under the License. # +source env.sh + # Check whether we have any globally configured IPv6 addresses # - if not then we can't run the tests because ipv6 lookups won't # work within the qpid code. This is a deliberate feature to avoid # getting addresses that can't be routed by the machine. if ip -f inet6 -o addr | cut -f 9 -s -d' ' | grep global > /dev/null ; then - echo "IPv6 addresses configured continuing" + echo "IPv6 addresses configured - continuing" else echo "No global IPv6 addresses configured - skipping test" exit 0 fi -# Run a simple test over IPv6 -source $QPID_TEST_COMMON - -CONFIG=$(dirname $0)/config.null +CONFIG=$(dirname $0)/qpidd-empty.conf TEST_HOSTNAME=::1 COUNT=10 @@ -43,7 +42,7 @@ trap cleanup EXIT error() { echo $*; exit 1; } -# Don't need --no-module-dir or --no-data-dir as they are set as env vars in test_env.sh +# Don't need --no-module-dir or --no-data-dir as they are set as env vars in env.sh COMMON_OPTS="--interface [::1] --daemon --auth no --config $CONFIG" # Record all broker ports started @@ -57,7 +56,7 @@ declare -a PORTS start_brokers() { local -a ports for (( i=0; $i<$1; i++)) do - ports[$i]=$($QPIDD_EXEC --port 0 $COMMON_OPTS $2) + ports[$i]=$(qpidd --port 0 $COMMON_OPTS $2) done PORTS=( ${PORTS[@]} ${ports[@]} ) } @@ -65,7 +64,7 @@ start_brokers() { stop_brokers() { for port in "${PORTS[@]}"; do - $QPIDD_EXEC -qp $port + qpidd -qp $port done PORTS=() } @@ -79,13 +78,13 @@ PORT=${PORTS[0]} echo "Started IPv6 smoke perftest on broker port $PORT" ## Test connection via connection settings -./qpid-perftest --count ${COUNT} --port ${PORT} -b 
$TEST_HOSTNAME --summary +qpid-perftest --count ${COUNT} --port ${PORT} -b $TEST_HOSTNAME --summary ## Test connection with a URL URL="amqp:[$TEST_HOSTNAME]:$PORT" -./qpid-send -b $URL --content-string=hello -a "foo;{create:always}" -MSG=`./qpid-receive -b $URL -a "foo;{create:always}" --messages 1` +qpid-send -b $URL --content-string=hello -a "foo;{create:always}" +MSG=`qpid-receive -b $URL -a "foo;{create:always}" --messages 1` test "$MSG" = "hello" || { echo "receive failed '$MSG' != 'hello'"; exit 1; } stop_brokers @@ -94,9 +93,6 @@ stop_brokers # Start 2 brokers -# In a distribution, the python tools will be absent. -ensure_python_tests - start_brokers 2 echo "Started Federated brokers on ports ${PORTS[*]}" # Make broker urls @@ -104,15 +100,15 @@ BROKER0="[::1]:${PORTS[0]}" BROKER1="[::1]:${PORTS[1]}" TEST_QUEUE=ipv6-fed-test -$QPID_CONFIG_EXEC -b $BROKER0 add queue $TEST_QUEUE -$QPID_CONFIG_EXEC -b $BROKER1 add queue $TEST_QUEUE -$QPID_ROUTE_EXEC dynamic add $BROKER1 $BROKER0 amq.direct -$QPID_CONFIG_EXEC -b $BROKER1 bind amq.direct $TEST_QUEUE $TEST_QUEUE -$QPID_ROUTE_EXEC route map $BROKER1 +qpid-config -b $BROKER0 add queue $TEST_QUEUE +qpid-config -b $BROKER1 add queue $TEST_QUEUE +qpid-route dynamic add $BROKER1 $BROKER0 amq.direct +qpid-config -b $BROKER1 bind amq.direct $TEST_QUEUE $TEST_QUEUE +qpid-route route map $BROKER1 -./datagen --count 100 | tee rdata-in | +datagen --count 100 | tee rdata-in | ./qpid-send -b amqp:$BROKER0 -a amq.direct/$TEST_QUEUE --content-stdin -./qpid-receive -b amqp:$BROKER1 -a $TEST_QUEUE --print-content yes -m 0 > rdata-out +qpid-receive -b amqp:$BROKER1 -a $TEST_QUEUE --print-content yes -m 0 > rdata-out cmp rdata-in rdata-out || { echo "Federated data over IPv6 does not compare"; exit 1; } diff --git a/qpid/cpp/src/tests/run_header_test b/qpid/cpp/src/tests/run_logging_tests index d1edcf6831..2ebcc5c902 100755 --- a/qpid/cpp/src/tests/run_header_test +++ b/qpid/cpp/src/tests/run_logging_tests @@ -19,13 +19,20 @@ # under 
the License. # -# Simple test of encode/decode of a double in application headers -# TODO: this should be expanded to cover a wider set of types and go -# in both directions +set -eu -source $QPID_TEST_COMMON +source env.sh -ensure_python_tests +export WORK_DIR=$(mktemp -d $PWD/run_logging_tests_XXXX) -./header_test -p $QPID_PORT -$srcdir/header_test.py "localhost" $QPID_PORT +exit_code=0 + +dynamic_log_hires_timestamp || exit_code=1 +dynamic_log_level_test || exit_code=1 + +if (( exit_code != 0 )); then + echo "Logging test failures" + exit $exit_code +fi + +rm -rf $WORK_DIR diff --git a/qpid/cpp/src/tests/run_long_federation_sys_tests b/qpid/cpp/src/tests/run_long_federation_sys_tests index c2b4e02d81..ab8b5646ec 100644 --- a/qpid/cpp/src/tests/run_long_federation_sys_tests +++ b/qpid/cpp/src/tests/run_long_federation_sys_tests @@ -21,4 +21,4 @@ # Run the federation system tests (long version). -./run_federation_sys_tests LONG_TEST $@ +USE_LONG_TEST=1 ./run_federation_sys_tests $@ diff --git a/qpid/cpp/src/tests/run_msg_group_tests b/qpid/cpp/src/tests/run_msg_group_tests index ee479c23c7..78bc802a09 100755 --- a/qpid/cpp/src/tests/run_msg_group_tests +++ b/qpid/cpp/src/tests/run_msg_group_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -18,45 +18,30 @@ # specific language governing permissions and limitations # under the License. 
# -#script to run a sequence of message group queue tests via make -source $QPID_TEST_COMMON +from common import * -ensure_python_tests +port = start_broker("broker") -QUEUE_NAME="group-queue" -GROUP_KEY="My-Group-Id" +commands = [ + "qpid-config -b {} add queue group-queue --group-header=My-Group-Id --shared-groups", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 3", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 7 --randomize-group-size", + "qpid-config -b {} add queue group-queue-two --group-header=My-Group-Id --shared-groups", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 3 --randomize-group-size", + "msg_group_test -b {} -a group-queue-two --group-key My-Group-Id --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 5", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 59 --group-size 5 --receivers 2 --senders 3 --capacity 1 --ack-frequency 3 --randomize-group-size", + "qpid-config -b {} del queue group-queue-two --force", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 59 --group-size 3 --receivers 2 --senders 3 --capacity 1 --ack-frequency 1 --randomize-group-size", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 211 --group-size 13 --receivers 2 --senders 3 --capacity 47 --ack-frequency 79 --interleave 53", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 10000 --group-size 1 --receivers 0 --senders 1", + "msg_group_test -b {} -a group-queue --group-key My-Group-Id --messages 10000 --receivers 5 --senders 0", + "qpid-config -b {} del queue group-queue 
--force", +] -BROKER_URL="${QPID_BROKER:-localhost}:${QPID_PORT:-5672}" +address = "localhost:{}".format(port) -run_test() { - "$@" -} +for command in commands: + call(command, address) -##set -x - -declare -i i=0 -declare -a tests -tests=("qpid-config -b $BROKER_URL add queue $QUEUE_NAME --group-header=${GROUP_KEY} --shared-groups" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 3" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 7 --randomize-group-size" - "qpid-config -b $BROKER_URL add queue ${QUEUE_NAME}-two --group-header=${GROUP_KEY} --shared-groups" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 3 --randomize-group-size" - "msg_group_test -b $BROKER_URL -a ${QUEUE_NAME}-two --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 5" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 5 --receivers 2 --senders 3 --capacity 1 --ack-frequency 3 --randomize-group-size" - "qpid-config -b $BROKER_URL del queue ${QUEUE_NAME}-two --force" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 3 --receivers 2 --senders 3 --capacity 1 --ack-frequency 1 --randomize-group-size" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 211 --group-size 13 --receivers 2 --senders 3 --capacity 47 --ack-frequency 79 --interleave 53" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10000 --group-size 1 --receivers 0 --senders 1" - "msg_group_test -b $BROKER_URL -a $QUEUE_NAME 
--group-key $GROUP_KEY --messages 10000 --receivers 5 --senders 0" - "qpid-config -b $BROKER_URL del queue $QUEUE_NAME --force") - -while [ -n "${tests[i]}" ]; do - run_test ${tests[i]} - RETCODE=$? - if test x$RETCODE != x0; then - echo "FAILED message group test. Failed command: \"${tests[i]}\""; - exit 1; - fi - i+=1 -done +check_results() diff --git a/qpid/cpp/src/tests/run_msg_group_tests.ps1 b/qpid/cpp/src/tests/run_msg_group_tests.ps1 deleted file mode 100644 index e9cee0a5a0..0000000000 --- a/qpid/cpp/src/tests/run_msg_group_tests.ps1 +++ /dev/null @@ -1,71 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Simple test of encode/decode of a double in application headers -# TODO: this should be expanded to cover a wider set of types and go -# in both directions - -$srcdir = Split-Path $myInvocation.InvocationName -$PYTHON_DIR = "$srcdir\..\..\..\python" -if (!(Test-Path $PYTHON_DIR -pathType Container)) { - "Skipping msg_group test as python libs not found" - exit 0 -} - -. 
.\test_env.ps1 - -if (Test-Path qpidd.port) { - set-item -path env:QPID_PORT -value (get-content -path qpidd.port -totalcount 1) -} - -# Test runs from the tests directory but the test executables are in a -# subdirectory based on the build type. Look around for it before trying -# to start it. -. $srcdir\find_prog.ps1 .\msg_group_test.exe -if (!(Test-Path $prog)) { - "Cannot locate msg_group_test.exe" - exit 1 -} - -$QUEUE_NAME="group-queue" -$GROUP_KEY="My-Group-Id" -$BROKER_URL="localhost:$env:QPID_PORT" - -$tests=@("python $QPID_CONFIG_EXEC -b $BROKER_URL add queue $QUEUE_NAME --group-header=${GROUP_KEY} --shared-groups", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 3", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 7 --randomize-group-size", - "python $QPID_CONFIG_EXEC -b $BROKER_URL add queue ${QUEUE_NAME}-two --group-header=${GROUP_KEY} --shared-groups", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 7 --ack-frequency 3 --randomize-group-size", - "$prog -b $BROKER_URL -a ${QUEUE_NAME}-two --group-key $GROUP_KEY --messages 103 --group-size 13 --receivers 2 --senders 3 --capacity 3 --ack-frequency 7 --randomize-group-size --interleave 5", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 5 --receivers 2 --senders 3 --capacity 1 --ack-frequency 3 --randomize-group-size", - "python $QPID_CONFIG_EXEC -b $BROKER_URL del queue ${QUEUE_NAME}-two --force", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 59 --group-size 3 --receivers 2 --senders 3 --capacity 1 --ack-frequency 1 --randomize-group-size", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 211 
--group-size 13 --receivers 2 --senders 3 --capacity 47 --ack-frequency 79 --interleave 53", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10000 --group-size 1 --receivers 0 --senders 1", - "$prog -b $BROKER_URL -a $QUEUE_NAME --group-key $GROUP_KEY --messages 10000 --receivers 5 --senders 0", - "python $QPID_CONFIG_EXEC -b $BROKER_URL del queue $QUEUE_NAME --force") - -foreach ($cmd in $tests) -{ - Invoke-Expression "$cmd" | Write-Output - $ret = $LASTEXITCODE - if ($ret -ne 0) {Write-Host "FAILED message group test. Failed command: $cmd" - break} -} -exit $ret diff --git a/qpid/cpp/src/tests/run_msg_group_tests_soak b/qpid/cpp/src/tests/run_msg_group_tests_soak index d87ca16c88..68a16793f3 100755 --- a/qpid/cpp/src/tests/run_msg_group_tests_soak +++ b/qpid/cpp/src/tests/run_msg_group_tests_soak @@ -18,13 +18,11 @@ # specific language governing permissions and limitations # under the License. # -#script to run a sequence of long-running message group tests via make -#setup path to find qpid-config and msg_group_test test progs -source ./test_env.sh -test -d $PYTHON_DIR || { echo "Skipping message group tests, no python dir."; exit 0; } +# Script to run a sequence of long-running message group tests via +# make -export PATH=$PWD:$srcdir:$PYTHON_COMMANDS:$PATH +source env.sh #set port to connect to via env var test -s qpidd.port && QPID_PORT=`cat qpidd.port` diff --git a/qpid/cpp/src/tests/run_paged_queue_tests b/qpid/cpp/src/tests/run_paged_queue_tests index 2c1e3ae614..c5f6aec927 100755 --- a/qpid/cpp/src/tests/run_paged_queue_tests +++ b/qpid/cpp/src/tests/run_paged_queue_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,32 +19,27 @@ # under the License. 
# -#setup path to find qpid-config and sender/receiver test progs -source ./test_env.sh -trap stop_broker INT TERM QUIT - -export PATH=$PWD:$srcdir:$PYTHON_COMMANDS:$PATH - -start_broker() { - QPID_PORT=$($QPIDD_EXEC --daemon --port 0 --interface 127.0.0.1 --no-data-dir --paging-dir=$PWD/pqtest_data $MODULES --auth no) || { echo "Could not start broker"; exit 1; } -} - -stop_broker() { - $QPIDD_EXEC -q --port $QPID_PORT -} - -test_single_page() { - msgcount=1000 - qpid-send --messages $msgcount --content-size 1024 --broker "localhost:$QPID_PORT" --address "onepage; {create: always, node:{x-declare:{arguments:{'qpid.paging':True,'qpid.max_pages_loaded':1}}}}" - received=$(qpid-receive --address onepage --broker "localhost:$QPID_PORT" --messages $msgcount | wc -l) - if [[ $received -ne $msgcount ]]; then - echo "single page test failed: received $received messages, expected $msgcount" - exit 1 - fi -} - -start_broker -test_single_page -qpid-cpp-benchmark --broker "localhost:$QPID_PORT" --create-option "node:{x-declare:{arguments:{'qpid.paging':True,'qpid.max_size':0,'qpid.max_count':0,'qpid.flow_stop_size':0,'qpid.flow_resume_size':0,'qpid.flow_stop_count':0,'qpid.flow_resume_count':0}}}" -qpid-cpp-benchmark --broker "localhost:$QPID_PORT" --create-option "node:{x-declare:{arguments:{'qpid.paging':True,'qpid.max_size':0,'qpid.max_count':0,'qpid.flow_stop_size':0,'qpid.flow_resume_size':0,'qpid.flow_stop_count':0,'qpid.flow_resume_count':0}}}" --fill-drain -stop_broker +from common import * + +port = start_broker("broker", "--paging-dir={}".format(join(WORK_DIR, "pqtest_data"))) + +messages = 1000 +address = "onepage; {create: always, node:{x-declare:{arguments:{'qpid.paging':True,'qpid.max_pages_loaded':1}}}}" + +call_with_valgrind("qpid-send --messages {} --content-size 1024 --broker localhost:{} --address \"{}\"", + messages, port, address) + +output = call_for_output_with_valgrind("qpid-receive --address onepage --messages {} --broker localhost:{}", + messages, 
port) + +received = len(output.splitlines()) + +if received != messages: + fail("Single page test failed: received {} messages, expected {}", received, messages) + +option = "node:{x-declare:{arguments:{'qpid.paging':True,'qpid.max_size':0,'qpid.max_count':0,'qpid.flow_stop_size':0,'qpid.flow_resume_size':0,'qpid.flow_stop_count':0,'qpid.flow_resume_count':0}}}" + +call("qpid-cpp-benchmark --broker localhost:{} --create-option \"{}\"", port, option) +call("qpid-cpp-benchmark --broker localhost:{} --create-option \"{}\" --fill-drain", port, option) + +check_results() diff --git a/qpid/cpp/src/tests/quick_txtest b/qpid/cpp/src/tests/run_performance_tests index 77e8556f1d..ea195ae80d 100755 --- a/qpid/cpp/src/tests/quick_txtest +++ b/qpid/cpp/src/tests/run_performance_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -19,4 +19,10 @@ # under the License. # -exec `dirname $0`/run_test ./qpid-txtest --queues 4 --tx-count 10 --quiet +from common import * + +port = start_broker("broker") + +call_with_valgrind("qpid-perftest --summary --count 100 --port {}", port) + +check_results() diff --git a/qpid/cpp/src/tests/run_perftest b/qpid/cpp/src/tests/run_perftest deleted file mode 100755 index 2fadc6cc62..0000000000 --- a/qpid/cpp/src/tests/run_perftest +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Args: count [qpid-perftest options...] -# Run a qpid-perftest with count multiplied. -# -MULTIPLIER=3 -COUNT=`expr $1 \* $MULTIPLIER` -shift -exec `dirname $0`/run_test ./qpid-perftest --summary --count $COUNT "$@" diff --git a/qpid/cpp/src/tests/run_python_tests b/qpid/cpp/src/tests/run_python_tests new file mode 100755 index 0000000000..2c6570335c --- /dev/null +++ b/qpid/cpp/src/tests/run_python_tests @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from common import * + +load_module_arg = None + +if AMQP_LIB is not None: + load_module_arg = "--load-module {}".format(AMQP_LIB) + +acl_file = join(BUILD_DIR, "src", "tests", "policy.acl") +acl_file_arg = "--acl-file {}".format(acl_file) + +port = start_broker("broker", load_module_arg, acl_file_arg) + +# Native python tests + +run_broker_tests(port, "-m qpid_tests.broker_0_10 -m qpid.tests") + +# Swigged python tests + +failing_tests = join(BUILD_DIR, "src", "tests", "failing-amqp0-10-python-tests") + +ENV["QPID_USE_SWIG_CLIENT"] = "1" + +run_broker_tests(port, + "-m qpid.tests.messaging.message", + "-m qpid_tests.broker_0_10.priority", + "-m qpid_tests.broker_0_10.lvq", + "-m qpid_tests.broker_0_10.new_api", + "-I {}".format(failing_tests)) + +if AMQP_LIB is not None: + failing_tests = join(BUILD_DIR, "src", "tests", "failing-amqp1.0-python-tests") + + run_broker_tests(port, + "--define=\"protocol_version=amqp1.0\"", + "--define=\"policy_file={}\"".format(acl_file), + "-m qpid_tests.broker_1_0", + "-m qpid_tests.broker_0_10.new_api", + "-m assertions -m reject_release -m misc -m policies -m acl_1", + "-I {}".format(failing_tests)) + +check_results() diff --git a/qpid/cpp/src/tests/run_qmf_tests b/qpid/cpp/src/tests/run_qmf_tests new file mode 100755 index 0000000000..40c6118470 --- /dev/null +++ b/qpid/cpp/src/tests/run_qmf_tests @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from common import * + +call("qpid-python-test -m qpidd_qmfv2_tests -DOUTDIR={}", WORK_DIR) + +check_results() diff --git a/qpid/cpp/src/tests/run_queue_redirect b/qpid/cpp/src/tests/run_queue_redirect deleted file mode 100755 index 3a0ae5118a..0000000000 --- a/qpid/cpp/src/tests/run_queue_redirect +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the queue redirect. $srcdir is set by the Makefile. 
-source ./test_env.sh -DATA_DIR=`pwd`/data_dir - -trap stop_brokers INT TERM QUIT - -start_brokers() { - $QPIDD_EXEC --daemon \ - --port 0 --interface 127.0.0.1 \ - --no-module-dir \ - --data-dir $DATA_DIR \ - --acl-file policy.acl \ - --auth no \ - --log-to-file queue_redirect.log \ - --log-enable info+ \ - --log-enable trace+:Model \ - --log-enable trace+ > qpidd.port - LOCAL_PORT=`cat qpidd.port` -} - -stop_brokers() { - $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORT -} - -if test -d ${PYTHON_DIR} ; then - rm -f queue_redirect.log - rm -rf $DATA_DIR - mkdir -p $DATA_DIR - cp $srcdir/policy.acl $DATA_DIR - start_brokers - echo "Running queue redirect tests using broker on port $LOCAL_PORT" - $QPID_PYTHON_TEST -b localhost:$LOCAL_PORT -m queue_redirect - stop_brokers || EXITCODE=1 - exit $EXITCODE -fi diff --git a/qpid/cpp/src/tests/run_queue_redirect_tests b/qpid/cpp/src/tests/run_queue_redirect_tests new file mode 100644 index 0000000000..27458c272e --- /dev/null +++ b/qpid/cpp/src/tests/run_queue_redirect_tests @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from common import * + +policy_file = join(BUILD_DIR, "src", "tests", "policy.acl") + +port = start_broker("broker", "--acl-file {}".format(policy_file), "--log-enable debug+:Model") + +run_broker_tests(port, "-m queue_redirect", "-Dpolicy-file={}".format(policy_file)) + +check_results() diff --git a/qpid/cpp/src/tests/run_queue_flow_limit_tests b/qpid/cpp/src/tests/run_ring_queue_tests index 55b3e5d4c5..fbd6b0dff8 100755 --- a/qpid/cpp/src/tests/run_queue_flow_limit_tests +++ b/qpid/cpp/src/tests/run_ring_queue_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -8,9 +8,9 @@ # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @@ -19,9 +19,12 @@ # under the License. # -source $QPID_TEST_COMMON +from common import * + +ENV["QPID_PORT"] = start_broker("broker") -ensure_python_tests +call("ring_queue_test -c -s 4 -r 4") +call("ring_queue_test -s 4 -r 0") +call("ring_queue_test -s 1 -r 1") -# Run tests against Queue producer flow control. -$QPID_PYTHON_TEST -m queue_flow_limit_tests $SKIPTESTS -b localhost:$QPID_PORT +check_results() diff --git a/qpid/cpp/src/tests/run_sasl_tests b/qpid/cpp/src/tests/run_sasl_tests new file mode 100755 index 0000000000..7ce92a232b --- /dev/null +++ b/qpid/cpp/src/tests/run_sasl_tests @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +set -eu + +source env.sh + +export WORK_DIR=$(mktemp -d $PWD/run_sasl_tests_XXXX) + +echo "Created work dir '${WORK_DIR}'" + +min_sasl_version=131350 # Sasl version 2.1.22 +sasl_version=$(sasl_version) + +if (( $sasl_version < min_sasl_version )); then + echo "Sasl version is too old; found $version; require $min_version or greater" + echo "SKIPPING" + exit 0 +fi + +exit_code=0 + +function fail { + echo "FAILED! ${1}" + exit_code=1 +} + +echo "# Running sasl_fed" +sasl_fed || fail "Test sasl_fed failed" +echo "# Running sasl_fed_ex dynamic" +sasl_fed_ex dynamic || fail "Test sasl_fed_ex dynamic failed" +echo "# Running sasl_fed_ex link" +sasl_fed_ex link || fail "Test sasl_fed_ex link failed" +echo "# Running sasl_fed_ex queue" +sasl_fed_ex queue || fail "Test sasl_fed_ex queue failed" +echo "# Running sasl_fed_ex route" +sasl_fed_ex route || fail "Test sasl_fed_ex route failed" +echo "# Running sasl_no_dir" +sasl_no_dir || fail "Test sasl_no_dir failed" + +if (( exit_code != 0 )); then + echo "SASL test failures" + exit $exit_code +fi + +rm -rf $WORK_DIR diff --git a/qpid/cpp/src/tests/run_ssl_tests b/qpid/cpp/src/tests/run_ssl_tests new file mode 100755 index 0000000000..896893eb3d --- /dev/null +++ b/qpid/cpp/src/tests/run_ssl_tests @@ -0,0 +1,329 @@ +#!/usr/bin/env bash + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# Run a simple test over SSL + +source env.sh + +#set -x + +CONFIG=$(dirname $0)/qpidd-empty.conf +TEST_CERT_DIR=`pwd`/test_cert_dir +CERT_DB=${TEST_CERT_DIR}/test_cert_db +CERT_PW_FILE=`pwd`/cert.password +TEST_HOSTNAME=127.0.0.1 +TEST_CLIENT_CERT=rumplestiltskin +CA_PEM_FILE=${TEST_CERT_DIR}/ca_cert.pem +OTHER_CA_CERT_DB=${TEST_CERT_DIR}/x_ca_cert_db +OTHER_CA_PEM_FILE=${TEST_CERT_DIR}/other_ca_cert.pem +PY_PING_BROKER=$SOURCE_DIR/src/tests/ping_broker +COUNT=10 + +if [[ -a $AMQP_LIB ]] ; then + MODULES="--load-module $AMQP_LIB" +fi + +trap cleanup EXIT + +error() { echo $*; exit 1; } + +# create the test certificate database +# $1 = string used as Subject in server's certificate +# $2 = string used as SubjectAlternateName (SAN) in server's certificate +create_certs() { + + local CERT_SUBJECT=${1:-"CN=${TEST_HOSTNAME},O=MyCo,ST=Massachusetts,C=US"} + local CERT_SAN=${2:-"*.server.com"} + + mkdir -p ${TEST_CERT_DIR} + rm -rf ${TEST_CERT_DIR}/* + + # Set Up a CA with a self-signed Certificate + # + mkdir -p ${CERT_DB} + certutil -N -d ${CERT_DB} -f ${CERT_PW_FILE} + certutil -S -d ${CERT_DB} -n "Test-CA" -s "CN=Test-CA,O=MyCo,ST=Massachusetts,C=US" -t "CT,," -x -f ${CERT_PW_FILE} -z /bin/sh >/dev/null 2>&1 + certutil -L -d ${CERT_DB} -n "Test-CA" -a -o ${CERT_DB}/rootca.crt -f 
${CERT_PW_FILE} + #certutil -L -d ${CERT_DB} -f ${CERT_PW_FILE} + + # create server certificate signed by Test-CA + # + certutil -R -d ${CERT_DB} -s "${CERT_SUBJECT}" -o ${TEST_CERT_DIR}/server.req -f ${CERT_PW_FILE} -z /bin/sh > /dev/null 2>&1 + certutil -C -d ${CERT_DB} -c "Test-CA" -8 "${CERT_SAN}" -i ${TEST_CERT_DIR}/server.req -o ${TEST_CERT_DIR}/server.crt -f ${CERT_PW_FILE} -m ${RANDOM} + certutil -A -d ${CERT_DB} -n ${TEST_HOSTNAME} -i ${TEST_CERT_DIR}/server.crt -t "Pu,," + + # create a certificate to identify the client + # + certutil -R -d ${CERT_DB} -s "CN=${TEST_CLIENT_CERT}" -o ${TEST_CERT_DIR}/client.req -f ${CERT_PW_FILE} -z /bin/sh > /dev/null 2>&1 + certutil -C -d ${CERT_DB} -c "Test-CA" -8 "*.client.com" -i ${TEST_CERT_DIR}/client.req -o ${TEST_CERT_DIR}/client.crt -f ${CERT_PW_FILE} -m ${RANDOM} + certutil -A -d ${CERT_DB} -n ${TEST_CLIENT_CERT} -i ${TEST_CERT_DIR}/client.crt -t "Pu,," + ### + #certutil -N -d ${SERVER_CERT_DIR} -f ${CERT_PW_FILE} + #certutil -S -d ${SERVER_CERT_DIR} -n ${TEST_HOSTNAME} -s "CN=${TEST_HOSTNAME}" -t "CT,," -x -f ${CERT_PW_FILE} -z /usr/bin/certutil + #certutil -S -d ${SERVER_CERT_DIR} -n ${TEST_CLIENT_CERT} -s "CN=${TEST_CLIENT_CERT}" -t "CT,," -x -f ${CERT_PW_FILE} -z /usr/bin/certutil + + # Set up a separate DB with its own CA for testing failure to validate scenario + # + mkdir -p ${OTHER_CA_CERT_DB} + certutil -N -d ${OTHER_CA_CERT_DB} -f ${CERT_PW_FILE} + certutil -S -d ${OTHER_CA_CERT_DB} -n "Other-Test-CA" -s "CN=Another Test CA,O=MyCo,ST=Massachusetts,C=US" -t "CT,," -x -f ${CERT_PW_FILE} -z /bin/sh >/dev/null 2>&1 + certutil -L -d ${OTHER_CA_CERT_DB} -n "Other-Test-CA" -a -o ${OTHER_CA_CERT_DB}/rootca.crt -f ${CERT_PW_FILE} + #certutil -L -d ${OTHER_CA_CERT_DB} -f ${CERT_PW_FILE} +} + +delete_certs() { + if [[ -e ${TEST_CERT_DIR} ]] ; then + rm -rf ${TEST_CERT_DIR} + fi +} + +# Don't need --no-module-dir or --no-data-dir as they are set as env vars in env.sh +COMMON_OPTS="--daemon --config $CONFIG 
--ssl-cert-db $CERT_DB --ssl-cert-password-file $CERT_PW_FILE --ssl-cert-name $TEST_HOSTNAME" + +# Start new brokers: +# $1 must be integer +# $2 = extra opts +# Append used ports to PORTS variable +start_brokers() { + local -a ports + for (( i=0; $i<$1; i++)) do + ports[$i]=$(qpidd --port 0 --interface 127.0.0.1 $COMMON_OPTS $2) || error "Could not start broker $i" + done + PORTS=( ${PORTS[@]} ${ports[@]} ) +} + +# Stop single broker: +# $1 is number of broker to stop (0 based) +stop_broker() { + qpidd -qp ${PORTS[$1]} + + # Remove from ports array + unset PORTS[$1] +} + +stop_brokers() { + for port in "${PORTS[@]}"; + do + qpidd -qp $port + done + PORTS=() +} + +pick_port() { + # We need a fixed port to set --cluster-url. Use qpidd to pick a free port. + PICK=`qpidd --no-module-dir --listen-disable ssl -dp0` + qpidd --no-module-dir -qp $PICK + echo $PICK +} + +cleanup() { + stop_brokers + delete_certs + rm -f ${CERT_PW_FILE} +} + +start_ssl_broker() { + start_brokers 1 "--transport ssl --ssl-port 0 --require-encryption --auth no $MODULES" +} + +start_ssl_mux_broker() { + qpidd $COMMON_OPTS --port $1 --ssl-port $1 --auth no + PORTS=( ${PORTS[@]} $1 ) +} + +sasl_config_dir=$BUILD_DIR/src/tests/sasl_config + +start_authenticating_broker() { + start_brokers 1 "--transport ssl --ssl-port 0 --require-encryption --ssl-sasl-no-dict --ssl-require-client-authentication --auth yes --sasl-config=${sasl_config_dir} $MODULES" +} + +ssl_cluster_broker() { # $1 = port + start_brokers 1 "--ssl-port $1 --auth no --load-module $CLUSTER_LIB --cluster-name ssl_test.$HOSTNAME.$$ --cluster-url amqp:ssl:$TEST_HOSTNAME:$1" + + # Wait for broker to be ready + qpid-ping -Pssl -b $TEST_HOSTNAME:$1 -q || { echo "Cannot connect to broker on $1"; exit 1; } +} + +CERTUTIL=$(type -p certutil) +if [[ !(-x $CERTUTIL) ]] ; then + echo "No certutil, skipping ssl test"; + exit 0; +fi + +if [[ !(-e ${CERT_PW_FILE}) ]] ; then + echo password > ${CERT_PW_FILE} +fi +delete_certs +create_certs || error 
"Could not create test certificate database" + +start_ssl_broker +PORT=${PORTS[0]} +echo "Running SSL test on port $PORT" +export QPID_NO_MODULE_DIR=1 +export QPID_SSL_CERT_DB=${CERT_DB} +export QPID_SSL_CERT_PASSWORD_FILE=${CERT_PW_FILE} + +## Test connection via connection settings +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary + +## Test connection with a URL +URL=amqp:ssl:$TEST_HOSTNAME:$PORT +qpid-send -b $URL --content-string=hello -a "foo;{create:always}" +MSG=`qpid-receive -b $URL -a "foo;{create:always}" --messages 1` +test "$MSG" = "hello" || { echo "receive failed '$MSG' != 'hello'"; exit 1; } + +if [[ -a $AMQP_LIB ]] ; then + echo "Testing ssl over AMQP 1.0" + qpid-send --connection-options '{protocol:amqp1.0}' -b $URL --content-string=hello -a "foo;{create:always}" + MSG=`qpid-receive --connection-options '{protocol:amqp1.0}' -b $URL -a "foo;{create:always}" --messages 1` + test "$MSG" = "hello" || { echo "receive failed for AMQP 1.0 '$MSG' != 'hello'"; exit 1; } +fi + +## Test connection with a combination of URL and connection options (in messaging API) +URL=$TEST_HOSTNAME:$PORT +qpid-send -b $URL --connection-options '{transport:ssl,heartbeat:2}' --content-string='hello again' -a "foo;{create:always}" +MSG=`qpid-receive -b $URL --connection-options '{transport:ssl,heartbeat:2}' -a "foo;{create:always}" --messages 1` +test "$MSG" = "hello again" || { echo "receive failed '$MSG' != 'hello again'"; exit 1; } + +## Test using the Python client +if test -d $PYTHON_DIR; then + echo "Testing Non-Authenticating with Python Client..." + URL=amqps://$TEST_HOSTNAME:$PORT + if `$PY_PING_BROKER -b $URL`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi +else + echo "Skipping python part of ssl_test, no python dir." 
+fi + +#### Client Authentication tests + +start_authenticating_broker +PORT2=${PORTS[1]} +echo "Running SSL client authentication test on port $PORT2" +URL=amqp:ssl:$TEST_HOSTNAME:$PORT2 + +## See if you can set the SSL cert-name for the connection +qpid-send -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" --content-string=hello -a "bar;{create:always}" +MSG2=`qpid-receive -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" -a "bar;{create:always}" --messages 1` +test "$MSG2" = "hello" || { echo "receive failed '$MSG2' != 'hello'"; exit 1; } + +## Make sure that connect fails with an invalid SSL cert-name +qpid-send -b $URL --connection-options "{ssl-cert-name: pignose }" --content-string=hello -a "baz;{create:always}" 2>/dev/null 1>/dev/null +MSG3=`qpid-receive -b $URL --connection-options "{ssl-cert-name: pignose }" -a "baz;{create:always}" --messages 1 2>/dev/null` +test "$MSG3" = "" || { echo "receive succeeded without valid ssl cert '$MSG3' != ''"; exit 1; } + +stop_brokers + +# Test ssl muxed with plain TCP on the same connection + +# Test a specified port number - since tcp/ssl are the same port don't need to specify --transport ssl +PORT=`pick_port` +start_ssl_mux_broker $PORT || error "Could not start broker" +echo "Running SSL/TCP mux test on fixed port $PORT" + +## Test connection via connection settings +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" +qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" + +# Test a broker chosen port - since ssl chooses port need to use --transport ssl here +start_ssl_broker +PORT=${PORTS[0]} +echo "Running SSL/TCP mux test on random port $PORT" + +## Test connection via connection settings +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" 
+qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" + +stop_brokers + +### Additional tests that require 'openssl' and 'pk12util' to be installed (optional) + +PK12UTIL=$(type -p pk12util) +if [[ !(-x $PK12UTIL) ]] ; then + echo >&2 "'pk12util' command not available, skipping remaining tests" + exit 0 +fi + +OPENSSL=$(type -p openssl) +if [[ !(-x $OPENSSL) ]] ; then + echo >&2 "'openssl' command not available, skipping remaining tests" + exit 0 +fi + +## verify python version > 2.5 (only 2.6+ does certificate checking) +PY_VERSION=$(python -c "import sys; print hex(sys.hexversion)") +if (( PY_VERSION < 0x02060000 )); then + echo >&2 "Detected python version < 2.6 - skipping certificate verification tests" + exit 0 +fi + +echo "Testing Certificate validation and Authentication with the Python Client..." + +# extract the CA's certificate as a PEM file +get_ca_certs() { + $PK12UTIL -o ${TEST_CERT_DIR}/CA_pk12.out -d ${CERT_DB} -n "Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null + $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/CA_pk12.out -out ${CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null + $PK12UTIL -o ${TEST_CERT_DIR}/other_CA_pk12.out -d ${OTHER_CA_CERT_DB} -n "Other-Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null + $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/other_CA_pk12.out -out ${OTHER_CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null +} + +get_ca_certs || error "Could not extract CA certificates as PEM files" +start_ssl_broker +PORT=${PORTS[0]} +URL=amqps://$TEST_HOSTNAME:$PORT +# verify the python client can authenticate the broker using the CA +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi +# verify the python client fails to authenticate the broker when using the other CA +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${OTHER_CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; 
}; else echo " Passed"; fi +stop_brokers + +# create a certificate without matching TEST_HOSTNAME, should fail to verify + +create_certs "O=MyCo" "*.${TEST_HOSTNAME}.com" || error "Could not create server test certificate" +get_ca_certs || error "Could not extract CA certificates as PEM files" +start_ssl_broker +PORT=${PORTS[0]} +URL=amqps://$TEST_HOSTNAME:$PORT +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; }; else echo " Passed"; fi +# but disabling the check for the hostname should pass +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} --ssl-skip-hostname-check`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi +stop_brokers + +# test SubjectAltName parsing + +if (( PY_VERSION >= 0x02070300 )); then +# python 2.7.3+ supports SubjectAltName extraction +# create a certificate with TEST_HOSTNAME only in SAN, should verify OK + create_certs "O=MyCo" "*.foo.com,${TEST_HOSTNAME},*xyz.com" || error "Could not create server test certificate" + get_ca_certs || error "Could not extract CA certificates as PEM files" + start_ssl_broker + PORT=${PORTS[0]} + URL=amqps://$TEST_HOSTNAME:$PORT + if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi + stop_brokers + + create_certs "O=MyCo" "*${TEST_HOSTNAME}" || error "Could not create server test certificate" + get_ca_certs || error "Could not extract CA certificates as PEM files" + start_ssl_broker + PORT=${PORTS[0]} + URL=amqps://$TEST_HOSTNAME:$PORT + if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi + stop_brokers +fi diff --git a/qpid/cpp/src/tests/run_store_tests.ps1 b/qpid/cpp/src/tests/run_store_tests.ps1 index 0683892393..f85e158cc0 100644 --- a/qpid/cpp/src/tests/run_store_tests.ps1 +++ b/qpid/cpp/src/tests/run_store_tests.ps1 @@ -31,7 +31,7 @@ if ($test_store -ne "MSSQL" -and $test_store -ne 
"MSSQL-CLFS") { $srcdir = Split-Path $myInvocation.InvocationName -. .\test_env.ps1 +. .\env.ps1 if (!(Test-Path $PYTHON_DIR -pathType Container)) { "Skipping store tests as python libs not found" diff --git a/qpid/cpp/src/tests/run_test b/qpid/cpp/src/tests/run_test deleted file mode 100755 index 8e397b3458..0000000000 --- a/qpid/cpp/src/tests/run_test +++ /dev/null @@ -1,191 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# -# Set up environment and run a test executable or script. -# -# Output nothing if test passes, show the output if it fails and -# leave output in <test>.log for examination. -# -# If qpidd.port exists and is not empty run test with QPID_PORT=`cat qpidd.port` -# -# If $VALGRIND if is set run under valgrind. If there are valgrind -# erros show valgrind output, also leave it in <test>.valgrind for -# examination. 
-# - -wrapper="Qpid Test Wrapper" -function usage { - echo "Usage:" - echo " -workingDir DIR" - echo " -buildDir DIR" - echo " -sourceDir DIR" - echo " -python - run python script" - echo " -boostTest - run boost unit test" - echo " -xml - XML output from tests" - echo " -startBroker - start/stop broker before/after test" - echo " -brokerOptions - use these extra options when starting broker" - echo " -help - print this message" - echo " -- - This is required to separate the wrapped command" - echo " from the test parameters" -} - -function illegal_option { - echo "$wrapper: $1 is not an accepted option" - usage >&2 -} - -function no_command { - echo "$wrapper: No wrapped command specified" - usage >&2 -} - -function ignored_argument { - echo "Ignored argument: $1" >&2 -} - -working_dir='.' - -while true; do -case "$1" in - --) shift; break ;; - # Split up any parameters expressed as -blah=foo - # and process them next time round the loop - -*=*) option=${1%%=*}; param=${1#*=} - shift; - set -- "$option" "$param" "$@" ;; - -workingDir) working_dir=$2; shift 2 ;; - -buildDir) build_dir=$2; shift 2 ;; - -sourceDir) source_dir=$2; shift 2 ;; - -python) run_python=yes; shift ;; - -boostTest) boost_test=yes; shift ;; - -xml) xml_output=yes; shift ;; - -startBroker) start_broker=yes; shift ;; - -brokerOptions) qpidd_extra_options=$2; shift 2 ;; - -help) usage; exit 0; ;; - -*) illegal_option "$1"; exit 1; ;; - '') no_command; exit 1; ;; - *) ignored_argument "$1"; shift; ;; -esac -done - -program=$1 -shift - -logfilebase=$(pwd -P)/$(basename $program) -source $build_dir/src/tests/test_env.sh || (echo "Error: Couldn't read test_env.sh (build settings)" ; exit 1) -source $srcdir/vg_check - -# Allow environment to dictate if we output xml test results -if [ -n "$QPID_XML_TEST_OUTPUT" ] ; then - xml_output=yes -fi - -# Use VALGRIND_OPTS="--gen-suppressions=all" to generated suppressions -VALGRIND_OPTS="$VALGRIND_OPTS ---leak-check=full ---demangle=yes 
---suppressions=$srcdir/.valgrind.supp ---num-callers=25 -" - -# Set up environment for running a Qpid test -if [ -n "$start_broker" ] ; then - qpidd_command="$QPIDD_EXEC --auth=no --no-module-dir --daemon --port=0 --interface 127.0.0.1 --log-to-file $logfilebase-qpidd.log $qpidd_extra_options" - if [ -n "$VALGRIND" ] ; then - if [ -n "$xml_output" ] ; then - QPID_PORT=$($VALGRIND $VALGRIND_OPTS --xml=yes --xml-file=$logfilebase-qpidd-vg.xml -- $qpidd_command) - else - QPID_PORT=$($VALGRIND $VALGRIND_OPTS --log-file=$logfilebase-qpidd.vglog -- $qpidd_command) - fi - else - QPID_PORT=$($qpidd_command) - fi -elif [ -r qpidd.port ]; then - QPID_PORT=$(cat qpidd.port) -fi -export QPID_PORT -QPID_LOG_TO_FILE="$logfilebase.log" -export QPID_LOG_TO_FILE - -# Export variables from makefile. -export srcdir - -if [ -n "$VALGRIND" ] ; then - if [ -n "$xml_output" ] ; then - valgrind_command="$VALGRIND $VALGRIND_OPTS --xml=yes --xml-file=$logfilebase-vg.xml --" - else - VG_LOG="$logfilebase.vglog" - rm -f $VG_LOG* - valgrind_command="$VALGRIND $VALGRIND_OPTS --log-file=$VG_LOG --" - fi -fi - -ERROR=0 -if [ -n "$run_python" -a -n "$PYTHON" ] ; then - (cd $working_dir; $PYTHON $program "$@") || ERROR=1 -elif [ ! -x $program ] ; then - echo "Cannot execute $program" - ERROR=1 -elif file $program | grep -q ELF; then - if [ -n "$boost_test" ] ; then - # Set boost unit test environment - if [ -n "$xml_output" ] ; then - export BOOST_TEST_SHOW_PROGRESS=no - export BOOST_TEST_OUTPUT_FORMAT=XML - export BOOST_TEST_LOG_LEVEL=test_suite - export BOOST_TEST_REPORT_LEVEL=no - (cd $working_dir; $valgrind_command $program "$@") > $logfilebase-unittest.xml || ERROR=1 - else - (cd $working_dir; $valgrind_command $program "$@") || ERROR=1 - fi - else - # This is a real executable, valgrind it if required - # Hide output unless there's an error. 
- (cd $working_dir; $valgrind_command $program "$@" 2>&1) || ERROR=1 - fi - if [ -n "$VG_LOG" ] ; then - vg_check $VG_LOG* || ERROR=1 - fi -else - (cd $working_dir; $program "$@") || ERROR=1 -fi - -# Check log -if [ -r $QPID_LOG_TO_FILE ]; then -egrep 'warning\|error\|critical' $QPID_LOG_TO_FILE && { - echo "WARNING: Suspicious log entries in $QPID_LOG_TO_FILE, above." -} -fi - -if [ -n "$start_broker" ] ; then - $QPIDD_EXEC --no-module-dir --quit || ERROR=1 - - # Check qpidd.log. - egrep 'warning\|error\|critical' $logfilebase-qpidd.log && { - echo "WARNING: Suspicious broker log entries in qpidd.log, above." - } - - # Check valgrind log. - if [ -n "$VALGRIND" -a -z "$xml_output" ] ; then - vg_check $logfilebase-qpidd.vglog || ERROR=1 - fi -fi -exit $ERROR diff --git a/qpid/cpp/src/tests/run_test.ps1 b/qpid/cpp/src/tests/run_test.ps1 deleted file mode 100644 index ff103e4556..0000000000 --- a/qpid/cpp/src/tests/run_test.ps1 +++ /dev/null @@ -1,162 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -param( - [string]$workingDir = $pwd, - [string]$buildDir = $(throw "-buildDir is required"), - [string]$sourceDir, - [switch]$python = $false, - [switch]$boostTest = $false, - [switch]$xml, - [switch]$startBroker = $false, - [string]$brokerOptions, - [switch]$help, - [Parameter(Mandatory=$true, ValueFromRemainingArguments=$true, Position=0)] - [String[]]$rest - ) - -if ([string]::IsNullOrEmpty($sourceDir)) { - $sourceDir = Split-Path $myInvocation.InvocationName -} - -if ([string]::IsNullOrEmpty($xml)) { - $xml = Test-Path variable:global:QPID_XML_TEST_OUTPUT -} - -# Set up environment and run a test executable or script. -. .\test_env.ps1 - -if ($rest[0] -eq $null) { - "No wrapped command specified" - exit 1 -} -# The test exe is probably not in the current binary dir - it's usually -# placed in a subdirectory based on the configuration built in Visual Studio. -# So check around to see where it is - when located, set the QPID_LIB_DIR -# and PATH to look in the corresponding configuration off the src directory, -# one level up. -$prog = $rest[0] -$logfilebase = [System.IO.Path]::GetFileNameWithoutExtension($prog) -$logfilebase = "$pwd\\$logfilebase" -# Qpid client lib sees QPID_LOG_TO_FILE; acts like using --log-to-file on -# command line. -$env:QPID_LOG_TO_FILE = "$logfilebase.log" -$is_script = $prog -match ".ps1$" -if (($is_script -or $python) -and !(Test-Path "$prog")) { - "$prog does not exist" - exit 1 -} -if (!$is_script -and !(Test-Path "$prog")) { - . $sourceDir\find_prog.ps1 $prog - $rest[0] = $prog - $env:QPID_LIB_DIR = "..\$sub" -} - -# Set up environment for running a Qpid test. If a broker should be started, -# do that, else check for a saved port number to use. 
-if ($startBroker) { - $broker = new-object System.Diagnostics.ProcessStartInfo - $broker.WorkingDirectory = $pwd - $broker.UseShellExecute = $false - $broker.CreateNoWindow = $true - $broker.RedirectStandardOutput = $true - $broker.FileName = $env:QPIDD_EXEC - $broker.Arguments = "--auth=no --no-module-dir --port=0 --interface 127.0.0.1 --log-to-file $logfilebase-qpidd.log $brokerOptions" - $broker_process = [System.Diagnostics.Process]::Start($broker) - $env:QPID_PORT = $broker_process.StandardOutput.ReadLine() -} -else { - # If qpidd.port exists and is not empty run test with QPID_PORT set. - if (Test-Path qpidd.port) { - set-item -path env:QPID_PORT -value (get-content -path qpidd.port -totalcount 1) - } -} - -# Now start the real test. -if ($python) { - $to_run = $PYTHON_EXE - $skip_args0 = $false - $outputfile = "" -} -elseif ($boostTest) { - if ($xml) { - $env:BOOST_TEST_SHOW_PROGRESS=no - $env:BOOST_TEST_OUTPUT_FORMAT=XML - $env:BOOST_TEST_LOG_LEVEL=test_suite - $env:BOOST_TEST_REPORT_LEVEL=no - $to_run = $rest[0] - $skip_args0 = $true - $outputfile = "$logfilebase-unittest.xml" - } - else { - $to_run = $rest[0] - $skip_args0 = $true - $outputfile = "" - } -} -else { - # Non-boost executable or powershell script - $outputfile = "" - if ($is_script) { - $to_run = (get-command powershell.exe).Definition - $skip_args0 = $false - } - else { - $to_run = $rest[0] - $skip_args0 = $true - } -} - -if ($skip_args0) { - $arglist = $rest[1..($rest.length-1)] -} -else { - $arglist = $rest -} - -if ($outputfile -eq "") { - $p = Start-Process -FilePath $to_run -ArgumentList $arglist -NoNewWindow -PassThru - $line = "" -} -else { - $p = Start-Process -FilePath $to_run -ArgumentList $arglist -NoNewWindow -RedirectStandardOutput $outputfile -PassThru -} -Wait-Process -InputObject $p -$status = $p.ExitCode - -if (Test-Path $env:QPID_LOG_TO_FILE) { - $problems = Select-String -Path $env:QPID_LOG_TO_FILE -pattern " error ", " warning ", " critical " - if ($problems -ne $null) { 
- "WARNING: suspicious log entries in $env:QPID_LOG_TO_FILE:\n$problems" - $status = 1 - } -} - -# If a broker was started, stop it. -if ($startBroker) { - & $env:QPIDD_EXEC --no-module-dir --quit - # Check qpid log for problems - $problems = Select-String -Path $logfilebase-qpidd.log -pattern " error ", " warning ", " critical " - if ($problems -ne $null) { - "WARNING: suspicious log entries in $logfilebase-qpidd.log:\n$problems" - $status = 1 - } -} - -exit $status diff --git a/qpid/cpp/src/tests/run_topic_tests b/qpid/cpp/src/tests/run_topic_tests new file mode 100755 index 0000000000..f34b8044ba --- /dev/null +++ b/qpid/cpp/src/tests/run_topic_tests @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# Quick and quiet topic test for make test + +from common import * + +ENV["QPID_PORT"] = start_broker("broker") + +call_with_valgrind("topictest -s2 -m2 -b1") + +check_results() diff --git a/qpid/cpp/src/tests/run_transaction_tests b/qpid/cpp/src/tests/run_transaction_tests new file mode 100755 index 0000000000..4319b22096 --- /dev/null +++ b/qpid/cpp/src/tests/run_transaction_tests @@ -0,0 +1,30 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from common import * + +port1 = start_broker("broker1") +port2 = start_broker("broker2") + +call_with_valgrind("qpid-txtest --queues 4 --tx-count 10 --quiet --port {}", port1) +call_with_valgrind("qpid-txtest2 --queues 4 --tx-count 10 --quiet --port {}", port2) + +check_results() diff --git a/qpid/cpp/src/tests/run_ring_queue_test b/qpid/cpp/src/tests/run_unit_tests index 69497f9872..8e81fe73a0 100755 --- a/qpid/cpp/src/tests/run_ring_queue_test +++ b/qpid/cpp/src/tests/run_unit_tests @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env python # # Licensed to the Apache Software Foundation (ASF) under one @@ -18,19 +18,22 @@ # specific language governing permissions and limitations # under the License. 
# -#script to run a sequence of ring queue tests via make -#setup path to find qpid-config and sender/receiver test progs -source ./test_env.sh +from common import * -export PATH=$PWD:$srcdir:$PYTHON_COMMANDS:$PATH +# Options for boost test framework -#set port to connect to via env var -test -s qpidd.port && QPID_PORT=`cat qpidd.port` -export QPID_PORT +if "BOOST_TEST_SHOW_PROGRESS" not in ENV: + ENV["BOOST_TEST_SHOW_PROGRESS"] = "yes" -ring_queue_test -c -s 4 -r 4 -ring_queue_test -s 4 -r 0 -ring_queue_test -s 1 -r 1 +if "BOOST_TEST_CATCH_SYSTEM_ERRORS" not in ENV: + ENV["BOOST_TEST_CATCH_SYSTEM_ERRORS"] = "no" +if WINDOWS: + ENV["QPID_SSL_CERT_STORE_LOCATION"] = "LocalMachine" + ENV["QPID_SSL_CERT_NAME"] = "localhost" + ENV["QPID_SSL_PORT"] = "0" + +call_with_valgrind("unit_test") +check_results() diff --git a/qpid/cpp/src/tests/sasl_fed b/qpid/cpp/src/tests/sasl_fed index 38ef43f56f..8491d428be 100755 --- a/qpid/cpp/src/tests/sasl_fed +++ b/qpid/cpp/src/tests/sasl_fed @@ -19,69 +19,54 @@ # under the License. # -# This minimum value corresponds to sasl version 2.1.22 -minimum_sasl_version=131350 +set -eu -sasl_version=`$QPID_TEST_EXEC_DIR/sasl_version` - -# This test is necessary becasue this sasl version is the first one that permits -# redirection of the sasl config file path. -if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then - echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version" - exit 0 -fi - -# In a distribution, the python tools will be absent. -if [ ! -f $QPID_CONFIG_EXEC ] || [ ! -f $QPID_ROUTE_EXEC ] ; then - echo "python tools absent - skipping sasl_fed." 
- exit 0 -fi +WORK_DIR=$WORK_DIR/sasl_fed +mkdir $WORK_DIR +sasl_config_dir=$BUILD_DIR/src/tests/sasl_config -sasl_config_file=$QPID_TEST_EXEC_DIR/sasl_config +# Create ACL file to allow links +echo acl allow all all > $WORK_DIR/sasl_fed.acl -my_random_number=$RANDOM -tmp_root=/tmp/sasl_fed_$my_random_number -mkdir -p $tmp_root +echo "Starting broker 1" -# create ACL file to allow links -echo acl allow all all > $tmp_root/sasl_fed.acl - - -#-------------------------------------------------- -#echo " Starting broker 1" -#-------------------------------------------------- -$QPIDD_EXEC \ +qpidd \ -p 0 --interface 127.0.0.1 \ - --data-dir $tmp_root/data_1 \ + --data-dir $WORK_DIR/data_1 \ --auth=yes \ --mgmt-enable=yes \ --log-enable info+ \ --log-source yes \ - --log-to-file $tmp_root/qpidd_1.log \ - --sasl-config=$sasl_config_file \ - --acl-file $tmp_root/sasl_fed.acl \ - -d > $tmp_root/broker_1_port + --log-to-file $WORK_DIR/qpidd_1.log \ + --sasl-config=$sasl_config_dir \ + --acl-file $WORK_DIR/sasl_fed.acl \ + -d > $WORK_DIR/broker_1_port -broker_1_port=`cat $tmp_root/broker_1_port` +broker_1_port=$(cat $WORK_DIR/broker_1_port) +echo "Starting broker 2" -#-------------------------------------------------- -#echo " Starting broker 2" -#-------------------------------------------------- -$QPIDD_EXEC \ +qpidd \ -p 0 --interface 127.0.0.1 \ - --data-dir $tmp_root/data_2 \ + --data-dir $WORK_DIR/data_2 \ --auth=yes \ --mgmt-enable=yes \ --log-enable info+ \ --log-source yes \ - --log-to-file $tmp_root/qpidd_2.log \ - --sasl-config=$sasl_config_file \ - --acl-file $tmp_root/sasl_fed.acl \ - -d > $tmp_root/broker_2_port + --log-to-file $WORK_DIR/qpidd_2.log \ + --sasl-config=$sasl_config_dir \ + --acl-file $WORK_DIR/sasl_fed.acl \ + -d > $WORK_DIR/broker_2_port + +broker_2_port=$(cat $WORK_DIR/broker_2_port) + +function stop_brokers { + qpidd --port $broker_1_port --quit + qpidd --port $broker_2_port --quit +} -broker_2_port=`cat $tmp_root/broker_2_port` +trap 
stop_brokers EXIT sleep 2 @@ -90,80 +75,48 @@ QUEUE_NAME=sasl_fed_queue ROUTING_KEY=sasl_fed_queue EXCHANGE_NAME=sasl_fedex -#-------------------------------------------------- -#echo " add exchanges" -#-------------------------------------------------- -$QPID_CONFIG_EXEC -b localhost:$broker_1_port add exchange direct $EXCHANGE_NAME -$QPID_CONFIG_EXEC -b localhost:$broker_2_port add exchange direct $EXCHANGE_NAME +echo "Adding exchanges" +qpid-config -b localhost:$broker_1_port add exchange direct $EXCHANGE_NAME +qpid-config -b localhost:$broker_2_port add exchange direct $EXCHANGE_NAME -#-------------------------------------------------- -#echo " add queues" -#-------------------------------------------------- -$QPID_CONFIG_EXEC -b localhost:$broker_1_port add queue $QUEUE_NAME -$QPID_CONFIG_EXEC -b localhost:$broker_2_port add queue $QUEUE_NAME +echo "Adding queues" + +qpid-config -b localhost:$broker_1_port add queue $QUEUE_NAME +qpid-config -b localhost:$broker_2_port add queue $QUEUE_NAME sleep 5 -#-------------------------------------------------- -#echo " create bindings" -#-------------------------------------------------- -$QPID_CONFIG_EXEC -b localhost:$broker_1_port bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY -$QPID_CONFIG_EXEC -b localhost:$broker_2_port bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY +echo "Creating bindings" + +qpid-config -b localhost:$broker_1_port bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY +qpid-config -b localhost:$broker_2_port bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY sleep 5 +echo "Adding routes" -#-------------------------------------------------- -#echo " qpid-route route add" -#-------------------------------------------------- -$QPID_ROUTE_EXEC route add zag/zag@localhost:$broker_2_port zag/zag@localhost:$broker_1_port $EXCHANGE_NAME $ROUTING_KEY "" "" DIGEST-MD5 +qpid-route route add zag/zag@localhost:$broker_2_port zag/zag@localhost:$broker_1_port $EXCHANGE_NAME $ROUTING_KEY "" "" DIGEST-MD5 sleep 5 - n_messages=100 
-#-------------------------------------------------- -#echo " Sending 100 messages to $broker_1_port " -#-------------------------------------------------- -$QPID_TEST_EXEC_DIR/datagen --count $n_messages | $SENDER_EXEC --mechanism DIGEST-MD5 --username zag --password zag --exchange $EXCHANGE_NAME --routing-key $ROUTING_KEY --port $broker_1_port -sleep 5 +echo "Sending 100 messages to $broker_1_port " -#-------------------------------------------------- -#echo " Examine Broker $broker_1_port" -#-------------------------------------------------- -broker_1_message_count=`$PYTHON_COMMANDS/qpid-stat -q -b localhost:$broker_1_port | grep sasl_fed_queue | awk '{print $2}'` -#echo " " - -#-------------------------------------------------- -#echo " Examine Broker $broker_2_port" -#-------------------------------------------------- -broker_2_message_count=`$PYTHON_COMMANDS/qpid-stat -q -b localhost:$broker_2_port | grep sasl_fed_queue | awk '{print $2}'` -#echo " " - -#-------------------------------------------------- -#echo " Asking brokers to quit." 
-#-------------------------------------------------- -$QPIDD_EXEC --port $broker_1_port --quit -$QPIDD_EXEC --port $broker_2_port --quit - - -#-------------------------------------------------- -#echo "Removing temporary directory $tmp_root" -#-------------------------------------------------- -rm -rf $tmp_root - -if [ "$broker_2_message_count" -eq "$n_messages" ]; then - # echo "good: |$broker_2_message_count| == |$n_messages|" - exit 0 -else - # echo "not ideal: |$broker_1_message_count| != |$n_messages|" - exit 1 -fi +datagen --count $n_messages | sender --mechanism DIGEST-MD5 --username zag --password zag --exchange $EXCHANGE_NAME --routing-key $ROUTING_KEY --port $broker_1_port +sleep 5 +echo "Examining Broker $broker_1_port" +broker_1_message_count=$(qpid-stat -q -b localhost:$broker_1_port | grep sasl_fed_queue | awk '{print $2}') +echo "Examining Broker $broker_2_port" +broker_2_message_count=$(qpid-stat -q -b localhost:$broker_2_port | grep sasl_fed_queue | awk '{print $2}') +if (( $broker_2_message_count != $n_messages )); then + echo "Expected ${n_messages} and received ${broker_2_message_count}" + exit 1 +fi diff --git a/qpid/cpp/src/tests/sasl_fed_ex b/qpid/cpp/src/tests/sasl_fed_ex index e2ee37ba39..bf62be95df 100755 --- a/qpid/cpp/src/tests/sasl_fed_ex +++ b/qpid/cpp/src/tests/sasl_fed_ex @@ -25,86 +25,47 @@ # transport-layer security. #=============================================================================== -source $QPID_TEST_COMMON +set -eu -ensure_python_tests - -script_name=`basename $0` - -if [ $# -lt 1 ] || [ $# -gt 2 ] -then - echo - # These are the four different ways of creating links ( or routes+links ) - # that the qpid-route command provides. - echo "Usage: ${script_name} dynamic|link|queue|route" - echo - exit 1 +if (( $# != 1 )); then + # These are the four different ways of creating links ( or routes+links ) + # that the qpid-route command provides. 
+ echo "Usage: $(basename $0) dynamic|link|queue|route" + exit 1 fi qpid_route_method=$1 -# Debugging print. -------------------------- -debug= -function print { - if [ "$debug" ]; then - echo "${script_name}: $1" - fi -} - -print "=========== start sasl_fed_ex $* ============" - +WORK_DIR="${WORK_DIR}/sasl_fed_ex_${qpid_route_method}" +mkdir $WORK_DIR - -# This minimum value corresponds to sasl version 2.1.22 -minimum_sasl_version=131350 - -sasl_version=`$QPID_TEST_EXEC_DIR/sasl_version` - -# This test is necessary because this sasl version is the first one that permits -# redirection of the sasl config file path. -if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then - echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version" - exit 0 -fi - -CERT_DIR=`pwd`/test_cert_db -CERT_PW_FILE=`pwd`/cert.password +CERT_DIR=$WORK_DIR/test_cert_db +CERT_PW_FILE=$WORK_DIR/cert.password TEST_HOSTNAME=127.0.0.1 create_certs() { - #create certificate and key databases with single, simple, self-signed certificate in it + # Create certificate and key databases with single, simple, + # self-signed certificate in it mkdir ${CERT_DIR} certutil -N -d ${CERT_DIR} -f ${CERT_PW_FILE} certutil -S -d ${CERT_DIR} -n ${TEST_HOSTNAME} -s "CN=${TEST_HOSTNAME}" -t "CT,," -x -f ${CERT_PW_FILE} -z /bin/sh 2> /dev/null } -delete_certs() { - if [[ -e ${CERT_DIR} ]] ; then - print "removing cert dir ${CERT_DIR}" - rm -rf ${CERT_DIR} - fi -} - - CERTUTIL=$(type -p certutil) -if [[ !(-x $CERTUTIL) ]] ; then - echo "No certutil, skipping ssl test"; - exit 0; + +if [[ ! -x $CERTUTIL ]]; then + echo "No certutil, skipping ssl test" + exit 0 fi -delete_certs create_certs 2> /dev/null -if [ ! $? ]; then - error "Could not create test certificate" - exit 1 -fi -sasl_config_dir=$QPID_TEST_EXEC_DIR/sasl_config +if (( $? 
!= 0 )); then + echo "Could not create test certificate" + exit 1 +fi -tmp_root=$QPID_TEST_EXEC_DIR/sasl_fed_ex_temp -print "results dir is ${tmp_root}" -rm -rf ${tmp_root} -mkdir -p $tmp_root +sasl_config_dir=$BUILD_DIR/src/tests/sasl_config SRC_SSL_PORT=6667 DST_SSL_PORT=6666 @@ -125,8 +86,6 @@ export QPID_SSL_CERT_DB=${CERT_DIR} export QPID_SSL_CERT_PASSWORD_FILE=${CERT_PW_FILE} export QPID_SSL_CERT_NAME=${TEST_HOSTNAME} - - ####################################### # Understanding this Plumbing ####################################### @@ -146,7 +105,7 @@ export QPID_SSL_CERT_NAME=${TEST_HOSTNAME} COMMON_BROKER_OPTIONS=" \ --ssl-sasl-no-dict \ - --sasl-config=$sasl_config_dir \ + --sasl-config $sasl_config_dir \ --ssl-require-client-authentication \ --auth yes \ --ssl-cert-db $CERT_DIR \ @@ -154,130 +113,111 @@ COMMON_BROKER_OPTIONS=" \ --ssl-cert-name $TEST_HOSTNAME \ --no-data-dir \ --no-module-dir \ - --mgmt-enable=yes \ + --mgmt-enable yes \ --log-enable info+ \ --log-source yes \ - --daemon " + --daemon" - function start_brokers { # vanilla brokers -------------------------------- - print "Starting SRC broker" - $QPIDD_EXEC \ + echo "Starting SRC broker" + qpidd \ --port=${SRC_TCP_PORT} \ --ssl-port ${SRC_SSL_PORT} \ ${COMMON_BROKER_OPTIONS} \ - --log-to-file $tmp_root/qpidd_src.log 2> /dev/null + --log-to-file $WORK_DIR/qpidd_src.log 2> /dev/null broker_ports[0]=${SRC_TCP_PORT} - print "Starting DST broker" - $QPIDD_EXEC \ + echo "Starting DST broker" + qpidd \ --port=${DST_TCP_PORT} \ --ssl-port ${DST_SSL_PORT} \ ${COMMON_BROKER_OPTIONS} \ - --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null + --log-to-file $WORK_DIR/qpidd_dst.log 2> /dev/null broker_ports[1]=${DST_TCP_PORT} } function halt_brokers { - n_brokers=${#broker_ports[@]} - print "Halting ${n_brokers} brokers." 
- for i in $(seq 0 $((${n_brokers} - 1))) - do - halt_port=${broker_ports[$i]} - print "Halting broker $i on port ${halt_port}" - $QPIDD_EXEC --port ${halt_port} --quit - done - + n_brokers=${#broker_ports[@]} + echo "Halting ${n_brokers} brokers" + for i in $(seq 0 $((${n_brokers} - 1))); do + halt_port=${broker_ports[$i]} + echo "Halting broker $i on port ${halt_port}" + qpidd --port ${halt_port} --quit + done } - start_brokers - +trap halt_brokers EXIT # I am not randomizing these names, because this test creates its own brokers. QUEUE_NAME=sasl_fed_queue ROUTING_KEY=sasl_fed_queue EXCHANGE_NAME=sasl_fedex +echo "Add exchanges" +qpid-config -b localhost:${SRC_TCP_PORT} add exchange direct $EXCHANGE_NAME +qpid-config -b localhost:${DST_TCP_PORT} add exchange direct $EXCHANGE_NAME -print "add exchanges" -$QPID_CONFIG_EXEC -b localhost:${SRC_TCP_PORT} add exchange direct $EXCHANGE_NAME -$QPID_CONFIG_EXEC -b localhost:${DST_TCP_PORT} add exchange direct $EXCHANGE_NAME - - -print "add queues" -$QPID_CONFIG_EXEC -b localhost:${SRC_TCP_PORT} add queue $QUEUE_NAME -$QPID_CONFIG_EXEC -b localhost:${DST_TCP_PORT} add queue $QUEUE_NAME - - -print "create bindings" -$QPID_CONFIG_EXEC -b localhost:${SRC_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY -$QPID_CONFIG_EXEC -b localhost:${DST_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY +echo "Add queues" +qpid-config -b localhost:${SRC_TCP_PORT} add queue $QUEUE_NAME +qpid-config -b localhost:${DST_TCP_PORT} add queue $QUEUE_NAME +echo "Create bindings" +qpid-config -b localhost:${SRC_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY +qpid-config -b localhost:${DST_TCP_PORT} bind $EXCHANGE_NAME $QUEUE_NAME $ROUTING_KEY # # NOTE: The SRC broker *must* be referred to as $TEST_HOSTNAME, and not as "localhost". # It must be referred to by the exact string given as the Common Name (CN) in the cert, # which was created in the function create_certs, above. 
- - #---------------------------------------------------------------- # Use qpid-route to create the link, or the link+route, depending # on which of its several methods was requested. #---------------------------------------------------------------- -if [ ${qpid_route_method} == "dynamic" ]; then - print "dynamic add" - $QPID_ROUTE_EXEC -t ssl dynamic add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME "" "" EXTERNAL -elif [ ${qpid_route_method} == "link" ]; then - print "link add" - $QPID_ROUTE_EXEC -t ssl link add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} EXTERNAL -elif [ ${qpid_route_method} == "queue" ]; then - print "queue add" - $QPID_ROUTE_EXEC -t ssl queue add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY EXTERNAL -elif [ ${qpid_route_method} == "route" ]; then - print "route add" - $QPID_ROUTE_EXEC -t ssl route add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY "" "" EXTERNAL +if [[ $qpid_route_method == "dynamic" ]]; then + echo "Dynamic add" + qpid-route -t ssl dynamic add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME "" "" EXTERNAL || : +elif [[ $qpid_route_method == "link" ]]; then + echo "Link add" + qpid-route -t ssl link add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} EXTERNAL || : +elif [[ $qpid_route_method == "queue" ]]; then + echo "Queue add" + qpid-route -t ssl queue add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY EXTERNAL || : +elif [[ $qpid_route_method == "route" ]]; then + echo "Route add" + qpid-route -t ssl route add localhost:${DST_TCP_PORT} $TEST_HOSTNAME:${SRC_SSL_PORT} $EXCHANGE_NAME $ROUTING_KEY "" "" EXTERNAL || : else - echo "unknown method: |${qpid_route_method}|" - echo " choices are: dynamic|link|queue|route " - halt_brokers - exit 1 + echo "Unknown method: |${qpid_route_method}|" + echo "Choices are: dynamic|link|queue|route " + 
halt_brokers + exit 1 fi - # I don't know how to avoid this sleep yet. It has to come after route-creation # to avoid false negatives. sleep 5 # Look only at the transport field, which should be "ssl". -print "check the link" -link_status=$($QPID_ROUTE_EXEC link list localhost:${DST_TCP_PORT} | tail -1 | awk '{print $3}') - -halt_brokers +echo "Check the link" +link_status=$(qpid-route link list localhost:${DST_TCP_PORT} | tail -1 | awk '{print $3}') sleep 1 -if [ ! ${link_status} ]; then - print "link_status is empty" - print "result: fail" - exit 2 +if [[ ! $link_status ]]; then + echo "Link status is empty" + echo "Result: fail" + exit 2 fi -if [ ${link_status} == "ssl" ]; then - print "result: good" - # Only remove the tmp_root on success, to permit debugging. - print "Removing temporary directory $tmp_root" - rm -rf $tmp_root - exit 0 +if [[ $link_status == "ssl" ]]; then + echo "Result: good" + exit 0 fi -print "link_status has a bad value: ${link_status}" -print "result: fail" +echo "Link status has a bad value: ${link_status}" +echo "Result: fail" exit 3 - - - diff --git a/qpid/cpp/src/tests/sasl_no_dir b/qpid/cpp/src/tests/sasl_no_dir index b2f5d1668e..30d8434079 100755 --- a/qpid/cpp/src/tests/sasl_no_dir +++ b/qpid/cpp/src/tests/sasl_no_dir @@ -19,88 +19,40 @@ # under the License. # -script_name=`basename $0` +set -eu -# This minimum value corresponds to sasl version 2.1.22 -minimum_sasl_version=131350 +sasl_config_dir=$BUILD_DIR/not_there_at_all -sasl_version=$($QPID_TEST_EXEC_DIR/sasl_version) - -# This test is necessary because this sasl version is the first one that permits -# redirection of the sasl config file path. -if [ "$sasl_version" -lt "$minimum_sasl_version" ]; then - echo "sasl_fed: must have sasl version 2.1.22 or greater. ( Integer value: $minimum_sasl_version ) Version is: $sasl_version" - exit 0 -fi - - -sasl_config_dir=$QPID_TEST_EXEC_DIR/sasl_config - - -# Debugging print. 
-------------------------- -debug= -function print { - if [ "$debug" ]; then - echo "${script_name}: $1" - fi -} - - -my_random_number=$RANDOM -tmp_root=/tmp/sasl_fed_$my_random_number -mkdir -p $tmp_root - - -LOG_FILE=$tmp_root/qpidd.log - -# If you want to see this test fail, just comment out this 'mv' command. -print "Moving sasl configuration dir." -mv ${sasl_config_dir} ${sasl_config_dir}- - - -#-------------------------------------------------- -print " Starting broker" -#-------------------------------------------------- -$QPIDD_EXEC \ - -p 0 --interface 127.0.0.1 \ - --no-data-dir \ - --auth=yes \ - --mgmt-enable=yes \ - --log-enable info+ \ - --log-source yes \ - --log-to-file ${LOG_FILE} \ - --sasl-config=$sasl_config_dir \ - -d 2> /dev/null 1> $tmp_root/broker_port +WORK_DIR=$WORK_DIR/sasl_no_dir +mkdir $WORK_DIR +LOG_FILE=$WORK_DIR/qpidd.log +echo "Starting broker" +qpidd \ + -p 0 --interface 127.0.0.1 \ + --no-data-dir \ + --auth=yes \ + --mgmt-enable=yes \ + --log-enable info+ \ + --log-source yes \ + --log-to-file ${LOG_FILE} \ + --sasl-config=$sasl_config_dir \ + -d 2> /dev/null 1> $WORK_DIR/broker_port || : # If it works right, the output will look something like this: ( two lines long ) # Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112) # 2011-10-13 14:07:00 critical qpidd.cpp:83: Unexpected error: Daemon startup failed: SASL: sasl_set_path failed: no such directory: /home/mick/trunk/qpid/cpp/src/tests/sasl_config (qpid/broker/SaslAuthenticator.cpp:112) -result=`cat ${LOG_FILE} | grep "sasl_set_path failed: no such directory" | wc -l ` - -#-------------------------------------------------- -print "Restore the Sasl config dir to its original place." 
-#-------------------------------------------------- -mv ${sasl_config_dir}- ${sasl_config_dir} +result=$(cat ${LOG_FILE} | grep "sasl_set_path failed: no such directory" | wc -l) -if [ "2" -eq ${result} ]; then - print "result: success" - rm -rf $tmp_root - exit 0 +if (( $result == 2 )); then + echo "Result: success" + exit 0 fi +broker_port=$(cat $WORK_DIR/broker_port) +qpidd --port ${broker_port} --quit -# If this test fails, the broker is still alive. -# Kill it. -broker_port=`cat $tmp_root/broker_port` -#-------------------------------------------------- -print "Asking broker to quit." -#-------------------------------------------------- -$QPIDD_EXEC --port $broker_port --quit - -rm -rf $tmp_root - -print "result: fail" +echo "Result: fail" exit 1 diff --git a/qpid/cpp/src/tests/sasl_test_setup.sh b/qpid/cpp/src/tests/sasl_test_setup.sh index d41efbe6e5..1416759da7 100755 --- a/qpid/cpp/src/tests/sasl_test_setup.sh +++ b/qpid/cpp/src/tests/sasl_test_setup.sh @@ -18,9 +18,10 @@ # specific language governing permissions and limitations # under the License. # -source ./test_env.sh -test -x $SASL_PW || { echo Skipping SASL test, saslpasswd2 not found; exit 0; } +source env.sh + +test -x $SASLPASSWD2 || { echo Skipping SASL test, saslpasswd2 not found; exit 0; } mkdir -p sasl_config @@ -36,7 +37,7 @@ EOF # Populate temporary sasl db. 
SASLTEST_DB=./sasl_config/qpidd.sasldb rm -f $SASLTEST_DB -echo guest | $SASL_PW -c -p -f $SASLTEST_DB -u QPID guest -echo zig | $SASL_PW -c -p -f $SASLTEST_DB -u QPID zig -echo zag | $SASL_PW -c -p -f $SASLTEST_DB -u QPID zag +echo guest | $SASLPASSWD2 -c -p -f $SASLTEST_DB -u QPID guest +echo zig | $SASLPASSWD2 -c -p -f $SASLTEST_DB -u QPID zig +echo zag | $SASLPASSWD2 -c -p -f $SASLTEST_DB -u QPID zag diff --git a/qpid/cpp/src/tests/shared_perftest b/qpid/cpp/src/tests/shared_perftest deleted file mode 100755 index 709ffd56b5..0000000000 --- a/qpid/cpp/src/tests/shared_perftest +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -exec `dirname $0`/run_perftest 100000 --mode shared --npubs 16 --nsubs 16 diff --git a/qpid/cpp/src/tests/ssl_test b/qpid/cpp/src/tests/ssl_test index 46068afefb..7c85087ad1 100755 --- a/qpid/cpp/src/tests/ssl_test +++ b/qpid/cpp/src/tests/ssl_test @@ -21,9 +21,11 @@ # Run a simple test over SSL +source env.sh + #set -x -CONFIG=$(dirname $0)/config.null +CONFIG=$(dirname $0)/qpidd-empty.conf TEST_CERT_DIR=`pwd`/test_cert_dir CERT_DB=${TEST_CERT_DIR}/test_cert_db CERT_PW_FILE=`pwd`/cert.password @@ -32,7 +34,7 @@ TEST_CLIENT_CERT=rumplestiltskin CA_PEM_FILE=${TEST_CERT_DIR}/ca_cert.pem OTHER_CA_CERT_DB=${TEST_CERT_DIR}/x_ca_cert_db OTHER_CA_PEM_FILE=${TEST_CERT_DIR}/other_ca_cert.pem -PY_PING_BROKER=${QPID_TEST_SRC_DIR}/ping_broker +PY_PING_BROKER=$SOURCE_DIR/src/tests/ping_broker COUNT=10 if [[ -a $AMQP_LIB ]] ; then @@ -93,7 +95,7 @@ delete_certs() { fi } -# Don't need --no-module-dir or --no-data-dir as they are set as env vars in test_env.sh +# Don't need --no-module-dir or --no-data-dir as they are set as env vars in env.sh COMMON_OPTS="--daemon --config $CONFIG --ssl-cert-db $CERT_DB --ssl-cert-password-file $CERT_PW_FILE --ssl-cert-name $TEST_HOSTNAME" # Start new brokers: @@ -103,7 +105,7 @@ COMMON_OPTS="--daemon --config $CONFIG --ssl-cert-db $CERT_DB --ssl-cert-passwor start_brokers() { local -a ports for (( i=0; $i<$1; i++)) do - ports[$i]=$($QPIDD_EXEC --port 0 --interface 127.0.0.1 $COMMON_OPTS $2) || error "Could not start broker $i" + ports[$i]=$(qpidd --port 0 --interface 127.0.0.1 $COMMON_OPTS $2) || error "Could not start broker $i" done PORTS=( ${PORTS[@]} ${ports[@]} ) } @@ -111,7 +113,7 @@ start_brokers() { # Stop single broker: # $1 is number of broker to stop (0 based) stop_broker() { - $QPIDD_EXEC -qp ${PORTS[$1]} + qpidd -qp ${PORTS[$1]} # Remove from ports array unset PORTS[$1] @@ -120,15 +122,15 @@ stop_broker() { stop_brokers() { for port in "${PORTS[@]}"; do - $QPIDD_EXEC -qp $port + qpidd -qp $port done PORTS=() } pick_port() { # 
We need a fixed port to set --cluster-url. Use qpidd to pick a free port. - PICK=`../qpidd --no-module-dir --listen-disable ssl -dp0` - ../qpidd --no-module-dir -qp $PICK + PICK=`qpidd --no-module-dir --listen-disable ssl -dp0` + qpidd --no-module-dir -qp $PICK echo $PICK } @@ -143,11 +145,11 @@ start_ssl_broker() { } start_ssl_mux_broker() { - ../qpidd $COMMON_OPTS --port $1 --ssl-port $1 --auth no + qpidd $COMMON_OPTS --port $1 --ssl-port $1 --auth no PORTS=( ${PORTS[@]} $1 ) } -sasl_config_dir=$QPID_TEST_EXEC_DIR/sasl_config +sasl_config_dir=$BUILD_DIR/src/tests/sasl_config start_authenticating_broker() { start_brokers 1 "--transport ssl --ssl-port 0 --require-encryption --ssl-sasl-no-dict --ssl-require-client-authentication --auth yes --sasl-config=${sasl_config_dir} $MODULES" @@ -180,25 +182,25 @@ export QPID_SSL_CERT_DB=${CERT_DB} export QPID_SSL_CERT_PASSWORD_FILE=${CERT_PW_FILE} ## Test connection via connection settings -./qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary ## Test connection with a URL URL=amqp:ssl:$TEST_HOSTNAME:$PORT -./qpid-send -b $URL --content-string=hello -a "foo;{create:always}" -MSG=`./qpid-receive -b $URL -a "foo;{create:always}" --messages 1` +qpid-send -b $URL --content-string=hello -a "foo;{create:always}" +MSG=`qpid-receive -b $URL -a "foo;{create:always}" --messages 1` test "$MSG" = "hello" || { echo "receive failed '$MSG' != 'hello'"; exit 1; } if [[ -a $AMQP_LIB ]] ; then echo "Testing ssl over AMQP 1.0" - ./qpid-send --connection-options '{protocol:amqp1.0}' -b $URL --content-string=hello -a "foo;{create:always}" - MSG=`./qpid-receive --connection-options '{protocol:amqp1.0}' -b $URL -a "foo;{create:always}" --messages 1` + qpid-send --connection-options '{protocol:amqp1.0}' -b $URL --content-string=hello -a "foo;{create:always}" + MSG=`qpid-receive --connection-options '{protocol:amqp1.0}' -b $URL -a 
"foo;{create:always}" --messages 1` test "$MSG" = "hello" || { echo "receive failed for AMQP 1.0 '$MSG' != 'hello'"; exit 1; } fi ## Test connection with a combination of URL and connection options (in messaging API) URL=$TEST_HOSTNAME:$PORT -./qpid-send -b $URL --connection-options '{transport:ssl,heartbeat:2}' --content-string='hello again' -a "foo;{create:always}" -MSG=`./qpid-receive -b $URL --connection-options '{transport:ssl,heartbeat:2}' -a "foo;{create:always}" --messages 1` +qpid-send -b $URL --connection-options '{transport:ssl,heartbeat:2}' --content-string='hello again' -a "foo;{create:always}" +MSG=`qpid-receive -b $URL --connection-options '{transport:ssl,heartbeat:2}' -a "foo;{create:always}" --messages 1` test "$MSG" = "hello again" || { echo "receive failed '$MSG' != 'hello again'"; exit 1; } ## Test using the Python client @@ -218,14 +220,14 @@ echo "Running SSL client authentication test on port $PORT2" URL=amqp:ssl:$TEST_HOSTNAME:$PORT2 ## See if you can set the SSL cert-name for the connection -./qpid-send -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" --content-string=hello -a "bar;{create:always}" -MSG2=`./qpid-receive -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" -a "bar;{create:always}" --messages 1` +qpid-send -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" --content-string=hello -a "bar;{create:always}" +MSG2=`qpid-receive -b $URL --connection-options "{ssl-cert-name: $TEST_CLIENT_CERT }" -a "bar;{create:always}" --messages 1` test "$MSG2" = "hello" || { echo "receive failed '$MSG2' != 'hello'"; exit 1; } ## Make sure that connect fails with an invalid SSL cert-name -./qpid-send -b $URL --connection-options "{ssl-cert-name: pignose }" --content-string=hello -a "baz;{create:always}" 2>/dev/null 1>/dev/null -MSG3=`./qpid-receive -b $URL --connection-options "{ssl-cert-name: pignose }" -a "baz;{create:always}" --messages 1 2>/dev/null` +qpid-send -b $URL --connection-options 
"{ssl-cert-name: pignose }" --content-string=hello -a "baz;{create:always}" 2>/dev/null 1>/dev/null +MSG3=`qpid-receive -b $URL --connection-options "{ssl-cert-name: pignose }" -a "baz;{create:always}" --messages 1 2>/dev/null` test "$MSG3" = "" || { echo "receive succeeded without valid ssl cert '$MSG3' != ''"; exit 1; } ## Set the userid in the message to the authenticated username @@ -243,8 +245,8 @@ start_ssl_mux_broker $PORT || error "Could not start broker" echo "Running SSL/TCP mux test on fixed port $PORT" ## Test connection via connection settings -./qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" -./qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" +qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" # Test a broker chosen port - since ssl chooses port need to use --transport ssl here start_ssl_broker @@ -252,8 +254,8 @@ PORT=${PORTS[0]} echo "Running SSL/TCP mux test on random port $PORT" ## Test connection via connection settings -./qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" -./qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" +qpid-perftest --count ${COUNT} --port ${PORT} -P ssl -b $TEST_HOSTNAME --summary || error "SSL connection failed!" +qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!" 
stop_brokers @@ -271,67 +273,63 @@ if [[ !(-x $OPENSSL) ]] ; then exit 0 fi -if test -d $PYTHON_DIR; then ## verify python version > 2.5 (only 2.6+ does certificate checking) - PY_VERSION=$(python -c "import sys; print hex(sys.hexversion)") - if (( PY_VERSION < 0x02060000 )); then - echo >&2 "Detected python version < 2.6 - skipping certificate verification tests" - exit 0 - fi +PY_VERSION=$(python -c "import sys; print hex(sys.hexversion)") +if (( PY_VERSION < 0x02060000 )); then + echo >&2 "Detected python version < 2.6 - skipping certificate verification tests" + exit 0 +fi - echo "Testing Certificate validation and Authentication with the Python Client..." +echo "Testing Certificate validation and Authentication with the Python Client..." # extract the CA's certificate as a PEM file - get_ca_certs() { - $PK12UTIL -o ${TEST_CERT_DIR}/CA_pk12.out -d ${CERT_DB} -n "Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null - $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/CA_pk12.out -out ${CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null - $PK12UTIL -o ${TEST_CERT_DIR}/other_CA_pk12.out -d ${OTHER_CA_CERT_DB} -n "Other-Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null - $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/other_CA_pk12.out -out ${OTHER_CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null - } +get_ca_certs() { + $PK12UTIL -o ${TEST_CERT_DIR}/CA_pk12.out -d ${CERT_DB} -n "Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null + $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/CA_pk12.out -out ${CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null + $PK12UTIL -o ${TEST_CERT_DIR}/other_CA_pk12.out -d ${OTHER_CA_CERT_DB} -n "Other-Test-CA" -w ${CERT_PW_FILE} -k ${CERT_PW_FILE} > /dev/null + $OPENSSL pkcs12 -in ${TEST_CERT_DIR}/other_CA_pk12.out -out ${OTHER_CA_PEM_FILE} -nokeys -passin file:${CERT_PW_FILE} >/dev/null +} +get_ca_certs || error "Could not extract CA certificates as PEM files" +start_ssl_broker +PORT=${PORTS[0]} 
+URL=amqps://$TEST_HOSTNAME:$PORT +# verify the python client can authenticate the broker using the CA +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi +# verify the python client fails to authenticate the broker when using the other CA +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${OTHER_CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; }; else echo " Passed"; fi +stop_brokers + +# create a certificate without matching TEST_HOSTNAME, should fail to verify + +create_certs "O=MyCo" "*.${TEST_HOSTNAME}.com" || error "Could not create server test certificate" +get_ca_certs || error "Could not extract CA certificates as PEM files" +start_ssl_broker +PORT=${PORTS[0]} +URL=amqps://$TEST_HOSTNAME:$PORT +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; }; else echo " Passed"; fi +# but disabling the check for the hostname should pass +if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} --ssl-skip-hostname-check`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi +stop_brokers + +# test SubjectAltName parsing + +if (( PY_VERSION >= 0x02070300 )); then +# python 2.7.3+ supports SubjectAltName extraction +# create a certificate with TEST_HOSTNAME only in SAN, should verify OK + create_certs "O=MyCo" "*.foo.com,${TEST_HOSTNAME},*xyz.com" || error "Could not create server test certificate" get_ca_certs || error "Could not extract CA certificates as PEM files" start_ssl_broker PORT=${PORTS[0]} URL=amqps://$TEST_HOSTNAME:$PORT -# verify the python client can authenticate the broker using the CA if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi -# verify the python client fails to authenticate the broker when using the other CA - if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${OTHER_CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; }; 
else echo " Passed"; fi stop_brokers -# create a certificate without matching TEST_HOSTNAME, should fail to verify - - create_certs "O=MyCo" "*.${TEST_HOSTNAME}.com" || error "Could not create server test certificate" + create_certs "O=MyCo" "*${TEST_HOSTNAME}" || error "Could not create server test certificate" get_ca_certs || error "Could not extract CA certificates as PEM files" start_ssl_broker PORT=${PORTS[0]} URL=amqps://$TEST_HOSTNAME:$PORT - if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} > /dev/null 2>&1`; then { echo " Failed"; exit 1; }; else echo " Passed"; fi -# but disabling the check for the hostname should pass - if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE} --ssl-skip-hostname-check`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi + if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi stop_brokers - -# test SubjectAltName parsing - - if (( PY_VERSION >= 0x02070300 )); then - # python 2.7.3+ supports SubjectAltName extraction - # create a certificate with TEST_HOSTNAME only in SAN, should verify OK - create_certs "O=MyCo" "*.foo.com,${TEST_HOSTNAME},*xyz.com" || error "Could not create server test certificate" - get_ca_certs || error "Could not extract CA certificates as PEM files" - start_ssl_broker - PORT=${PORTS[0]} - URL=amqps://$TEST_HOSTNAME:$PORT - if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi - stop_brokers - - create_certs "O=MyCo" "*${TEST_HOSTNAME}" || error "Could not create server test certificate" - get_ca_certs || error "Could not extract CA certificates as PEM files" - start_ssl_broker - PORT=${PORTS[0]} - URL=amqps://$TEST_HOSTNAME:$PORT - if `${PY_PING_BROKER} -b $URL --ssl-trustfile=${CA_PEM_FILE}`; then echo " Passed"; else { echo " Failed"; exit 1; }; fi - stop_brokers - fi - fi - diff --git a/qpid/cpp/src/tests/swig_python_tests 
b/qpid/cpp/src/tests/swig_python_tests deleted file mode 100755 index c28c96e839..0000000000 --- a/qpid/cpp/src/tests/swig_python_tests +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env bash - -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Run the python tests. 
-source $QPID_TEST_COMMON - -ensure_python_tests - -trap stop_broker INT TERM QUIT - -if [[ -a $AMQP_LIB ]] ; then - echo "Found AMQP support: $AMQP_LIB" - MODULES="--load-module $AMQP_LIB" -fi - -fail() { - echo "FAIL swigged python tests: $1"; exit 1; -} -skip() { - echo "SKIPPED swigged python tests: $1"; exit 0; -} - -start_broker() { - rm -f swig_python_tests.log - cp $srcdir/policy.acl $builddir/policy.acl - QPID_PORT=$($QPIDD_EXEC --daemon --port 0 --interface 127.0.0.1 --no-data-dir $MODULES --auth no --acl-file $builddir/policy.acl --log-to-file swig_python_tests.log) || fail "Could not start broker" -} - -stop_broker() { - $QPIDD_EXEC -q --port $QPID_PORT - rm $builddir/policy.acl -} - -test -f $PYTHONSWIGMODULE || skip "no swigged python client" -test -d $QPID_TESTS || skip "test code not found" - -start_broker -echo "Running swigged python tests using broker on port $QPID_PORT" - -export PYTHONPATH=$PYTHONPATH:$PYTHONPATH_SWIG -export QPID_USE_SWIG_CLIENT=1 -$QPID_PYTHON_TEST -m qpid.tests.messaging.message -m qpid_tests.broker_0_10.priority -m qpid_tests.broker_0_10.lvq -m qpid_tests.broker_0_10.new_api -b localhost:$QPID_PORT -I $srcdir/failing-amqp0-10-python-tests $* || FAILED=1 -if [[ -a $AMQP_LIB ]] ; then - $QPID_PYTHON_TEST --define="protocol_version=amqp1.0" --define="policy_file=$builddir/policy.acl" -m qpid_tests.broker_1_0 -m qpid_tests.broker_0_10.new_api -m assertions -m reject_release -m misc -m policies -m acl_1 -b localhost:$QPID_PORT -I $srcdir/failing-amqp1.0-python-tests $* || FAILED=1 -fi -stop_broker -if [[ $FAILED -eq 1 ]]; then - fail "" -fi - diff --git a/qpid/cpp/src/tests/test.xquery b/qpid/cpp/src/tests/test.xquery deleted file mode 100644 index 4cfe3af02d..0000000000 --- a/qpid/cpp/src/tests/test.xquery +++ /dev/null @@ -1,6 +0,0 @@ - let $w := ./weather - return $w/station = 'Raleigh-Durham International Airport (KRDU)' - and $w/temperature_f > 50 - and $w/temperature_f - $w/dewpoint > 5 - and $w/wind_speed_mph > 7 - and 
$w/wind_speed_mph < 20 diff --git a/qpid/cpp/src/tests/test_env.sh.in b/qpid/cpp/src/tests/test_env.sh.in deleted file mode 100644 index 1c4c117e4b..0000000000 --- a/qpid/cpp/src/tests/test_env.sh.in +++ /dev/null @@ -1,100 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -absdir() { echo `cd $1 && pwd`; } - -# Environment variables substituted by cmake. -export srcdir=`absdir @abs_srcdir@` -export builddir=`absdir @abs_builddir@` -export top_srcdir=`absdir @abs_top_srcdir@` -export top_builddir=`absdir @abs_top_builddir@` -export moduledir=$top_builddir/src@builddir_lib_suffix@ -export pythonswigdir=$top_builddir/bindings/qpid/python/ -export pythonswiglibdir=$top_builddir/bindings/qpid/python@builddir_lib_suffix@ -export testmoduledir=$builddir@builddir_lib_suffix@ -export QPID_INSTALL_PREFIX=@prefix@ - -# Tools substituted by cmake -enable_valgrind=${enable_valgrind-@ENABLE_VALGRIND@} -if [ "$enable_valgrind" = "ON" ] ; then - export VALGRIND=@VALGRIND_EXECUTABLE@ -fi -export SASL_PW=@SASLPASSWD2_EXECUTABLE@ -export PYTHON=@PYTHON_EXECUTABLE@ - -# Python paths and directories -export PYTHON_DIR=$builddir/python -export QPID_PYTHON_TEST=$PYTHON_DIR/commands/qpid-python-test -if [ ! 
-d $PYTHON_DIR -a -d $top_srcdir/../python ]; then - export PYTHON_DIR=$top_srcdir/../python - export QPID_PYTHON_TEST=$PYTHON_DIR/qpid-python-test -fi -export QPID_TESTS=$top_srcdir/../tests -export QPID_TESTS_PY=$QPID_TESTS/src/py -export QPID_TOOLS=$top_srcdir/../tools -export QMF_LIB=$top_srcdir/../extras/qmf/src/py -export PYTHON_COMMANDS=$QPID_TOOLS/src/py -export PYTHONPATH_SWIG=$pythonswigdir:$pythonswiglibdir -export PYTHONPATH=$srcdir:$PYTHON_DIR:$PYTHON_COMMANDS:$QPID_TESTS_PY:$QMF_LIB:$PYTHONPATH_SWIG:$PYTHONPATH -export QPID_CONFIG_EXEC=$PYTHON_COMMANDS/qpid-config -export QPID_ROUTE_EXEC=$PYTHON_COMMANDS/qpid-route -export QPID_HA_EXEC=$PYTHON_COMMANDS/qpid-ha -export PYTHONSWIGMODULE=$pythonswigdir/qpid_messaging.py -# Executables -export QPIDD_EXEC=$top_builddir/src/qpidd - -# Test executables -export QPID_TEST_EXEC_DIR=$builddir -export QPID_TEST_SRC_DIR=$srcdir -export RECEIVER_EXEC=$QPID_TEST_EXEC_DIR/receiver -export SENDER_EXEC=$QPID_TEST_EXEC_DIR/sender - -# Path -export PATH=$top_builddir/src:$builddir:$srcdir:$PYTHON_COMMANDS:$QPID_TEST_EXEC_DIR:$PYTHON_DIR/commands:$PATH - -# Modules -export TEST_STORE_LIB=$testmoduledir/test_store.so - -exportmodule() { test -f $moduledir/$2 && eval "export $1=$moduledir/$2"; } -exportmodule HA_LIB ha.so -exportmodule XML_LIB xml.so -test "$STORE_LIB" || exportmodule STORE_LIB linearstore.so -test "$STORE_LIB" || exportmodule STORE_LIB legacystore.so -exportmodule AMQP_LIB amqp.so - -# Qpid options -export QPID_NO_MODULE_DIR=1 # Don't accidentally load installed modules -export QPID_DATA_DIR= -export QPID_CONFIG=$srcdir/qpidd-empty.conf - -# Use temporary directory if $HOME does not exist -if [ ! 
-e "$HOME" ]; then - export QPID_DATA_DIR=/tmp/qpid - export QPID_PID_DIR=/tmp/qpid -fi - -# Options for boost test framework -test -z "$BOOST_TEST_SHOW_PROGRESS" && export BOOST_TEST_SHOW_PROGRESS=yes -test -z "$BOOST_TEST_CATCH_SYSTEM_ERRORS" && export BOOST_TEST_CATCH_SYSTEM_ERRORS=no - -# Source this for useful common testing functions -export QPID_TEST_COMMON=$srcdir/test_env_common.sh - -# Proton configuration -export QPID_PROTON_VERSION=@Proton_VERSION@ diff --git a/qpid/cpp/src/tests/test_env_common.sh b/qpid/cpp/src/tests/test_env_common.sh deleted file mode 100644 index 348664ca76..0000000000 --- a/qpid/cpp/src/tests/test_env_common.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Ensure that we have python testing tools available -function ensure_python_tests { - if [ ! 
-d ${PYTHON_DIR} ] ; then - echo "Python test code not found: skipping python based test" - exit 0; - fi -} - diff --git a/qpid/cpp/src/tests/topictest b/qpid/cpp/src/tests/topictest index f4c6e7187d..13f38120b3 100755 --- a/qpid/cpp/src/tests/topictest +++ b/qpid/cpp/src/tests/topictest @@ -46,11 +46,11 @@ done subscribe() { echo Start subscriber $1 LOG="subscriber_$1.log" - ./qpid-topic-listener $TRANSACTIONAL > $LOG 2>&1 && rm -f $LOG + qpid-topic-listener $TRANSACTIONAL > $LOG 2>&1 && rm -f $LOG } publish() { - ./qpid-topic-publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS $HOST $TRANSACTIONAL + qpid-topic-publisher --messages $MESSAGES --batches $BATCHES --subscribers $SUBSCRIBERS $HOST $TRANSACTIONAL } for ((i=$SUBSCRIBERS ; i--; )); do diff --git a/qpid/cpp/src/tests/vg_check b/qpid/cpp/src/tests/vg_check deleted file mode 100644 index 462f4cb5e4..0000000000 --- a/qpid/cpp/src/tests/vg_check +++ /dev/null @@ -1,43 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -# Check for valgrind errors. Sourced by test scripts. - -vg_failed() { - echo "Valgrind error log in $VG_LOG." 
1>&2 - cat $VG_LOG 1>&2 - echo $1 1>&2 - exit 1 -} - -vg_check() -{ - test -z "$1" || VG_LOG=$1 - test -f $VG_LOG || vg_failed Valgrind log file $VG_LOG missing. - # Ensure there is an ERROR SUMMARY line. - grep -E '^==[0-9]+== ERROR SUMMARY:' $VG_LOG > /dev/null || \ - vg_failed "No valgrind ERROR SUMMARY line in $VG_LOG." - # Ensure that the number of errors is 0. - grep -E '^==[0-9]+== ERROR SUMMARY: [^0]' $VG_LOG > /dev/null && \ - vg_failed "Valgrind reported errors in $VG_LOG; see above." - # Check for leaks. - grep -E '^==[0-9]+== +.* lost: [^0]' $VG_LOG && \ - vg_failed "Found memory leaks (see log file, $VG_LOG); see above." - true -} |