From 71149592670f7592886751a9a866459bef0f12cc Mon Sep 17 00:00:00 2001 From: Justin Ross Date: Thu, 21 Apr 2016 12:31:34 +0000 Subject: QPID-7207: Create independent cpp and python subtrees, with content from tools and extras git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1740289 13f79535-47bb-0310-9956-ffa450edef68 --- qpid/cpp/CMakeLists.txt | 4 +- qpid/cpp/CTestCustom.cmake | 1 + qpid/cpp/INSTALL.txt | 4 +- qpid/cpp/docs/design/CONTENTS | 31 - qpid/cpp/management/python/.gitignore | 3 + qpid/cpp/management/python/CMakeLists.txt | 32 + qpid/cpp/management/python/LICENSE.txt | 203 + qpid/cpp/management/python/MANIFEST.in | 1 + qpid/cpp/management/python/NOTICE.txt | 5 + qpid/cpp/management/python/bin/.gitignore | 13 + qpid/cpp/management/python/bin/qmf-tool | 775 ++++ qpid/cpp/management/python/bin/qpid-config | 878 +++++ qpid/cpp/management/python/bin/qpid-config.bat | 2 + qpid/cpp/management/python/bin/qpid-ha | 299 ++ qpid/cpp/management/python/bin/qpid-ha.bat | 2 + qpid/cpp/management/python/bin/qpid-printevents | 191 + .../cpp/management/python/bin/qpid-printevents.bat | 2 + qpid/cpp/management/python/bin/qpid-qls-analyze | 114 + qpid/cpp/management/python/bin/qpid-queue-stats | 159 + .../cpp/management/python/bin/qpid-queue-stats.bat | 3 + qpid/cpp/management/python/bin/qpid-receive | 194 + qpid/cpp/management/python/bin/qpid-route | 635 +++ qpid/cpp/management/python/bin/qpid-route.bat | 2 + qpid/cpp/management/python/bin/qpid-send | 281 ++ qpid/cpp/management/python/bin/qpid-stat | 514 +++ qpid/cpp/management/python/bin/qpid-stat.bat | 2 + qpid/cpp/management/python/bin/qpid-store-chk | 332 ++ qpid/cpp/management/python/bin/qpid-store-resize | 350 ++ qpid/cpp/management/python/bin/qpid-tool | 799 ++++ qpid/cpp/management/python/bin/qpid-tool.bat | 2 + qpid/cpp/management/python/lib/.gitignore | 22 + qpid/cpp/management/python/lib/README.txt | 4 + qpid/cpp/management/python/lib/qlslibs/__init__.py | 19 + 
qpid/cpp/management/python/lib/qlslibs/analyze.py | 606 +++ qpid/cpp/management/python/lib/qlslibs/efp.py | 327 ++ qpid/cpp/management/python/lib/qlslibs/err.py | 261 ++ qpid/cpp/management/python/lib/qlslibs/jrnl.py | 394 ++ qpid/cpp/management/python/lib/qlslibs/utils.py | 216 ++ qpid/cpp/management/python/lib/qmf/__init__.py | 18 + qpid/cpp/management/python/lib/qmf/console.py | 4054 ++++++++++++++++++++ .../management/python/lib/qpidstore/__init__.py | 19 + qpid/cpp/management/python/lib/qpidstore/janal.py | 617 +++ qpid/cpp/management/python/lib/qpidstore/jerr.py | 219 ++ qpid/cpp/management/python/lib/qpidstore/jrnl.py | 794 ++++ .../management/python/lib/qpidtoollibs/__init__.py | 22 + .../management/python/lib/qpidtoollibs/broker.py | 486 +++ .../management/python/lib/qpidtoollibs/config.py | 36 + .../cpp/management/python/lib/qpidtoollibs/disp.py | 270 ++ qpid/cpp/management/python/setup.py | 78 + qpid/cpp/management/ruby/.gitignore | 23 + qpid/cpp/management/ruby/.rspec | 1 + qpid/cpp/management/ruby/Gemfile | 30 + qpid/cpp/management/ruby/Gemfile.lock | 55 + qpid/cpp/management/ruby/Rakefile | 27 + qpid/cpp/management/ruby/lib/qpid_management.rb | 81 + .../cpp/management/ruby/lib/qpid_management/acl.rb | 38 + .../management/ruby/lib/qpid_management/binding.rb | 31 + .../management/ruby/lib/qpid_management/bridge.rb | 39 + .../management/ruby/lib/qpid_management/broker.rb | 278 ++ .../ruby/lib/qpid_management/broker_agent.rb | 173 + .../ruby/lib/qpid_management/broker_object.rb | 126 + .../management/ruby/lib/qpid_management/cluster.rb | 26 + .../ruby/lib/qpid_management/connection.rb | 51 + .../management/ruby/lib/qpid_management/errors.rb | 28 + .../ruby/lib/qpid_management/exchange.rb | 44 + .../ruby/lib/qpid_management/ha_broker.rb | 26 + .../management/ruby/lib/qpid_management/link.rb | 35 + .../management/ruby/lib/qpid_management/memory.rb | 34 + .../management/ruby/lib/qpid_management/queue.rb | 97 + 
.../management/ruby/lib/qpid_management/session.rb | 38 + .../ruby/lib/qpid_management/subscription.rb | 35 + qpid/cpp/management/ruby/qpid_management.gemspec | 36 + qpid/cpp/management/ruby/spec/broker_agent_spec.rb | 43 + qpid/cpp/management/ruby/spec/broker_spec.rb | 373 ++ qpid/cpp/management/ruby/spec/spec_helper.rb | 21 + qpid/cpp/src/CMakeLists.txt | 41 +- qpid/cpp/src/tests/CMakeLists.txt | 351 +- qpid/cpp/src/tests/README.txt | 19 +- qpid/cpp/src/tests/ais_test.cpp | 23 - qpid/cpp/src/tests/allhosts | 79 - qpid/cpp/src/tests/brokertest.py | 26 +- qpid/cpp/src/tests/check_dependencies.py.in | 53 + qpid/cpp/src/tests/cli_tests.py | 11 +- qpid/cpp/src/tests/common.py | 297 ++ qpid/cpp/src/tests/config.null | 21 - qpid/cpp/src/tests/dynamic_log_hires_timestamp | 22 +- qpid/cpp/src/tests/dynamic_log_level_test | 30 +- qpid/cpp/src/tests/env.ps1.in | 77 + qpid/cpp/src/tests/env.py.in | 100 + qpid/cpp/src/tests/env.sh.in | 74 + qpid/cpp/src/tests/fanout_perftest | 22 - qpid/cpp/src/tests/federated_topic_test | 45 +- qpid/cpp/src/tests/ha_test.py | 10 +- qpid/cpp/src/tests/ha_tests.py | 16 +- qpid/cpp/src/tests/install_env.sh.in | 26 - qpid/cpp/src/tests/interlink_tests.py | 7 +- qpid/cpp/src/tests/interop_tests.py | 5 +- qpid/cpp/src/tests/ipv6_test | 120 - qpid/cpp/src/tests/legacystore/CMakeLists.txt | 8 +- .../legacystore/federation/federation_tests_env.sh | 2 +- qpid/cpp/src/tests/legacystore/run_python_tests | 32 +- qpid/cpp/src/tests/linearstore/CMakeLists.txt | 7 +- qpid/cpp/src/tests/linearstore/run_python_tests | 31 +- qpid/cpp/src/tests/multiq_perftest | 22 - qpid/cpp/src/tests/plano.py | 543 +++ qpid/cpp/src/tests/python_tests | 34 - qpid/cpp/src/tests/python_tests.ps1 | 42 - qpid/cpp/src/tests/qpid-build-rinstall | 28 - qpid/cpp/src/tests/quick_perftest | 22 - qpid/cpp/src/tests/quick_topictest | 30 - qpid/cpp/src/tests/quick_topictest.ps1 | 30 - qpid/cpp/src/tests/quick_txtest | 22 - qpid/cpp/src/tests/rsynchosts | 57 - qpid/cpp/src/tests/run.py | 6 
+ qpid/cpp/src/tests/run_acl_tests | 170 +- qpid/cpp/src/tests/run_acl_tests.ps1 | 99 - qpid/cpp/src/tests/run_cli_tests | 88 +- qpid/cpp/src/tests/run_client_tests | 30 + qpid/cpp/src/tests/run_federation_sys_tests | 71 - qpid/cpp/src/tests/run_federation_tests | 81 +- qpid/cpp/src/tests/run_federation_tests.ps1 | 83 - qpid/cpp/src/tests/run_flow_control_tests | 28 + qpid/cpp/src/tests/run_ha_tests | 24 +- qpid/cpp/src/tests/run_header_test | 31 - qpid/cpp/src/tests/run_header_test.ps1 | 48 - qpid/cpp/src/tests/run_headers_federation_tests | 49 - qpid/cpp/src/tests/run_idle_timeout_tests | 26 + qpid/cpp/src/tests/run_interlink_tests | 8 +- qpid/cpp/src/tests/run_interop_tests | 30 + qpid/cpp/src/tests/run_ipv6_tests | 116 + qpid/cpp/src/tests/run_logging_tests | 38 + qpid/cpp/src/tests/run_long_federation_sys_tests | 2 +- qpid/cpp/src/tests/run_msg_group_tests | 59 +- qpid/cpp/src/tests/run_msg_group_tests.ps1 | 71 - qpid/cpp/src/tests/run_msg_group_tests_soak | 8 +- qpid/cpp/src/tests/run_paged_queue_tests | 55 +- qpid/cpp/src/tests/run_performance_tests | 28 + qpid/cpp/src/tests/run_perftest | 28 - qpid/cpp/src/tests/run_python_tests | 62 + qpid/cpp/src/tests/run_qmf_tests | 26 + qpid/cpp/src/tests/run_queue_flow_limit_tests | 27 - qpid/cpp/src/tests/run_queue_redirect | 56 - qpid/cpp/src/tests/run_queue_redirect_tests | 30 + qpid/cpp/src/tests/run_ring_queue_test | 36 - qpid/cpp/src/tests/run_ring_queue_tests | 30 + qpid/cpp/src/tests/run_sasl_tests | 64 + qpid/cpp/src/tests/run_ssl_tests | 329 ++ qpid/cpp/src/tests/run_store_tests.ps1 | 2 +- qpid/cpp/src/tests/run_test | 191 - qpid/cpp/src/tests/run_test.ps1 | 162 - qpid/cpp/src/tests/run_topic_tests | 30 + qpid/cpp/src/tests/run_transaction_tests | 30 + qpid/cpp/src/tests/run_unit_tests | 39 + qpid/cpp/src/tests/sasl_fed | 153 +- qpid/cpp/src/tests/sasl_fed_ex | 208 +- qpid/cpp/src/tests/sasl_no_dir | 94 +- qpid/cpp/src/tests/sasl_test_setup.sh | 11 +- qpid/cpp/src/tests/shared_perftest | 22 - 
qpid/cpp/src/tests/ssl_test | 140 +- qpid/cpp/src/tests/swig_python_tests | 68 - qpid/cpp/src/tests/test.xquery | 6 - qpid/cpp/src/tests/test_env.ps1.in | 77 - qpid/cpp/src/tests/test_env.sh.in | 100 - qpid/cpp/src/tests/test_env_common.sh | 28 - qpid/cpp/src/tests/topic_perftest | 22 - qpid/cpp/src/tests/topictest | 4 +- qpid/cpp/src/tests/vg_check | 43 - qpid/extras/qmf/.gitignore | 20 - qpid/extras/qmf/LICENSE.txt | 203 - qpid/extras/qmf/MANIFEST.in | 1 - qpid/extras/qmf/NOTICE.txt | 5 - qpid/extras/qmf/setup.py | 30 - qpid/extras/qmf/src/py/qmf/__init__.py | 18 - qpid/extras/qmf/src/py/qmf/console.py | 4054 -------------------- qpid/python/doc/test-requirements.txt | 29 - qpid/python/qpid-python-test.bat | 2 + qpid/python/qpid_tests/__init__.py | 22 + qpid/python/qpid_tests/broker_0_10/__init__.py | 39 + .../qpid_tests/broker_0_10/alternate_exchange.py | 351 ++ qpid/python/qpid_tests/broker_0_10/broker.py | 93 + qpid/python/qpid_tests/broker_0_10/dtx.py | 790 ++++ qpid/python/qpid_tests/broker_0_10/example.py | 95 + qpid/python/qpid_tests/broker_0_10/exchange.py | 558 +++ qpid/python/qpid_tests/broker_0_10/extensions.py | 87 + qpid/python/qpid_tests/broker_0_10/lvq.py | 122 + qpid/python/qpid_tests/broker_0_10/management.py | 726 ++++ qpid/python/qpid_tests/broker_0_10/message.py | 1108 ++++++ qpid/python/qpid_tests/broker_0_10/msg_groups.py | 1195 ++++++ qpid/python/qpid_tests/broker_0_10/new_api.py | 358 ++ qpid/python/qpid_tests/broker_0_10/persistence.py | 68 + qpid/python/qpid_tests/broker_0_10/priority.py | 252 ++ qpid/python/qpid_tests/broker_0_10/qmf_events.py | 83 + qpid/python/qpid_tests/broker_0_10/query.py | 247 ++ qpid/python/qpid_tests/broker_0_10/queue.py | 436 +++ qpid/python/qpid_tests/broker_0_10/stats.py | 519 +++ qpid/python/qpid_tests/broker_0_10/threshold.py | 212 + qpid/python/qpid_tests/broker_0_10/tx.py | 265 ++ qpid/python/qpid_tests/broker_0_8/__init__.py | 22 + qpid/python/qpid_tests/broker_0_8/basic.py | 441 +++ 
qpid/python/qpid_tests/broker_0_8/broker.py | 120 + qpid/python/qpid_tests/broker_0_8/example.py | 94 + qpid/python/qpid_tests/broker_0_8/exchange.py | 349 ++ qpid/python/qpid_tests/broker_0_8/queue.py | 255 ++ qpid/python/qpid_tests/broker_0_8/testlib.py | 66 + qpid/python/qpid_tests/broker_0_8/tx.py | 209 + qpid/python/qpid_tests/broker_0_9/__init__.py | 22 + qpid/python/qpid_tests/broker_0_9/echo.py | 159 + qpid/python/qpid_tests/broker_0_9/messageheader.py | 61 + qpid/python/qpid_tests/broker_0_9/query.py | 224 ++ qpid/python/qpid_tests/broker_0_9/queue.py | 148 + qpid/python/qpid_tests/broker_1_0/__init__.py | 26 + qpid/python/qpid_tests/broker_1_0/general.py | 81 + .../qpid_tests/broker_1_0/legacy_exchanges.py | 96 + qpid/python/qpid_tests/broker_1_0/selector.py | 95 + qpid/python/qpid_tests/broker_1_0/translation.py | 87 + qpid/python/qpid_tests/broker_1_0/tx.py | 264 ++ .../qpid_tests/client/client-api-example-tests.py | 338 ++ qpid/python/qpid_tests/client/log4j.conf | 25 + qpid/python/setup.py | 17 +- qpid/python/todo.txt | 197 - qpid/tests/LICENSE.txt | 203 - qpid/tests/MANIFEST.in | 1 - qpid/tests/NOTICE.txt | 5 - qpid/tests/setup.py | 31 - qpid/tests/src/py/qpid_tests/__init__.py | 22 - .../src/py/qpid_tests/broker_0_10/__init__.py | 39 - .../qpid_tests/broker_0_10/alternate_exchange.py | 351 -- qpid/tests/src/py/qpid_tests/broker_0_10/broker.py | 93 - qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py | 790 ---- .../tests/src/py/qpid_tests/broker_0_10/example.py | 95 - .../src/py/qpid_tests/broker_0_10/exchange.py | 558 --- .../src/py/qpid_tests/broker_0_10/extensions.py | 87 - qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py | 122 - .../src/py/qpid_tests/broker_0_10/management.py | 726 ---- .../tests/src/py/qpid_tests/broker_0_10/message.py | 1108 ------ .../src/py/qpid_tests/broker_0_10/msg_groups.py | 1195 ------ .../tests/src/py/qpid_tests/broker_0_10/new_api.py | 358 -- .../src/py/qpid_tests/broker_0_10/persistence.py | 68 - 
.../src/py/qpid_tests/broker_0_10/priority.py | 252 -- .../src/py/qpid_tests/broker_0_10/qmf_events.py | 83 - qpid/tests/src/py/qpid_tests/broker_0_10/query.py | 247 -- qpid/tests/src/py/qpid_tests/broker_0_10/queue.py | 436 --- qpid/tests/src/py/qpid_tests/broker_0_10/stats.py | 519 --- .../src/py/qpid_tests/broker_0_10/threshold.py | 212 - qpid/tests/src/py/qpid_tests/broker_0_10/tx.py | 265 -- .../tests/src/py/qpid_tests/broker_0_8/__init__.py | 22 - qpid/tests/src/py/qpid_tests/broker_0_8/basic.py | 441 --- qpid/tests/src/py/qpid_tests/broker_0_8/broker.py | 120 - qpid/tests/src/py/qpid_tests/broker_0_8/example.py | 94 - .../tests/src/py/qpid_tests/broker_0_8/exchange.py | 349 -- qpid/tests/src/py/qpid_tests/broker_0_8/queue.py | 255 -- qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py | 66 - qpid/tests/src/py/qpid_tests/broker_0_8/tx.py | 209 - .../tests/src/py/qpid_tests/broker_0_9/__init__.py | 22 - qpid/tests/src/py/qpid_tests/broker_0_9/echo.py | 159 - .../src/py/qpid_tests/broker_0_9/messageheader.py | 61 - qpid/tests/src/py/qpid_tests/broker_0_9/query.py | 224 -- qpid/tests/src/py/qpid_tests/broker_0_9/queue.py | 148 - .../tests/src/py/qpid_tests/broker_1_0/__init__.py | 26 - qpid/tests/src/py/qpid_tests/broker_1_0/general.py | 81 - .../py/qpid_tests/broker_1_0/legacy_exchanges.py | 96 - .../tests/src/py/qpid_tests/broker_1_0/selector.py | 95 - .../src/py/qpid_tests/broker_1_0/translation.py | 87 - qpid/tests/src/py/qpid_tests/broker_1_0/tx.py | 264 -- .../qpid_tests/client/client-api-example-tests.py | 338 -- qpid/tests/src/py/qpid_tests/client/log4j.conf | 25 - qpid/tools/LICENSE.txt | 203 - qpid/tools/MANIFEST.in | 1 - qpid/tools/NOTICE.txt | 5 - qpid/tools/setup.py | 64 - qpid/tools/src/py/.gitignore | 22 - qpid/tools/src/py/README.txt | 4 - qpid/tools/src/py/qlslibs/__init__.py | 19 - qpid/tools/src/py/qlslibs/analyze.py | 606 --- qpid/tools/src/py/qlslibs/efp.py | 327 -- qpid/tools/src/py/qlslibs/err.py | 261 -- qpid/tools/src/py/qlslibs/jrnl.py | 
394 -- qpid/tools/src/py/qlslibs/utils.py | 216 -- qpid/tools/src/py/qmf-tool | 775 ---- qpid/tools/src/py/qpid-config | 878 ----- qpid/tools/src/py/qpid-ha | 299 -- qpid/tools/src/py/qpid-printevents | 191 - qpid/tools/src/py/qpid-qls-analyze | 114 - qpid/tools/src/py/qpid-queue-stats | 159 - qpid/tools/src/py/qpid-receive | 194 - qpid/tools/src/py/qpid-route | 635 --- qpid/tools/src/py/qpid-send | 281 -- qpid/tools/src/py/qpid-stat | 514 --- qpid/tools/src/py/qpid-store-chk | 332 -- qpid/tools/src/py/qpid-store-resize | 350 -- qpid/tools/src/py/qpid-tool | 799 ---- qpid/tools/src/py/qpidstore/__init__.py | 19 - qpid/tools/src/py/qpidstore/janal.py | 617 --- qpid/tools/src/py/qpidstore/jerr.py | 219 -- qpid/tools/src/py/qpidstore/jrnl.py | 794 ---- qpid/tools/src/py/qpidtoollibs/__init__.py | 22 - qpid/tools/src/py/qpidtoollibs/broker.py | 486 --- qpid/tools/src/py/qpidtoollibs/config.py | 36 - qpid/tools/src/py/qpidtoollibs/disp.py | 270 -- qpid/tools/src/ruby/qpid_management/.gitignore | 23 - qpid/tools/src/ruby/qpid_management/.rspec | 1 - qpid/tools/src/ruby/qpid_management/Gemfile | 30 - qpid/tools/src/ruby/qpid_management/Gemfile.lock | 55 - qpid/tools/src/ruby/qpid_management/Rakefile | 27 - .../ruby/qpid_management/lib/qpid_management.rb | 81 - .../qpid_management/lib/qpid_management/acl.rb | 38 - .../qpid_management/lib/qpid_management/binding.rb | 31 - .../qpid_management/lib/qpid_management/bridge.rb | 39 - .../qpid_management/lib/qpid_management/broker.rb | 278 -- .../lib/qpid_management/broker_agent.rb | 173 - .../lib/qpid_management/broker_object.rb | 126 - .../qpid_management/lib/qpid_management/cluster.rb | 26 - .../lib/qpid_management/connection.rb | 51 - .../qpid_management/lib/qpid_management/errors.rb | 28 - .../lib/qpid_management/exchange.rb | 44 - .../lib/qpid_management/ha_broker.rb | 26 - .../qpid_management/lib/qpid_management/link.rb | 35 - .../qpid_management/lib/qpid_management/memory.rb | 34 - 
.../qpid_management/lib/qpid_management/queue.rb | 97 - .../qpid_management/lib/qpid_management/session.rb | 38 - .../lib/qpid_management/subscription.rb | 35 - .../ruby/qpid_management/qpid_management.gemspec | 36 - .../ruby/qpid_management/spec/broker_agent_spec.rb | 43 - .../src/ruby/qpid_management/spec/broker_spec.rb | 373 -- .../src/ruby/qpid_management/spec/spec_helper.rb | 21 - 325 files changed, 29662 insertions(+), 30501 deletions(-) create mode 100644 qpid/cpp/CTestCustom.cmake delete mode 100644 qpid/cpp/docs/design/CONTENTS create mode 100644 qpid/cpp/management/python/.gitignore create mode 100644 qpid/cpp/management/python/CMakeLists.txt create mode 100644 qpid/cpp/management/python/LICENSE.txt create mode 100644 qpid/cpp/management/python/MANIFEST.in create mode 100644 qpid/cpp/management/python/NOTICE.txt create mode 100644 qpid/cpp/management/python/bin/.gitignore create mode 100755 qpid/cpp/management/python/bin/qmf-tool create mode 100755 qpid/cpp/management/python/bin/qpid-config create mode 100644 qpid/cpp/management/python/bin/qpid-config.bat create mode 100755 qpid/cpp/management/python/bin/qpid-ha create mode 100644 qpid/cpp/management/python/bin/qpid-ha.bat create mode 100755 qpid/cpp/management/python/bin/qpid-printevents create mode 100644 qpid/cpp/management/python/bin/qpid-printevents.bat create mode 100755 qpid/cpp/management/python/bin/qpid-qls-analyze create mode 100755 qpid/cpp/management/python/bin/qpid-queue-stats create mode 100644 qpid/cpp/management/python/bin/qpid-queue-stats.bat create mode 100755 qpid/cpp/management/python/bin/qpid-receive create mode 100755 qpid/cpp/management/python/bin/qpid-route create mode 100644 qpid/cpp/management/python/bin/qpid-route.bat create mode 100755 qpid/cpp/management/python/bin/qpid-send create mode 100755 qpid/cpp/management/python/bin/qpid-stat create mode 100644 qpid/cpp/management/python/bin/qpid-stat.bat create mode 100755 qpid/cpp/management/python/bin/qpid-store-chk create mode 
100755 qpid/cpp/management/python/bin/qpid-store-resize create mode 100755 qpid/cpp/management/python/bin/qpid-tool create mode 100644 qpid/cpp/management/python/bin/qpid-tool.bat create mode 100644 qpid/cpp/management/python/lib/.gitignore create mode 100644 qpid/cpp/management/python/lib/README.txt create mode 100644 qpid/cpp/management/python/lib/qlslibs/__init__.py create mode 100644 qpid/cpp/management/python/lib/qlslibs/analyze.py create mode 100644 qpid/cpp/management/python/lib/qlslibs/efp.py create mode 100644 qpid/cpp/management/python/lib/qlslibs/err.py create mode 100644 qpid/cpp/management/python/lib/qlslibs/jrnl.py create mode 100644 qpid/cpp/management/python/lib/qlslibs/utils.py create mode 100644 qpid/cpp/management/python/lib/qmf/__init__.py create mode 100644 qpid/cpp/management/python/lib/qmf/console.py create mode 100644 qpid/cpp/management/python/lib/qpidstore/__init__.py create mode 100644 qpid/cpp/management/python/lib/qpidstore/janal.py create mode 100644 qpid/cpp/management/python/lib/qpidstore/jerr.py create mode 100644 qpid/cpp/management/python/lib/qpidstore/jrnl.py create mode 100644 qpid/cpp/management/python/lib/qpidtoollibs/__init__.py create mode 100644 qpid/cpp/management/python/lib/qpidtoollibs/broker.py create mode 100644 qpid/cpp/management/python/lib/qpidtoollibs/config.py create mode 100644 qpid/cpp/management/python/lib/qpidtoollibs/disp.py create mode 100755 qpid/cpp/management/python/setup.py create mode 100644 qpid/cpp/management/ruby/.gitignore create mode 100644 qpid/cpp/management/ruby/.rspec create mode 100644 qpid/cpp/management/ruby/Gemfile create mode 100644 qpid/cpp/management/ruby/Gemfile.lock create mode 100644 qpid/cpp/management/ruby/Rakefile create mode 100644 qpid/cpp/management/ruby/lib/qpid_management.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/acl.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/binding.rb create mode 100644 
qpid/cpp/management/ruby/lib/qpid_management/bridge.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/broker.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/broker_agent.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/broker_object.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/cluster.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/connection.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/errors.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/exchange.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/ha_broker.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/link.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/memory.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/queue.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/session.rb create mode 100644 qpid/cpp/management/ruby/lib/qpid_management/subscription.rb create mode 100644 qpid/cpp/management/ruby/qpid_management.gemspec create mode 100644 qpid/cpp/management/ruby/spec/broker_agent_spec.rb create mode 100644 qpid/cpp/management/ruby/spec/broker_spec.rb create mode 100644 qpid/cpp/management/ruby/spec/spec_helper.rb delete mode 100644 qpid/cpp/src/tests/ais_test.cpp delete mode 100755 qpid/cpp/src/tests/allhosts create mode 100644 qpid/cpp/src/tests/check_dependencies.py.in create mode 100644 qpid/cpp/src/tests/common.py delete mode 100644 qpid/cpp/src/tests/config.null create mode 100644 qpid/cpp/src/tests/env.ps1.in create mode 100644 qpid/cpp/src/tests/env.py.in create mode 100644 qpid/cpp/src/tests/env.sh.in delete mode 100755 qpid/cpp/src/tests/fanout_perftest delete mode 100644 qpid/cpp/src/tests/install_env.sh.in delete mode 100755 qpid/cpp/src/tests/ipv6_test delete mode 100755 qpid/cpp/src/tests/multiq_perftest create mode 100644 qpid/cpp/src/tests/plano.py delete mode 100755 
qpid/cpp/src/tests/python_tests delete mode 100644 qpid/cpp/src/tests/python_tests.ps1 delete mode 100755 qpid/cpp/src/tests/qpid-build-rinstall delete mode 100755 qpid/cpp/src/tests/quick_perftest delete mode 100755 qpid/cpp/src/tests/quick_topictest delete mode 100644 qpid/cpp/src/tests/quick_topictest.ps1 delete mode 100755 qpid/cpp/src/tests/quick_txtest delete mode 100755 qpid/cpp/src/tests/rsynchosts create mode 100755 qpid/cpp/src/tests/run.py delete mode 100644 qpid/cpp/src/tests/run_acl_tests.ps1 create mode 100755 qpid/cpp/src/tests/run_client_tests delete mode 100755 qpid/cpp/src/tests/run_federation_sys_tests delete mode 100644 qpid/cpp/src/tests/run_federation_tests.ps1 create mode 100755 qpid/cpp/src/tests/run_flow_control_tests delete mode 100755 qpid/cpp/src/tests/run_header_test delete mode 100644 qpid/cpp/src/tests/run_header_test.ps1 delete mode 100644 qpid/cpp/src/tests/run_headers_federation_tests create mode 100755 qpid/cpp/src/tests/run_idle_timeout_tests create mode 100755 qpid/cpp/src/tests/run_interop_tests create mode 100755 qpid/cpp/src/tests/run_ipv6_tests create mode 100755 qpid/cpp/src/tests/run_logging_tests delete mode 100644 qpid/cpp/src/tests/run_msg_group_tests.ps1 create mode 100755 qpid/cpp/src/tests/run_performance_tests delete mode 100755 qpid/cpp/src/tests/run_perftest create mode 100755 qpid/cpp/src/tests/run_python_tests create mode 100755 qpid/cpp/src/tests/run_qmf_tests delete mode 100755 qpid/cpp/src/tests/run_queue_flow_limit_tests delete mode 100755 qpid/cpp/src/tests/run_queue_redirect create mode 100644 qpid/cpp/src/tests/run_queue_redirect_tests delete mode 100755 qpid/cpp/src/tests/run_ring_queue_test create mode 100755 qpid/cpp/src/tests/run_ring_queue_tests create mode 100755 qpid/cpp/src/tests/run_sasl_tests create mode 100755 qpid/cpp/src/tests/run_ssl_tests delete mode 100755 qpid/cpp/src/tests/run_test delete mode 100644 qpid/cpp/src/tests/run_test.ps1 create mode 100755 qpid/cpp/src/tests/run_topic_tests 
create mode 100755 qpid/cpp/src/tests/run_transaction_tests create mode 100755 qpid/cpp/src/tests/run_unit_tests delete mode 100755 qpid/cpp/src/tests/shared_perftest delete mode 100755 qpid/cpp/src/tests/swig_python_tests delete mode 100644 qpid/cpp/src/tests/test.xquery delete mode 100644 qpid/cpp/src/tests/test_env.ps1.in delete mode 100644 qpid/cpp/src/tests/test_env.sh.in delete mode 100644 qpid/cpp/src/tests/test_env_common.sh delete mode 100755 qpid/cpp/src/tests/topic_perftest delete mode 100644 qpid/cpp/src/tests/vg_check delete mode 100644 qpid/extras/qmf/.gitignore delete mode 100644 qpid/extras/qmf/LICENSE.txt delete mode 100644 qpid/extras/qmf/MANIFEST.in delete mode 100644 qpid/extras/qmf/NOTICE.txt delete mode 100755 qpid/extras/qmf/setup.py delete mode 100644 qpid/extras/qmf/src/py/qmf/__init__.py delete mode 100644 qpid/extras/qmf/src/py/qmf/console.py delete mode 100644 qpid/python/doc/test-requirements.txt create mode 100755 qpid/python/qpid-python-test.bat create mode 100644 qpid/python/qpid_tests/__init__.py create mode 100644 qpid/python/qpid_tests/broker_0_10/__init__.py create mode 100644 qpid/python/qpid_tests/broker_0_10/alternate_exchange.py create mode 100644 qpid/python/qpid_tests/broker_0_10/broker.py create mode 100644 qpid/python/qpid_tests/broker_0_10/dtx.py create mode 100644 qpid/python/qpid_tests/broker_0_10/example.py create mode 100644 qpid/python/qpid_tests/broker_0_10/exchange.py create mode 100644 qpid/python/qpid_tests/broker_0_10/extensions.py create mode 100644 qpid/python/qpid_tests/broker_0_10/lvq.py create mode 100644 qpid/python/qpid_tests/broker_0_10/management.py create mode 100644 qpid/python/qpid_tests/broker_0_10/message.py create mode 100644 qpid/python/qpid_tests/broker_0_10/msg_groups.py create mode 100644 qpid/python/qpid_tests/broker_0_10/new_api.py create mode 100644 qpid/python/qpid_tests/broker_0_10/persistence.py create mode 100644 qpid/python/qpid_tests/broker_0_10/priority.py create mode 100644 
qpid/python/qpid_tests/broker_0_10/qmf_events.py create mode 100644 qpid/python/qpid_tests/broker_0_10/query.py create mode 100644 qpid/python/qpid_tests/broker_0_10/queue.py create mode 100644 qpid/python/qpid_tests/broker_0_10/stats.py create mode 100644 qpid/python/qpid_tests/broker_0_10/threshold.py create mode 100644 qpid/python/qpid_tests/broker_0_10/tx.py create mode 100644 qpid/python/qpid_tests/broker_0_8/__init__.py create mode 100644 qpid/python/qpid_tests/broker_0_8/basic.py create mode 100644 qpid/python/qpid_tests/broker_0_8/broker.py create mode 100644 qpid/python/qpid_tests/broker_0_8/example.py create mode 100644 qpid/python/qpid_tests/broker_0_8/exchange.py create mode 100644 qpid/python/qpid_tests/broker_0_8/queue.py create mode 100644 qpid/python/qpid_tests/broker_0_8/testlib.py create mode 100644 qpid/python/qpid_tests/broker_0_8/tx.py create mode 100644 qpid/python/qpid_tests/broker_0_9/__init__.py create mode 100644 qpid/python/qpid_tests/broker_0_9/echo.py create mode 100644 qpid/python/qpid_tests/broker_0_9/messageheader.py create mode 100644 qpid/python/qpid_tests/broker_0_9/query.py create mode 100644 qpid/python/qpid_tests/broker_0_9/queue.py create mode 100644 qpid/python/qpid_tests/broker_1_0/__init__.py create mode 100644 qpid/python/qpid_tests/broker_1_0/general.py create mode 100644 qpid/python/qpid_tests/broker_1_0/legacy_exchanges.py create mode 100644 qpid/python/qpid_tests/broker_1_0/selector.py create mode 100644 qpid/python/qpid_tests/broker_1_0/translation.py create mode 100644 qpid/python/qpid_tests/broker_1_0/tx.py create mode 100755 qpid/python/qpid_tests/client/client-api-example-tests.py create mode 100644 qpid/python/qpid_tests/client/log4j.conf delete mode 100644 qpid/python/todo.txt delete mode 100644 qpid/tests/LICENSE.txt delete mode 100644 qpid/tests/MANIFEST.in delete mode 100644 qpid/tests/NOTICE.txt delete mode 100755 qpid/tests/setup.py delete mode 100644 qpid/tests/src/py/qpid_tests/__init__.py delete mode 
100644 qpid/tests/src/py/qpid_tests/broker_0_10/__init__.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/alternate_exchange.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/broker.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/example.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/exchange.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/extensions.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/management.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/message.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/msg_groups.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/new_api.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/persistence.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/priority.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/qmf_events.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/query.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/queue.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/stats.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/threshold.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_10/tx.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/__init__.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/basic.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/broker.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/example.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/exchange.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/queue.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_8/tx.py delete mode 100644 
qpid/tests/src/py/qpid_tests/broker_0_9/__init__.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_9/echo.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_9/messageheader.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_9/query.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_0_9/queue.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/__init__.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/general.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/legacy_exchanges.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/selector.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/translation.py delete mode 100644 qpid/tests/src/py/qpid_tests/broker_1_0/tx.py delete mode 100755 qpid/tests/src/py/qpid_tests/client/client-api-example-tests.py delete mode 100644 qpid/tests/src/py/qpid_tests/client/log4j.conf delete mode 100644 qpid/tools/LICENSE.txt delete mode 100644 qpid/tools/MANIFEST.in delete mode 100644 qpid/tools/NOTICE.txt delete mode 100755 qpid/tools/setup.py delete mode 100644 qpid/tools/src/py/.gitignore delete mode 100644 qpid/tools/src/py/README.txt delete mode 100644 qpid/tools/src/py/qlslibs/__init__.py delete mode 100644 qpid/tools/src/py/qlslibs/analyze.py delete mode 100644 qpid/tools/src/py/qlslibs/efp.py delete mode 100644 qpid/tools/src/py/qlslibs/err.py delete mode 100644 qpid/tools/src/py/qlslibs/jrnl.py delete mode 100644 qpid/tools/src/py/qlslibs/utils.py delete mode 100755 qpid/tools/src/py/qmf-tool delete mode 100755 qpid/tools/src/py/qpid-config delete mode 100755 qpid/tools/src/py/qpid-ha delete mode 100755 qpid/tools/src/py/qpid-printevents delete mode 100755 qpid/tools/src/py/qpid-qls-analyze delete mode 100755 qpid/tools/src/py/qpid-queue-stats delete mode 100755 qpid/tools/src/py/qpid-receive delete mode 100755 qpid/tools/src/py/qpid-route delete mode 100755 qpid/tools/src/py/qpid-send delete mode 100755 qpid/tools/src/py/qpid-stat delete mode 
100755 qpid/tools/src/py/qpid-store-chk delete mode 100755 qpid/tools/src/py/qpid-store-resize delete mode 100755 qpid/tools/src/py/qpid-tool delete mode 100644 qpid/tools/src/py/qpidstore/__init__.py delete mode 100644 qpid/tools/src/py/qpidstore/janal.py delete mode 100644 qpid/tools/src/py/qpidstore/jerr.py delete mode 100644 qpid/tools/src/py/qpidstore/jrnl.py delete mode 100644 qpid/tools/src/py/qpidtoollibs/__init__.py delete mode 100644 qpid/tools/src/py/qpidtoollibs/broker.py delete mode 100644 qpid/tools/src/py/qpidtoollibs/config.py delete mode 100644 qpid/tools/src/py/qpidtoollibs/disp.py delete mode 100644 qpid/tools/src/ruby/qpid_management/.gitignore delete mode 100644 qpid/tools/src/ruby/qpid_management/.rspec delete mode 100644 qpid/tools/src/ruby/qpid_management/Gemfile delete mode 100644 qpid/tools/src/ruby/qpid_management/Gemfile.lock delete mode 100644 qpid/tools/src/ruby/qpid_management/Rakefile delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/acl.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/binding.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/bridge.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/broker.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/broker_agent.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/broker_object.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/cluster.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/connection.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/errors.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/exchange.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/ha_broker.rb delete mode 100644 
qpid/tools/src/ruby/qpid_management/lib/qpid_management/link.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/memory.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/queue.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/session.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/lib/qpid_management/subscription.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/qpid_management.gemspec delete mode 100644 qpid/tools/src/ruby/qpid_management/spec/broker_agent_spec.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/spec/broker_spec.rb delete mode 100644 qpid/tools/src/ruby/qpid_management/spec/spec_helper.rb diff --git a/qpid/cpp/CMakeLists.txt b/qpid/cpp/CMakeLists.txt index 56f09c27c3..12a0503398 100644 --- a/qpid/cpp/CMakeLists.txt +++ b/qpid/cpp/CMakeLists.txt @@ -51,6 +51,7 @@ include(BuildInstallSettings.cmake) enable_testing() include (CTest) +configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake ${CMAKE_BINARY_DIR}/CTestCustom.cmake) if (MSVC) # Chaxnge warning C4996 from level 1 to level 4. 
These are real and shouldn't @@ -80,7 +81,7 @@ endif (WIN32) set_absolute_install_path (QPIDC_CONF_FILE ${QPID_INSTALL_CONFDIR}/qpidc.conf) set_absolute_install_path (QPIDD_CONF_FILE ${QPID_INSTALL_CONFDIR}/qpidd.conf) -install(FILES LICENSE.txt NOTICE.txt DESTINATION ${QPID_INSTALL_DOCDIR}) +install(FILES LICENSE.txt NOTICE.txt DESTINATION ${QPID_INSTALL_DOCDIR}) install(FILES include/qmf/qmf2.i DESTINATION ${QPID_INSTALL_INCLUDEDIR}/qmf) @@ -217,6 +218,7 @@ if (MSVC) endif (MSVC) # Subdirectories +add_subdirectory(management/python) add_subdirectory(managementgen) add_subdirectory(src) add_subdirectory(etc) diff --git a/qpid/cpp/CTestCustom.cmake b/qpid/cpp/CTestCustom.cmake new file mode 100644 index 0000000000..57efd32a81 --- /dev/null +++ b/qpid/cpp/CTestCustom.cmake @@ -0,0 +1 @@ +set(CTEST_CUSTOM_PRE_TEST "python ${CMAKE_BINARY_DIR}/src/tests/check_dependencies.py") diff --git a/qpid/cpp/INSTALL.txt b/qpid/cpp/INSTALL.txt index 717c9b0908..cee7f1764e 100644 --- a/qpid/cpp/INSTALL.txt +++ b/qpid/cpp/INSTALL.txt @@ -194,7 +194,7 @@ a source distribution: (*) Boost 1.33 will also work. -Optional support for AMQP 1.0 requires (see AMQP_1.0 for details): +Optional support for AMQP 1.0 requires (see docs/amqp-1.0.txt for details): * Qpid proton-c (0.5) Note: If Proton is installed in a non-standard location, there are two ways to locate it: 1. Recommended: use proton 0.7 or later and use the same install prefix @@ -206,7 +206,7 @@ Optional XML exchange requires: * xqilla (2.0.0) * xerces-c (2.7.0) -Optional SSL support requires: +Optional SSL support requires (see docs/ssl.txt for details): * nss * nspr diff --git a/qpid/cpp/docs/design/CONTENTS b/qpid/cpp/docs/design/CONTENTS deleted file mode 100644 index cc3b868e0e..0000000000 --- a/qpid/cpp/docs/design/CONTENTS +++ /dev/null @@ -1,31 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -# - -This directory contains documentation about the C++ source -that is expressed in formats that does not fit comfortably -within C++ source files. - -As with all documentation, including comments, it may become -outmoded with respect to the code. - -If you find external code doco useful in your work -- if it -helps you save some time -- please return some of that time -in the form of effort to keep the documentation updated. - - diff --git a/qpid/cpp/management/python/.gitignore b/qpid/cpp/management/python/.gitignore new file mode 100644 index 0000000000..4fca027dea --- /dev/null +++ b/qpid/cpp/management/python/.gitignore @@ -0,0 +1,3 @@ +MANIFEST +build +dist diff --git a/qpid/cpp/management/python/CMakeLists.txt b/qpid/cpp/management/python/CMakeLists.txt new file mode 100644 index 0000000000..4e65958043 --- /dev/null +++ b/qpid/cpp/management/python/CMakeLists.txt @@ -0,0 +1,32 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +find_package(PythonInterp REQUIRED) + +add_custom_target(management_python_build ALL + COMMAND + ${PYTHON_EXECUTABLE} setup.py build + --build-base=${CMAKE_CURRENT_BINARY_DIR} + --build-scripts=${CMAKE_CURRENT_BINARY_DIR}/bin + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + +install(CODE "execute_process(COMMAND ${PYTHON_EXECUTABLE} setup.py install + --prefix=${CMAKE_INSTALL_PREFIX} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})") + + diff --git a/qpid/cpp/management/python/LICENSE.txt b/qpid/cpp/management/python/LICENSE.txt new file mode 100644 index 0000000000..6b0b1270ff --- /dev/null +++ b/qpid/cpp/management/python/LICENSE.txt @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/qpid/cpp/management/python/MANIFEST.in b/qpid/cpp/management/python/MANIFEST.in new file mode 100644 index 0000000000..ab30e9acee --- /dev/null +++ b/qpid/cpp/management/python/MANIFEST.in @@ -0,0 +1 @@ +include *.txt diff --git a/qpid/cpp/management/python/NOTICE.txt b/qpid/cpp/management/python/NOTICE.txt new file mode 100644 index 0000000000..24512d0da9 --- /dev/null +++ b/qpid/cpp/management/python/NOTICE.txt @@ -0,0 +1,5 @@ +Apache Qpid Python Tools +Copyright 2006-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/qpid/cpp/management/python/bin/.gitignore b/qpid/cpp/management/python/bin/.gitignore new file mode 100644 index 0000000000..f99dba8c08 --- /dev/null +++ b/qpid/cpp/management/python/bin/.gitignore @@ -0,0 +1,13 @@ +qmf-toolc +qpid-configc +qpid-hac +qpid-printeventsc +qpid-qls-analyzec +qpid-queue-statsc +qpid-receivec +qpid-routec +qpid-sendc +qpid-statc +qpid-store-chkc +qpid-store-resizec +qpid-toolc diff --git a/qpid/cpp/management/python/bin/qmf-tool b/qpid/cpp/management/python/bin/qmf-tool new file mode 100755 index 0000000000..407ae74b10 --- /dev/null +++ b/qpid/cpp/management/python/bin/qmf-tool @@ -0,0 +1,775 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import optparse +import sys +import socket +from cmd import Cmd +from shlex import split +from threading import Lock +from time import strftime, gmtime +from qpid.disp import Display +import qpid_messaging +import qmf2 + +class OptsAndArgs(object): + + def __init__(self, argv): + self.argv = argv + self.usage = """qmf-tool [OPTIONS] [[:]]""" + self.option_parser = optparse.OptionParser(usage=self.usage) + self.conn_group = optparse.OptionGroup(self.option_parser, "Connection Options") + self.conn_group.add_option("-u", "--user", action="store", type="string", help="User name for authentication") + self.conn_group.add_option("-p", "--password", action="store", type="string", help="Password for authentication") + self.conn_group.add_option("-t", "--transport", action="store", type="string", help="Transport type (tcp, ssl, rdma)") + self.conn_group.add_option("-m", "--mechanism", action="store", type="string", help="SASL Mechanism for security") + self.conn_group.add_option("-s", "--service", action="store", type="string", default="qpidd", help="SASL Service name") + self.conn_group.add_option("--min-ssf", action="store", type="int", metavar="", help="Minimum acceptable security strength factor") + self.conn_group.add_option("--max-ssf", action="store", type="int", 
metavar="", help="Maximum acceptable security strength factor") + self.conn_group.add_option("--conn-option", action="append", default=[], metavar="", help="Additional connection option(s)") + self.option_parser.add_option_group(self.conn_group) + + self.qmf_group = optparse.OptionGroup(self.option_parser, "QMF Session Options") + self.qmf_group.add_option("--domain", action="store", type="string", help="QMF Domain") + self.qmf_group.add_option("--agent-age", action="store", type="int", metavar="", help="Time, in minutes, to age out non-communicating agents") + self.qmf_group.add_option("--qmf-option", action="append", default=[], metavar="", help="Additional QMF session option(s)") + self.option_parser.add_option_group(self.qmf_group) + + def parse(self): + host = "localhost" + conn_options = {} + qmf_options = [] + + options, encArgs = self.option_parser.parse_args(args=self.argv) + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if len(args) > 1: + host = args[1] + + if options.user: + conn_options["username"] = options.user + if options.password: + conn_options["password"] = options.password + if options.transport: + conn_options["transport"] = options.transport + if options.mechanism: + conn_options["sasl_mechanisms"] = options.mechanism + if options.service: + conn_options["sasl_service"] = options.service + if options.min_ssf: + conn_options["sasl_min_ssf"] = options.min_ssf + if options.max_ssf: + conn_options["sasl_max_ssf"] = options.max_ssf + for x in options.conn_option: + try: + key, val = x.split('=') + conn_options[key] = val + except: + raise Exception("Improperly formatted text for --conn-option: '%s'" % x) + + if options.domain: + qmf_options.append("domain:'%s'" % options.domain) + if options.agent_age: + qmf_options.append("max-agent-age:%d" % options.agent_age) + for x in options.qmf_option: + try: + key, val = x.split('=') + qmf_options.append("%s:%s" % (key, val)) + 
except: + raise Exception("Improperly formatted text for --qmf-option: '%s'" % x) + + qmf_string = '{' + first = True + for x in qmf_options: + if first: + first = None + else: + qmf_string += ',' + qmf_string += x + qmf_string += '}' + + return host, conn_options, qmf_string + + + +class Mcli(Cmd): + """ Management Command Interpreter """ + + def __init__(self, dataObject, dispObject): + Cmd.__init__(self) + self.dataObject = dataObject + self.dispObject = dispObject + self.dataObject.setCli(self) + self.prompt = "qmf: " + + def emptyline(self): + pass + + def setPromptMessage(self, p=None): + if p == None: + self.prompt = "qmf: " + else: + self.prompt = "qmf[%s]: " % p + + def do_help(self, data): + print "Management Tool for QMF" + print + print "Agent Commands:" + print " set filter - Filter the list of agents" + print " list agents - Print a list of the known Agents" + print " set default - Set the default agent for operations" + print " show filter - Show the agent filter currently in effect" + print " show agent - Print detailed information about an Agent" + print " show options - Show option strings used in the QMF session" + print + print "Schema Commands:" + print " list packages - Print a list of packages supported by the default agent" + print " list classes [] - Print all classes supported byt the default agent" + print " show class [] - Show details of a class" + print + print "Data Commands:" + print " query [] [] - Query for data from the agent" + print " list - List accumulated query results" + print " clear - Clear accumulated query results" + print " show - Show details from a data object" + print " call [] - Call a method on a data object" + print + print "General Commands:" + print " set time-format short - Select short timestamp format (default)" + print " set time-format long - Select long timestamp format" + print " quit or ^D - Exit the program" + print + + def complete_set(self, text, line, begidx, endidx): + """ Command completion for the 
'set' command """ + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)] + if len(tokens) == 2 and tokens[1] == 'time-format': + return [i for i in ('long', 'short') if i.startswith(text)] + return [] + + def do_set(self, data): + tokens = split(data) + try: + if tokens[0] == "time-format": + self.dispObject.do_setTimeFormat(tokens[1]) + else: + self.dataObject.do_set(data) + except Exception, e: + print "Exception in set command:", e + + def complete_list(self, text, line, begidx, endidx): + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('agents', 'packages', 'classes ') if i.startswith(text)] + return [] + + def do_list(self, data): + try: + self.dataObject.do_list(data) + except Exception, e: + print "Exception in list command:", e + + def complete_show(self, text, line, begidx, endidx): + tokens = split(line[:begidx]) + if len(tokens) == 1: + return [i for i in ('options', 'filter', 'agent ', 'class ') if i.startswith(text)] + return [] + + def do_show(self, data): + try: + self.dataObject.do_show(data) + except Exception, e: + print "Exception in show command:", e + + def complete_query(self, text, line, begidx, endidx): + return [] + + def do_query(self, data): + try: + self.dataObject.do_query(data) + except Exception, e: + if e.message.__class__ == qmf2.Data: + e = e.message.getProperties() + print "Exception in query command:", e + + def do_call(self, data): + try: + self.dataObject.do_call(data) + except Exception, e: + if e.message.__class__ == qmf2.Data: + e = e.message.getProperties() + print "Exception in call command:", e + + def do_clear(self, data): + try: + self.dataObject.do_clear(data) + except Exception, e: + print "Exception in clear command:", e + + def do_EOF(self, data): + print "quit" + try: + self.dataObject.do_exit() + except: + pass + return True + + def do_quit(self, data): + try: + self.dataObject.do_exit() + except: + pass 
+ return True + + def postcmd(self, stop, line): + return stop + + def postloop(self): + print "Exiting..." + self.dataObject.close() + + +#====================================================================================================== +# QmfData +#====================================================================================================== +class QmfData: + """ + """ + def __init__(self, disp, url, conn_options, qmf_options): + self.disp = disp + self.url = url + self.conn_options = conn_options + self.qmf_options = qmf_options + self.agent_filter = '[]' + self.connection = qpid_messaging.Connection(self.url, **self.conn_options) + self.connection.open() + self.session = qmf2.ConsoleSession(self.connection, self.qmf_options) + self.session.setAgentFilter(self.agent_filter) + self.session.open() + self.lock = Lock() + self.cli = None + self.agents = {} # Map of number => agent object + self.deleted_agents = {} # Map of number => agent object + self.agent_numbers = {} # Map of agent name => number + self.next_number = 1 + self.focus_agent = None + self.data_list = {} + self.next_data_index = 1 + + #======================= + # Methods to support CLI + #======================= + def setCli(self, cli): + self.cli = cli + + def close(self): + try: + self.session.close() + self.connection.close() + except: + pass # we're shutting down - ignore any errors + + def do_list(self, data): + tokens = data.split() + if len(tokens) == 0: + self.listData() + elif tokens[0] == 'agents' or tokens[0] == 'agent': + self.listAgents() + elif tokens[0] == 'packages' or tokens[0] == 'package': + self.listPackages() + elif tokens[0] == 'classes' or tokens[0] == 'class': + self.listClasses(tokens[1:]) + + def do_set(self, data): + tokens = split(data) + if len(tokens) == 0: + print "What do you want to set? type 'help' for more information." 
+ return + if tokens[0] == 'filter': + if len(tokens) == 2: + self.setAgentFilter(tokens[1]) + elif tokens[0] == 'default': + if len(tokens) == 2: + self.updateAgents() + number = int(tokens[1]) + self.focus_agent = self.agents[number] + print "Default Agent: %s" % self.focus_agent.getName() + + def do_show(self, data): + tokens = split(data) + if len(tokens) == 0: + print "What do you want to show? Type 'help' for more information." + return + + if tokens[0] == 'options': + print "Options used in this session:" + print " Connection Options : %s" % self.scrubConnOptions() + print " QMF Session Options: %s" % self.qmf_options + return + + if tokens[0] == 'agent': + self.showAgent(tokens[1:]) + return + + if tokens[0] == 'filter': + print self.agent_filter + return + + if tokens[0] == "default": + if not self.focus_agent: + self.updateAgents() + if self.focus_agent: + print "Default Agent: %s" % self.focus_agent.getName() + else: + print "Default Agent not set" + return + + if tokens[0] == "class": + self.showClass(tokens[1:]) + return + + if tokens[0].isdigit(): + self.showData(tokens[0]) + return + + print "What do you want to show? Type 'help' for more information." + return + + def do_query(self, data): + tokens = split(data) + if len(tokens) == 0: + print "Class name not specified." 
+ return + cname = tokens[0] + pname = None + pred = None + if len(tokens) >= 2: + if tokens[1][0] == '[': + pred = tokens[1] + else: + pname = tokens[1] + if len(tokens) >= 3: + pred = tokens[2] + query = "{class:'%s'" % cname + if pname: + query += ",package:'%s'" % pname + if pred: + query += ",where:%s" % pred + query += "}" + if not self.focus_agent: + self.updateAgents() + d_list = self.focus_agent.query(query) + local_data_list = {} + for d in d_list: + local_data_list[self.next_data_index] = d + self.next_data_index += 1 + rows = [] + for index,val in local_data_list.items(): + rows.append((index, val.getAddr().getName())) + self.data_list[index] = val + self.disp.table("Data Objects Returned: %d:" % len(d_list), ("Number", "Data Address"), rows) + + def do_call(self, data): + tokens = split(data) + if len(tokens) < 2: + print "Data ID and method-name not specified." + return + idx = int(tokens[0]) + methodName = tokens[1] + args = [] + for arg in tokens[2:]: + ## + ## If the argument is a map, list, boolean, integer, or floating (one decimal point), + ## run it through the Python evaluator so it is converted to the correct type. 
+ ## + ## TODO: use a regex for this instead of this convoluted logic + if arg[0] == '{' or arg[0] == '[' or arg == "True" or arg == "False" or \ + ((arg.count('.') < 2 and (arg.count('-') == 0 or \ + (arg.count('-') == 1 and arg[0] == '-')) and \ + arg.replace('.','').replace('-','').isdigit())): + args.append(eval(arg)) + else: + args.append(arg) + + if not idx in self.data_list: + print "Unknown data index, run 'query' to get a list of data indices" + return + + data = self.data_list[idx] + data._getSchema() + result = data._invoke(methodName, args, {}) + rows = [] + for k,v in result.items(): + rows.append((k,v)) + self.disp.table("Output Parameters:", ("Name", "Value"), rows) + + def do_clear(self, data): + self.data_list = {} + self.next_data_index = 1 + print "Accumulated query results cleared" + + def do_exit(self): + pass + + #==================== + # Sub-Command Methods + #==================== + def setAgentFilter(self, filt): + self.agent_filter = filt + self.session.setAgentFilter(filt) + + def updateAgents(self): + agents = self.session.getAgents() + number_list = [] + for agent in agents: + if agent.getName() not in self.agent_numbers: + number = self.next_number + number_list.append(number) + self.next_number += 1 + self.agent_numbers[agent.getName()] = number + self.agents[number] = agent + else: + ## Track seen agents so we can clean out deleted ones + number = self.agent_numbers[agent.getName()] + number_list.append(number) + if number in self.deleted_agents: + self.agents[number] = self.deleted_agents.pop(number) + deleted = [] + for number in self.agents: + if number not in number_list: + deleted.append(number) + for number in deleted: + self.deleted_agents[number] = self.agents.pop(number) + if not self.focus_agent: + self.focus_agent = self.session.getConnectedBrokerAgent() + + def listAgents(self): + self.updateAgents() + rows = [] + for number in self.agents: + agent = self.agents[number] + if self.focus_agent and agent.getName() == 
self.focus_agent.getName(): + d = '*' + else: + d = '' + rows.append((d, number, agent.getVendor(), agent.getProduct(), agent.getInstance(), agent.getEpoch())) + self.disp.table("QMF Agents:", ("", "Id", "Vendor", "Product", "Instance", "Epoch"), rows) + + def listPackages(self): + if not self.focus_agent: + raise "Default Agent not set - use 'set default'" + self.focus_agent.loadSchemaInfo() + packages = self.focus_agent.getPackages() + for p in packages: + print " %s" % p + + def getClasses(self, tokens): + if not self.focus_agent: + raise "Default Agent not set - use 'set default'" + return + self.focus_agent.loadSchemaInfo() + if len(tokens) == 1: + classes = self.focus_agent.getSchemaIds(tokens[0]); + else: + packages = self.focus_agent.getPackages() + classes = [] + for p in packages: + classes.extend(self.focus_agent.getSchemaIds(p)) + return classes + + def listClasses(self, tokens): + classes = self.getClasses(tokens) + rows = [] + for c in classes: + rows.append((c.getPackageName(), c.getName(), self.classTypeName(c.getType()))) + self.disp.table("Classes:", ("Package", "Class", "Type"), rows) + + def showClass(self, tokens): + if len(tokens) < 1: + return + classes = self.getClasses([]) + c = tokens[0] + p = None + if len(tokens) == 2: + p = tokens[1] + schema = None + sid = None + for cls in classes: + if c == cls.getName(): + if not p or p == cls.getPackageName(): + schema = self.focus_agent.getSchema(cls) + sid = cls + break + if not sid: + return + print "Class: %s:%s (%s) - %s" % \ + (sid.getPackageName(), sid.getName(), self.classTypeName(sid.getType()), schema.getDesc()) + print " hash: %r" % sid.getHash() + props = schema.getProperties() + methods = schema.getMethods() + rows = [] + for prop in props: + name = prop.getName() + dtype = self.typeName(prop.getType()) + if len(prop.getSubtype()) > 0: + dtype += "(%s)" % prop.getSubtype() + access = self.accessName(prop.getAccess()) + idx = self.yes_blank(prop.isIndex()) + opt = 
self.yes_blank(prop.isOptional()) + unit = prop.getUnit() + desc = prop.getDesc() + rows.append((name, dtype, idx, access, opt, unit, desc)) + self.disp.table("Properties:", ("Name", "Type", "Index", "Access", "Optional", "Unit", "Description"), rows) + if len(methods) > 0: + for meth in methods: + name = meth.getName() + desc = meth.getDesc() + if len(desc) > 0: + desc = " - " + desc + args = meth.getArguments() + rows = [] + for prop in args: + aname = prop.getName() + dtype = self.typeName(prop.getType()) + if len(prop.getSubtype()) > 0: + dtype += "(%s)" % prop.getSubtype() + unit = prop.getUnit() + adesc = prop.getDesc() + io = self.dirName(prop.getDirection()) + rows.append((aname, dtype, io, unit, adesc)) + print + print " Method: %s%s" % (name, desc) + self.disp.table("Arguments:", ("Name", "Type", "Dir", "Unit", "Description"), rows) + + def showAgent(self, tokens): + self.updateAgents() + for token in tokens: + number = int(token) + agent = self.agents[number] + print + print " ==================================================================================" + print " Agent Id: %d" % number + print " Agent Name: %s" % agent.getName() + print " Epoch: %d" % agent.getEpoch() + print " Attributes:" + attrs = agent.getAttributes() + keys = attrs.keys() + keys.sort() + pairs = [] + for key in keys: + if key == '_timestamp' or key == '_schema_updated': + val = disp.timestamp(attrs[key]) + else: + val = attrs[key] + pairs.append((key, val)) + self.printAlignedPairs(pairs) + agent.loadSchemaInfo() + print " Packages:" + packages = agent.getPackages() + for package in packages: + print " %s" % package + + def showData(self, idx): + num = int(idx) + if not num in self.data_list: + print "Data ID not known, run 'query' first to get data" + return + data = self.data_list[num] + props = data.getProperties() + rows = [] + for k,v in props.items(): + rows.append((k, v)) + self.disp.table("Properties:", ("Name", "Value"), rows) + + def listData(self): + if 
len(self.data_list) == 0: + print "No Query Results - Use the 'query' command" + return + rows = [] + for index,val in self.data_list.items(): + rows.append((index, val.getAgent().getName(), val.getAddr().getName())) + self.disp.table("Accumulated Query Results:", ('Number', 'Agent', 'Data Address'), rows) + + def printAlignedPairs(self, rows, indent=8): + maxlen = 0 + for first, second in rows: + if len(first) > maxlen: + maxlen = len(first) + maxlen += indent + for first, second in rows: + for i in range(maxlen - len(first)): + print "", + print "%s : %s" % (first, second) + + def classTypeName(self, code): + if code == qmf2.SCHEMA_TYPE_DATA: return "Data" + if code == qmf2.SCHEMA_TYPE_EVENT: return "Event" + return "Unknown" + + def typeName (self, typecode): + """ Convert type-codes to printable strings """ + if typecode == qmf2.SCHEMA_DATA_VOID: return "void" + elif typecode == qmf2.SCHEMA_DATA_BOOL: return "bool" + elif typecode == qmf2.SCHEMA_DATA_INT: return "int" + elif typecode == qmf2.SCHEMA_DATA_FLOAT: return "float" + elif typecode == qmf2.SCHEMA_DATA_STRING: return "string" + elif typecode == qmf2.SCHEMA_DATA_MAP: return "map" + elif typecode == qmf2.SCHEMA_DATA_LIST: return "list" + elif typecode == qmf2.SCHEMA_DATA_UUID: return "uuid" + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def valueByType(self, typecode, val): + if typecode == 1: return "%d" % val + elif typecode == 2: return "%d" % val + elif typecode == 3: return "%d" % val + elif typecode == 4: return "%d" % val + elif typecode == 6: return val + elif typecode == 7: return val + elif typecode == 8: return strftime("%c", gmtime(val / 1000000000)) + elif typecode == 9: + if val < 0: val = 0 + sec = val / 1000000000 + min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % 
(sec % 60) + return result + + elif typecode == 10: return str(self.idRegistry.displayId(val)) + elif typecode == 11: + if val: + return "True" + else: + return "False" + + elif typecode == 12: return "%f" % val + elif typecode == 13: return "%f" % val + elif typecode == 14: return "%r" % val + elif typecode == 15: return "%r" % val + elif typecode == 16: return "%d" % val + elif typecode == 17: return "%d" % val + elif typecode == 18: return "%d" % val + elif typecode == 19: return "%d" % val + elif typecode == 20: return "%r" % val + elif typecode == 21: return "%r" % val + elif typecode == 22: return "%r" % val + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def accessName (self, code): + """ Convert element access codes to printable strings """ + if code == qmf2.ACCESS_READ_CREATE: return "ReadCreate" + elif code == qmf2.ACCESS_READ_WRITE: return "ReadWrite" + elif code == qmf2.ACCESS_READ_ONLY: return "ReadOnly" + else: + raise ValueError ("Invalid access code: %s" % str(code)) + + def dirName(self, io): + if io == qmf2.DIR_IN: return "in" + elif io == qmf2.DIR_OUT: return "out" + elif io == qmf2.DIR_IN_OUT: return "in_out" + else: + raise ValueError("Invalid direction code: %r" % io) + + def notNone (self, text): + if text == None: + return "" + else: + return text + + def yes_blank(self, val): + if val: + return "Y" + return "" + + def objectIndex(self, obj): + if obj._objectId.isV2: + return obj._objectId.getObject() + result = "" + first = True + props = obj.getProperties() + for prop in props: + if prop[0].index: + if not first: + result += "." 
+ result += self.valueByType(prop[0].type, prop[1]) + first = None + return result + + def scrubConnOptions(self): + scrubbed = {} + for key, val in self.conn_options.items(): + if key == "password": + val = "***" + scrubbed[key] = val + return str(scrubbed) + + +#========================================================= +# Main Program +#========================================================= +try: + oa = OptsAndArgs(sys.argv) + host, conn_options, qmf_options = oa.parse() +except Exception, e: + print "Parse Error: %s" % e + sys.exit(1) + +disp = Display() + +# Attempt to make a connection to the target broker +try: + data = QmfData(disp, host, conn_options, qmf_options) +except Exception, e: + if str(e).find("Exchange not found") != -1: + print "Management not enabled on broker: Use '-m yes' option on broker startup." + else: + print "Failed: %s - %s" % (e.__class__.__name__, e) + sys.exit(1) + +# Instantiate the CLI interpreter and launch it. +cli = Mcli(data, disp) +print("Management Tool for QMF") +try: + cli.cmdloop() +except KeyboardInterrupt: + print + print "Exiting..." +except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + +# alway attempt to cleanup broker resources +data.close() diff --git a/qpid/cpp/management/python/bin/qpid-config b/qpid/cpp/management/python/bin/qpid-config new file mode 100755 index 0000000000..3d4bb6036a --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-config @@ -0,0 +1,878 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +import pdb + +import os +from optparse import OptionParser, OptionGroup, IndentedHelpFormatter +import sys +import locale + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpid.messaging import Connection, ConnectionError +from qpidtoollibs import BrokerAgent +from qpidtoollibs import Display, Header + +usage = """ +Usage: qpid-config [OPTIONS] + qpid-config [OPTIONS] exchanges [filter-string] + qpid-config [OPTIONS] queues [filter-string] + qpid-config [OPTIONS] add exchange [AddExchangeOptions] + qpid-config [OPTIONS] del exchange + qpid-config [OPTIONS] add queue [AddQueueOptions] + qpid-config [OPTIONS] del queue [DelQueueOptions] + qpid-config [OPTIONS] bind [binding-key] + [-f -|filename] + [all|any] k1=v1 [, k2=v2...] 
+ qpid-config [OPTIONS] unbind [binding-key] + qpid-config [OPTIONS] reload-acl + qpid-config [OPTIONS] add [--argument =] + qpid-config [OPTIONS] del + qpid-config [OPTIONS] list [--show-property ] + qpid-config [OPTIONS] log [] + qpid-config [OPTIONS] shutdown""" + +description = """ +Examples: + +$ qpid-config add queue q +$ qpid-config add exchange direct d -a localhost:5672 +$ qpid-config exchanges -b 10.1.1.7:10000 +$ qpid-config queues -b guest/guest@broker-host:10000 + +Add Exchange values: + + direct Direct exchange for point-to-point communication + fanout Fanout exchange for broadcast communication + topic Topic exchange that routes messages using binding keys with wildcards + headers Headers exchange that matches header fields against the binding keys + xml XML Exchange - allows content filtering using an XQuery + + +Queue Limit Actions: + + none (default) - Use broker's default policy + reject - Reject enqueued messages + ring - Replace oldest unacquired message with new + +Replication levels: + + none - no replication + configuration - replicate queue and exchange existence and bindings, but not messages. + all - replicate configuration and messages + +Log value: + + Comma separated : pairs, e.g. 
'info+,debug+:Broker,trace+:Queue' +""" + +REPLICATE_LEVELS= ["none", "configuration", "all"] +DEFAULT_PROPERTIES = {"exchange":["name", "type", "durable"], "queue":["name", "durable", "autoDelete"]} + +def get_value(r): + if len(r) == 2: + try: + value = int(r[1]) + except: + value = r[1] + else: value = None + return value + +class Config: + def __init__(self): + self._recursive = False + self._host = "localhost" + self._connTimeout = 10 + self._ignoreDefault = False + self._altern_ex = None + self._durable = False + self._replicate = None + self._if_empty = True + self._if_unused = True + self._fileCount = None + self._fileSize = None + self._efp_partition_num = None + self._efp_pool_file_size = None + self._maxQueueSize = None + self._maxQueueCount = None + self._limitPolicy = None + self._msgSequence = False + self._lvq_key = None + self._ive = False + self._eventGeneration = None + self._file = None + self._flowStopCount = None + self._flowResumeCount = None + self._flowStopSize = None + self._flowResumeSize = None + self._msgGroupHeader = None + self._sharedMsgGroup = False + self._extra_arguments = [] + self._start_replica = None + self._returnCode = 0 + self._list_properties = [] + + def getOptions(self): + options = {} + for a in self._extra_arguments: + r = a.split("=", 1) + options[r[0]] = get_value(r) + return options + + +config = Config() +conn_options = {} + +FILECOUNT = "qpid.file_count" +FILESIZE = "qpid.file_size" +EFP_PARTITION_NUM = "qpid.efp_partition_num" +EFP_POOL_FILE_SIZE = "qpid.efp_pool_file_size" +MAX_QUEUE_SIZE = "qpid.max_size" +MAX_QUEUE_COUNT = "qpid.max_count" +POLICY_TYPE = "qpid.policy_type" +LVQ_KEY = "qpid.last_value_queue_key" +MSG_SEQUENCE = "qpid.msg_sequence" +IVE = "qpid.ive" +FLOW_STOP_COUNT = "qpid.flow_stop_count" +FLOW_RESUME_COUNT = "qpid.flow_resume_count" +FLOW_STOP_SIZE = "qpid.flow_stop_size" +FLOW_RESUME_SIZE = "qpid.flow_resume_size" +MSG_GROUP_HDR_KEY = "qpid.group_header_key" +SHARED_MSG_GROUP = 
"qpid.shared_msg_group" +REPLICATE = "qpid.replicate" +#There are various arguments to declare that have specific program +#options in this utility. However there is now a generic mechanism for +#passing arguments as well. The SPECIAL_ARGS list contains the +#arguments for which there are specific program options defined +#i.e. the arguments for which there is special processing on add and +#list +SPECIAL_ARGS=[ + FILECOUNT,FILESIZE,EFP_PARTITION_NUM,EFP_POOL_FILE_SIZE, + MAX_QUEUE_SIZE,MAX_QUEUE_COUNT,POLICY_TYPE, + LVQ_KEY,MSG_SEQUENCE,IVE, + FLOW_STOP_COUNT,FLOW_RESUME_COUNT,FLOW_STOP_SIZE,FLOW_RESUME_SIZE, + MSG_GROUP_HDR_KEY,SHARED_MSG_GROUP,REPLICATE] + +class JHelpFormatter(IndentedHelpFormatter): + """Format usage and description without stripping newlines from usage strings + """ + + def format_usage(self, usage): + return usage + + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +def Usage(): + print usage + sys.exit(-1) + +def OptionsAndArguments(argv): + """ Set global variables for options, return arguments """ + + global config + + + parser = OptionParser(usage=usage, + description=description, + formatter=JHelpFormatter()) + + group1 = OptionGroup(parser, "General Options") + group1.add_option("-t", "--timeout", action="store", type="int", default=10, metavar="", help="Maximum time to wait for broker connection (in seconds)") + group1.add_option("-r", "--recursive", action="store_true", help="Show bindings in queue or exchange list") + group1.add_option("-b", "--broker", action="store", type="string", metavar="
", help="Address of qpidd broker with syntax: [username/password@] hostname | ip-address [:]") + group1.add_option("-a", "--broker-addr", action="store", type="string", metavar="
") + group1.add_option("--sasl-mechanism", action="store", type="string", metavar="", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + group1.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + group1.add_option("--ssl-certificate", action="store", type="string", metavar="", help="Client SSL certificate (PEM Format)") + group1.add_option("--ssl-key", action="store", type="string", metavar="", help="Client SSL private key (PEM Format)") + group1.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + parser.add_option_group(group1) + + group_ls = OptionGroup(parser, "Options for Listing Exchanges and Queues") + group_ls.add_option("--ignore-default", action="store_true", help="Ignore the default exchange in exchange or queue list") + parser.add_option_group(group_ls) + + group2 = OptionGroup(parser, "Options for Adding Exchanges and Queues") + group2.add_option("--alternate-exchange", action="store", type="string", metavar="", help="Name of the alternate-exchange for the new queue or exchange. Exchanges route messages to the alternate exchange if they are unable to route them elsewhere. Queues route messages to the alternate exchange if they are rejected by a subscriber or orphaned by queue deletion.") + group2.add_option("--durable", action="store_true", help="The new queue or exchange is durable.") + group2.add_option("--replicate", action="store", metavar="", help="Enable automatic replication in a HA cluster. 
is 'none', 'configuration' or 'all').") + parser.add_option_group(group2) + + group3 = OptionGroup(parser, "Options for Adding Queues") + group3.add_option("--file-count", action="store", type="int", metavar="", help="[legacystore] Number of files in queue's persistence journal") + group3.add_option("--file-size", action="store", type="int", metavar="", help="[legactystore] File size in pages (64KiB/page)") + group3.add_option("--efp-partition-num", action="store", type="int", metavar="", help="[linearstore] EFP partition number") + group3.add_option("--efp-pool-file-size", action="store", type="int", metavar="", help="[linearstore] EFP file size (KiB)") + group3.add_option("--max-queue-size", action="store", type="int", metavar="", help="Maximum in-memory queue size as bytes") + group3.add_option("--max-queue-count", action="store", type="int", metavar="", help="Maximum in-memory queue size as a number of messages") + group3.add_option("--limit-policy", action="store", choices=["none", "reject", "ring", "ring-strict"], metavar="", help="Action to take when queue limit is reached") + group3.add_option("--lvq-key", action="store", metavar="", help="Last Value Queue key") + group3.add_option("--flow-stop-size", action="store", type="int", metavar="", + help="Turn on sender flow control when the number of queued bytes exceeds this value.") + group3.add_option("--flow-resume-size", action="store", type="int", metavar="", + help="Turn off sender flow control when the number of queued bytes drops below this value.") + group3.add_option("--flow-stop-count", action="store", type="int", metavar="", + help="Turn on sender flow control when the number of queued messages exceeds this value.") + group3.add_option("--flow-resume-count", action="store", type="int", metavar="", + help="Turn off sender flow control when the number of queued messages drops below this value.") + group3.add_option("--group-header", action="store", type="string", metavar="", + help="Enable message 
groups. Specify name of header that holds group identifier.") + group3.add_option("--shared-groups", action="store_true", + help="Allow message group consumption across multiple consumers.") + group3.add_option("--argument", dest="extra_arguments", action="append", default=[], + metavar="", help="Specify a key-value pair to add to queue arguments") + group3.add_option("--start-replica", metavar="", help="Start replication from the same-named queue at ") + # no option for declaring an exclusive queue - which can only be used by the session that creates it. + parser.add_option_group(group3) + + group4 = OptionGroup(parser, "Options for Adding Exchanges") + group4.add_option("--sequence", action="store_true", help="Exchange will insert a 'qpid.msg_sequence' field in the message header") + group4.add_option("--ive", action="store_true", help="Exchange will behave as an 'initial-value-exchange', keeping a reference to the last message forwarded and enqueuing that message to newly bound queues.") + parser.add_option_group(group4) + + group5 = OptionGroup(parser, "Options for Deleting Queues") + group5.add_option("--force", action="store_true", help="Force delete of queue even if it's currently used or it's not empty") + group5.add_option("--force-if-not-empty", action="store_true", help="Force delete of queue even if it's not empty") + group5.add_option("--force-if-used", action="store_true", help="Force delete of queue even if it's currently used") + parser.add_option_group(group5) + + group6 = OptionGroup(parser, "Options for Declaring Bindings") + group6.add_option("-f", "--file", action="store", type="string", metavar="", help="For XML Exchange bindings - specifies the name of a file containing an XQuery.") + parser.add_option_group(group6) + + group_7 = OptionGroup(parser, "Formatting options for 'list' action") + group_7.add_option("--show-property", dest="list_properties", action="append", default=[], + metavar="", help="Specify a property of an object to be 
included in output") + parser.add_option_group(group_7) + + opts, encArgs = parser.parse_args(args=argv) + + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if opts.recursive: + config._recursive = True + if opts.broker: + config._host = opts.broker + if opts.broker_addr: + config._host = opts.broker_addr + if config._host is None: config._host="localhost:5672" + if opts.timeout is not None: + config._connTimeout = opts.timeout + if config._connTimeout == 0: + config._connTimeout = None + if opts.ignore_default: + config._ignoreDefault = True + if opts.alternate_exchange: + config._altern_ex = opts.alternate_exchange + if opts.durable: + config._durable = True + if opts.replicate: + if not opts.replicate in REPLICATE_LEVELS: + raise Exception("Invalid replication level '%s', should be one of: %s" % (opts.replicate, ", ".join(REPLICATE_LEVELS))) + config._replicate = opts.replicate + if opts.ha_admin: config._ha_admin = True + if opts.file: + config._file = opts.file + if opts.file_count is not None: + config._fileCount = opts.file_count + if opts.file_size is not None: + config._fileSize = opts.file_size + if opts.efp_partition_num is not None: + config._efp_partition_num = opts.efp_partition_num + if opts.efp_pool_file_size is not None: + config._efp_pool_file_size = opts.efp_pool_file_size + if opts.max_queue_size is not None: + config._maxQueueSize = opts.max_queue_size + if opts.max_queue_count is not None: + config._maxQueueCount = opts.max_queue_count + if opts.limit_policy: + config._limitPolicy = opts.limit_policy + if opts.sequence: + config._msgSequence = True + if opts.lvq_key: + config._lvq_key = opts.lvq_key + if opts.ive: + config._ive = True + if opts.force: + config._if_empty = False + config._if_unused = False + if opts.force_if_not_empty: + config._if_empty = False + if opts.force_if_used: + config._if_unused = False + if opts.sasl_mechanism: + config._sasl_mechanism = 
opts.sasl_mechanism + if opts.flow_stop_size is not None: + config._flowStopSize = opts.flow_stop_size + if opts.flow_resume_size is not None: + config._flowResumeSize = opts.flow_resume_size + if opts.flow_stop_count is not None: + config._flowStopCount = opts.flow_stop_count + if opts.flow_resume_count is not None: + config._flowResumeCount = opts.flow_resume_count + if opts.group_header: + config._msgGroupHeader = opts.group_header + if opts.shared_groups: + config._sharedMsgGroup = True + if opts.extra_arguments: + config._extra_arguments = opts.extra_arguments + if opts.start_replica: + config._start_replica = opts.start_replica + if opts.list_properties: + config._list_properties = opts.list_properties + + if opts.sasl_mechanism: + conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.ha_admin: + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + return args + + +# +# helpers for the arg parsing in bind(). return multiple values; "ok" +# followed by the resultant args + +# +# accept -f followed by either +# a filename or "-", for stdin. pull the bits into a string, to be +# passed to the xml binding. 
+# +def snarf_xquery_args(): + if not config._file: + print "Invalid args to bind xml: need an input file or stdin" + return [False] + if config._file == "-": + res = sys.stdin.read() + else: + f = open(config._file) # let this signal if it can't find it + res = f.read() + f.close() + return [True, res] + +# +# look for "any"/"all" and grok the rest of argv into a map +# +def snarf_header_args(args): + + if len(args) < 2: + print "Invalid args to bind headers: need 'any'/'all' plus conditions" + return [False] + op = args[0] + if op == "all" or op == "any": + kv = {} + for thing in args[1:]: + k_and_v = thing.split("=") + kv[k_and_v[0]] = k_and_v[1] + return [True, op, kv] + else: + print "Invalid condition arg to bind headers, need 'any' or 'all', not '" + op + "'" + return [False] + +class BrokerManager: + def __init__(self): + self.brokerName = None + self.conn = None + self.broker = None + + def SetBroker(self, brokerUrl): + self.url = brokerUrl + self.conn = Connection.establish(self.url, **conn_options) + self.broker = BrokerAgent(self.conn) + + def Disconnect(self, ignore=True): + if self.conn: + try: + self.conn.close() + except Exception, e: + if ignore: + # suppress close errors to avoid + # tracebacks when a previous + # exception will be printed to stdout + pass + else: + # raise last exception so complete + # trackback is preserved + raise + + def Overview(self): + exchanges = self.broker.getAllExchanges() + queues = self.broker.getAllQueues() + print "Total Exchanges: %d" % len(exchanges) + etype = {} + for ex in exchanges: + if ex.type not in etype: + etype[ex.type] = 1 + else: + etype[ex.type] = etype[ex.type] + 1 + for typ in etype: + print "%15s: %d" % (typ, etype[typ]) + + print + print " Total Queues: %d" % len(queues) + durable = 0 + for queue in queues: + if queue.durable: + durable = durable + 1 + print " durable: %d" % durable + print " non-durable: %d" % (len(queues) - durable) + + def ExchangeList(self, filter): + exchanges = 
self.broker.getAllExchanges() + caption1 = "Type " + caption2 = "Exchange Name" + maxNameLen = len(caption2) + found = False + for ex in exchanges: + if self.match(ex.name, filter): + if len(ex.name) > maxNameLen: maxNameLen = len(ex.name) + found = True + if not found: + global config + config._returnCode = 1 + return + + print "%s%-*s Attributes" % (caption1, maxNameLen, caption2) + line = "" + for i in range(((maxNameLen + len(caption1)) / 5) + 5): + line += "=====" + print line + + for ex in exchanges: + if config._ignoreDefault and not ex.name: continue + if self.match(ex.name, filter): + print "%-10s%-*s " % (ex.type, maxNameLen, ex.name), + args = ex.arguments + if not args: args = {} + if ex.durable: print "--durable", + if REPLICATE in args: print "--replicate=%s" % args[REPLICATE], + if MSG_SEQUENCE in args and args[MSG_SEQUENCE]: print "--sequence", + if IVE in args and args[IVE]: print "--ive", + if ex.altExchange: + print "--alternate-exchange=%s" % ex.altExchange, + print + + def ExchangeListRecurse(self, filter): + exchanges = self.broker.getAllExchanges() + bindings = self.broker.getAllBindings() + queues = self.broker.getAllQueues() + for ex in exchanges: + if config._ignoreDefault and not ex.name: continue + if self.match(ex.name, filter): + print "Exchange '%s' (%s)" % (ex.name, ex.type) + for bind in bindings: + if bind.exchangeRef == ex.name: + qname = "" + queue = self.findById(queues, bind.queueRef) + if queue != None: + qname = queue.name + if bind.arguments: + print " bind [%s] => %s %s" % (bind.bindingKey, qname, bind.arguments) + else: + print " bind [%s] => %s" % (bind.bindingKey, qname) + + + def QueueList(self, filter): + queues = self.broker.getAllQueues() + caption = "Queue Name" + maxNameLen = len(caption) + found = False + for q in queues: + if self.match(q.name, filter): + if len(q.name) > maxNameLen: maxNameLen = len(q.name) + found = True + if not found: + global config + config._returnCode = 1 + return + + print "%-*s 
Attributes" % (maxNameLen, caption) + line = "" + for i in range((maxNameLen / 5) + 5): + line += "=====" + print line + + for q in queues: + if self.match(q.name, filter): + print "%-*s " % (maxNameLen, q.name), + args = q.arguments + if not args: args = {} + if q.durable: print "--durable", + if REPLICATE in args: print "--replicate=%s" % args[REPLICATE], + if q.autoDelete: print "auto-del", + if q.exclusive: print "excl", + if FILESIZE in args: print "--file-size=%s" % args[FILESIZE], + if FILECOUNT in args: print "--file-count=%s" % args[FILECOUNT], + if EFP_PARTITION_NUM in args: print "--efp-partition-num=%s" % args[EFP_PARTITION_NUM], + if EFP_POOL_FILE_SIZE in args: print "--efp-pool-file-size=%s" % args[EFP_POOL_FILE_SIZE], + if MAX_QUEUE_SIZE in args: print "--max-queue-size=%s" % args[MAX_QUEUE_SIZE], + if MAX_QUEUE_COUNT in args: print "--max-queue-count=%s" % args[MAX_QUEUE_COUNT], + if POLICY_TYPE in args: print "--limit-policy=%s" % args[POLICY_TYPE].replace("_", "-"), + if LVQ_KEY in args: print "--lvq-key=%s" % args[LVQ_KEY], + if q.altExchange: + print "--alternate-exchange=%s" % q.altExchange, + if FLOW_STOP_SIZE in args: print "--flow-stop-size=%s" % args[FLOW_STOP_SIZE], + if FLOW_RESUME_SIZE in args: print "--flow-resume-size=%s" % args[FLOW_RESUME_SIZE], + if FLOW_STOP_COUNT in args: print "--flow-stop-count=%s" % args[FLOW_STOP_COUNT], + if FLOW_RESUME_COUNT in args: print "--flow-resume-count=%s" % args[FLOW_RESUME_COUNT], + if MSG_GROUP_HDR_KEY in args: print "--group-header=%s" % args[MSG_GROUP_HDR_KEY], + if SHARED_MSG_GROUP in args and args[SHARED_MSG_GROUP] == 1: print "--shared-groups", + print " ".join(["--argument %s=%s" % (k, v) for k,v in args.iteritems() if not k in SPECIAL_ARGS]) + + def QueueListRecurse(self, filter): + exchanges = self.broker.getAllExchanges() + bindings = self.broker.getAllBindings() + queues = self.broker.getAllQueues() + for queue in queues: + if self.match(queue.name, filter): + print "Queue '%s'" % 
queue.name + for bind in bindings: + if bind.queueRef == queue.name: + ename = "" + ex = self.findById(exchanges, bind.exchangeRef) + if ex != None: + ename = ex.name + if ename == "": + if config._ignoreDefault: continue + ename = "''" + if bind.arguments: + print " bind [%s] => %s %s" % (bind.bindingKey, ename, bind.arguments) + else: + print " bind [%s] => %s" % (bind.bindingKey, ename) + + def AddExchange(self, args): + if len(args) < 2: + Usage() + etype = args[0] + ename = args[1] + declArgs = {} + for a in config._extra_arguments: + r = a.split("=", 1) + declArgs[r[0]] = get_value(r) + + if config._msgSequence: + declArgs[MSG_SEQUENCE] = 1 + if config._ive: + declArgs[IVE] = 1 + if config._altern_ex: + declArgs['alternate-exchange'] = config._altern_ex + if config._durable: + declArgs['durable'] = 1 + if config._replicate: + declArgs[REPLICATE] = config._replicate + self.broker.addExchange(etype, ename, declArgs) + + + def DelExchange(self, args): + if len(args) < 1: + Usage() + ename = args[0] + self.broker.delExchange(ename) + + + def AddQueue(self, args): + if len(args) < 1: + Usage() + qname = args[0] + declArgs = {} + for a in config._extra_arguments: + r = a.split("=", 1) + declArgs[r[0]] = get_value(r) + + if config._durable: + # allow the default fileCount and fileSize specified + # in qpid config file to take prededence + if config._fileCount: + declArgs[FILECOUNT] = config._fileCount + if config._fileSize: + declArgs[FILESIZE] = config._fileSize + if config._efp_partition_num: + declArgs[EFP_PARTITION_NUM] = config._efp_partition_num + if config._efp_pool_file_size: + declArgs[EFP_POOL_FILE_SIZE] = config._efp_pool_file_size + + if config._maxQueueSize is not None: + declArgs[MAX_QUEUE_SIZE] = config._maxQueueSize + if config._maxQueueCount is not None: + declArgs[MAX_QUEUE_COUNT] = config._maxQueueCount + if config._limitPolicy: + if config._limitPolicy == "none": + pass + elif config._limitPolicy == "reject": + declArgs[POLICY_TYPE] = "reject" + 
elif config._limitPolicy == "ring": + declArgs[POLICY_TYPE] = "ring" + + if config._lvq_key: + declArgs[LVQ_KEY] = config._lvq_key + + if config._flowStopSize is not None: + declArgs[FLOW_STOP_SIZE] = config._flowStopSize + if config._flowResumeSize is not None: + declArgs[FLOW_RESUME_SIZE] = config._flowResumeSize + if config._flowStopCount is not None: + declArgs[FLOW_STOP_COUNT] = config._flowStopCount + if config._flowResumeCount is not None: + declArgs[FLOW_RESUME_COUNT] = config._flowResumeCount + + if config._msgGroupHeader: + declArgs[MSG_GROUP_HDR_KEY] = config._msgGroupHeader + if config._sharedMsgGroup: + declArgs[SHARED_MSG_GROUP] = 1 + + if config._altern_ex: + declArgs['alternate-exchange'] = config._altern_ex + if config._durable: + declArgs['durable'] = 1 + if config._replicate: + declArgs[REPLICATE] = config._replicate + self.broker.addQueue(qname, declArgs) + if config._start_replica: # Start replication + self.broker._method("replicate", {"broker":config._start_replica, "queue":qname}, "org.apache.qpid.ha:habroker:ha-broker") + + def DelQueue(self, args): + if len(args) < 1: + Usage() + qname = args[0] + self.broker.delQueue(qname, if_empty=config._if_empty, if_unused=config._if_unused) + + + + def Bind(self, args): + if len(args) < 2: + Usage() + ename = args[0] + qname = args[1] + key = "" + if len(args) > 2: + key = args[2] + + # query the exchange to determine its type. + res = self.broker.getExchange(ename) + + # type of the xchg determines the processing of the rest of + # argv. if it's an xml xchg, we want to find a file + # containing an x-query, and pass that. if it's a headers + # exchange, we need to pass either "any" or all, followed by a + # map containing key/value pairs. if neither of those, extra + # args are ignored. 
+ ok = True + _args = {} + if not res: + pass + elif res.type == "xml": + # this checks/imports the -f arg + [ok, xquery] = snarf_xquery_args() + _args = { "xquery" : xquery } + else: + if res.type == "headers": + [ok, op, kv] = snarf_header_args(args[3:]) + _args = kv + _args["x-match"] = op + + if not ok: + sys.exit(1) + + self.broker.bind(ename, qname, key, _args) + + def Unbind(self, args): + if len(args) < 2: + Usage() + ename = args[0] + qname = args[1] + key = "" + if len(args) > 2: + key = args[2] + self.broker.unbind(ename, qname, key) + + def ReloadAcl(self): + try: + self.broker.reloadAclFile() + except Exception, e: + if str(e).find('No object found') != -1: + print "Failed: ACL Module Not Loaded in Broker" + else: + raise + + def findById(self, items, id): + for item in items: + if item.name == id: + return item + return None + + def match(self, name, filter): + if filter == "": + return True + if name.find(filter) == -1: + return False + return True + +def YN(bool): + if bool: + return 'Y' + return 'N' + +def _clean_ref(o): + if isinstance(o, dict) and "_object_name" in o: + fqn = o["_object_name"] + parts = fqn.split(":",2) + return parts[len(parts)-1] + else: + return o + +def main(argv=None): + args = OptionsAndArguments(argv) + bm = BrokerManager() + + try: + bm.SetBroker(config._host) + if len(args) == 0: + bm.Overview() + else: + cmd = args[0] + modifier = "" + if len(args) > 1: + modifier = args[1] + if cmd == "exchanges": + if config._recursive: + bm.ExchangeListRecurse(modifier) + else: + bm.ExchangeList(modifier) + elif cmd == "queues": + if config._recursive: + bm.QueueListRecurse(modifier) + else: + bm.QueueList(modifier) + elif cmd == "add": + if modifier == "exchange": + bm.AddExchange(args[2:]) + elif modifier == "queue": + bm.AddQueue(args[2:]) + elif len(args) > 2: + bm.broker.create(modifier, args[2], config.getOptions()) + else: + Usage() + elif cmd == "del": + if modifier == "exchange": + bm.DelExchange(args[2:]) + elif modifier == 
"queue": + bm.DelQueue(args[2:]) + elif len(args) > 2: + bm.broker.delete(modifier, args[2], {}) + else: + Usage() + elif cmd == "bind": + bm.Bind(args[1:]) + elif cmd == "unbind": + bm.Unbind(args[1:]) + elif cmd == "reload-acl": + bm.ReloadAcl() + elif cmd == "list" and len(args) > 1: + # fetch objects + objects = bm.broker.list(modifier) + + # collect available attributes + attributes = [] + for o in objects: + for k in o.keys(): + if k == "name" and k not in attributes: + attributes.insert(0, k) + elif k not in attributes: + attributes.append(k) + + # determine which attributes to display + desired = [] + if len(config._list_properties): + for p in config._list_properties: + if p not in attributes: print "Warning: No such property '%s' for type '%s'" % (p, modifier) + else: desired.append(p) + elif modifier in DEFAULT_PROPERTIES: + desired = DEFAULT_PROPERTIES[modifier] + else: + desired = attributes[:6] + + # display + display = Display(prefix=" ") + headers = [Header(a) for a in desired] + rows = [tuple([_clean_ref(o.get(a, "n/a")) for a in desired]) for o in objects] + display.formattedTable("Objects of type '%s'" % modifier, headers, rows) + elif cmd == "log" and len (args) == 1: + print "Log level:", bm.broker.getLogLevel()["level"] + elif cmd == "log" and len (args) == 2: + bm.broker.setLogLevel(args[1]) + elif cmd == "shutdown": + try: + bm.broker._method("shutdown", {}) + except ConnectionError: + pass # Normal, the broker has been shut down! 
+ bm.conn = None # Don't try to close again + else: + Usage() + except KeyboardInterrupt: + print + except IOError, e: + print e + bm.Disconnect() + return 1 + except SystemExit, e: + bm.Disconnect() + return 1 + except Exception,e: + if e.__class__.__name__ != "Timeout": + # ignore Timeout exception, handle in the loop below + print "Failed: %s: %s" % (e.__class__.__name__, e) + bm.Disconnect() + return 1 + + while True: + # some commands take longer than the default amqp timeout to complete, + # so attempt to disconnect until successful, ignoring Timeouts + try: + bm.Disconnect(ignore=False) + break + except Exception, e: + if e.__class__.__name__ != "Timeout": + print "Failed: %s: %s" % (e.__class__.__name__, e) + return 1 + return config._returnCode + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/qpid/cpp/management/python/bin/qpid-config.bat b/qpid/cpp/management/python/bin/qpid-config.bat new file mode 100644 index 0000000000..0ab000f5d3 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-config.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-config %* diff --git a/qpid/cpp/management/python/bin/qpid-ha b/qpid/cpp/management/python/bin/qpid-ha new file mode 100755 index 0000000000..1c07658d34 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-ha @@ -0,0 +1,299 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
class ExitStatus(Exception):
    """Raised when a command wants the script to exit with a specific status."""
    def __init__(self, status):
        self.status = status

def find_qpidd_conf():
    """Return the path to the local qpidd.conf file, or None if it is not found."""
    p = os.path
    prefix, bin = p.split(p.dirname(__file__))
    # Only a standard install layout (.../bin alongside .../etc) is probed.
    if bin == "bin":
        conf = p.join(prefix, "etc", "qpid", "qpidd.conf")
        if p.isfile(conf):
            return conf
    return None
+ """ + + commands = [] + + def __init__(self, name, help, arg_names=[], connect_agent=True): + """@param connect_agent true if we should establish a QMF agent connection""" + Command.commands.append(self) + self.name = name + self.connect_agent = connect_agent + self.arg_names = arg_names + usage="%s [options] %s\n\n%s"%(name, " ".join(arg_names), help) + self.help = help + self.op=optparse.OptionParser(usage) + common = optparse.OptionGroup(self.op, "Broker connection options") + def help_default(what): return " (Default %s)"%DEFAULTS[what] + common.add_option("-b", "--broker", metavar="
", help="Address of qpidd broker with syntax: [username/password@] hostname | ip-address [:]"+help_default("broker")) + common.add_option("--timeout", type="float", metavar="", help="Give up if the broker does not respond within the timeout. 0 means wait forever"+help_default("timeout")) + common.add_option("--sasl-mechanism", metavar="", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override") + common.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + common.add_option("--ssl-certificate", metavar="", help="Client SSL certificate (PEM Format)") + common.add_option("--ssl-key", metavar="", help="Client SSL private key (PEM Format)") + common.add_option("--config", metavar="", help="Read default connection configuration from the qpidd.conf broker configuration file. Defaults are overridden by command-line options.)") + self.op.add_option_group(common) + + def connect(self, opts): + conn_options = {} + if not opts.broker: + opts.broker = DEFAULTS["broker"] + # If we are connecting locally, use local qpidd.conf by default + if not opts.config: opts.config = find_qpidd_conf() + url = URL(opts.broker) + if opts.config: # Use broker config file for defaults + config = parse_qpidd_conf(opts.config) + if not url.user: url.user = config.get("ha-username") + if not url.password: url.password = config.get("ha-password") + if not url.port: url.port = config.get("port") + opts.broker = str(url) + if not opts.sasl_mechanism: opts.sasl_mechanism = config.get("ha-mechanism") + if not opts.timeout: + timeout = config.get("ha-heartbeat-interval") or config.get("link-heartbeat-interval") + if timeout: opts.timeout = float(timeout) + else: # Use DEFAULTS + if not opts.timeout: opts.timeout = DEFAULTS["timeout"] + if opts.sasl_mechanism: conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if 
opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + self.op.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + if opts.timeout: + conn_options['timeout'] = opts.timeout + conn_options['heartbeat'] = int(math.ceil(opts.timeout/2)) + connection = Connection.establish(opts.broker, **conn_options) + qmf_broker = self.connect_agent and BrokerAgent(connection) + ha_broker = self.connect_agent and qmf_broker.getHaBroker() + return (connection, qmf_broker, ha_broker) + + def all_brokers(self, ha_broker, opts, func): + """@return: List of (broker_addr, ha_broker) for all brokers in the cluster. + Returns (broker_addr, Exception) if an exception is raised accessing a broker. + """ + # The brokersUrl setting is not in python URL format, simpler parsing here. 
+ result = [] + brokers = filter(None, re.sub(r'(^amqps?:)|(tcp:)', "", ha_broker.brokersUrl).split(",")) + if brokers and opts.all: + if "@" in opts.broker: userpass = opts.broker.split("@")[0] + else: userpass = None + for b in brokers: + if userpass and not "@" in b: opts.broker = userpass+"@"+b + else: opts.broker = b + try: + connection, qmf_broker, ha_broker = self.connect(opts) + func(ha_broker, b) + except Exception,e: + func(ha_broker, b, e) + else: + func(ha_broker) + + def execute(self, args): + opts, args = self.op.parse_args(args) + if len(args) != len(self.arg_names)+1: + self.op.print_help() + raise Exception("Wrong number of arguments") + self.connection, qmf_broker, ha_broker = self.connect(opts) + if self.connect_agent and not ha_broker: + raise Exception("HA module is not loaded on broker at %s" % opts.broker) + try: self.do_execute(qmf_broker, ha_broker, opts, args) + finally: self.connection.close() + + def do_execute(self, qmf_broker, opts, args): + raise Exception("Command '%s' is not yet implemented"%self.name) + +class ManagerCommand(Command): + """ + Base for commands that should only be used by a cluster manager tool that ensures + cluster consistency. + """ + + manager_commands = [] # Cluster manager commands + + def __init__(self, name, help, arg_names=[], connect_agent=True): + """@param connect_agent true if we should establish a QMF agent connection""" + super(ManagerCommand, self).__init__(name, "[Cluster manager only] "+help, arg_names, connect_agent) + self.commands.remove(self) # Not a user command + self.manager_commands.append(self) + + +class PingCmd(Command): + def __init__(self): + Command.__init__(self, "ping","Check if the broker is alive and responding", connect_agent=False) + def do_execute(self, qmf_broker, ha_broker, opts, args): + self.connection.session() # Make sure we can establish a session. 
+PingCmd() + +class PromoteCmd(ManagerCommand): + def __init__(self): + super(PromoteCmd, self).__init__("promote", "Promote a backup broker to primary. This command should *only* be used by a cluster manager (such as rgmanager) that ensures only one broker is primary at a time. Promoting more than one broker to primary at the same time will make the cluster inconsistent and will cause data loss and unexpected behavior.") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + qmf_broker._method("promote", {}, HA_BROKER, timeout=opts.timeout) + +PromoteCmd() + + +class StatusCmd(Command): + def __init__(self): + Command.__init__(self, "status", "Print HA status") + self.op.add_option( + "--expect", metavar="", + help="Don't print status. Return 0 if it matches , 1 otherwise") + self.op.add_option( + "--is-primary", action="store_true", default=False, + help="Don't print status. Return 0 if the broker is primary, 1 otherwise") + self.op.add_option( + "--all", action="store_true", default=False, + help="Print status for all brokers in the cluster") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + if opts.is_primary: + if not ha_broker.status in ["active", "recovering"]: raise ExitStatus(1) + return + if opts.expect: + if opts.expect != ha_broker.status: raise ExitStatus(1) + return + + def status(hb, b=None, ex=None): + if ex: print b, ex + elif b: print b, hb.status + else: print hb.status + self.all_brokers(ha_broker, opts, status) + +StatusCmd() + +class ReplicateCmd(Command): + def __init__(self): + Command.__init__(self, "replicate", "Set up replication from on to on the current broker.", ["", ""]) + def do_execute(self, qmf_broker, ha_broker, opts, args): + qmf_broker._method("replicate", {"broker":args[1], "queue":args[2]}, HA_BROKER, timeout=opts.timeout) +ReplicateCmd() + +class QueryCmd(Command): + def __init__(self): + Command.__init__(self, "query", "Print HA configuration and status") + self.op.add_option( + "--all", 
action="store_true", default=False, + help="Print configuration and status for all brokers in the cluster") + + def do_execute(self, qmf_broker, ha_broker, opts, args): + def query(hb, b=None, ex=None): + if ex: + print "%s %s\n" % (b, ex) + else: + if b: + print "%-20s %s"%("Address:", b) + for x in [("Status:", hb.status), + ("Broker ID:", hb.systemId), + ("Brokers URL:", hb.brokersUrl), + ("Public URL:", hb.publicUrl), + ("Replicate: ", hb.replicateDefault) + ]: + print "%-20s %s"%x + if b: print + self.all_brokers(ha_broker, opts, query) + + +QueryCmd() + +def print_usage(prog): + print "usage: %s []\n\nCommands are:\n"%prog + for cmd in Command.commands: + print " %-12s %s."%(cmd.name, cmd.help.split(".")[0]) + print "\nFor help with a command type: %s --help\n"%prog + +def find_command(args, commands): + """Find a command among the arguments and options""" + for arg in args: + cmds = [cmd for cmd in commands if cmd.name == arg] + if cmds: return cmds[0] + return None + +def main_except(argv): + """This version of main raises exceptions""" + args = argv[1:] + commands = Command.commands + if "--cluster-manager" in args: + commands += ManagerCommand.manager_commands + args.remove("--cluster-manager") + if len(args) and args[0] in ['help', '--help', '-help', '-h', 'help-all', '--help-all']: + if 'help-all' in args[0]: + for c in commands: c.op.print_help(); print + else: + print_usage(os.path.basename(argv[0])); + else: + command = find_command(args, commands) + if command: + command.execute(args) + else: + # Check for attempt to use a manager command without --cluster-manager + command = find_command(args, ManagerCommand.manager_commands) + if command: + message="""'%s' should only be called by the cluster manager. +Incorrect use of '%s' will cause cluster malfunction. +To call from a cluster manager use '%s --cluster-manager'. 
""" + raise Exception(message%((command.name,)*3)) + else: + print_usage(os.path.basename(argv[0])); + raise Exception("No valid command") + +def main(argv): + try: + main_except(argv) + return 0 + except ExitStatus, e: + return e.status + except Exception, e: + print "%s: %s"%(type(e).__name__, e) + return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/qpid/cpp/management/python/bin/qpid-ha.bat b/qpid/cpp/management/python/bin/qpid-ha.bat new file mode 100644 index 0000000000..29a77a0fb4 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-ha.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-ha %* diff --git a/qpid/cpp/management/python/bin/qpid-printevents b/qpid/cpp/management/python/bin/qpid-printevents new file mode 100755 index 0000000000..f702ca91e8 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-printevents @@ -0,0 +1,191 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import os +import optparse +import sys +from optparse import IndentedHelpFormatter +from time import time, strftime, gmtime, sleep +from threading import Lock, Condition, Thread +from qpid.messaging import Connection +import qpid.messaging.exceptions + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpidtoollibs.broker import EventHelper + + +class Printer(object): + """ + This class serializes printed lines so that events coming from different + threads don't overlap each other. + """ + def __init__(self): + self.lock = Lock() + + def pr(self, text): + self.lock.acquire() + try: + print text + finally: + self.lock.release() + sys.stdout.flush() + + +class EventReceiver(Thread): + """ + One instance of this class is created for each broker that is being monitored. + This class does not use the "reconnect" option because it needs to report as + events when the connection is established and when it's lost. 
+ """ + def __init__(self, printer, url, options): + Thread.__init__(self) + self.printer = printer + self.url = url + self.options = options + self.running = True + self.helper = EventHelper() + + def cancel(self): + self.running = False + + def run(self): + isOpen = False + while self.running: + try: + conn = Connection.establish(self.url, **self.options) + isOpen = True + self.printer.pr(strftime("%c", gmtime(time())) + " NOTIC qpid-printevents:brokerConnected broker=%s" % self.url) + + sess = conn.session() + rx = sess.receiver(self.helper.eventAddress()) + + while self.running: + try: + msg = rx.fetch(1) + event = self.helper.event(msg) + self.printer.pr(event.__repr__()) + sess.acknowledge() + except qpid.messaging.exceptions.Empty: + pass + + except Exception, e: + if isOpen: + self.printer.pr(strftime("%c", gmtime(time())) + " NOTIC qpid-printevents:brokerDisconnected broker=%s" % self.url) + isOpen = False + sleep(1) + + +class JHelpFormatter(IndentedHelpFormatter): + """ + Format usage and description without stripping newlines from usage strings + """ + def format_usage(self, usage): + return usage + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +_usage = "%prog [options] [broker-addr]..." + +_description = \ +""" +Collect and print events from one or more Qpid message brokers. + +If no broker-addr is supplied, %prog connects to 'localhost:5672'. + +[broker-addr] syntax: + + [username/password@] hostname + ip-address [:] + +Examples: + +$ %prog localhost:5672 +$ %prog 10.1.1.7:10000 +$ %prog guest/guest@broker-host:10000 +""" + +def main(argv=None): + p = optparse.OptionParser(usage=_usage, description=_description, formatter=JHelpFormatter()) + p.add_option("--heartbeats", action="store_true", default=False, help="Use heartbeats.") + p.add_option("--sasl-mechanism", action="store", type="string", metavar="", help="SASL mechanism for authentication (e.g. 
EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + p.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + p.add_option("--ssl-certificate", action="store", type="string", metavar="", help="Client SSL certificate (PEM Format)") + p.add_option("--ssl-key", action="store", type="string", metavar="", help="Client SSL private key (PEM Format)") + p.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + + options, arguments = p.parse_args(args=argv) + if len(arguments) == 0: + arguments.append("localhost") + + brokers = [] + conn_options = {} + props = {} + printer = Printer() + + if options.sasl_mechanism: + conn_options['sasl_mechanisms'] = options.sasl_mechanism + if options.sasl_service_name: + conn_options['sasl_service'] = options.sasl_service_name + if options.ssl_certificate: + conn_options['ssl_certfile'] = options.ssl_certificate + if options.ssl_key: + if not options.ssl_certificate: + p.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = options.ssl_key + if options.ha_admin: + props['qpid.ha-admin'] = 1 + if options.heartbeats: + props['heartbeat'] = 5 + + if len(props) > 0: + conn_options['client_properties'] = props + + try: + try: + for host in arguments: + er = EventReceiver(printer, host, conn_options) + brokers.append(er) + er.start() + + while (True): + sleep(10) + + except KeyboardInterrupt: + print + return 0 + + except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + return 1 + finally: + for b in brokers: + b.cancel() + for b in brokers: + b.join() + +if __name__ == '__main__': + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-printevents.bat b/qpid/cpp/management/python/bin/qpid-printevents.bat new file mode 100644 index 0000000000..3486bed39d --- /dev/null +++ 
class QlsAnalyzerArgParser(argparse.ArgumentParser):
    """
    Command-line argument parser for the QLS analyzer.
    """
    def __init__(self):
        argparse.ArgumentParser.__init__(self, description='Qpid Linear Store Analyzer', prog='qpid-qls-analyze')
        self.add_argument('qls_dir', metavar='DIR',
                          help='Qpid Linear Store (QLS) directory to be analyzed')
        # Fix: user-facing typo 'Emtpy' -> 'Empty'.
        self.add_argument('--efp', action='store_true',
                          help='Analyze the Empty File Pool (EFP) and show stats')
        self.add_argument('--show-recovered-recs', action='store_true',
                          help='Show only recovered records')
        self.add_argument('--show-recovery-recs', action='store_true',
                          help='Show material records found during recovery')
        self.add_argument('--show-all-recs', action='store_true',
                          help='Show all records (including fillers) found during recovery')
        self.add_argument('--show-xids', action='store_true',
                          help='Show xid as hex number, otherwise show only xid length. Only has effect when records are shown')
# TODO: Add ability to show xid as an index rather than a value, helps analysis when xid is a long value with
# small differences which cannot easily be seen when looking at an output. Also prints a table of indices vs xid values.
#        self.add_argument('--show-xid-index', action='store_true',
#                          help='Show xids by index rather than by their value. Useful for long xids. Prints xid index table')
        self.add_argument('--show-data', action='store_true',
                          help='Show data, otherwise show only data length. Only has effect when records are shown')
        self.add_argument('--stats', action='store_true',
                          help='Print journal record stats')
        self.add_argument('--txtest', action='store_true',
                          help='Show qpid-txtest message number as the message content when viewing records. Only has effect when records are shown')
        self.add_argument('--txn', action='store_true',
                          help='Reconcile incomplete transactions')
        self.add_argument('--version', action='version',
                          version='%(prog)s ' + QqpdLinearStoreAnalyzer.QLS_ANALYZE_VERSION)
    def parse_args(self, args=None, namespace=None):
        """Parse arguments, leaving a hook for extra cross-flag validation."""
        args = argparse.ArgumentParser.parse_args(self, args, namespace)
        # If required, perform additional validity checks here, raise errors if req'd
        return args

# NOTE(review): class name 'Qqpd...' looks like a typo for 'Qpid...'; kept
# as-is because the --version action above references it by name.
class QqpdLinearStoreAnalyzer(object):
    """
    Top-level store analyzer. Will analyze the directory in args.qls_dir as the top-level Qpid Linear Store (QLS)
    directory. The following may be analyzed:
    * The Empty File Pool (if --efp is specified in the arguments)
    * The Linear Store
    * The Transaction Prepared List (TPL)
    """
    QLS_ANALYZE_VERSION = '1.0'
    def __init__(self):
        self.args = None
        self._process_args()
        self.qls_dir = os.path.abspath(self.args.qls_dir)
        self.efp_manager = qlslibs.efp.EfpManager(self.qls_dir, None)
        self.jrnl_recovery_mgr = qlslibs.analyze.JournalRecoveryManager(self.qls_dir, self.args)
    def _process_args(self):
        """ Create arg parser and process args """
        parser = QlsAnalyzerArgParser()
        self.args = parser.parse_args()
        if not os.path.exists(self.args.qls_dir):
            parser.error('Journal path "%s" does not exist' % self.args.qls_dir)
    def report(self):
        """ Create a report on the linear store previously analyzed using analyze() """
        if self.args.efp:
            self.efp_manager.report()
        self.jrnl_recovery_mgr.report()
    def run(self):
        """ Run the analyzer, which reads and analyzes the linear store """
        if self.args.efp:
            self.efp_manager.run(None)
        self.jrnl_recovery_mgr.run()

#==============================================================================
# main program
#==============================================================================

if __name__ == "__main__":
    M = QqpdLinearStoreAnalyzer()
    M.run()
    M.report()
a/qpid/cpp/management/python/bin/qpid-queue-stats b/qpid/cpp/management/python/bin/qpid-queue-stats new file mode 100755 index 0000000000..ca78f9b602 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-queue-stats @@ -0,0 +1,159 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +import optparse +import sys +import re +import socket +import qpid +from threading import Condition +from qmf.console import Session, Console +from qpid.peer import Closed +from qpid.connection import Connection, ConnectionFailed +from time import sleep + +class BrokerManager(Console): + def __init__(self, host, conn_options): + self.url = host + self.objects = {} + self.filter = None + self.session = Session(self, rcvEvents=False, rcvHeartbeats=False, + userBindings=True, manageConnections=True) + self.broker = self.session.addBroker(self.url, **conn_options) + self.firstError = True + + def setFilter(self,filter): + self.filter = filter + + def brokerConnected(self, broker): + if not self.firstError: + print "*** Broker connected" + self.firstError = False + + def brokerDisconnected(self, broker): + print "*** Broker connection lost - %s, retrying..." 
% broker.getError() + self.firstError = False + self.objects.clear() + + def objectProps(self, broker, record): + className = record.getClassKey().getClassName() + if className != "queue": + return + + id = record.getObjectId().__repr__() + if id not in self.objects: + self.objects[id] = (record.name, None, None) + + def objectStats(self, broker, record): + className = record.getClassKey().getClassName() + if className != "queue": + return + + id = record.getObjectId().__repr__() + if id not in self.objects: + return + + (name, first, last) = self.objects[id] + if first == None: + self.objects[id] = (name, record, None) + return + + if len(self.filter) > 0 : + match = False + + for x in self.filter: + if x.match(name): + match = True + break + if match == False: + return + + if last == None: + lastSample = first + else: + lastSample = last + + self.objects[id] = (name, first, record) + + deltaTime = float (record.getTimestamps()[0] - lastSample.getTimestamps()[0]) + if deltaTime < 1000000000.0: + return + enqueueRate = float (record.msgTotalEnqueues - lastSample.msgTotalEnqueues) / \ + (deltaTime / 1000000000.0) + dequeueRate = float (record.msgTotalDequeues - lastSample.msgTotalDequeues) / \ + (deltaTime / 1000000000.0) + print "%-41s%10.2f%11d%13.2f%13.2f" % \ + (name, deltaTime / 1000000000, record.msgDepth, enqueueRate, dequeueRate) + sys.stdout.flush() + + + def Display (self): + self.session.bindClass("org.apache.qpid.broker", "queue") + print "Queue Name Sec Depth Enq Rate Deq Rate" + print "========================================================================================" + sys.stdout.flush() + try: + while True: + sleep (1) + if self.firstError and self.broker.getError(): + self.firstError = False + print "*** Error: %s, retrying..." 
% self.broker.getError() + except KeyboardInterrupt: + print + self.session.delBroker(self.broker) + +def main(argv=None): + p = optparse.OptionParser() + p.add_option('--broker-address','-a', default='localhost' , help='broker-addr is in the form: [username/password@] hostname | ip-address [:] \n ex: localhost, 10.1.1.7:10000, broker-host:10000, guest/guest@localhost') + p.add_option('--filter','-f' ,default=None ,help='a list of comma separated queue names (regex are accepted) to show') + p.add_option("--sasl-mechanism", action="store", type="string", metavar="", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + p.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + p.add_option("--ssl-certificate", action="store", type="string", metavar="", help="Client SSL certificate (PEM Format)") + p.add_option("--ssl-key", action="store", type="string", metavar="", help="Client SSL private key (PEM Format)") + + options, arguments = p.parse_args(args=argv) + + conn_options = {} + if options.sasl_mechanism: + conn_options['mechanisms'] = options.sasl_mechanism + if options.sasl_service_name: + conn_options['service'] = options.sasl_service_name + if options.ssl_certificate: + conn_options['ssl_certfile'] = options.ssl_certificate + if options.ssl_key: + if not options.ssl_certificate: + p.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = options.ssl_key + + host = options.broker_address + filter = [] + if options.filter != None: + for s in options.filter.split(","): + filter.append(re.compile(s)) + + bm = BrokerManager(host, conn_options) + bm.setFilter(filter) + bm.Display() + +if __name__ == '__main__': + sys.exit(main()) + diff --git a/qpid/cpp/management/python/bin/qpid-queue-stats.bat 
b/qpid/cpp/management/python/bin/qpid-queue-stats.bat new file mode 100644 index 0000000000..24290d46b3 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-queue-stats.bat @@ -0,0 +1,3 @@ +@echo off +python %~dp0\qpid-queue-stats %* + diff --git a/qpid/cpp/management/python/bin/qpid-receive b/qpid/cpp/management/python/bin/qpid-receive new file mode 100755 index 0000000000..f14df277ac --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-receive @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import optparse, sys, time +import statistics +from qpid.messaging import * + +SECOND = 1000 +TIME_SEC = 1000000000 + +op = optparse.OptionParser(usage="usage: %prog [options]", description="Drains messages from the specified address") +op.add_option("-b", "--broker", default="localhost:5672", type="str", help="url of broker to connect to") +op.add_option("-a", "--address", type="str", help="address to receive from") +op.add_option("--connection-options", default={}, help="options for the connection") +op.add_option("-m", "--messages", default=0, type="int", help="stop after N messages have been received, 0 means no limit") +op.add_option("--timeout", default=0, type="int", help="timeout in seconds to wait before exiting") +op.add_option("-f", "--forever", default=False, action="store_true", help="ignore timeout and wait forever") +op.add_option("--ignore-duplicates", default=False, action="store_true", help="Detect and ignore duplicates (by checking 'sn' header)") +op.add_option("--verify-sequence", default=False, action="store_true", help="Verify there are no gaps in the message sequence (by checking 'sn' header)") +op.add_option("--check-redelivered", default=False, action="store_true", help="Fails with exception if a duplicate is not marked as redelivered (only relevant when ignore-duplicates is selected)") +op.add_option("--capacity", default=1000, type="int", help="size of the senders outgoing message queue") +op.add_option("--ack-frequency", default=100, type="int", help="Ack frequency (0 implies none of the messages will get accepted)") +op.add_option("--tx", default=0, type="int", help="batch size for transactions (0 implies transaction are not used)") +op.add_option("--rollback-frequency", default=0, type="int", help="rollback frequency (0 implies no transaction will be rolledback)") +op.add_option("--print-content", type="str", default="yes", help="print out message content") +op.add_option("--print-headers", type="str", default="no", help="print 
out message headers") +op.add_option("--failover-updates", default=False, action="store_true", help="Listen for membership updates distributed via amq.failover") +op.add_option("--report-total", default=False, action="store_true", help="Report total throughput statistics") +op.add_option("--report-every", default=0, type="int", help="Report throughput statistics every N messages") +op.add_option("--report-header", type="str", default="yes", help="Headers on report") +op.add_option("--ready-address", type="str", help="send a message to this address when ready to receive") +op.add_option("--receive-rate", default=0, type="int", help="Receive at rate of N messages/second. 0 means receive as fast as possible") +#op.add_option("--help", default=False, action="store_true", help="print this usage statement") + +def getTimeout(timeout, forever): + if forever: + return None + else: + return SECOND*timeout + + +EOS = "eos" +SN = "sn" + +# Check for duplicate or dropped messages by sequence number +class SequenceTracker: + def __init__(self, opts): + self.opts = opts + self.lastSn = 0 + + # Return True if the message should be procesed, false if it should be ignored. 
+ def track(self, message): + if not(self.opts.verify_sequence) or (self.opts.ignore_duplicates): + return True + sn = message.properties[SN] + duplicate = (sn <= lastSn) + dropped = (sn > lastSn+1) + if self.opts.verify_sequence and dropped: + raise Exception("Gap in sequence numbers %s-%s" %(lastSn, sn)) + ignore = (duplicate and self.opts.ignore_duplicates) + if ignore and self.opts.check_redelivered and (not msg.redelivered): + raise Exception("duplicate sequence number received, message not marked as redelivered!") + if not duplicate: + lastSn = sn + return (not(ignore)) + + +def main(): + opts, args = op.parse_args() + if not opts.address: + raise Exception("Address must be specified!") + + broker = opts.broker + address = opts.address + connection = Connection(opts.broker, **opts.connection_options) + + try: + connection.open() + if opts.failover_updates: + auto_fetch_reconnect_urls(connection) + session = connection.session(transactional=(opts.tx)) + receiver = session.receiver(opts.address) + if opts.capacity > 0: + receiver.capacity = opts.capacity + msg = Message() + count = 0 + txCount = 0 + sequenceTracker = SequenceTracker(opts) + timeout = getTimeout(opts.timeout, opts.forever) + done = False + stats = statistics.ThroughputAndLatency() + reporter = statistics.Reporter(opts.report_every, opts.report_header == "yes", stats) + + if opts.ready_address is not None: + session.sender(opts.ready_address).send(msg) + if opts.tx > 0: + session.commit() + # For receive rate calculation + start = time.time()*TIME_SEC + interval = 0 + if opts.receive_rate > 0: + interval = TIME_SEC / opts.receive_rate + + replyTo = {} # a dictionary of reply-to address -> sender mapping + + while (not done): + try: + msg = receiver.fetch(timeout=timeout) + reporter.message(msg) + if sequenceTracker.track(msg): + if msg.content == EOS: + done = True + else: + count+=1 + if opts.print_headers == "yes": + if msg.subject is not None: + print "Subject: %s" %msg.subject + if 
msg.reply_to is not None: + print "ReplyTo: %s" %msg.reply_to + if msg.correlation_id is not None: + print "CorrelationId: %s" %msg.correlation_id + if msg.user_id is not None: + print "UserId: %s" %msg.user_id + if msg.ttl is not None: + print "TTL: %s" %msg.ttl + if msg.priority is not None: + print "Priority: %s" %msg.priority + if msg.durable: + print "Durable: true" + if msg.redelivered: + print "Redelivered: true" + print "Properties: %s" %msg.properties + print + if opts.print_content == "yes": + print msg.content + if (opts.messages > 0) and (count >= opts.messages): + done = True + # end of "if sequenceTracker.track(msg):" + if (opts.tx > 0) and (count % opts.tx == 0): + txCount+=1 + if (opts.rollback_frequency > 0) and (txCount % opts.rollback_frequency == 0): + session.rollback() + else: + session.commit() + elif (opts.ack_frequency > 0) and (count % opts.ack_frequency == 0): + session.acknowledge() + if msg.reply_to is not None: # Echo message back to reply-to address. + if msg.reply_to not in replyTo: + replyTo[msg.reply_to] = session.sender(msg.reply_to) + replyTo[msg.reply_to].capacity = opts.capacity + replyTo[msg.reply_to].send(msg) + if opts.receive_rate > 0: + delay = start + count*interval - time.time()*TIME_SEC + if delay > 0: + time.sleep(delay) + # Clear out message properties & content for next iteration. 
+ msg = Message() + except Empty: # no message fetched => break the while cycle + break + # end of while cycle + if opts.report_total: + reporter.report() + if opts.tx > 0: + txCount+=1 + if opts.rollback_frequency and (txCount % opts.rollback_frequency == 0): + session.rollback() + else: + session.commit() + else: + session.acknowledge() + session.close() + connection.close() + except Exception,e: + print e + connection.close() + +if __name__ == "__main__": main() diff --git a/qpid/cpp/management/python/bin/qpid-route b/qpid/cpp/management/python/bin/qpid-route new file mode 100755 index 0000000000..f51d2493e9 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-route @@ -0,0 +1,635 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +from optparse import OptionParser, OptionGroup, IndentedHelpFormatter +import sys +import os +import locale +from qmf.console import Session, BrokerURL +from time import sleep + +usage = """ +Usage: qpid-route [OPTIONS] dynamic add [tag] [exclude-list] [mechanism] + qpid-route [OPTIONS] dynamic del + + qpid-route [OPTIONS] route add [tag] [exclude-list] [mechanism] + qpid-route [OPTIONS] route del + qpid-route [OPTIONS] queue add [mechanism] + qpid-route [OPTIONS] queue del + qpid-route [OPTIONS] route list [] + qpid-route [OPTIONS] route flush [] + qpid-route [OPTIONS] route map [] + + qpid-route [OPTIONS] link add [mechanism] + qpid-route [OPTIONS] link del + qpid-route [OPTIONS] link list []""" + +description = """ +ADDRESS syntax: + + [username/password@] hostname + ip-address [:]""" + +def Usage(): + print usage + +class Config: + def __init__(self): + self._verbose = False + self._quiet = False + self._durable = False + self._dellink = False + self._srclocal = False + self._transport = "tcp" + self._ack = 0 + self._credit = 0xFFFFFFFF # unlimited + self._connTimeout = 10 + self._conn_options = {} + +config = Config() + +class JHelpFormatter(IndentedHelpFormatter): + """Format usage and description without stripping newlines from usage strings + """ + + def format_usage(self, usage): + return usage + + + def format_description(self, description): + if description: + return description + "\n" + else: + return "" + +def OptionsAndArguments(argv): + parser = OptionParser(usage=usage, + description=description, + formatter=JHelpFormatter()) + + parser.add_option("--timeout", action="store", type="int", default=10, metavar="", help="Maximum time to wait for broker connection (in seconds)") + parser.add_option("-v", "--verbose", action="store_true", help="Verbose output") + parser.add_option("-q", "--quiet", action="store_true", help="Quiet output, don't print duplicate warnings") + parser.add_option("-d", "--durable", action="store_true", help="Added 
configuration shall be durable") + + parser.add_option("-e", "--del-empty-link", action="store_true", help="Delete link after deleting last route on the link") + parser.add_option("-s", "--src-local", action="store_true", help="Make connection to source broker (push route)") + + parser.add_option("--ack", action="store", type="int", metavar="", help="Acknowledge transfers over the bridge in batches of N") + parser.add_option("--credit", action="store", type="int", default=0xFFFFFFFF, metavar="", + help="Maximum number of messages a sender can have outstanding (0=unlimited)") + parser.add_option("-t", "--transport", action="store", type="string", default="tcp", metavar="", help="Transport to use for links, defaults to tcp") + + parser.add_option("--client-sasl-mechanism", action="store", type="string", metavar="", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). Used when the client connects to the destination broker (not for authentication between the source and destination brokers - that is specified using the [mechanisms] argument to 'add route'). 
SASL automatically picks the most secure available mechanism - use this option to override.") + parser.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + parser.add_option("--ssl-certificate", action="store", type="string", metavar="", help="Client SSL certificate (PEM Format)") + parser.add_option("--ssl-key", action="store", type="string", metavar="", help="Client SSL private key (PEM Format)") + parser.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + opts, encArgs = parser.parse_args(args=argv) + + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + if opts.timeout: + config._connTimeout = opts.timeout + if config._connTimeout == 0: + config._connTimeout = None + + if opts.verbose: + config._verbose = True + + if opts.quiet: + config._quiet = True + + if opts.durable: + config._durable = True + + if opts.del_empty_link: + config._dellink = True + + if opts.src_local: + config._srclocal = True + + if opts.transport: + config._transport = opts.transport + + if opts.ha_admin: + config._conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + if opts.ack: + config._ack = opts.ack + + config._credit = opts.credit + + if opts.client_sasl_mechanism: + config._conn_options['mechanisms'] = opts.client_sasl_mechanism + if opts.sasl_service_name: + config._conn_options['service'] = opts.sasl_service_name + + if opts.ssl_certificate: + config._conn_options['ssl_certfile'] = opts.ssl_certificate + + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + config._conn_options['ssl_keyfile'] = opts.ssl_key + + return args + + +class RouteManager: + def __init__(self, localBroker): + self.brokerList = {} + self.local = BrokerURL(localBroker) + self.remote = None + self.qmf = Session() + self.broker = self.qmf.addBroker(localBroker, 
config._connTimeout, **config._conn_options) + self.broker._waitForStable() + self.agent = self.broker.getBrokerAgent() + + def disconnect(self): + try: + if self.broker: + self.qmf.delBroker(self.broker) + self.broker = None + while len(self.brokerList): + b = self.brokerList.popitem() + if b[0] != self.local.name(): + self.qmf.delBroker(b[1]) + except: + pass # ignore errors while shutting down + + def getLink(self): + links = self.agent.getObjects(_class="link") + for link in links: + if self.remote.match(link.host, link.port): + return link + return None + + def checkLink(self, link): + retry = 3 + while link is None or (link.state in ("Waiting", "Connecting", "Closing") and retry > 0): + sleep(1) + link = self.getLink() + retry -= 1 + + if link == None: + raise Exception("Link failed to create") + + if link.state == "Failed": + raise Exception("Link failed to create %s" % (link.lastError or "")) + elif config._verbose: + print "Link state is", link.state + + def addLink(self, remoteBroker, interbroker_mechanism=""): + self.remote = BrokerURL(remoteBroker) + if self.local.match(self.remote.host, self.remote.port): + raise Exception("Linking broker to itself is not permitted") + + brokers = self.agent.getObjects(_class="broker") + broker = brokers[0] + link = self.getLink() + if link == None: + res = broker.connect(self.remote.host, self.remote.port, config._durable, + interbroker_mechanism, self.remote.authName or "", self.remote.authPass or "", + config._transport) + + def delLink(self, remoteBroker): + self.remote = BrokerURL(remoteBroker) + brokers = self.agent.getObjects(_class="broker") + broker = brokers[0] + link = self.getLink() + if link == None: + raise Exception("Link not found") + + res = link.close() + if config._verbose: + print "Close method returned:", res.status, res.text + + def listLinks(self): + links = self.agent.getObjects(_class="link") + if len(links) == 0: + print "No Links Found" + else: + print + print "Host Port Transport Durable 
State Last Error" + print "=============================================================================" + for link in links: + print "%-16s%-8d%-13s%c %-18s%s" % \ + (link.host, link.port, link.transport, YN(link.durable), link.state, link.lastError) + + def mapRoutes(self): + print + print "Finding Linked Brokers:" + + self.brokerList[self.local.name()] = self.broker + print " %s:%s... Ok" % (self.local.host, self.local.port) + + added = True + while added: + added = False + links = self.qmf.getObjects(_class="link") + for link in links: + url = BrokerURL(host=link.host, port=link.port, user=self.broker.authUser, password=self.broker.authPass) + if url.name() not in self.brokerList: + print " %s:%s..." % (link.host, link.port) + try: + url.authName = self.local.authName + url.authPass = self.local.authPass + b = self.qmf.addBroker(url, config._connTimeout, **config._conn_options) + self.brokerList[url.name()] = b + added = True + print "Ok" + except Exception, e: + print e + + print + print "Dynamic Routes:" + bridges = self.qmf.getObjects(_class="bridge", dynamic=True) + fedExchanges = [] + for bridge in bridges: + if bridge.src not in fedExchanges: + fedExchanges.append(bridge.src) + if len(fedExchanges) == 0: + print " none found" + print + + for ex in fedExchanges: + print " Exchange %s:" % ex + pairs = [] + for bridge in bridges: + if bridge.src == ex: + link = bridge._linkRef_ + fromUrl = BrokerURL(host=link.host, port=link.port) + toUrl = bridge.getBroker().getUrl() + found = False + for pair in pairs: + if pair.matches(fromUrl, toUrl): + found = True + if not found: + pairs.append(RoutePair(fromUrl, toUrl)) + for pair in pairs: + print " %s" % pair + print + + print "Static Routes:" + bridges = self.qmf.getObjects(_class="bridge", dynamic=False) + if len(bridges) == 0: + print " none found" + print + + for bridge in bridges: + link = bridge._linkRef_ + fromUrl = "%s:%s" % (link.host, link.port) + toUrl = bridge.getBroker().getUrl() + leftType = "ex" + 
rightType = "ex" + if bridge.srcIsLocal: + arrow = "=>" + left = bridge.src + right = bridge.dest + if bridge.srcIsQueue: + leftType = "queue" + else: + arrow = "<=" + left = bridge.dest + right = bridge.src + if bridge.srcIsQueue: + rightType = "queue" + + if bridge.srcIsQueue: + print " %s(%s=%s) %s %s(%s=%s)" % \ + (toUrl, leftType, left, arrow, fromUrl, rightType, right) + else: + print " %s(%s=%s) %s %s(%s=%s) key=%s" % \ + (toUrl, leftType, left, arrow, fromUrl, rightType, right, bridge.key) + print + + while len(self.brokerList): + b = self.brokerList.popitem() + if b[0] != self.local.name(): + self.qmf.delBroker(b[1]) + + def addRoute(self, remoteBroker, exchange, routingKey, tag, excludes, interbroker_mechanism="", dynamic=False): + if dynamic and config._srclocal: + raise Exception("--src-local is not permitted on dynamic routes") + + self.addLink(remoteBroker, interbroker_mechanism) + link = self.getLink() + self.checkLink(link) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.key == routingKey and not bridge.srcIsQueue: + if not config._quiet: + raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, routingKey)) + sys.exit(0) + + if config._verbose: + print "Creating inter-broker binding..." 
+ res = link.bridge(config._durable, exchange, exchange, routingKey, tag, + excludes, False, config._srclocal, dynamic, + config._ack, credit=config._credit) + if res.status != 0: + raise Exception(res.text) + if config._verbose: + print "Bridge method returned:", res.status, res.text + + def addQueueRoute(self, remoteBroker, interbroker_mechanism, exchange, queue ): + self.addLink(remoteBroker, interbroker_mechanism) + link = self.getLink() + self.checkLink(link) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue: + if not config._quiet: + raise Exception("Duplicate Route - ignoring: %s(%s)" % (exchange, queue)) + sys.exit(0) + + if config._verbose: + print "Creating inter-broker binding..." + res = link.bridge(config._durable, queue, exchange, "", "", "", True, + config._srclocal, False, config._ack, credit=config._credit) + if res.status != 0: + raise Exception(res.text) + if config._verbose: + print "Bridge method returned:", res.status, res.text + + def delQueueRoute(self, remoteBroker, exchange, queue): + self.remote = BrokerURL(remoteBroker) + link = self.getLink() + if link == None: + if not config._quiet: + raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name())) + sys.exit(0) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and \ + bridge.dest == exchange and bridge.src == queue and bridge.srcIsQueue: + if config._verbose: + print "Closing bridge..." + res = bridge.close() + if res.status != 0: + raise Exception("Error closing bridge: %d - %s" % (res.status, res.text)) + if len(bridges) == 1 and config._dellink: + link = self.getLink() + if link == None: + sys.exit(0) + if config._verbose: + print "Last bridge on link, closing link..." 
+ res = link.close() + if res.status != 0: + raise Exception("Error closing link: %d - %s" % (res.status, res.text)) + sys.exit(0) + if not config._quiet: + raise Exception("Route not found") + + def delRoute(self, remoteBroker, exchange, routingKey, dynamic=False): + self.remote = BrokerURL(remoteBroker) + link = self.getLink() + if link == None: + if not config._quiet: + raise Exception("No link found from %s to %s" % (self.remote.name(), self.local.name())) + sys.exit(0) + + bridges = self.agent.getObjects(_class="bridge") + for bridge in bridges: + if bridge.linkRef == link.getObjectId() and bridge.dest == exchange and bridge.key == routingKey \ + and bridge.dynamic == dynamic: + if config._verbose: + print "Closing bridge..." + res = bridge.close() + if res.status != 0: + raise Exception("Error closing bridge: %d - %s" % (res.status, res.text)) + if len(bridges) == 1 and config._dellink: + link = self.getLink() + if link == None: + sys.exit(0) + if config._verbose: + print "Last bridge on link, closing link..." + res = link.close() + if res.status != 0: + raise Exception("Error closing link: %d - %s" % (res.status, res.text)) + return + if not config._quiet: + raise Exception("Route not found") + + def listRoutes(self): + links = self.qmf.getObjects(_class="link") + bridges = self.qmf.getObjects(_class="bridge") + + for bridge in bridges: + myLink = None + for link in links: + if bridge.linkRef == link.getObjectId(): + myLink = link + break + if myLink != None: + if bridge.dynamic: + keyText = "" + else: + keyText = bridge.key + print "%s %s:%d %s %s" % (self.local.name(), myLink.host, myLink.port, bridge.dest, keyText) + + def clearAllRoutes(self): + links = self.qmf.getObjects(_class="link") + bridges = self.qmf.getObjects(_class="bridge") + + for bridge in bridges: + if config._verbose: + myLink = None + for link in links: + if bridge.linkRef == link.getObjectId(): + myLink = link + break + if myLink != None: + print "Deleting Bridge: %s:%d %s %s... 
" % (myLink.host, myLink.port, bridge.dest, bridge.key), + res = bridge.close() + if res.status != 0: + print "Error: %d - %s" % (res.status, res.text) + elif config._verbose: + print "Ok" + + if config._dellink: + links = self.qmf.getObjects(_class="link") + for link in links: + if config._verbose: + print "Deleting Link: %s:%d... " % (link.host, link.port), + res = link.close() + if res.status != 0: + print "Error: %d - %s" % (res.status, res.text) + elif config._verbose: + print "Ok" + +class RoutePair: + def __init__(self, fromUrl, toUrl): + self.fromUrl = fromUrl + self.toUrl = toUrl + self.bidir = False + + def __repr__(self): + if self.bidir: + delimit = "<=>" + else: + delimit = " =>" + return "%s %s %s" % (self.fromUrl, delimit, self.toUrl) + + def matches(self, fromUrl, toUrl): + if fromUrl == self.fromUrl and toUrl == self.toUrl: + return True + if toUrl == self.fromUrl and fromUrl == self.toUrl: + self.bidir = True + return True + return False + + +def YN(val): + if val == 1: + return 'Y' + return 'N' + + +def main(argv=None): + + args = OptionsAndArguments(argv) + nargs = len(args) + if nargs < 2: + Usage() + return(-1) + + if nargs == 2: + localBroker = "localhost" + else: + if config._srclocal: + localBroker = args[3] + remoteBroker = args[2] + else: + localBroker = args[2] + if nargs > 3: + remoteBroker = args[3] + + group = args[0] + cmd = args[1] + + rm = None + try: + rm = RouteManager(localBroker) + if group == "link": + if cmd == "add": + if nargs < 3 or nargs > 5: + Usage() + return(-1) + interbroker_mechanism = "" + if nargs > 4: interbroker_mechanism = args[4] + rm.addLink(remoteBroker, interbroker_mechanism) + rm.checkLink(rm.getLink()) + elif cmd == "del": + if nargs != 4: + Usage() + return(-1) + rm.delLink(remoteBroker) + elif cmd == "list": + rm.listLinks() + + elif group == "dynamic": + if cmd == "add": + if nargs < 5 or nargs > 8: + Usage() + return(-1) + + tag = "" + excludes = "" + interbroker_mechanism = "" + if nargs > 5: tag = 
args[5] + if nargs > 6: excludes = args[6] + if nargs > 7: interbroker_mechanism = args[7] + rm.addRoute(remoteBroker, args[4], "", tag, excludes, interbroker_mechanism, dynamic=True) + elif cmd == "del": + if nargs != 5: + Usage() + return(-1) + else: + rm.delRoute(remoteBroker, args[4], "", dynamic=True) + + elif group == "route": + if cmd == "add": + if nargs < 6 or nargs > 9: + Usage() + return(-1) + + tag = "" + excludes = "" + interbroker_mechanism = "" + if nargs > 6: tag = args[6] + if nargs > 7: excludes = args[7] + if nargs > 8: interbroker_mechanism = args[8] + rm.addRoute(remoteBroker, args[4], args[5], tag, excludes, interbroker_mechanism, dynamic=False) + elif cmd == "del": + if nargs != 6: + Usage() + return(-1) + rm.delRoute(remoteBroker, args[4], args[5], dynamic=False) + elif cmd == "map": + rm.mapRoutes() + else: + if cmd == "list": + rm.listRoutes() + elif cmd == "flush": + rm.clearAllRoutes() + else: + Usage() + return(-1) + + elif group == "queue": + if nargs < 6 or nargs > 7: + Usage() + return(-1) + if cmd == "add": + interbroker_mechanism = "" + if nargs > 6: interbroker_mechanism = args[6] + rm.addQueueRoute(remoteBroker, interbroker_mechanism, exchange=args[4], queue=args[5] ) + elif cmd == "del": + rm.delQueueRoute(remoteBroker, exchange=args[4], queue=args[5]) + else: + Usage() + return(-1) + else: + Usage() + return(-1) + + except Exception,e: + if rm: + rm.disconnect() # try to release broker resources + print "Failed: %s - %s" % (e.__class__.__name__, e) + return 1 + + rm.disconnect() + return 0 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-route.bat b/qpid/cpp/management/python/bin/qpid-route.bat new file mode 100644 index 0000000000..ae8e9fe63c --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-route.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-route %* diff --git a/qpid/cpp/management/python/bin/qpid-send b/qpid/cpp/management/python/bin/qpid-send new file mode 100755 
index 0000000000..b0105e41a6 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-send @@ -0,0 +1,281 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import optparse, random, os, time, uuid +from qpid.messaging import * +import statistics + +EOS = "eos" +SN = "sn" +TS = "ts" + +TIME_SEC = 1000000000 +SECOND = 1000 + +def nameval(st): + idx = st.find("=") + if idx >= 0: + name = st[0:idx] + value = st[idx+1:] + else: + name = st + value = None + return name, value + + +op = optparse.OptionParser(usage="usage: %prog [options]", description="Spouts messages to the specified address") +op.add_option("-b", "--broker", default="localhost:5672", type="str", help="url of broker to connect to") +op.add_option("-a", "--address", type="str", help="address to send to") +op.add_option("--connection-options", default={}, help="options for the connection") +op.add_option("-m", "--messages", default=1, type="int", help="stop after N messages have been sent, 0 means no limit") +op.add_option("-i", "--id", type="str", help="use the supplied id instead of generating one") +op.add_option("--reply-to", type="str", help="specify reply-to address") +op.add_option("--send-eos", default=0, type="int", help="Send N EOS 
messages to mark end of input") +op.add_option("--durable", default=False, action="store_true", help="Mark messages as durable") +op.add_option("--ttl", default=0, type="int", help="Time-to-live for messages, in milliseconds") +op.add_option("--priority", default=0, type="int", help="Priority for messages (higher value implies higher priority)") +op.add_option("-P", "--property", default=[], action="append", type="str", help="specify message property") +op.add_option("--correlation-id", type="str", help="correlation-id for message") +op.add_option("--user-id", type="str", help="userid for message") +op.add_option("--content-string", type="str", help="use CONTENT as message content") +op.add_option("--content-size", default=0, type="int", help="create an N-byte message content") +op.add_option("-M", "--content-map", default=[], action="append", type="str", help="specify entry for map content") +op.add_option("--content-stdin", default=False, action="store_true", help="read message content from stdin, one line per message") +op.add_option("--capacity", default=1000, type="int", help="size of the senders outgoing message queue") +op.add_option("--tx", default=0, type="int", help="batch size for transactions (0 implies transaction are not used)") +op.add_option("--rollback-frequency", default=0, type="int", help="rollback frequency (0 implies no transaction will be rolledback)") +op.add_option("--failover-updates", default=False, action="store_true", help="Listen for membership updates distributed via amq.failover") +op.add_option("--report-total", default=False, action="store_true", help="Report total throughput statistics") +op.add_option("--report-every", default=0, type="int", help="Report throughput statistics every N messages") +op.add_option("--report-header", type="str", default="yes", help="Headers on report") +op.add_option("--send-rate", default=0, type="int", help="Send at rate of N messages/second. 
0 means send as fast as possible") +op.add_option("--flow-control", default=0, type="int", help="Do end to end flow control to limit queue depth to 2*N. 0 means no flow control.") +op.add_option("--sequence", type="str", default="yes", help="Add a sequence number messages property (required for duplicate/lost message detection)") +op.add_option("--timestamp", type="str", default="yes", help="Add a time stamp messages property (required for latency measurement)") +op.add_option("--group-key", type="str", help="Generate groups of messages using message header 'KEY' to hold the group identifier") +op.add_option("--group-prefix", default="GROUP-", type="str", help="Generate group identifers with 'STRING' prefix (if group-key specified)") +op.add_option("--group-size", default=10, type="int", help="Number of messages per a group (if group-key specified)") +op.add_option("--group-randomize-size", default=False, action="store_true", help="Randomize the number of messages per group to [1...group-size] (if group-key specified)") +op.add_option("--group-interleave", default=1, type="int", help="Simultaineously interleave messages from N different groups (if group-key specified)") + + +class ContentGenerator: + def setContent(self, msg): + return + +class GetlineContentGenerator(ContentGenerator): + def setContent(self, msg): + content = sys.stdin.readline() + got = (not line) + if (got): + msg.content = content + return got + +class FixedContentGenerator(ContentGenerator): + def __init__(self, content=None): + self.content = content + + def setContent(self, msg): + msg.content = self.content + return True + +class MapContentGenerator(ContentGenerator): + def __init__(self, opts=None): + self.opts = opts + + def setContent(self, msg): + self.content = {} + for e in self.opts.content_map: + name, val = nameval(p) + content[name] = val + msg.content = self.content + return True + + +# tag each generated message with a group identifer +class GroupGenerator: + def __init__(self, 
key, prefix, size, randomize, interleave): + groupKey = key + groupPrefix = prefix + groupSize = size + randomizeSize = randomize + groupSuffix = 0 + if (randomize > 0): + random.seed(os.getpid()) + + for i in range(0, interleave): + self.newGroup() + current = 0 + + def setGroupInfo(self, msg): + if (current == len(groups)): + current = 0 + my_group = groups[current] + msg.properties[groupKey] = my_group[id]; + # print "SENDING GROUPID=[%s]\n" % my_group[id] + my_group[count]=my_group[count]+1 + if (my_group[count] == my_group[size]): + self.newGroup() + del groups[current] + else: + current+=1 + + def newGroup(self): + groupId = "%s%s" % (groupPrefix, groupSuffix) + groupSuffix+=1 + size = groupSize + if (randomizeSize == True): + size = random.randint(1,groupSize) + # print "New group: GROUPID=["%s] size=%s" % (groupId, size) + groups.append({'id':groupId, 'size':size, 'count':0}) + + + +def main(): + opts, args = op.parse_args() + if not opts.address: + raise Exception("Address must be specified!") + + broker = opts.broker + address = opts.address + connection = Connection(opts.broker, **opts.connection_options) + + try: + connection.open() + if (opts.failover_updates): + auto_fetch_reconnect_urls(connection) + session = connection.session(transactional=(opts.tx)) + sender = session.sender(opts.address) + if (opts.capacity>0): + sender.capacity = opts.capacity + sent = 0 + txCount = 0 + stats = statistics.Throughput() + reporter = statistics.Reporter(opts.report_every, opts.report_header == "yes", stats) + + contentGen = ContentGenerator() + content = "" # auxiliary variable for determining content type of message - needs to be changed to {} for Map message + if opts.content_stdin: + opts.messages = 0 # Don't limit number of messages sent. 
+ contentGen = GetlineContentGenerator() + elif opts.content_map is not None: + contentGen = MapContentGenerator(opts) + content = {} + elif opts.content_size is not None: + contentGen = FixedContentGenerator('X' * opts.content_size) + else: + contentGen = FixedContentGenerator(opts.content_string) + if opts.group_key is not None: + groupGen = GroupGenerator(opts.group_key, opts.group_prefix, opts.group_size, opts.group_random_size, opts.group_interleave) + + msg = Message(content=content) + msg.durable = opts.durable + if opts.ttl: + msg.ttl = opts.ttl/1000.0 + if opts.priority: + msg.priority = opts.priority + if opts.reply_to is not None: + if opts.flow_control > 0: + raise Exception("Can't use reply-to and flow-control together") + msg.reply_to = opts.reply_to + if opts.user_id is not None: + msg.user_id = opts.user_id + if opts.correlation_id is not None: + msg.correlation_id = opts.correlation_id + for p in opts.property: + name, val = nameval(p) + msg.properties[name] = val + + start = time.time()*TIME_SEC + interval = 0 + if opts.send_rate > 0: + interval = TIME_SEC/opts.send_rate + + flowControlAddress = "flow-" + str(uuid.uuid1()) + ";{create:always,delete:always}" + flowSent = 0 + if opts.flow_control > 0: + flowControlReceiver = session.receiver(flowControlAddress) + flowControlReceiver.capacity = 2 + + while (contentGen.setContent(msg) == True): + sent+=1 + if opts.sequence == "yes": + msg.properties[SN] = sent + + if opts.flow_control > 0: + if (sent % opts.flow_control == 0): + msg.reply_to = flowControlAddress + flowSent+=1 + else: + msg.reply_to = "" # Clear the reply address. 
+ + if 'groupGen' in vars(): + groupGen.setGroupInfo(msg) + + if (opts.timestamp == "yes"): + msg.properties[TS] = int(time.time()*TIME_SEC) + sender.send(msg) + reporter.message(msg) + + if ((opts.tx > 0) and (sent % opts.tx == 0)): + txCount+=1 + if ((opts.rollbackFrequency > 0) and (txCount % opts.rollbackFrequency == 0)): + session.rollback() + else: + session.commit() + if ((opts.messages > 0) and (sent >= opts.messages)): + break + + if (opts.flow_control > 0) and (flowSent == 2): + flowControlReceiver.fetch(timeout=SECOND) + flowSent -= 1 + + if (opts.send_rate > 0): + delay = start + sent*interval - time.time()*TIME_SEC + if (delay > 0): + time.sleep(delay) + #end of while + + while flowSent > 0: + flowControlReceiver.fetch(timeout=SECOND) + flowSent -= 1 + + if (opts.report_total): + reporter.report() + for i in reversed(range(1,opts.send_eos+1)): + if (opts.sequence == "yes"): + sent+=1 + msg.properties[SN] = sent + msg.properties[EOS] = True #TODO (also in C++ client): add in ability to send digest or similar + sender.send(msg) + if ((opts.tx > 0) and (sent % opts.tx == 0)): + txCount+=1 + if ((opts.rollback_frequency > 0) and (txCount % opts.rollback_frequency == 0)): + session.rollback() + else: + session.commit() + session.sync() + session.close() + connection.close() + except Exception,e: + print e + connection.close() + +if __name__ == "__main__": main() diff --git a/qpid/cpp/management/python/bin/qpid-stat b/qpid/cpp/management/python/bin/qpid-stat new file mode 100755 index 0000000000..1780c4a819 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-stat @@ -0,0 +1,514 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +import os +from optparse import OptionParser, OptionGroup +import sys +import locale +import socket +import re +from qpid.messaging import Connection + +home = os.environ.get("QPID_TOOLS_HOME", os.path.normpath("/usr/share/qpid-tools")) +sys.path.append(os.path.join(home, "python")) + +from qpidtoollibs import BrokerAgent +from qpidtoollibs import Display, Header, Sorter, YN, Commas, TimeLong + + +class Config: + def __init__(self): + self._host = "localhost" + self._connTimeout = 10 + self._types = "" + self._limit = 50 + self._increasing = False + self._sortcol = None + +config = Config() +conn_options = {} + +def OptionsAndArguments(argv): + """ Set global variables for options, return arguments """ + + global config + global conn_options + + usage = \ +"""%prog -g [options] + %prog -c [options] + %prog -e [options] + %prog -q [options] [queue-name] + %prog -u [options] + %prog -m [options] + %prog --acl [options]""" + + parser = OptionParser(usage=usage) + + group1 = OptionGroup(parser, "General Options") + group1.add_option("-b", "--broker", action="store", type="string", default="localhost", metavar="", + help="URL of the broker to query") + group1.add_option("-t", "--timeout", action="store", type="int", default=10, metavar="", + help="Maximum time to wait for broker connection (in seconds)") + group1.add_option("--sasl-mechanism", action="store", type="string", metavar="", + 
help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + group1.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + group1.add_option("--ssl-certificate", action="store", type="string", metavar="", help="Client SSL certificate (PEM Format)") + group1.add_option("--ssl-key", action="store", type="string", metavar="", help="Client SSL private key (PEM Format)") + group1.add_option("--ha-admin", action="store_true", help="Allow connection to a HA backup broker.") + parser.add_option_group(group1) + + group2 = OptionGroup(parser, "Command Options") + group2.add_option("-g", "--general", help="Show General Broker Stats", action="store_const", const="g", dest="show") + group2.add_option("-c", "--connections", help="Show Connections", action="store_const", const="c", dest="show") + group2.add_option("-e", "--exchanges", help="Show Exchanges", action="store_const", const="e", dest="show") + group2.add_option("-q", "--queues", help="Show Queues", action="store_const", const="q", dest="show") + group2.add_option("-u", "--subscriptions", help="Show Subscriptions", action="store_const", const="u", dest="show") + group2.add_option("-m", "--memory", help="Show Broker Memory Stats", action="store_const", const="m", dest="show") + group2.add_option( "--acl", help="Show Access Control List Stats", action="store_const", const="acl", dest="show") + parser.add_option_group(group2) + + group3 = OptionGroup(parser, "Display Options") + group3.add_option("-S", "--sort-by", metavar="", help="Sort by column name") + group3.add_option("-I", "--increasing", action="store_true", default=False, help="Sort by increasing value (default = decreasing)") + group3.add_option("-L", "--limit", type="int", default=50, metavar="", help="Limit output to n rows") + parser.add_option_group(group3) + + opts, args = 
parser.parse_args(args=argv) + + if not opts.show: + parser.error("You must specify one of these options: -g, -c, -e, -q, -m, or -u. For details, try $ qpid-stat --help") + + config._types = opts.show + config._sortcol = opts.sort_by + config._host = opts.broker + config._connTimeout = opts.timeout + config._increasing = opts.increasing + config._limit = opts.limit + + if opts.sasl_mechanism: + conn_options['sasl_mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['sasl_service'] = opts.sasl_service_name + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.ha_admin: + conn_options['client_properties'] = {'qpid.ha-admin' : 1} + + return args + +class BrokerManager: + def __init__(self): + self.brokerName = None + self.connection = None + self.broker = None + self.cluster = None + + def SetBroker(self, brokerUrl): + self.url = brokerUrl + self.connection = Connection.establish(self.url, **conn_options) + self.broker = BrokerAgent(self.connection) + + def Disconnect(self): + """ Release any allocated brokers. Ignore any failures as the tool is + shutting down. 
+ """ + try: + self.connection.close() + except: + pass + + def displayBroker(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header('uptime', Header.DURATION)) + heads.append(Header('cluster', Header.NONE)) + heads.append(Header('connections', Header.COMMAS)) + heads.append(Header('sessions', Header.COMMAS)) + heads.append(Header('exchanges', Header.COMMAS)) + heads.append(Header('queues', Header.COMMAS)) + rows = [] + broker = self.broker.getBroker() + cluster = self.broker.getCluster() + clusterInfo = cluster and cluster.clusterName + "<" + cluster.status + ">" or "" + connections = self.getConnectionMap() + sessions = self.getSessionMap() + exchanges = self.getExchangeMap() + queues = self.getQueueMap() + row = (broker.getUpdateTime() - broker.getCreateTime(), + clusterInfo, + len(connections), len(sessions), + len(exchanges), len(queues)) + rows.append(row) + disp.formattedTable('Broker Summary:', heads, rows) + + if 'queueCount' not in broker.values: + return + + print + heads = [] + heads.append(Header('Statistic')) + heads.append(Header('Messages', Header.COMMAS)) + heads.append(Header('Bytes', Header.COMMAS)) + rows = [] + rows.append(['queue-depth', broker.msgDepth, broker.byteDepth]) + rows.append(['total-enqueues', broker.msgTotalEnqueues, broker.byteTotalEnqueues]) + rows.append(['total-dequeues', broker.msgTotalDequeues, broker.byteTotalDequeues]) + rows.append(['persistent-enqueues', broker.msgPersistEnqueues, broker.bytePersistEnqueues]) + rows.append(['persistent-dequeues', broker.msgPersistDequeues, broker.bytePersistDequeues]) + rows.append(['transactional-enqueues', broker.msgTxnEnqueues, broker.byteTxnEnqueues]) + rows.append(['transactional-dequeues', broker.msgTxnDequeues, broker.byteTxnDequeues]) + rows.append(['flow-to-disk-depth', broker.msgFtdDepth, broker.byteFtdDepth]) + rows.append(['flow-to-disk-enqueues', broker.msgFtdEnqueues, broker.byteFtdEnqueues]) + rows.append(['flow-to-disk-dequeues', broker.msgFtdDequeues, 
broker.byteFtdDequeues]) + rows.append(['acquires', broker.acquires, None]) + rows.append(['releases', broker.releases, None]) + rows.append(['discards-no-route', broker.discardsNoRoute, None]) + rows.append(['discards-ttl-expired', broker.discardsTtl, None]) + rows.append(['discards-limit-overflow', broker.discardsOverflow, None]) + rows.append(['discards-ring-overflow', broker.discardsRing, None]) + rows.append(['discards-lvq-replace', broker.discardsLvq, None]) + rows.append(['discards-subscriber-reject', broker.discardsSubscriber, None]) + rows.append(['discards-purged', broker.discardsPurge, None]) + rows.append(['reroutes', broker.reroutes, None]) + rows.append(['abandoned', broker.abandoned, None]) + rows.append(['abandoned-via-alt', broker.abandonedViaAlt, None]) + disp.formattedTable('Aggregate Broker Statistics:', heads, rows) + + + def displayConn(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header('connection')) + heads.append(Header('cproc')) + heads.append(Header('cpid')) + heads.append(Header('mech')) + heads.append(Header('auth')) + heads.append(Header('connected', Header.DURATION)) + heads.append(Header('idle', Header.DURATION)) + heads.append(Header('msgIn', Header.KMG)) + heads.append(Header('msgOut', Header.KMG)) + rows = [] + connections = self.broker.getAllConnections() + broker = self.broker.getBroker() + for conn in connections: + row = [] + row.append(conn.address) + if conn.remoteProcessName: row.append(conn.remoteProcessName) + else: row.append("-") + row.append(conn.remotePid) + if conn.saslMechanism: row.append(conn.saslMechanism) + else: row.append("-") + if conn.authIdentity: row.append(conn.authIdentity) + else: row.append("-") + row.append(broker.getUpdateTime() - conn.getCreateTime()) + row.append(broker.getUpdateTime() - conn.getUpdateTime()) + row.append(conn.msgsFromClient) + row.append(conn.msgsToClient) + rows.append(row) + title = "Connections" + if config._sortcol: + sorter = Sorter(heads, rows, 
config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displaySession(self): + disp = Display(prefix=" ") + + def displayExchange(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("exchange")) + heads.append(Header("type")) + heads.append(Header("dur", Header.Y)) + heads.append(Header("bind", Header.KMG)) + heads.append(Header("msgIn", Header.KMG)) + heads.append(Header("msgOut", Header.KMG)) + heads.append(Header("msgDrop", Header.KMG)) + heads.append(Header("byteIn", Header.KMG)) + heads.append(Header("byteOut", Header.KMG)) + heads.append(Header("byteDrop", Header.KMG)) + rows = [] + exchanges = self.broker.getAllExchanges() + for ex in exchanges: + row = [] + row.append(ex.name) + row.append(ex.type) + row.append(ex.durable) + row.append(ex.bindingCount) + row.append(ex.msgReceives) + row.append(ex.msgRoutes) + row.append(ex.msgDrops) + row.append(ex.byteReceives) + row.append(ex.byteRoutes) + row.append(ex.byteDrops) + rows.append(row) + title = "Exchanges" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displayQueues(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("queue")) + heads.append(Header("dur", Header.Y)) + heads.append(Header("autoDel", Header.Y)) + heads.append(Header("excl", Header.Y)) + heads.append(Header("msg", Header.KMG)) + heads.append(Header("msgIn", Header.KMG)) + heads.append(Header("msgOut", Header.KMG)) + heads.append(Header("bytes", Header.KMG)) + heads.append(Header("bytesIn", Header.KMG)) + heads.append(Header("bytesOut", Header.KMG)) + heads.append(Header("cons", Header.KMG)) + heads.append(Header("bind", Header.KMG)) + rows = [] + queues = self.broker.getAllQueues() + for q in queues: + row = [] + row.append(q.name) 
+ row.append(q.durable) + row.append(q.autoDelete) + row.append(q.exclusive) + row.append(q.msgDepth) + row.append(q.msgTotalEnqueues) + row.append(q.msgTotalDequeues) + row.append(q.byteDepth) + row.append(q.byteTotalEnqueues) + row.append(q.byteTotalDequeues) + row.append(q.consumerCount) + row.append(q.bindingCount) + rows.append(row) + title = "Queues" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + + def displayQueue(self, name): + queue = self.broker.getQueue(name) + if not queue: + print "Queue '%s' not found" % name + return + + disp = Display(prefix=" ") + heads = [] + heads.append(Header('Name')) + heads.append(Header('Durable', Header.YN)) + heads.append(Header('AutoDelete', Header.YN)) + heads.append(Header('Exclusive', Header.YN)) + heads.append(Header('FlowStopped', Header.YN)) + heads.append(Header('FlowStoppedCount', Header.COMMAS)) + heads.append(Header('Consumers', Header.COMMAS)) + heads.append(Header('Bindings', Header.COMMAS)) + rows = [] + rows.append([queue.name, queue.durable, queue.autoDelete, queue.exclusive, + queue.flowStopped, queue.flowStoppedCount, + queue.consumerCount, queue.bindingCount]) + disp.formattedTable("Properties:", heads, rows) + print + + heads = [] + heads.append(Header('Property')) + heads.append(Header('Value')) + rows = [] + rows.append(['arguments', queue.arguments]) + rows.append(['alt-exchange', queue.altExchange]) + disp.formattedTable("Optional Properties:", heads, rows) + print + + heads = [] + heads.append(Header('Statistic')) + heads.append(Header('Messages', Header.COMMAS)) + heads.append(Header('Bytes', Header.COMMAS)) + rows = [] + rows.append(['queue-depth', queue.msgDepth, queue.byteDepth]) + rows.append(['total-enqueues', queue.msgTotalEnqueues, queue.byteTotalEnqueues]) + rows.append(['total-dequeues', queue.msgTotalDequeues, 
queue.byteTotalDequeues]) + rows.append(['persistent-enqueues', queue.msgPersistEnqueues, queue.bytePersistEnqueues]) + rows.append(['persistent-dequeues', queue.msgPersistDequeues, queue.bytePersistDequeues]) + rows.append(['transactional-enqueues', queue.msgTxnEnqueues, queue.byteTxnEnqueues]) + rows.append(['transactional-dequeues', queue.msgTxnDequeues, queue.byteTxnDequeues]) + rows.append(['flow-to-disk-depth', queue.msgFtdDepth, queue.byteFtdDepth]) + rows.append(['flow-to-disk-enqueues', queue.msgFtdEnqueues, queue.byteFtdEnqueues]) + rows.append(['flow-to-disk-dequeues', queue.msgFtdDequeues, queue.byteFtdDequeues]) + rows.append(['acquires', queue.acquires, None]) + rows.append(['releases', queue.releases, None]) + rows.append(['discards-ttl-expired', queue.discardsTtl, None]) + rows.append(['discards-limit-overflow', queue.discardsOverflow, None]) + rows.append(['discards-ring-overflow', queue.discardsRing, None]) + rows.append(['discards-lvq-replace', queue.discardsLvq, None]) + rows.append(['discards-subscriber-reject', queue.discardsSubscriber, None]) + rows.append(['discards-purged', queue.discardsPurge, None]) + rows.append(['reroutes', queue.reroutes, None]) + disp.formattedTable("Statistics:", heads, rows) + + + def displaySubscriptions(self): + disp = Display(prefix=" ") + heads = [] + heads.append(Header("subscr")) + heads.append(Header("queue")) + heads.append(Header("conn")) + heads.append(Header("procName")) + heads.append(Header("procId")) + heads.append(Header("browse", Header.Y)) + heads.append(Header("acked", Header.Y)) + heads.append(Header("excl", Header.Y)) + heads.append(Header("creditMode")) + heads.append(Header("delivered", Header.COMMAS)) + heads.append(Header("sessUnacked", Header.COMMAS)) + rows = [] + subscriptions = self.broker.getAllSubscriptions() + sessions = self.getSessionMap() + connections = self.getConnectionMap() + for s in subscriptions: + row = [] + try: + row.append(s.name) + row.append(s.queueRef) + session = 
sessions[s.sessionRef] + connection = connections[session.connectionRef] + row.append(connection.address) + if connection.remoteProcessName: row.append(connection.remoteProcessName) + else: row.append("-") + row.append(connection.remotePid) + row.append(s.browsing) + row.append(s.acknowledged) + row.append(s.exclusive) + row.append(s.creditMode) + row.append(s.delivered) + row.append(session.unackedMessages) + rows.append(row) + except: + pass + title = "Subscriptions" + if config._sortcol: + sorter = Sorter(heads, rows, config._sortcol, config._limit, config._increasing) + dispRows = sorter.getSorted() + else: + dispRows = rows + disp.formattedTable(title, heads, dispRows) + + def displayMemory(self): + disp = Display(prefix=" ") + heads = [Header('Statistic'), Header('Value', Header.COMMAS)] + rows = [] + memory = self.broker.getMemory() + for k,v in memory.values.items(): + if k != 'name': + rows.append([k, v]) + disp.formattedTable('Broker Memory Statistics:', heads, rows) + + def displayAcl(self): + acl = self.broker.getAcl() + if not acl: + print "ACL Policy Module is not installed" + return + disp = Display(prefix=" ") + heads = [Header('Statistic'), Header('Value')] + rows = [] + rows.append(['policy-file', acl.policyFile]) + rows.append(['enforcing', YN(acl.enforcingAcl)]) + rows.append(['has-transfer-acls', YN(acl.transferAcl)]) + rows.append(['last-acl-load', TimeLong(acl.lastAclLoad)]) + rows.append(['acl-denials', Commas(acl.aclDenyCount)]) + disp.formattedTable('ACL Policy Statistics:', heads, rows) + + def getExchangeMap(self): + exchanges = self.broker.getAllExchanges() + emap = {} + for e in exchanges: + emap[e.name] = e + return emap + + def getQueueMap(self): + queues = self.broker.getAllQueues() + qmap = {} + for q in queues: + qmap[q.name] = q + return qmap + + def getSessionMap(self): + sessions = self.broker.getAllSessions() + smap = {} + for s in sessions: + smap[s.name] = s + return smap + + def getConnectionMap(self): + connections = 
self.broker.getAllConnections() + cmap = {} + for c in connections: + cmap[c.address] = c + return cmap + + def displayMain(self, names, main): + if main == 'g': self.displayBroker() + elif main == 'c': self.displayConn() + elif main == 's': self.displaySession() + elif main == 'e': self.displayExchange() + elif main == 'q': + if len(names) >= 1: + self.displayQueue(names[0]) + else: + self.displayQueues() + elif main == 'u': self.displaySubscriptions() + elif main == 'm': self.displayMemory() + elif main == 'acl': self.displayAcl() + + def display(self, names): + self.displayMain(names, config._types) + + +def main(argv=None): + + args = OptionsAndArguments(argv) + bm = BrokerManager() + + try: + bm.SetBroker(config._host) + bm.display(args) + bm.Disconnect() + return 0 + except KeyboardInterrupt: + print + except Exception,e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + + bm.Disconnect() # try to deallocate brokers + return 1 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/qpid/cpp/management/python/bin/qpid-stat.bat b/qpid/cpp/management/python/bin/qpid-stat.bat new file mode 100644 index 0000000000..0a03d5177c --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-stat.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-stat %* diff --git a/qpid/cpp/management/python/bin/qpid-store-chk b/qpid/cpp/management/python/bin/qpid-store-chk new file mode 100755 index 0000000000..f6d70cb3c6 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-store-chk @@ -0,0 +1,332 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from qpidstore import jerr, jrnl, janal +import optparse, os, sys + + +#== class StoreChk ============================================================ + +class StoreChk(object): + """ + This class: + 1. Reads a journal jinf file, and from its info: + 2. Analyzes the journal data files to determine which is the last to be written, then + 3. Reads and analyzes all the records in the journal files. + The only public method is run() which kicks off the analysis. + """ + + def __init__(self): + """Constructor""" + # params + self.opts = None + + self._jdir = None + + # recovery analysis objects +# self._jrnl_info = None +# self.jrnl_rdr = None + + self._process_args() + self._jrnl_info = jrnl.JrnlInfo(self._jdir, self.opts.bfn) + # FIXME: This is a hack... find an elegant way of getting the file size to jrec! 
+ jrnl.JRNL_FILE_SIZE = self._jrnl_info.get_jrnl_file_size_bytes() + self.jrnl_anal = janal.JrnlAnalyzer(self._jrnl_info) + self.jrnl_rdr = janal.JrnlReader(self._jrnl_info, self.jrnl_anal, self.opts.qflag, self.opts.rflag, + self.opts.vflag) + + def run(self): + """Run the store check""" + if not self.opts.qflag: + print self._jrnl_info + print self.jrnl_anal + self.jrnl_rdr.run() + self._report() + + def _report(self): + """Print the results of the store check""" + if not self.opts.qflag: + print + print " === REPORT ====" + print + print "Records: %8d non-transactional" % \ + (self.jrnl_rdr.get_msg_cnt() - self.jrnl_rdr.get_txn_msg_cnt()) + print " %8d transactional" % self.jrnl_rdr.get_txn_msg_cnt() + print " %8d total" % self.jrnl_rdr.get_msg_cnt() + print + print "Transactions: %8d aborts" % self.jrnl_rdr.get_abort_cnt() + print " %8d commits" % self.jrnl_rdr.get_commit_cnt() + print " %8d total" % (self.jrnl_rdr.get_abort_cnt() + self.jrnl_rdr.get_commit_cnt()) + print + if self.jrnl_rdr.emap().size() > 0: + print "Remaining enqueued records (sorted by rid): " + rid_list = self.jrnl_rdr.emap().rids() + rid_list.sort() + for rid in rid_list: + l = self.jrnl_rdr.emap().get(rid) + locked = "" + if l[2]: + locked += " (locked)" + print " fid=%d %s%s" % (l[0], l[1], locked) + print "WARNING: Enqueue-Dequeue mismatch, %d enqueued records remain." % self.jrnl_rdr.emap().size() + else: + print "No remaining enqueued records found (emap empty)." + print + if self.jrnl_rdr.tmap().size() > 0: + txn_rec_cnt = 0 + print "Incomplete transactions: " + for xid in self.jrnl_rdr.tmap().xids(): + jrnl.Utils.format_xid(xid) + recs = self.jrnl_rdr.tmap().get(xid) + for l in recs: + print " fid=%d %s" % (l[0], l[1]) + print " Total: %d records for %s" % (len(recs), jrnl.Utils.format_xid(xid)) + print + txn_rec_cnt += len(recs) + print "WARNING: Incomplete transactions found, %d xids remain containing a total of %d records." 
% \ + (self.jrnl_rdr.tmap().size(), txn_rec_cnt) + else: + print "No incomplete transactions found (tmap empty)." + print + print "%d enqueues, %d journal records processed." % \ + (self.jrnl_rdr.get_msg_cnt(), self.jrnl_rdr.get_rec_cnt()) + + + def _process_args(self): + """Process the command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print all records and transactions (including consumed/closed)") + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self.opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self.opts.qflag and self.opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self.opts.qflag and self.opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + + +#== class CsvStoreChk ========================================================= + +class CsvStoreChk(StoreChk): + """ + This class, in addition to analyzing a journal, can compare the journal footprint (ie enqueued/dequeued/transaction + record counts) to expected values from a CSV file. This can be used for additional automated testing, and is + currently in use in the long store tests for journal encode testing. 
+ """ + + # CSV file cols + TEST_NUM_COL = 0 + NUM_MSGS_COL = 5 + MIN_MSG_SIZE_COL = 7 + MAX_MSG_SIZE_COL = 8 + MIN_XID_SIZE_COL = 9 + MAX_XID_SIZE_COL = 10 + AUTO_DEQ_COL = 11 + TRANSIENT_COL = 12 + EXTERN_COL = 13 + COMMENT_COL = 20 + + def __init__(self): + """Constructor""" + StoreChk.__init__(self) + + # csv params + self.num_msgs = None + self.msg_len = None + self.auto_deq = None + self.xid_len = None + self.transient = None + self.extern = None + + self._warning = [] + + self.jrnl_rdr.set_callbacks(self, CsvStoreChk._csv_pre_run_chk, CsvStoreChk._csv_enq_chk, + CsvStoreChk._csv_deq_chk, CsvStoreChk._csv_txn_chk, CsvStoreChk._csv_post_run_chk) + self._get_csv_test() + + def _get_csv_test(self): + """Get a test from the CSV reader""" + if self.opts.csvfn != None and self.opts.tnum != None: + tparams = self._read_csv_file(self.opts.csvfn, self.opts.tnum) + if tparams == None: + print "ERROR: Test %d not found in CSV file \"%s\"" % (self.opts.tnum, self.opts.csvfn) + sys.exit(1) + self.num_msgs = tparams["num_msgs"] + if tparams["min_size"] == tparams["max_size"]: + self.msg_len = tparams["max_size"] + else: + self.msg_len = 0 + self.auto_deq = tparams["auto_deq"] + if tparams["xid_min_size"] == tparams["xid_max_size"]: + self.xid_len = tparams["xid_max_size"] + else: + self.xid_len = 0 + self.transient = tparams["transient"] + self.extern = tparams["extern"] + + def _read_csv_file(self, filename, tnum): + """Read the CSV test parameter file""" + try: + csvf = open(filename, "r") + except IOError: + print "ERROR: Unable to open CSV file \"%s\"" % filename + sys.exit(1) + for line in csvf: + str_list = line.strip().split(",") + if len(str_list[0]) > 0 and str_list[0][0] != "\"": + try: + if (int(str_list[self.TEST_NUM_COL]) == tnum): + return { "num_msgs": int(str_list[self.NUM_MSGS_COL]), + "min_size": int(str_list[self.MIN_MSG_SIZE_COL]), + "max_size": int(str_list[self.MAX_MSG_SIZE_COL]), + "auto_deq": not (str_list[self.AUTO_DEQ_COL] == "FALSE" or + 
str_list[self.AUTO_DEQ_COL] == "0"), + "xid_min_size": int(str_list[self.MIN_XID_SIZE_COL]), + "xid_max_size": int(str_list[self.MAX_XID_SIZE_COL]), + "transient": not (str_list[self.TRANSIENT_COL] == "FALSE" or + str_list[self.TRANSIENT_COL] == "0"), + "extern": not (str_list[self.EXTERN_COL] == "FALSE" or + str_list[self.EXTERN_COL] == "0"), + "comment": str_list[self.COMMENT_COL] } + except Exception: + pass + return None + + def _process_args(self): + """Process command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-c", "--csv-filename", + action="store", dest="csvfn", + help="CSV filename containing test parameters") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print all records and transactions (including consumed/closed)") + opt.add_option("-t", "--test-num", + action="store", type="int", dest="tnum", + help="Test number from CSV file - only valid if CSV file named") + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self.opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self.opts.qflag and self.opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self.opts.qflag and self.opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + + # Callbacks for checking against CSV test parameters. 
Return False if ok, True to raise error. + + #@staticmethod + def _csv_pre_run_chk(csv_store_chk): + """Check performed before a test runs""" + if csv_store_chk.num_msgs == None: + return + if csv_store_chk.jrnl_anal.is_empty() and csv_store_chk.num_msgs > 0: + raise jerr.AllJrnlFilesEmptyCsvError(csv_store_chk.get_opts().tnum, csv_store_chk.num_msgs) + return False + _csv_pre_run_chk = staticmethod(_csv_pre_run_chk) + + #@staticmethod + def _csv_enq_chk(csv_store_chk, hdr): + """Check performed before each enqueue operation""" + #if csv_store_chk.num_msgs == None: return + # + if csv_store_chk.extern != None: + if csv_store_chk.extern != hdr.extern: + raise jerr.ExternFlagCsvError(csv_store_chk.opts.tnum, csv_store_chk.extern) + if hdr.extern and hdr.data != None: + raise jerr.ExternFlagWithDataCsvError(csv_store_chk.opts.tnum) + if csv_store_chk.msg_len != None and csv_store_chk.msg_len > 0 and hdr.data != None and \ + len(hdr.data) != csv_store_chk.msg_len: + raise jerr.MessageLengthCsvError(csv_store_chk.opts.tnum, csv_store_chk.msg_len, len(hdr.data)) + if csv_store_chk.xid_len != None and csv_store_chk.xid_len > 0 and len(hdr.xid) != csv_store_chk.xid_len: + raise jerr.XidLengthCsvError(csv_store_chk.opts.tnum, csv_store_chk.xid_len, len(hdr.xid)) + if csv_store_chk.transient != None and hdr.transient != csv_store_chk.transient: + raise jerr.TransactionCsvError(csv_store_chk.opts.tnum, csv_store_chk.transient) + return False + _csv_enq_chk = staticmethod(_csv_enq_chk) + + #@staticmethod + def _csv_deq_chk(csv_store_chk, hdr): + """Check performed before each dequeue operation""" + if csv_store_chk.auto_deq != None and not csv_store_chk.auto_deq: + raise jerr.JWarning("[CSV %d] WARNING: Dequeue record rid=%d found in non-dequeue test - ignoring." % + (csv_store_chk.opts.tnum, hdr.rid)) + #self._warning.append("[CSV %d] WARNING: Dequeue record rid=%d found in non-dequeue test - ignoring." 
% + # (csv_store_chk.opts.tnum, hdr.rid)) + return False + _csv_deq_chk = staticmethod(_csv_deq_chk) + + #@staticmethod + def _csv_txn_chk(csv_store_chk, hdr): + """Check performed before each transaction commit/abort""" + return False + _csv_txn_chk = staticmethod(_csv_txn_chk) + + #@staticmethod + def _csv_post_run_chk(csv_store_chk): + """Cehck performed after the completion of the test""" + # Exclude this check if lastFileFlag is set - the count may be less than the number of msgs sent because + # of journal overwriting + if csv_store_chk.num_msgs != None and not csv_store_chk.jrnl_rdr.is_last_file() and \ + csv_store_chk.num_msgs != csv_store_chk.jrnl_rdr.get_msg_cnt(): + raise jerr.NumMsgsCsvError(csv_store_chk.opts.tnum, csv_store_chk.num_msgs, + csv_store_chk.jrnl_rdr.get_msg_cnt()) + return False + _csv_post_run_chk = staticmethod(_csv_post_run_chk) + +#============================================================================== +# main program +#============================================================================== + +if __name__ == "__main__": + M = CsvStoreChk() + try: + M.run() + except Exception, e: + sys.exit(e) diff --git a/qpid/cpp/management/python/bin/qpid-store-resize b/qpid/cpp/management/python/bin/qpid-store-resize new file mode 100755 index 0000000000..38d8eaf1ad --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-store-resize @@ -0,0 +1,350 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +from qpidstore import jerr, jrnl, janal +import glob, optparse, os, sys, time + + +#== class Resize ============================================================== + +class Resize(object): + """ + Creates a new store journal and copies records from old journal to new. The new journal may be of + different size from the old one. The records are packed into the new journal (ie only remaining + enqueued records and associated transactions - if any - are copied over without spaces between them). + + The default action is to push the old journal down into a 'bak' sub-directory and then create a + new journal of the same size and pack it with the records from the old. However, it is possible to + suppress the pushdown (using --no-pushdown), in which case either a new journal id (using + --new-base-filename) or an old journal id (usnig --old-base-filename) must be supplied. In the former + case,a new journal will be created using the new base file name alongside the old one. In the latter + case, the old journal will be renamed to the supplied name, and the new one will take the default. + Note that both can be specified together with the --no-pushdown option. + + To resize the journal, use the optional --num-jfiles and/or --jfile-size parameters. These + should be large enough to write all the records or an error will result. If the size is large enough + to write all records, but too small to keep below the enqueue threshold, a warning will be printed. 
+ Note that as any valid size will be accepted, a journal can also be shrunk, as long as it is sufficiently + big to accept the transferred records. + """ + + BAK_DIR = "bak" + JFILE_SIZE_PGS_MIN = 1 + JFILE_SIZE_PGS_MAX = 32768 + NUM_JFILES_MIN = 4 + NUM_JFILES_MAX = 64 + + def __init__(self): + """Constructor""" + self._opts = None + self._jdir = None + self._fname = None + self._fnum = None + self._file = None + self._file_rec_wr_cnt = None + self._filler_wr_cnt = None + self._last_rec_fid = None + self._last_rec_offs = None + self._rec_wr_cnt = None + + self._jrnl_info = None + self._jrnl_analysis = None + self._jrnl_reader = None + + self._process_args() + self._jrnl_info = jrnl.JrnlInfo(self._jdir, self._opts.bfn) + # FIXME: This is a hack... find an elegant way of getting the file size to jrec! + jrnl.JRNL_FILE_SIZE = self._jrnl_info.get_jrnl_file_size_bytes() + self._jrnl_analysis = janal.JrnlAnalyzer(self._jrnl_info) + self._jrnl_reader = janal.JrnlReader(self._jrnl_info, self._jrnl_analysis, self._opts.qflag, self._opts.rflag, + self._opts.vflag) + + def run(self): + """Perform the action of resizing the journal""" + if not self._opts.qflag: + print self._jrnl_analysis + self._jrnl_reader.run() + if self._opts.vflag: + print self._jrnl_info + if not self._opts.qflag: + print self._jrnl_reader.report(self._opts.vflag, self._opts.rflag) + self._handle_old_files() + self._create_new_files() + if not self._opts.qflag: + print "Transferred %d records to new journal." % self._rec_wr_cnt + self._chk_free() + + def _chk_free(self): + """Check if sufficient space is available in resized journal to be able to enqueue. 
Raise a warning if not.""" + if self._last_rec_fid == None or self._last_rec_offs == None: + return + wr_capacity_bytes = self._last_rec_fid * self._jrnl_info.get_jrnl_data_size_bytes() + self._last_rec_offs + tot_capacity_bytes = self._jrnl_info.get_tot_jrnl_data_size_bytes() + percent_full = 100.0 * wr_capacity_bytes / tot_capacity_bytes + if percent_full > 80.0: + raise jerr.JWarning("WARNING: Journal %s is %2.1f%% full and will likely not allow enqueuing of new records" + " until some existing records are dequeued." % + (self._jrnl_info.get_jrnl_id(), percent_full)) + + def _create_new_files(self): + """Create new journal files""" + # Assemble records to be transfered + master_record_list = {} + txn_record_list = self._jrnl_reader.txn_obj_list() + if self._opts.vflag and self._jrnl_reader.emap().size() > 0: + print "* Assembling %d records from emap" % self._jrnl_reader.emap().size() + for tup in self._jrnl_reader.emap().get_rec_list(): + hdr = tup[1] + hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[long(hdr.rid)] = hdr + if hdr.xidsize > 0 and hdr.xid in txn_record_list: + txn_hdr = txn_record_list[hdr.xid] + del(txn_record_list[hdr.xid]) + txn_hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[long(txn_hdr.rid)] = txn_hdr + if self._opts.vflag and self._jrnl_reader.tmap().size() > 0: + print "* Assembling %d records from tmap" % self._jrnl_reader.tmap().size() + for xid in self._jrnl_reader.tmap().xids(): + for l in self._jrnl_reader.tmap().get(xid): + hdr = l[1] + hdr.flags &= ~jrnl.Hdr.OWI_MASK # Turn off owi + master_record_list[hdr.rid] = hdr + rid_list = master_record_list.keys() + rid_list.sort() + + # get base filename + bfn = self._opts.bfn + if self._opts.nbfn != None: + bfn = self._opts.nbfn + + # write jinf file + self._jrnl_info.resize(self._opts.njf, self._opts.jfs) + self._jrnl_info.write(self._jdir, bfn) + + # write records + if self._opts.vflag: + print "* Transferring records to new journal files" + fro = 
self._jrnl_info.get_jrnl_sblk_size_bytes() + while len(rid_list) > 0: + hdr = master_record_list[rid_list.pop(0)] + rec = hdr.encode() + pos = 0 + while pos < len(rec): + if self._file == None or self._file.tell() >= self._jrnl_info.get_jrnl_file_size_bytes(): + if self._file == None: + rid = hdr.rid + elif len(rid_list) == 0: + rid = 0 + else: + rid = rid_list[0] + if not self._rotate_file(rid, fro): + raise jerr.JournalSpaceExceededError() + if len(rec) - pos <= self._jrnl_info.get_jrnl_file_size_bytes() - self._file.tell(): + self._file.write(rec[pos:]) + self._fill_file(jrnl.Utils.size_in_bytes_to_blk(self._file.tell(), + self._jrnl_info.get_jrnl_dblk_size_bytes())) + pos = len(rec) + fro = self._jrnl_info.get_jrnl_sblk_size_bytes() + else: + flen = self._jrnl_info.get_jrnl_file_size_bytes() - self._file.tell() + self._file.write(rec[pos:pos + flen]) + pos += flen + rem = len(rec) - pos + if rem <= self._jrnl_info.get_jrnl_data_size_bytes(): + fro = (jrnl.Utils.size_in_bytes_to_blk(self._jrnl_info.get_jrnl_sblk_size_bytes() + rem, + self._jrnl_info.get_jrnl_dblk_size_bytes())) + else: + fro = 0 + self._rec_wr_cnt += 1 + self._file_rec_wr_cnt += 1 + self._fill_file(add_filler_recs = True) + while self._rotate_file(): + pass + + def _fill_file(self, to_posn = None, add_filler_recs = False): + """Fill a file to a known offset""" + if self._file == None: + return + if add_filler_recs: + nfr = int(jrnl.Utils.rem_bytes_in_blk(self._file, self._jrnl_info.get_jrnl_sblk_size_bytes()) / + self._jrnl_info.get_jrnl_dblk_size_bytes()) + if nfr > 0: + self._filler_wr_cnt = nfr + for i in range(0, nfr): + self._file.write("RHMx") + self._fill_file(jrnl.Utils.size_in_bytes_to_blk(self._file.tell(), + self._jrnl_info.get_jrnl_dblk_size_bytes())) + self._last_rec_fid = self._fnum + self._last_rec_offs = self._file.tell() + if to_posn == None: + to_posn = self._jrnl_info.get_jrnl_file_size_bytes() + elif to_posn > self._jrnl_info.get_jrnl_file_size_bytes(): + raise 
jerr.FillExceedsFileSizeError(to_posn, self._jrnl_info.get_jrnl_file_size_bytes()) + diff = to_posn - self._file.tell() + self._file.write(str("\0" * diff)) + #DEBUG + if self._file.tell() != to_posn: + raise jerr.FillSizeError(self._file.tell(), to_posn) + + def _rotate_file(self, rid = None, fro = None): + """Switch to the next logical file""" + if self._file != None: + self._file.close() + if self._opts.vflag: + if self._file_rec_wr_cnt == 0: + print " (empty)" + elif self._filler_wr_cnt == None: + print " (%d records)" % self._file_rec_wr_cnt + else: + print " (%d records + %d filler(s))" % (self._file_rec_wr_cnt, self._filler_wr_cnt) + if self._fnum == None: + self._fnum = 0 + self._rec_wr_cnt = 0 + elif self._fnum == self._jrnl_info.get_num_jrnl_files() - 1: + return False + else: + self._fnum += 1 + self._file_rec_wr_cnt = 0 + self._fname = os.path.join(self._jrnl_info.get_jrnl_dir(), "%s.%04x.jdat" % + (self._jrnl_info.get_jrnl_base_name(), self._fnum)) + if self._opts.vflag: + print "* Opening file %s" % self._fname, + self._file = open(self._fname, "w") + if rid == None or fro == None: + self._fill_file() + else: + now = time.time() + fhdr = jrnl.FileHdr(0, "RHMf", jrnl.Hdr.HDR_VER, int(jrnl.Hdr.BIG_ENDIAN), 0, rid) + fhdr.init(self._file, 0, self._fnum, self._fnum, fro, int(now), 1000000000*(now - int(now))) + self._file.write(fhdr.encode()) + self._fill_file(self._jrnl_info.get_jrnl_sblk_size_bytes()) + return True + + def _handle_old_files(self): + """Push old journal down into a backup directory""" + target_dir = self._jdir + if not self._opts.npd: + target_dir = os.path.join(self._jdir, self.BAK_DIR) + if os.path.exists(target_dir): + if self._opts.vflag: + print "* Pushdown directory %s exists, deleting content" % target_dir + for fname in glob.glob(os.path.join(target_dir, "*")): + os.unlink(fname) + else: + if self._opts.vflag: + print "* Creating new pushdown directory %s" % target_dir + os.mkdir(target_dir) + + if not self._opts.npd or 
self._opts.obfn != None: + if self._opts.obfn != None and self._opts.vflag: + print "* Renaming old journal files using base name %s" % self._opts.obfn + # .jdat files + for fname in glob.glob(os.path.join(self._jdir, "%s.*.jdat" % self._opts.bfn)): + tbfn = os.path.basename(fname) + if self._opts.obfn != None: + per1 = tbfn.rfind(".") + if per1 >= 0: + per2 = tbfn.rfind(".", 0, per1) + if per2 >= 0: + tbfn = "%s%s" % (self._opts.obfn, tbfn[per2:]) + os.rename(fname, os.path.join(target_dir, tbfn)) + # .jinf file + self._jrnl_info.write(target_dir, self._opts.obfn) + os.unlink(os.path.join(self._jdir, "%s.jinf" % self._opts.bfn)) + + def _print_options(self): + """Print program options""" + if self._opts.vflag: + print "Journal dir: %s" % self._jdir + print "Options: Base filename: %s" % self._opts.bfn + print " New base filename: %s" % self._opts.nbfn + print " Old base filename: %s" % self._opts.obfn + print " Pushdown: %s" % self._opts.npd + print " No. journal files: %d" % self._opts.njf + print " Journal file size: %d 64kiB blocks" % self._opts.jfs + print " Show records flag: %s" % self._opts.rflag + print " Verbose flag: %s" % True + print + + def _process_args(self): + """Process the command-line arguments""" + opt = optparse.OptionParser(usage="%prog [options] DIR", version="%prog 1.0") + opt.add_option("-b", "--base-filename", + action="store", dest="bfn", default="JournalData", + help="Base filename for old journal files") + opt.add_option("-B", "--new-base-filename", + action="store", dest="nbfn", + help="Base filename for new journal files") + opt.add_option("-n", "--no-pushdown", + action="store_true", dest="npd", + help="Suppress pushdown of old files into \"bak\" dir; old files will remain in existing dir") + opt.add_option("-N", "--num-jfiles", + action="store", type="int", dest="njf", default=8, + help="Number of files for new journal (%d-%d)" % (self.NUM_JFILES_MIN, self.NUM_JFILES_MAX)) + opt.add_option("-o", "--old-base-filename", + 
action="store", dest="obfn", + help="Base filename for old journal files") + opt.add_option("-q", "--quiet", + action="store_true", dest="qflag", + help="Quiet (suppress all non-error output)") + opt.add_option("-r", "--records", + action="store_true", dest="rflag", + help="Print remaining records and transactions") + opt.add_option("-s", "--jfile-size-pgs", + action="store", type="int", dest="jfs", default=24, + help="Size of each new journal file in 64kiB blocks (%d-%d)" % + (self.JFILE_SIZE_PGS_MIN, self.JFILE_SIZE_PGS_MAX)) + opt.add_option("-v", "--verbose", + action="store_true", dest="vflag", + help="Verbose output") + (self._opts, args) = opt.parse_args() + if len(args) == 0: + opt.error("No journal directory argument") + elif len(args) > 1: + opt.error("Too many positional arguments: %s" % args) + if self._opts.qflag and self._opts.rflag: + opt.error("Quiet (-q/--quiet) and record (-r/--records) options are mutually exclusive") + if self._opts.qflag and self._opts.vflag: + opt.error("Quiet (-q/--quiet) and verbose (-v/--verbose) options are mutually exclusive") + if self._opts.njf != None and (self._opts.njf < self.NUM_JFILES_MIN or self._opts.njf > self.NUM_JFILES_MAX): + opt.error("Number of files (%d) is out of range (%d-%d)" % + (self._opts.njf, self.NUM_JFILES_MIN, self.NUM_JFILES_MAX)) + if self._opts.jfs != None and (self._opts.jfs < self.JFILE_SIZE_PGS_MIN or + self._opts.jfs > self.JFILE_SIZE_PGS_MAX): + opt.error("File size (%d) is out of range (%d-%d)" % + (self._opts.jfs, self.JFILE_SIZE_PGS_MIN, self.JFILE_SIZE_PGS_MAX)) + if self._opts.npd != None and (self._opts.nbfn == None and self._opts.obfn == None): + opt.error("If (-n/--no-pushdown) is used, then at least one of (-B/--new-base-filename) and" + " (-o/--old-base-filename) must be used.") + self._jdir = args[0] + if not os.path.exists(self._jdir): + opt.error("Journal path \"%s\" does not exist" % self._jdir) + self._print_options() + 
+#============================================================================== +# main program +#============================================================================== + +if __name__ == "__main__": + R = Resize() + try: + R.run() + except Exception, e: + sys.exit(e) diff --git a/qpid/cpp/management/python/bin/qpid-tool b/qpid/cpp/management/python/bin/qpid-tool new file mode 100755 index 0000000000..09ca2b8c13 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-tool @@ -0,0 +1,799 @@ +#!/usr/bin/env python + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +import os +import optparse +import sys +import socket +import locale +from types import * +from cmd import Cmd +from shlex import split +from threading import Lock +from time import strftime, gmtime +from qpidtoollibs import Display +from qmf.console import Session, Console, SchemaClass, ObjectId + +class Mcli(Cmd): + """ Management Command Interpreter """ + + def __init__(self, dataObject, dispObject): + Cmd.__init__(self) + self.dataObject = dataObject + self.dispObject = dispObject + self.dataObject.setCli(self) + self.prompt = "qpid: " + + def emptyline(self): + pass + + def setPromptMessage(self, p): + if p == None: + self.prompt = "qpid: " + else: + self.prompt = "qpid[%s]: " % p + + def do_help(self, data): + print "Management Tool for QPID" + print + print "Commands:" + print " agents - Print a list of the known Agents" + print " list - Print summary of existing objects by class" + print " list - Print list of objects of the specified class" + print " list active - Print list of non-deleted objects of the specified class" +# print " show - Print contents of all objects of specified class" +# print " show active - Print contents of all non-deleted objects of specified class" + print " show - Print contents of an object (infer className)" +# print " show - Print contents of one or more objects" +# print " list is space-separated, ranges may be specified (i.e. 
1004-1010)" + print " call [] - Invoke a method on an object" + print " schema - Print summary of object classes seen on the target" + print " schema - Print details of an object class" + print " set time-format short - Select short timestamp format (default)" + print " set time-format long - Select long timestamp format" + print " quit or ^D - Exit the program" + print + + def complete_set(self, text, line, begidx, endidx): + """ Command completion for the 'set' command """ + tokens = split(line) + if len(tokens) < 2: + return ["time-format "] + elif tokens[1] == "time-format": + if len(tokens) == 2: + return ["long", "short"] + elif len(tokens) == 3: + if "long".find(text) == 0: + return ["long"] + elif "short".find(text) == 0: + return ["short"] + elif "time-format".find(text) == 0: + return ["time-format "] + return [] + + def do_set(self, data): + tokens = split(data) + try: + if tokens[0] == "time-format": + self.dispObject.do_setTimeFormat(tokens[1]) + except: + pass + + def complete_schema(self, text, line, begidx, endidx): + tokens = split(line) + if len(tokens) > 2: + return [] + return self.dataObject.classCompletions(text) + + def do_schema(self, data): + try: + self.dataObject.do_schema(data) + except Exception, e: + print "Exception in do_schema: %r" % e + + def do_agents(self, data): + try: + self.dataObject.do_agents(data) + except Exception, e: + print "Exception in do_agents: %r" % e + + def do_id(self, data): + try: + self.dataObject.do_id(data) + except Exception, e: + print "Exception in do_id: %r" % e + + def complete_list(self, text, line, begidx, endidx): + tokens = split(line) + if len(tokens) > 2: + return [] + return self.dataObject.classCompletions(text) + + def do_list(self, data): + try: + self.dataObject.do_list(data) + except Exception, e: + print "Exception in do_list: %r" % e + + def do_show(self, data): + try: + self.dataObject.do_show(data) + except Exception, e: + print "Exception in do_show: %r" % e + + def do_call(self, data): 
+ try: + self.dataObject.do_call(data) + except Exception, e: + print "Exception in do_call: %r" % e + + def do_EOF(self, data): + print "quit" + try: + self.dataObject.do_exit() + except: + pass + return True + + def do_quit(self, data): + try: + self.dataObject.do_exit() + except: + pass + return True + + def postcmd(self, stop, line): + return stop + + def postloop(self): + print "Exiting..." + self.dataObject.close() + +#====================================================================================================== +# QmfData +#====================================================================================================== +class QmfData(Console): + """ + """ + def __init__(self, disp, url, conn_options): + self.disp = disp + self.url = url + self.session = Session(self, manageConnections=True) + self.broker = self.session.addBroker(self.url, **conn_options) + self.lock = Lock() + self.connected = None + self.closing = None + self.first_connect = True + self.cli = None + self.idRegistry = IdRegistry() + self.objects = {} + + #======================= + # Methods to support CLI + #======================= + def setCli(self, cli): + self.cli = cli + + def close(self): + try: + self.closing = True + if self.session and self.broker: + self.session.delBroker(self.broker) + except: + pass # we're shutting down - ignore any errors + + def classCompletions(self, text): + pass + + def do_schema(self, data): + if data == "": + self.schemaSummary() + else: + self.schemaTable(data) + + def do_agents(self, data): + agents = self.session.getAgents() + rows = [] + for agent in agents: + version = 1 + if agent.isV2: + version = 2 + rows.append(("%d.%s" % (agent.getBrokerBank(), agent.getAgentBank()), agent.label, agent.epoch, version)) + self.disp.table("QMF Agents:", ("Agent Name", "Label", "Epoch", "QMF Version"), rows) + + def do_id(self, data): + tokens = data.split() + for token in tokens: + if not token.isdigit(): + print "Value %s is non-numeric" % token + 
return + title = "Translation of Display IDs:" + heads = ('DisplayID', 'Epoch', 'Agent', 'ObjectName') + if len(tokens) == 0: + tokens = self.idRegistry.getDisplayIds() + rows = [] + for token in tokens: + rows.append(self.idRegistry.getIdInfo(int(token))) + self.disp.table(title, heads, rows) + + def do_list(self, data): + tokens = data.split() + if len(tokens) == 0: + self.listClasses() + else: + self.listObjects(tokens) + + def do_show(self, data): + tokens = data.split() + if len(tokens) == 0: + print "Missing Class or ID" + return + keys = self.classKeysByToken(tokens[0]) + if keys: + self.showObjectsByKey(keys) + elif tokens[0].isdigit(): + self.showObjectById(int(tokens[0])) + + def _build_object_name(self, obj): + values = [] + for p,v in obj.getProperties(): + if p.name != "vhostRef" and p.index == 1: + if p.name == "brokerRef": # reference to broker + values.append('org.apache.qpid.broker:broker:amqp-broker') + else: + values.append(str(v)) + + object_key = ",".join(values) + class_key = obj.getClassKey(); + return class_key.getPackageName() + ":" + class_key.getClassName() + ":" + object_key + + + def do_call(self, data): + tokens = data.split() + if len(tokens) < 2: + print "Not enough arguments supplied" + return + displayId = long(tokens[0]) + methodName = tokens[1] + args = [] + for arg in tokens[2:]: + ## + ## If the argument is a map, list, boolean, integer, or floating (one decimal point), + ## run it through the Python evaluator so it is converted to the correct type. 
+ ## + ## TODO: use a regex for this instead of this convoluted logic, + ## or even consider passing all args through eval() [which would + ## be a minor change to the interface as string args would then + ## always need to be quoted as strings within a map/list would + ## now] + if arg[0] == '{' or arg[0] == '[' or arg[0] == '"' or arg[0] == '\'' or arg == "True" or arg == "False" or \ + ((arg.count('.') < 2 and (arg.count('-') == 0 or \ + (arg.count('-') == 1 and arg[0] == '-')) and \ + arg.replace('.','').replace('-','').isdigit())): + args.append(eval(arg)) + else: + args.append(arg) + + obj = None + try: + self.lock.acquire() + if displayId not in self.objects: + print "Unknown ID" + return + obj = self.objects[displayId] + finally: + self.lock.release() + + object_id = obj.getObjectId(); + if not object_id.isV2 and obj.getAgent().isV2: + object_name = self._build_object_name(obj) + object_id = ObjectId.create(object_id.agentName, object_name) + + self.session._sendMethodRequest(self.broker, obj.getClassKey(), object_id, methodName, args) + + + def do_exit(self): + pass + + #==================== + # Sub-Command Methods + #==================== + def schemaSummary(self, package_filter=None): + rows = [] + packages = self.session.getPackages() + for package in packages: + if package_filter and package_filter != package: + continue + keys = self.session.getClasses(package) + for key in keys: + kind = "object" + schema = self.session.getSchema(key) + if schema: + if schema.kind == SchemaClass.CLASS_KIND_EVENT: + kind = "event" + if schema.kind == SchemaClass.CLASS_KIND_TABLE: + # + # Don't display event schemata. This will be a future feature. 
+ # + rows.append((package, key.getClassName(), kind)) + self.disp.table("QMF Classes:", ("Package", "Name", "Kind"), rows) + + def schemaTable(self, text): + packages = self.session.getPackages() + if text in packages: + self.schemaSummary(package_filter=text) + for package in packages: + keys = self.session.getClasses(package) + for key in keys: + if text == key.getClassName() or text == package + ":" + key.getClassName(): + schema = self.session.getSchema(key) + if schema.kind == SchemaClass.CLASS_KIND_TABLE: + self.schemaObject(schema) + else: + self.schemaEvent(schema) + + def schemaObject(self, schema): + rows = [] + title = "Object Class: %s" % schema.__repr__() + heads = ("Element", "Type", "Access", "Unit", "Notes", "Description") + for prop in schema.getProperties(): + notes = "" + if prop.index : notes += "index " + if prop.optional : notes += "optional " + row = (prop.name, self.typeName(prop.type), self.accessName(prop.access), + self.notNone(prop.unit), notes, self.notNone(prop.desc)) + rows.append(row) + for stat in schema.getStatistics(): + row = (stat.name, self.typeName(stat.type), "", self.notNone(stat.unit), "", self.notNone(stat.desc)) + rows.append(row) + self.disp.table(title, heads, rows) + + for method in schema.methods: + rows = [] + heads = ("Argument", "Type", "Direction", "Unit", "Description") + title = " Method: %s" % method.name + for arg in method.arguments: + row = (arg.name, self.typeName(arg.type), arg.dir, self.notNone(arg.unit), self.notNone(arg.desc)) + rows.append(row) + print + self.disp.table(title, heads, rows) + + def schemaEvent(self, schema): + rows = [] + title = "Event Class: %s" % schema.__repr__() + heads = ("Element", "Type", "Unit", "Description") + for arg in schema.arguments: + row = (arg.name, self.typeName(arg.type), self.notNone(arg.unit), self.notNone(arg.desc)) + rows.append(row) + self.disp.table(title, heads, rows) + + def listClasses(self): + title = "Summary of Objects by Type:" + heads = ("Package", 
"Class", "Active", "Deleted") + rows = [] + totals = {} + try: + self.lock.acquire() + for dispId in self.objects: + obj = self.objects[dispId] + key = obj.getClassKey() + index = (key.getPackageName(), key.getClassName()) + if index in totals: + stats = totals[index] + else: + stats = (0, 0) + if obj.isDeleted(): + stats = (stats[0], stats[1] + 1) + else: + stats = (stats[0] + 1, stats[1]) + totals[index] = stats + finally: + self.lock.release() + + for index in totals: + stats = totals[index] + rows.append((index[0], index[1], stats[0], stats[1])) + self.disp.table(title, heads, rows) + + def listObjects(self, tokens): + ckeys = self.classKeysByToken(tokens[0]) + show_deleted = True + if len(tokens) > 1 and tokens[1] == 'active': + show_deleted = None + heads = ("ID", "Created", "Destroyed", "Index") + rows = [] + try: + self.lock.acquire() + for dispId in self.objects: + obj = self.objects[dispId] + if obj.getClassKey() in ckeys: + utime, ctime, dtime = obj.getTimestamps() + dtimestr = self.disp.timestamp(dtime) + if dtime == 0: + dtimestr = "-" + if dtime == 0 or (dtime > 0 and show_deleted): + row = (dispId, self.disp.timestamp(ctime), dtimestr, self.objectIndex(obj)) + rows.append(row) + finally: + self.lock.release() + self.disp.table("Object Summary:", heads, rows) + + def showObjectsByKey(self, key): + pass + + def showObjectById(self, dispId): + heads = ("Attribute", str(dispId)) + rows = [] + try: + self.lock.acquire() + if dispId in self.objects: + obj = self.objects[dispId] + caption = "Object of type: %r" % obj.getClassKey() + for prop in obj.getProperties(): + row = (prop[0].name, self.valueByType(prop[0].type, prop[1])) + rows.append(row) + for stat in obj.getStatistics(): + row = (stat[0].name, self.valueByType(stat[0].type, stat[1])) + rows.append(row) + else: + print "No object found with ID %d" % dispId + return + finally: + self.lock.release() + self.disp.table(caption, heads, rows) + + def classKeysByToken(self, token): + """ + Given a token, 
return a list of matching class keys (if found): + token formats: + : + """ + pname = None + cname = None + parts = token.split(':') + if len(parts) == 1: + cname = parts[0] + elif len(parts) == 2: + pname = parts[0] + cname = parts[1] + else: + raise ValueError("Invalid Class Name: %s" % token) + + keys = [] + packages = self.session.getPackages() + for p in packages: + if pname == None or pname == p: + classes = self.session.getClasses(p) + for key in classes: + if key.getClassName() == cname: + keys.append(key) + return keys + + def typeName (self, typecode): + """ Convert type-codes to printable strings """ + if typecode == 1: return "uint8" + elif typecode == 2: return "uint16" + elif typecode == 3: return "uint32" + elif typecode == 4: return "uint64" + elif typecode == 5: return "bool" + elif typecode == 6: return "short-string" + elif typecode == 7: return "long-string" + elif typecode == 8: return "abs-time" + elif typecode == 9: return "delta-time" + elif typecode == 10: return "reference" + elif typecode == 11: return "boolean" + elif typecode == 12: return "float" + elif typecode == 13: return "double" + elif typecode == 14: return "uuid" + elif typecode == 15: return "field-table" + elif typecode == 16: return "int8" + elif typecode == 17: return "int16" + elif typecode == 18: return "int32" + elif typecode == 19: return "int64" + elif typecode == 20: return "object" + elif typecode == 21: return "list" + elif typecode == 22: return "array" + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def valueByType(self, typecode, val): + if type(val) is type(None): + return "absent" + if typecode == 1: return "%d" % val + elif typecode == 2: return "%d" % val + elif typecode == 3: return "%d" % val + elif typecode == 4: return "%d" % val + elif typecode == 6: return val + elif typecode == 7: return val + elif typecode == 8: return strftime("%c", gmtime(val / 1000000000)) + elif typecode == 9: + if val < 0: val = 0 + sec = val / 1000000000 
+ min = sec / 60 + hour = min / 60 + day = hour / 24 + result = "" + if day > 0: + result = "%dd " % day + if hour > 0 or result != "": + result += "%dh " % (hour % 24) + if min > 0 or result != "": + result += "%dm " % (min % 60) + result += "%ds" % (sec % 60) + return result + + elif typecode == 10: return str(self.idRegistry.displayId(val)) + elif typecode == 11: + if val: + return "True" + else: + return "False" + + elif typecode == 12: return "%f" % val + elif typecode == 13: return "%f" % val + elif typecode == 14: return "%r" % val + elif typecode == 15: return "%r" % val + elif typecode == 16: return "%d" % val + elif typecode == 17: return "%d" % val + elif typecode == 18: return "%d" % val + elif typecode == 19: return "%d" % val + elif typecode == 20: return "%r" % val + elif typecode == 21: return "%r" % val + elif typecode == 22: return "%r" % val + else: + raise ValueError ("Invalid type code: %s" % str(typecode)) + + def accessName (self, code): + """ Convert element access codes to printable strings """ + if code == '1': return "ReadCreate" + elif code == '2': return "ReadWrite" + elif code == '3': return "ReadOnly" + else: + raise ValueError ("Invalid access code: %s" % str(code)) + + def notNone (self, text): + if text == None: + return "" + else: + return text + + def objectIndex(self, obj): + if obj._objectId.isV2: + return obj._objectId.getObject() + result = "" + first = True + props = obj.getProperties() + for prop in props: + if prop[0].index: + if not first: + result += "." 
+ result += self.valueByType(prop[0].type, prop[1]) + first = None + return result + + + #===================== + # Methods from Console + #===================== + def brokerConnectionFailed(self, broker): + """ Invoked when a connection to a broker fails """ + if self.first_connect: + self.first_connect = None + print "Failed to connect: ", broker.error + + def brokerConnected(self, broker): + """ Invoked when a connection is established to a broker """ + try: + self.lock.acquire() + self.connected = True + finally: + self.lock.release() + if not self.first_connect: + print "Broker connected:", broker + self.first_connect = None + + def brokerDisconnected(self, broker): + """ Invoked when the connection to a broker is lost """ + try: + self.lock.acquire() + self.connected = None + finally: + self.lock.release() + if not self.closing: + print "Broker disconnected:", broker + + def objectProps(self, broker, record): + """ Invoked when an object is updated. """ + oid = record.getObjectId() + dispId = self.idRegistry.displayId(oid) + try: + self.lock.acquire() + if dispId in self.objects: + self.objects[dispId].mergeUpdate(record) + else: + self.objects[dispId] = record + finally: + self.lock.release() + + def objectStats(self, broker, record): + """ Invoked when an object is updated. """ + oid = record.getObjectId() + dispId = self.idRegistry.displayId(oid) + try: + self.lock.acquire() + if dispId in self.objects: + self.objects[dispId].mergeUpdate(record) + finally: + self.lock.release() + + def event(self, broker, event): + """ Invoked when an event is raised. 
""" + pass + + def methodResponse(self, broker, seq, response): + print response + + +#====================================================================================================== +# IdRegistry +#====================================================================================================== +class IdRegistry(object): + """ + """ + def __init__(self): + self.next_display_id = 101 + self.oid_to_display = {} + self.display_to_oid = {} + self.lock = Lock() + + def displayId(self, oid): + try: + self.lock.acquire() + if oid in self.oid_to_display: + return self.oid_to_display[oid] + newId = self.next_display_id + self.next_display_id += 1 + self.oid_to_display[oid] = newId + self.display_to_oid[newId] = oid + return newId + finally: + self.lock.release() + + def objectId(self, displayId): + try: + self.lock.acquire() + if displayId in self.display_to_oid: + return self.display_to_oid[displayId] + return None + finally: + self.lock.release() + + def getDisplayIds(self): + result = [] + for displayId in self.display_to_oid: + result.append(str(displayId)) + return result + + def getIdInfo(self, displayId): + """ + Given a display ID, return a tuple of (displayID, bootSequence/Durable, AgentBank/Name, ObjectName) + """ + oid = self.objectId(displayId) + if oid == None: + return (displayId, "?", "unknown", "unknown") + bootSeq = oid.getSequence() + if bootSeq == 0: + bootSeq = '' + agent = oid.getAgentBank() + if agent == '0': + agent = 'Broker' + return (displayId, bootSeq, agent, oid.getObject()) + +#========================================================= +# Option Parsing +#========================================================= + +def parse_options( argv ): + _usage = """qpid-tool [OPTIONS] [[/@][:]]""" + + parser = optparse.OptionParser(usage=_usage) + parser.add_option("-b", "--broker", action="store", type="string", metavar="
", help="Address of qpidd broker with syntax: [username/password@] hostname | ip-address [:]") + parser.add_option("--sasl-mechanism", action="store", type="string", metavar="", help="SASL mechanism for authentication (e.g. EXTERNAL, ANONYMOUS, PLAIN, CRAM-MD5, DIGEST-MD5, GSSAPI). SASL automatically picks the most secure available mechanism - use this option to override.") + parser.add_option("--sasl-service-name", action="store", type="string", help="SASL service name to use") + parser.add_option("--ssl-certificate", + action="store", type="string", metavar="", + help="SSL certificate for client authentication") + parser.add_option("--ssl-key", + action="store", type="string", metavar="", + help="Private key (if not contained in certificate)") + + opts, encArgs = parser.parse_args(args=argv) + try: + encoding = locale.getpreferredencoding() + args = [a.decode(encoding) for a in encArgs] + except: + args = encArgs + + conn_options = {} + broker_option = None + if opts.broker: + broker_option = opts.broker + if opts.ssl_certificate: + conn_options['ssl_certfile'] = opts.ssl_certificate + if opts.ssl_key: + if not opts.ssl_certificate: + parser.error("missing '--ssl-certificate' (required by '--ssl-key')") + conn_options['ssl_keyfile'] = opts.ssl_key + if opts.sasl_mechanism: + conn_options['mechanisms'] = opts.sasl_mechanism + if opts.sasl_service_name: + conn_options['service'] = opts.sasl_service_name + return broker_option, conn_options, args[1:] + +#========================================================= +# Main Program +#========================================================= + + +# Get options specified on the command line +broker_option, conn_options, cargs = parse_options(sys.argv) + +_host = "localhost" +if broker_option is not None: + _host = broker_option +elif len(cargs) > 0: + _host = cargs[0] + +# note: prior to supporting options, qpid-tool assumed positional parameters. +# the first argument was assumed to be the broker address. 
The second argument +# was optional, and, if supplied, was assumed to be the path to the +# certificate. To preserve backward compatibility, accept the certificate if +# supplied via the second parameter. +# +if 'ssl_certfile' not in conn_options: + if len(cargs) > 1: + conn_options['ssl_certfile'] = cargs[1] + +disp = Display() + +# Attempt to make a connection to the target broker +try: + data = QmfData(disp, _host, conn_options) +except Exception, e: + if str(e).find("Exchange not found") != -1: + print "Management not enabled on broker: Use '-m yes' option on broker startup." + else: + print "Failed: %s - %s" % (e.__class__.__name__, e) + sys.exit(1) + +# Instantiate the CLI interpreter and launch it. +cli = Mcli(data, disp) +print("Management Tool for QPID") +try: + cli.cmdloop() +except KeyboardInterrupt: + print + print "Exiting..." +except Exception, e: + print "Failed: %s - %s" % (e.__class__.__name__, e) + +# alway attempt to cleanup broker resources +data.close() diff --git a/qpid/cpp/management/python/bin/qpid-tool.bat b/qpid/cpp/management/python/bin/qpid-tool.bat new file mode 100644 index 0000000000..7eb0210da2 --- /dev/null +++ b/qpid/cpp/management/python/bin/qpid-tool.bat @@ -0,0 +1,2 @@ +@echo off +python %~dp0\qpid-tool %* diff --git a/qpid/cpp/management/python/lib/.gitignore b/qpid/cpp/management/python/lib/.gitignore new file mode 100644 index 0000000000..628d81888c --- /dev/null +++ b/qpid/cpp/management/python/lib/.gitignore @@ -0,0 +1,22 @@ + +# +# +# +# +# http://www.apache.org/licenses/LICENSE-2.0 +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# "License"); you may not use this file except in compliance +# KIND, either express or implied. See the License for the +# Licensed to the Apache Software Foundation (ASF) under one +# Unless required by applicable law or agreed to in writing, +# distributed with this work for additional information +# or more contributor license agreements. 
See the NOTICE file +# regarding copyright ownership. The ASF licenses this file +# software distributed under the License is distributed on an +# specific language governing permissions and limitations +# to you under the Apache License, Version 2.0 (the +# under the License. +# with the License. You may obtain a copy of the License at +/qpid-configc +/qpid-hac +/qpid-routec diff --git a/qpid/cpp/management/python/lib/README.txt b/qpid/cpp/management/python/lib/README.txt new file mode 100644 index 0000000000..cabeb1be02 --- /dev/null +++ b/qpid/cpp/management/python/lib/README.txt @@ -0,0 +1,4 @@ +To run these programs, please set PYTHONPATH to include: + + qpid/python + qpid/extras/qmf/src/py diff --git a/qpid/cpp/management/python/lib/qlslibs/__init__.py b/qpid/cpp/management/python/lib/qlslibs/__init__.py new file mode 100644 index 0000000000..d8a500d9d8 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/__init__.py @@ -0,0 +1,19 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + diff --git a/qpid/cpp/management/python/lib/qlslibs/analyze.py b/qpid/cpp/management/python/lib/qlslibs/analyze.py new file mode 100644 index 0000000000..8c5de05b9e --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/analyze.py @@ -0,0 +1,606 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.analyze + +Classes for recovery and analysis of a Qpid Linear Store (QLS). 
+""" + +import os.path +import qlslibs.err +import qlslibs.jrnl +import qlslibs.utils + +class HighCounter(object): + def __init__(self): + self.num = 0 + def check(self, num): + if self.num < num: + self.num = num + def get(self): + return self.num + def get_next(self): + self.num += 1 + return self.num + +class JournalRecoveryManager(object): + TPL_DIR_NAME = 'tpl2' + JRNL_DIR_NAME = 'jrnl2' + def __init__(self, directory, args): + if not os.path.exists(directory): + raise qlslibs.err.InvalidQlsDirectoryNameError(directory) + self.directory = directory + self.args = args + self.tpl = None + self.journals = {} + self.high_rid_counter = HighCounter() + self.prepared_list = None + def report(self): + self._reconcile_transactions(self.prepared_list, self.args.txn) + if self.tpl is not None: + self.tpl.report(self.args) + for queue_name in sorted(self.journals.keys()): + self.journals[queue_name].report(self.args) + def run(self): + tpl_dir = os.path.join(self.directory, JournalRecoveryManager.TPL_DIR_NAME) + if os.path.exists(tpl_dir): + self.tpl = Journal(tpl_dir, None, self.args) + self.tpl.recover(self.high_rid_counter) + if self.args.show_recovery_recs or self.args.show_all_recs: + print + jrnl_dir = os.path.join(self.directory, JournalRecoveryManager.JRNL_DIR_NAME) + self.prepared_list = self.tpl.txn_map.get_prepared_list() if self.tpl is not None else {} + if os.path.exists(jrnl_dir): + for dir_entry in sorted(os.listdir(jrnl_dir)): + jrnl = Journal(os.path.join(jrnl_dir, dir_entry), self.prepared_list, self.args) + jrnl.recover(self.high_rid_counter) + self.journals[jrnl.get_queue_name()] = jrnl + if self.args.show_recovery_recs or self.args.show_all_recs: + print + print + def _reconcile_transactions(self, prepared_list, txn_flag): + print 'Transaction reconciliation report:' + print '==================================' + print 'Transaction Prepared List (TPL) contains %d open transaction(s):' % len(prepared_list) + for xid in prepared_list.keys(): + 
commit_flag = prepared_list[xid] + if commit_flag is None: + status = '[Prepared, neither committed nor aborted - assuming commit]' + elif commit_flag: + status = '[Prepared, but interrupted during commit phase]' + else: + status = '[Prepared, but interrupted during abort phase]' + print ' ', qlslibs.utils.format_xid(xid), status + if prepared_list[xid] is None: # Prepared, but not committed or aborted + enqueue_record = self.tpl.get_txn_map_record(xid)[0][1] + dequeue_record = qlslibs.utils.create_record(qlslibs.jrnl.DequeueRecord.MAGIC, \ + qlslibs.jrnl.DequeueRecord.TXN_COMPLETE_COMMIT_FLAG, \ + self.tpl.current_journal_file, \ + self.high_rid_counter.get_next(), \ + enqueue_record.record_id, xid, None) + if txn_flag: + self.tpl.add_record(dequeue_record) + print + print 'Open transactions found in queues:' + print '----------------------------------' + for queue_name in sorted(self.journals.keys()): + self.journals[queue_name].reconcile_transactions(prepared_list, txn_flag) + print + if len(prepared_list) > 0: + print 'Creating commit records for the following prepared transactions in TPL:' + for xid in prepared_list.keys(): + print ' ', qlslibs.utils.format_xid(xid) + transaction_record = qlslibs.utils.create_record(qlslibs.jrnl.TransactionRecord.MAGIC_COMMIT, 0, \ + self.tpl.current_journal_file, \ + self.high_rid_counter.get_next(), None, xid, None) + if txn_flag: + self.tpl.add_record(transaction_record) + print + +class EnqueueMap(object): + """ + Map of enqueued records in a QLS journal + """ + def __init__(self, journal): + self.journal = journal + self.enq_map = {} + def add(self, journal_file, enq_record, locked_flag): + if enq_record.record_id in self.enq_map: + raise qlslibs.err.DuplicateRecordIdError(self.journal.current_file_header, enq_record) + self.enq_map[enq_record.record_id] = [journal_file, enq_record, locked_flag] + def contains(self, rid): + """Return True if the map contains the given rid""" + return rid in self.enq_map + def delete(self, 
journal_file, deq_record): + if deq_record.dequeue_record_id in self.enq_map: + enq_list = self.enq_map[deq_record.dequeue_record_id] + del self.enq_map[deq_record.dequeue_record_id] + return enq_list + else: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, deq_record) + def get(self, record_id): + if record_id in self.enq_map: + return self.enq_map[record_id] + return None + def lock(self, journal_file, dequeue_record): + if dequeue_record.dequeue_record_id not in self.enq_map: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, dequeue_record) + self.enq_map[dequeue_record.dequeue_record_id][2] = True + def report_str(self, args): + """Return a string containing a text report for all records in the map""" + if len(self.enq_map) == 0: + return 'No enqueued records found.' + rstr = '%d enqueued records found' % len(self.enq_map) + if args.show_recovered_recs: + rstr += ":" + rid_list = self.enq_map.keys() + rid_list.sort() + for rid in rid_list: + journal_file, record, locked_flag = self.enq_map[rid] + rstr += '\n 0x%x:' % journal_file.file_header.file_num + rstr += record.to_string(args.show_xids, args.show_data, args.txtest) + if locked_flag: + rstr += ' [LOCKED]' + else: + rstr += '.' 
+ return rstr + def unlock(self, journal_file, dequeue_record): + """Set the transaction lock for a given record_id to False""" + if dequeue_record.dequeue_record_id in self.enq_map: + if self.enq_map[dequeue_record.dequeue_record_id][2]: + self.enq_map[dequeue_record.dequeue_record_id][2] = False + else: + raise qlslibs.err.RecordNotLockedError(journal_file.file_header, dequeue_record) + else: + raise qlslibs.err.RecordIdNotFoundError(journal_file.file_header, dequeue_record) + +class TransactionMap(object): + """ + Map of open transactions used while recovering a QLS journal + """ + def __init__(self, enq_map): + self.txn_map = {} + self.enq_map = enq_map + def abort(self, xid): + """Perform an abort operation for the given xid record""" + for journal_file, record, _ in self.txn_map[xid]: + if isinstance(record, qlslibs.jrnl.DequeueRecord): + if self.enq_map.contains(record.dequeue_record_id): + self.enq_map.unlock(journal_file, record) + else: + journal_file.decr_enq_cnt(record) + del self.txn_map[xid] + def add(self, journal_file, record): + if record.xid is None: + raise qlslibs.err.NonTransactionalRecordError(journal_file.file_header, record, 'TransactionMap.add()') + if isinstance(record, qlslibs.jrnl.DequeueRecord): + try: + self.enq_map.lock(journal_file, record) + except qlslibs.err.RecordIdNotFoundError: + # Not in emap, look for rid in tmap - should not happen in practice + txn_op = self._find_record_id(record.xid, record.dequeue_record_id) + if txn_op != None: + if txn_op[2]: + raise qlslibs.err.AlreadyLockedError(journal_file.file_header, record) + txn_op[2] = True + if record.xid in self.txn_map: + self.txn_map[record.xid].append([journal_file, record, False]) # append to existing list + else: + self.txn_map[record.xid] = [[journal_file, record, False]] # create new list + def commit(self, xid): + """Perform a commit operation for the given xid record""" + mismatch_list = [] + for journal_file, record, lock in self.txn_map[xid]: + if 
isinstance(record, qlslibs.jrnl.EnqueueRecord): + self.enq_map.add(journal_file, record, lock) # Transfer enq to emap + else: + if self.enq_map.contains(record.dequeue_record_id): + self.enq_map.unlock(journal_file, record) + self.enq_map.delete(journal_file, record)[0].decr_enq_cnt(record) + else: + mismatch_list.append('0x%x' % record.dequeue_record_id) + del self.txn_map[xid] + return mismatch_list + def contains(self, xid): + """Return True if the xid exists in the map; False otherwise""" + return xid in self.txn_map + def delete(self, journal_file, transaction_record): + """Remove a transaction record from the map using either a commit or abort header""" + if transaction_record.magic[-1] == 'c': + return self.commit(transaction_record.xid) + if transaction_record.magic[-1] == 'a': + self.abort(transaction_record.xid) + else: + raise qlslibs.err.InvalidRecordTypeError(journal_file.file_header, transaction_record, + 'delete from Transaction Map') + def get(self, xid): + if xid in self.txn_map: + return self.txn_map[xid] + return None + def get_prepared_list(self): + """ + Prepared list is a map of xid(key) to one of None, True or False. These represent respectively: + None: prepared, but neither committed or aborted (interrupted before commit or abort) + False: prepared and aborted (interrupted before abort complete) + True: prepared and committed (interrupted before commit complete) + """ + prepared_list = {} + for xid in self.get_xid_list(): + for _, record, _ in self.txn_map[xid]: + if isinstance(record, qlslibs.jrnl.EnqueueRecord): + prepared_list[xid] = None + else: + prepared_list[xid] = record.is_transaction_complete_commit() + return prepared_list + def get_xid_list(self): + return self.txn_map.keys() + def report_str(self, args): + """Return a string containing a text report for all records in the map""" + if len(self.txn_map) == 0: + return 'No outstanding transactions found.' 
+ rstr = '%d outstanding transaction(s)' % len(self.txn_map) + if args.show_recovered_recs: + rstr += ':' + for xid, op_list in self.txn_map.iteritems(): + rstr += '\n %s containing %d operations:' % (qlslibs.utils.format_xid(xid), len(op_list)) + for journal_file, record, _ in op_list: + rstr += '\n 0x%x:' % journal_file.file_header.file_num + rstr += record.to_string(args.show_xids, args.show_data, args.txtest) + else: + rstr += '.' + return rstr + def _find_record_id(self, xid, record_id): + """ Search for and return map list with supplied rid.""" + if xid in self.txn_map: + for txn_op in self.txn_map[xid]: + if txn_op[1].record_id == record_id: + return txn_op + for this_xid in self.txn_map.iterkeys(): + for txn_op in self.txn_map[this_xid]: + if txn_op[1].record_id == record_id: + return txn_op + return None + +class JournalStatistics(object): + """Journal statistics""" + def __init__(self): + self.total_record_count = 0 + self.transient_record_count = 0 + self.filler_record_count = 0 + self.enqueue_count = 0 + self.dequeue_count = 0 + self.transaction_record_count = 0 + self.transaction_enqueue_count = 0 + self.transaction_dequeue_count = 0 + self.transaction_commit_count = 0 + self.transaction_abort_count = 0 + self.transaction_operation_count = 0 + def __str__(self): + fstr = 'Total record count: %d\n' + \ + 'Transient record count: %d\n' + \ + 'Filler_record_count: %d\n' + \ + 'Enqueue_count: %d\n' + \ + 'Dequeue_count: %d\n' + \ + 'Transaction_record_count: %d\n' + \ + 'Transaction_enqueue_count: %d\n' + \ + 'Transaction_dequeue_count: %d\n' + \ + 'Transaction_commit_count: %d\n' + \ + 'Transaction_abort_count: %d\n' + \ + 'Transaction_operation_count: %d\n' + return fstr % (self.total_record_count, + self.transient_record_count, + self.filler_record_count, + self.enqueue_count, + self.dequeue_count, + self.transaction_record_count, + self.transaction_enqueue_count, + self.transaction_dequeue_count, + self.transaction_commit_count, + 
self.transaction_abort_count, + self.transaction_operation_count) + +class Journal(object): + """ + Instance of a Qpid Linear Store (QLS) journal. + """ + JRNL_SUFFIX = 'jrnl' + def __init__(self, directory, xid_prepared_list, args): + self.directory = directory + self.queue_name = os.path.basename(directory) + self.files = {} + self.file_num_list = None + self.file_num_itr = None + self.enq_map = EnqueueMap(self) + self.txn_map = TransactionMap(self.enq_map) + self.current_journal_file = None + self.first_rec_flag = None + self.statistics = JournalStatistics() + self.xid_prepared_list = xid_prepared_list # This is None for the TPL instance only + self.args = args + self.last_record_offset = None # TODO: Move into JournalFile + self.num_filler_records_required = None # TODO: Move into JournalFile + self.fill_to_offset = None + def add_record(self, record): + """Used for reconciling transactions only - called from JournalRecoveryManager._reconcile_transactions()""" + if isinstance(record, qlslibs.jrnl.EnqueueRecord) or isinstance(record, qlslibs.jrnl.DequeueRecord): + if record.xid_size > 0: + self.txn_map.add(self.current_journal_file, record) + else: + self.enq_map.add(self.current_journal_file, record, False) + elif isinstance(record, qlslibs.jrnl.TransactionRecord): + self.txn_map.delete(self.current_journal_file, record) + else: + raise qlslibs.err.InvalidRecordTypeError(self.current_journal_file, record, 'add to Journal') + def get_enq_map_record(self, rid): + return self.enq_map.get(rid) + def get_txn_map_record(self, xid): + return self.txn_map.get(xid) + def get_outstanding_txn_list(self): + return self.txn_map.get_xid_list() + def get_queue_name(self): + return self.queue_name + def recover(self, high_rid_counter): + print 'Recovering %s...' 
% self.queue_name, + self._analyze_files() + try: + while self._get_next_record(high_rid_counter): + pass + self._check_alignment() + except qlslibs.err.NoMoreFilesInJournalError: + print 'No more files in journal' + except qlslibs.err.FirstRecordOffsetMismatchError as err: + print '0x%08x: **** FRO ERROR: queue=\"%s\" fid=0x%x fro actual=0x%08x expected=0x%08x' % \ + (err.get_expected_fro(), err.get_queue_name(), err.get_file_number(), err.get_record_offset(), + err.get_expected_fro()) + print 'done' + def reconcile_transactions(self, prepared_list, txn_flag): + xid_list = self.txn_map.get_xid_list() + if len(xid_list) > 0: + print self.queue_name, 'contains', len(xid_list), 'open transaction(s):' + for xid in xid_list: + if xid in prepared_list.keys(): + commit_flag = prepared_list[xid] + if commit_flag is None: + print ' ', qlslibs.utils.format_xid(xid), '- Assuming commit after prepare' + if txn_flag: + self.txn_map.commit(xid) + elif commit_flag: + print ' ', qlslibs.utils.format_xid(xid), '- Completing interrupted commit operation' + if txn_flag: + self.txn_map.commit(xid) + else: + print ' ', qlslibs.utils.format_xid(xid), '- Completing interrupted abort operation' + if txn_flag: + self.txn_map.abort(xid) + else: + print ' ', qlslibs.utils.format_xid(xid), '- Ignoring, not in prepared transaction list' + if txn_flag: + self.txn_map.abort(xid) + def report(self, args): + print 'Journal "%s":' % self.queue_name + print '=' * (11 + len(self.queue_name)) + if args.stats: + print str(self.statistics) + print self.enq_map.report_str(args) + print self.txn_map.report_str(args) + JournalFile.report_header() + for file_num in sorted(self.files.keys()): + self.files[file_num].report() + #TODO: move this to JournalFile, append to file info + if self.num_filler_records_required is not None and self.fill_to_offset is not None: + print '0x%x:0x%08x: %d filler records required for DBLK alignment to 0x%08x' % \ + (self.current_journal_file.file_header.file_num, 
self.last_record_offset, + self.num_filler_records_required, self.fill_to_offset) + print + #--- protected functions --- + def _analyze_files(self): + for dir_entry in os.listdir(self.directory): + dir_entry_bits = dir_entry.split('.') + if len(dir_entry_bits) == 2 and dir_entry_bits[1] == Journal.JRNL_SUFFIX: + fq_file_name = os.path.join(self.directory, dir_entry) + file_handle = open(fq_file_name) + args = qlslibs.utils.load_args(file_handle, qlslibs.jrnl.RecordHeader) + file_hdr = qlslibs.jrnl.FileHeader(*args) + file_hdr.init(file_handle, *qlslibs.utils.load_args(file_handle, qlslibs.jrnl.FileHeader)) + if file_hdr.is_header_valid(file_hdr): + file_hdr.load(file_handle) + if file_hdr.is_valid(False): + qlslibs.utils.skip(file_handle, + file_hdr.file_header_size_sblks * qlslibs.utils.DEFAULT_SBLK_SIZE) + self.files[file_hdr.file_num] = JournalFile(file_hdr) + self.file_num_list = sorted(self.files.keys()) + self.file_num_itr = iter(self.file_num_list) + def _check_alignment(self): # TODO: Move into JournalFile + if self.last_record_offset is None: # Empty file, _check_file() never run + return + remaining_sblks = self.last_record_offset % qlslibs.utils.DEFAULT_SBLK_SIZE + if remaining_sblks == 0: + self.num_filler_records_required = 0 + else: + self.num_filler_records_required = (qlslibs.utils.DEFAULT_SBLK_SIZE - remaining_sblks) / \ + qlslibs.utils.DEFAULT_DBLK_SIZE + self.fill_to_offset = self.last_record_offset + \ + (self.num_filler_records_required * qlslibs.utils.DEFAULT_DBLK_SIZE) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:0x%08x: %d filler records required for DBLK alignment to 0x%08x' % \ + (self.current_journal_file.file_header.file_num, self.last_record_offset, + self.num_filler_records_required, self.fill_to_offset) + def _check_file(self): + if self.current_journal_file is not None: + if not self.current_journal_file.file_header.is_end_of_file(): + return True + if 
self.current_journal_file.file_header.is_end_of_file(): + self.last_record_offset = self.current_journal_file.file_header.file_handle.tell() + if not self._get_next_file(): + return False + fhdr = self.current_journal_file.file_header + fhdr.file_handle.seek(fhdr.first_record_offset) + return True + def _get_next_file(self): + if self.current_journal_file is not None: + file_handle = self.current_journal_file.file_header.file_handle + if not file_handle.closed: # sanity check, should not be necessary + file_handle.close() + file_num = 0 + try: + while file_num == 0: + file_num = self.file_num_itr.next() + except StopIteration: + pass + if file_num == 0: + return False + self.current_journal_file = self.files[file_num] + self.first_rec_flag = True + if self.args.show_recovery_recs or self.args.show_all_recs: + file_header = self.current_journal_file.file_header + print '0x%x:%s' % (file_header.file_num, file_header.to_string()) + return True + def _get_next_record(self, high_rid_counter): + if not self._check_file(): + return False + self.last_record_offset = self.current_journal_file.file_header.file_handle.tell() + this_record = qlslibs.utils.load(self.current_journal_file.file_header.file_handle, qlslibs.jrnl.RecordHeader) + if not this_record.is_header_valid(self.current_journal_file.file_header): + return False + if self.first_rec_flag: + if this_record.file_offset != self.current_journal_file.file_header.first_record_offset: + raise qlslibs.err.FirstRecordOffsetMismatchError(self.current_journal_file.file_header, this_record) + self.first_rec_flag = False + self.statistics.total_record_count += 1 + start_journal_file = self.current_journal_file + if isinstance(this_record, qlslibs.jrnl.EnqueueRecord): + ok_flag = self._handle_enqueue_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, \ + 
this_record.to_string(self.args.show_xids, self.args.show_data, self.args.txtest)) + elif isinstance(this_record, qlslibs.jrnl.DequeueRecord): + ok_flag = self._handle_dequeue_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record.to_string(self.args.show_xids, None, None)) + elif isinstance(this_record, qlslibs.jrnl.TransactionRecord): + ok_flag = self._handle_transaction_record(this_record, start_journal_file) + high_rid_counter.check(this_record.record_id) + if self.args.show_recovery_recs or self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record.to_string(self.args.show_xids, None, None)) + else: + self.statistics.filler_record_count += 1 + ok_flag = True + if self.args.show_all_recs: + print '0x%x:%s' % (start_journal_file.file_header.file_num, this_record) + qlslibs.utils.skip(self.current_journal_file.file_header.file_handle, qlslibs.utils.DEFAULT_DBLK_SIZE) + return ok_flag + def _handle_enqueue_record(self, enqueue_record, start_journal_file): + while enqueue_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + enqueue_record.truncated_flag = True + return False + if not enqueue_record.is_valid(start_journal_file): + return False + if enqueue_record.is_external() and enqueue_record.data != None: + raise qlslibs.err.ExternalDataError(self.current_journal_file.file_header, enqueue_record) + if enqueue_record.is_transient(): + self.statistics.transient_record_count += 1 + return True + if enqueue_record.xid_size > 0: + self.txn_map.add(start_journal_file, enqueue_record) + self.statistics.transaction_operation_count += 1 + self.statistics.transaction_record_count += 1 + self.statistics.transaction_enqueue_count += 1 + else: + self.enq_map.add(start_journal_file, enqueue_record, False) + 
start_journal_file.incr_enq_cnt() + self.statistics.enqueue_count += 1 + return True + def _handle_dequeue_record(self, dequeue_record, start_journal_file): + while dequeue_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + dequeue_record.truncated_flag = True + return False + if not dequeue_record.is_valid(start_journal_file): + return False + if dequeue_record.xid_size > 0: + if self.xid_prepared_list is None: # ie this is the TPL + dequeue_record.transaction_prepared_list_flag = True + elif not self.enq_map.contains(dequeue_record.dequeue_record_id): + dequeue_record.warnings.append('NOT IN EMAP') # Only for non-TPL records + self.txn_map.add(start_journal_file, dequeue_record) + self.statistics.transaction_operation_count += 1 + self.statistics.transaction_record_count += 1 + self.statistics.transaction_dequeue_count += 1 + else: + try: + self.enq_map.delete(start_journal_file, dequeue_record)[0].decr_enq_cnt(dequeue_record) + except qlslibs.err.RecordIdNotFoundError: + dequeue_record.warnings.append('NOT IN EMAP') + self.statistics.dequeue_count += 1 + return True + def _handle_transaction_record(self, transaction_record, start_journal_file): + while transaction_record.load(self.current_journal_file.file_header.file_handle): + if not self._get_next_file(): + transaction_record.truncated_flag = True + return False + if not transaction_record.is_valid(start_journal_file): + return False + if transaction_record.magic[-1] == 'a': # Abort + self.statistics.transaction_abort_count += 1 + elif transaction_record.magic[-1] == 'c': # Commit + self.statistics.transaction_commit_count += 1 + else: + raise InvalidRecordTypeError('Unknown transaction record magic \'%s\'' % transaction_record.magic) + if self.txn_map.contains(transaction_record.xid): + self.txn_map.delete(self.current_journal_file, transaction_record) + else: + transaction_record.warnings.append('NOT IN TMAP') +# if transaction_record.magic[-1] == 'c': # 
commits only +# self._txn_obj_list[hdr.xid] = hdr + self.statistics.transaction_record_count += 1 + return True + def _load_data(self, record): + while not record.is_complete: + record.load(self.current_journal_file.file_handle) + +class JournalFile(object): + def __init__(self, file_header): + self.file_header = file_header + self.enq_cnt = 0 + self.deq_cnt = 0 + self.num_filler_records_required = None + def incr_enq_cnt(self): + self.enq_cnt += 1 + def decr_enq_cnt(self, record): + if self.enq_cnt <= self.deq_cnt: + raise qlslibs.err.EnqueueCountUnderflowError(self.file_header, record) + self.deq_cnt += 1 + def get_enq_cnt(self): + return self.enq_cnt - self.deq_cnt + def is_outstanding_enq(self): + return self.enq_cnt > self.deq_cnt + @staticmethod + def report_header(): + print 'file_num enq_cnt p_no efp journal_file' + print '-------- ------- ---- ----- ------------' + def report(self): + comment = '' if self.file_header.file_num == 0 else '' + file_num_str = '0x%x' % self.file_header.file_num + print '%8s %7d %4d %4dk %s %s' % (file_num_str, self.get_enq_cnt(), self.file_header.partition_num, + self.file_header.efp_data_size_kb, + os.path.basename(self.file_header.file_handle.name), comment) diff --git a/qpid/cpp/management/python/lib/qlslibs/efp.py b/qpid/cpp/management/python/lib/qlslibs/efp.py new file mode 100644 index 0000000000..1c751c3d06 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/efp.py @@ -0,0 +1,327 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.efp + +Contains empty file pool (EFP) classes. +""" + +import os +import os.path +import qlslibs.err +import shutil +import uuid + +class EfpManager(object): + """ + Top level class to analyze the Qpid Linear Store (QLS) directory for the partitions that make up the + Empty File Pool (EFP). + """ + def __init__(self, directory, disk_space_required_kb): + if not os.path.exists(directory): + raise qlslibs.err.InvalidQlsDirectoryNameError(directory) + self.directory = directory + self.disk_space_required_kb = disk_space_required_kb + self.efp_partitions = [] + self.efp_pools = {} + self.total_num_files = 0 + self.total_cum_file_size_kb = 0 + self.current_efp_partition = None + def add_file_pool(self, file_size_kb, num_files): + """ Add an EFP in the specified partition of the specified size containing the specified number of files """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + print 'Adding pool \'%s\' to partition %s' % (dir_name, self.current_efp_partition.partition_number) + self.total_cum_file_size_kb += self.current_efp_partition.create_new_efp(file_size_kb, num_files) + self.total_num_files += num_files + def freshen_file_pool(self, file_size_kb, num_files): + """ Freshen an EFP in the specified partition and of the specified size to the specified number of files """ + if self.current_efp_partition is None: + partition_list = self.efp_partitions + partition_str = 'all partitions' + else: + partition_list = [self.current_efp_partition] + partition_str = 'partition %d' % 
self.current_efp_partition.partition_number + if file_size_kb is None: + pool_str = 'all pools' + else: + pool_str = 'pool \'%s\'' % EmptyFilePool.get_directory_name(int(file_size_kb)) + print 'Freshening %s in %s to %d files' % (pool_str, partition_str, num_files) + for self.current_efp_partition in partition_list: # Partition objects + if file_size_kb is None: + file_size_list = self.current_efp_partition.efp_pools.keys() + else: + file_size_list = ['%sk' % file_size_kb] + for file_size in file_size_list: + efp = self.current_efp_partition.efp_pools[file_size] + num_files_needed = num_files - efp.get_tot_file_count() + if num_files_needed > 0: + self.current_efp_partition.create_new_efp_files(qlslibs.utils.efp_directory_size(file_size), + num_files_needed) + else: + print ' WARNING: Pool %s in partition %s already contains %d files: no action taken' % \ + (self.current_efp_partition.efp_pools[file_size].size_str, + self.current_efp_partition.partition_number, efp.get_num_files()) + def remove_file_pool(self, file_size_kb): + """ Remove an existing EFP from the specified partition and of the specified size """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + print 'Removing pool \'%s\' from partition %s' % (dir_name, self.current_efp_partition.partition_number) + self.efp_partitions.remove(self.current_efp_partition) + shutil.rmtree(os.path.join(self.current_efp_partition.efp_directory, dir_name)) + def report(self): + print 'Empty File Pool (EFP) report' + print '============================' + print 'Found', len(self.efp_partitions), 'partition(s)' + if (len(self.efp_partitions)) > 0: + sorted_efp_partitions = sorted(self.efp_partitions, key=lambda x: x.partition_number) + EfpPartition.print_report_table_header() + for ptn in sorted_efp_partitions: + ptn.print_report_table_line() + print + for ptn in sorted_efp_partitions: + ptn.report() + def run(self, arg_tup): + self._analyze_efp() + if arg_tup is not None: + _, arg_file_size, arg_num_files, 
arg_add, arg_remove, arg_freshen, arg_list = arg_tup + self._check_args(arg_tup) + if arg_add: + self.add_file_pool(int(arg_file_size), int(arg_num_files)) + if arg_remove: + self.remove_file_pool(int(arg_file_size)) + if arg_freshen: + self.freshen_file_pool(arg_file_size, int(arg_num_files)) + if arg_list: + self.report() + def _analyze_efp(self): + for dir_entry in os.listdir(self.directory): + try: + efp_partition = EfpPartition(os.path.join(self.directory, dir_entry), self.disk_space_required_kb) + efp_partition.scan() + self.efp_partitions.append(efp_partition) + for efpl in efp_partition.efp_pools.iterkeys(): + if efpl not in self.efp_pools: + self.efp_pools[efpl] = [] + self.efp_pools[efpl].append(efp_partition.efp_pools[efpl]) + self.total_num_files += efp_partition.tot_file_count + self.total_cum_file_size_kb += efp_partition.tot_file_size_kb + except qlslibs.err.InvalidPartitionDirectoryNameError: + pass + def _check_args(self, arg_tup): + """ Value check of args. The names of partitions and pools are validated against the discovered instances """ + arg_partition, arg_file_size, _, arg_add, arg_remove, arg_freshen, _ = arg_tup + if arg_partition is not None: + try: + if arg_partition[0] == 'p': # string partition name, eg 'p001' + partition_num = int(arg_partition[1:]) + else: # numeric partition, eg '1' + partition_num = int(arg_partition) + found = False + for partition in self.efp_partitions: + if partition.partition_number == partition_num: + self.current_efp_partition = partition + found = True + break + if not found: + raise qlslibs.err.PartitionDoesNotExistError(arg_partition) + except ValueError: + raise qlslibs.err.InvalidPartitionDirectoryNameError(arg_partition) + if self.current_efp_partition is not None: + pool_list = self.current_efp_partition.efp_pools.keys() + efp_directory_name = EmptyFilePool.get_directory_name(int(arg_file_size)) + if arg_add and efp_directory_name in pool_list: + raise 
qlslibs.err.PoolDirectoryAlreadyExistsError(efp_directory_name) + if (arg_remove or arg_freshen) and efp_directory_name not in pool_list: + raise qlslibs.err.PoolDirectoryDoesNotExistError(efp_directory_name) + +class EfpPartition(object): + """ + Class that represents a EFP partition. Each partition contains one or more Empty File Pools (EFPs). + """ + PTN_DIR_PREFIX = 'p' + EFP_DIR_NAME = 'efp' + def __init__(self, directory, disk_space_required_kb): + self.directory = directory + self.partition_number = None + self.efp_pools = {} + self.tot_file_count = 0 + self.tot_file_size_kb = 0 + self._validate_partition_directory(disk_space_required_kb) + def create_new_efp_files(self, file_size_kb, num_files): + """ Create new EFP files in this partition """ + dir_name = EmptyFilePool.get_directory_name(file_size_kb) + if dir_name in self.efp_pools.keys(): + efp = self.efp_pools[dir_name] + else: + efp = EmptyFilePool(os.path.join(self.directory, EfpPartition.EFP_DIR_NAME), dir_name) + this_tot_file_size_kb = efp.create_new_efp_files(num_files) + self.tot_file_size_kb += this_tot_file_size_kb + self.tot_file_count += num_files + return this_tot_file_size_kb + @staticmethod + def print_report_table_header(): + print 'p_no no_efp tot_files tot_size_kb directory' + print '---- ------ --------- ----------- ---------' + def print_report_table_line(self): + print '%4d %6d %9d %11d %s' % (self.partition_number, len(self.efp_pools), self.tot_file_count, + self.tot_file_size_kb, self.directory) + def report(self): + print 'Partition %s:' % os.path.basename(self.directory) + if len(self.efp_pools) > 0: + EmptyFilePool.print_report_table_header() + for dir_name in self.efp_pools.keys(): + self.efp_pools[dir_name].print_report_table_line() + else: + print '' + print + def scan(self): + if os.path.exists(self.directory): + efp_dir = os.path.join(self.directory, EfpPartition.EFP_DIR_NAME) + for dir_entry in os.listdir(efp_dir): + efp = EmptyFilePool(os.path.join(efp_dir, dir_entry), 
self.partition_number) + efp.scan() + self.tot_file_count += efp.get_tot_file_count() + self.tot_file_size_kb += efp.get_tot_file_size_kb() + self.efp_pools[dir_entry] = efp + def _validate_partition_directory(self, disk_space_required_kb): + if os.path.basename(self.directory)[0] is not EfpPartition.PTN_DIR_PREFIX: + raise qlslibs.err.InvalidPartitionDirectoryNameError(self.directory) + try: + self.partition_number = int(os.path.basename(self.directory)[1:]) + except ValueError: + raise qlslibs.err.InvalidPartitionDirectoryNameError(self.directory) + if not qlslibs.utils.has_write_permission(self.directory): + raise qlslibs.err.WritePermissionError(self.directory) + if disk_space_required_kb is not None: + space_avail = qlslibs.utils.get_avail_disk_space(self.directory) + if space_avail < (disk_space_required_kb * 1024): + raise qlslibs.err.InsufficientSpaceOnDiskError(self.directory, space_avail, + disk_space_required_kb * 1024) + +class EmptyFilePool(object): + """ + Class that represents a single Empty File Pool within a partition. Each EFP contains pre-formatted linear store + journal files (but it may also be empty). 
+ """ + EFP_DIR_SUFFIX = 'k' + EFP_JRNL_EXTENTION = '.jrnl' + EFP_INUSE_DIRNAME = 'in_use' + EFP_RETURNED_DIRNAME = 'returned' + def __init__(self, directory, partition_number): + self.base_dir_name = os.path.basename(directory) + self.directory = directory + self.partition_number = partition_number + self.data_size_kb = None + self.efp_files = [] + self.in_use_files = [] + self.returned_files = [] + self._validate_efp_directory() + def create_new_efp_files(self, num_files): + """ Create one or more new empty journal files of the prescribed size for this EFP """ + this_total_file_size = 0 + for _ in range(num_files): + this_total_file_size += self._create_new_efp_file() + return this_total_file_size + def get_directory(self): + return self.directory + @staticmethod + def get_directory_name(file_size_kb): + """ Static function to create an EFP directory name from the size of the files it contains """ + return '%dk' % file_size_kb + def get_tot_file_count(self): + return len(self.efp_files) + def get_tot_file_size_kb(self): + return self.data_size_kb * len(self.efp_files) + @staticmethod + def print_report_table_header(): + print ' ---------- efp ------------ --------- in_use ---------- -------- returned ---------' + print 'data_size_kb file_count tot_file_size_kb file_count tot_file_size_kb file_count tot_file_size_kb efp_directory' + print '------------ ---------- ---------------- ---------- ---------------- ---------- ---------------- -------------' + def print_report_table_line(self): + print '%12d %10d %16d %10d %16d %10d %16d %s' % (self.data_size_kb, len(self.efp_files), + self.data_size_kb * len(self.efp_files), + len(self.in_use_files), + self.data_size_kb * len(self.in_use_files), + len(self.returned_files), + self.data_size_kb * len(self.returned_files), + self.get_directory()) + def scan(self): + for efp_file in os.listdir(self.directory): + if efp_file == self.EFP_INUSE_DIRNAME: + for in_use_file in os.listdir(os.path.join(self.directory, 
self.EFP_INUSE_DIRNAME)): + self.in_use_files.append(in_use_file) + continue + if efp_file == self.EFP_RETURNED_DIRNAME: + for returned_file in os.listdir(os.path.join(self.directory, self.EFP_RETURNED_DIRNAME)): + self.returned_files.append(returned_file) + continue + if self._validate_efp_file(os.path.join(self.directory, efp_file)): + self.efp_files.append(efp_file) + def _add_efp_file(self, efp_file_name): + """ Add a single journal file of the appropriate size to this EFP. No file size check is made here. """ + self.efp_files.append(efp_file_name) + def _create_new_efp_file(self): + """ Create a single new empty journal file of the prescribed size for this EFP """ + file_name = str(uuid.uuid4()) + EmptyFilePool.EFP_JRNL_EXTENTION + file_header = qlslibs.jrnl.FileHeader(0, qlslibs.jrnl.FileHeader.MAGIC, qlslibs.utils.DEFAULT_RECORD_VERSION, + 0, 0, 0) + file_header.init(None, None, qlslibs.utils.DEFAULT_HEADER_SIZE_SBLKS, self.partition_number, self.data_size_kb, + 0, 0, 0, 0, 0) + efh = file_header.encode() + efh_bytes = len(efh) + file_handle = open(os.path.join(self.directory, file_name), 'wb') + file_handle.write(efh) + file_handle.write('\xff' * (qlslibs.utils.DEFAULT_SBLK_SIZE - efh_bytes)) + file_handle.write('\x00' * (int(self.data_size_kb) * 1024)) + file_handle.close() + fqfn = os.path.join(self.directory, file_name) + self._add_efp_file(fqfn) + return os.path.getsize(fqfn) + def _validate_efp_directory(self): + if self.base_dir_name[-1] is not EmptyFilePool.EFP_DIR_SUFFIX: + raise qlslibs.err.InvalidEfpDirectoryNameError(self.directory) + try: + self.data_size_kb = int(os.path.basename(self.base_dir_name)[:-1]) + except ValueError: + raise qlslibs.err.InvalidEfpDirectoryNameError(self.directory) + def _validate_efp_file(self, efp_file): + file_size = os.path.getsize(efp_file) + expected_file_size = (self.data_size_kb * 1024) + qlslibs.utils.DEFAULT_SBLK_SIZE + if file_size != expected_file_size: + print 'WARNING: File %s not of correct size (size=%d, 
expected=%d): Ignoring' % (efp_file, file_size, + expected_file_size) + return False + file_handle = open(efp_file) + args = qlslibs.utils.load_args(file_handle, qlslibs.jrnl.RecordHeader) + file_hdr = qlslibs.jrnl.FileHeader(*args) + file_hdr.init(file_handle, *qlslibs.utils.load_args(file_handle, qlslibs.jrnl.FileHeader)) + if not file_hdr.is_header_valid(file_hdr): + file_handle.close() + return False + file_hdr.load(file_handle) + file_handle.close() + if not file_hdr.is_valid(True): + return False + return True + + +# ============================================================================= + +if __name__ == "__main__": + print "This is a library, and cannot be executed." diff --git a/qpid/cpp/management/python/lib/qlslibs/err.py b/qpid/cpp/management/python/lib/qlslibs/err.py new file mode 100644 index 0000000000..f47632ce6a --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/err.py @@ -0,0 +1,261 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.err + +Contains error classes. 
+""" + +# --- Parent classes + +class QlsError(Exception): + """Base error class for QLS errors and exceptions""" + def __init__(self): + Exception.__init__(self) + def __str__(self): + return '' + +class QlsRecordError(QlsError): + """Base error class for individual records""" + def __init__(self, file_header, record): + QlsError.__init__(self) + self.file_header = file_header + self.record = record + def get_expected_fro(self): + return self.file_header.first_record_offset + def get_file_number(self): + return self.file_header.file_num + def get_queue_name(self): + return self.file_header.queue_name + def get_record_id(self): + return self.record.record_id + def get_record_offset(self): + return self.record.file_offset + def __str__(self): + return 'queue="%s" file_id=0x%x record_offset=0x%x record_id=0x%x' % \ + (self.file_header.queue_name, self.file_header.file_num, self.record.file_offset, self.record.record_id) + +# --- Error classes + +class AlreadyLockedError(QlsRecordError): + """Transactional record to be locked is already locked""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Transactional operation already locked in TransactionMap: ' + QlsRecordError.__str__(self) + +class DataSizeError(QlsError): + """Error class for Data size mismatch""" + def __init__(self, expected_size, actual_size, data_str): + QlsError.__init__(self) + self.expected_size = expected_size + self.actual_size = actual_size + self.xid_str = data_str + def __str__(self): + return 'Inconsistent data size: expected:%d; actual:%d; data="%s"' % \ + (self.expected_size, self.actual_size, self.data_str) + +class DuplicateRecordIdError(QlsRecordError): + """Duplicate Record Id in Enqueue Map""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Duplicate Record Id in enqueue map: ' + QlsRecordError.__str__(self) + +class 
EnqueueCountUnderflowError(QlsRecordError): + """Attempted to decrement enqueue count past 0""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Enqueue record count underflow: ' + QlsRecordError.__str__(self) + +class ExternalDataError(QlsRecordError): + """Data present in Enqueue record when external data flag is set""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Data present in external data record: ' + QlsRecordError.__str__(self) + +class FirstRecordOffsetMismatchError(QlsRecordError): + """First Record Offset (FRO) does not match file header""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'First record offset mismatch: ' + QlsRecordError.__str__(self) + ' expected_offset=0x%x' % \ + self.file_header.first_record_offset + +class InsufficientSpaceOnDiskError(QlsError): + """Insufficient space on disk""" + def __init__(self, directory, space_avail, space_requried): + QlsError.__init__(self) + self.directory = directory + self.space_avail = space_avail + self.space_required = space_requried + def __str__(self): + return 'Insufficient space on disk: directory=%s; avail_space=%d required_space=%d' % \ + (self.directory, self.space_avail, self.space_required) + +class InvalidClassError(QlsError): + """Invalid class name or type""" + def __init__(self, class_name): + QlsError.__init__(self) + self.class_name = class_name + def __str__(self): + return 'Invalid class name "%s"' % self.class_name + +class InvalidEfpDirectoryNameError(QlsError): + """Invalid EFP directory name - should be NNNNk, where NNNN is a number (of any length)""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid EFP directory name "%s"' % self.directory_name + +#class 
InvalidFileSizeString(QlsError): +# """Invalid file size string""" +# def __init__(self, file_size_string): +# QlsError.__init__(self) +# self.file_size_string = file_size_string +# def __str__(self): +# return 'Invalid file size string "%s"' % self.file_size_string + +class InvalidPartitionDirectoryNameError(QlsError): + """Invalid EFP partition name - should be pNNN, where NNN is a 3-digit partition number""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid partition directory name "%s"' % self.directory_name + +class InvalidQlsDirectoryNameError(QlsError): + """Invalid QLS directory name""" + def __init__(self, directory_name): + QlsError.__init__(self) + self.directory_name = directory_name + def __str__(self): + return 'Invalid QLS directory name "%s"' % self.directory_name + +class InvalidRecordTypeError(QlsRecordError): + """Error class for any operation using an invalid record type""" + def __init__(self, file_header, record, error_msg): + QlsRecordError.__init__(self, file_header, record) + self.error_msg = error_msg + def __str__(self): + return 'Invalid record type: ' + QlsRecordError.__str__(self) + ':' + self.error_msg + +class InvalidRecordVersionError(QlsRecordError): + """Invalid record version""" + def __init__(self, file_header, record, expected_version): + QlsRecordError.__init__(self, file_header, record) + self.expected_version = expected_version + def __str__(self): + return 'Invalid record version: queue="%s" ' + QlsRecordError.__str__(self) + \ + ' ver_found=0x%x ver_expected=0x%x' % (self.record_header.version, self.expected_version) + +class NoMoreFilesInJournalError(QlsError): + """Raised when trying to obtain the next file in the journal and there are no more files""" + def __init__(self, queue_name): + QlsError.__init__(self) + self.queue_name = queue_name + def __str__(self): + return 'No more journal files in queue "%s"' % self.queue_name + 
+class NonTransactionalRecordError(QlsRecordError): + """Transactional operation on non-transactional record""" + def __init__(self, file_header, record, operation): + QlsRecordError.__init__(self, file_header, record) + self.operation = operation + def __str__(self): + return 'Transactional operation on non-transactional record: ' + QlsRecordError.__str__() + \ + ' operation=%s' % self.operation + +class PartitionDoesNotExistError(QlsError): + """Partition name does not exist on disk""" + def __init__(self, partition_directory): + QlsError.__init__(self) + self.partition_directory = partition_directory + def __str__(self): + return 'Partition %s does not exist' % self.partition_directory + +class PoolDirectoryAlreadyExistsError(QlsError): + """Pool directory already exists""" + def __init__(self, pool_directory): + QlsError.__init__(self) + self.pool_directory = pool_directory + def __str__(self): + return 'Pool directory %s already exists' % self.pool_directory + +class PoolDirectoryDoesNotExistError(QlsError): + """Pool directory does not exist""" + def __init__(self, pool_directory): + QlsError.__init__(self) + self.pool_directory = pool_directory + def __str__(self): + return 'Pool directory %s does not exist' % self.pool_directory + +class RecordIdNotFoundError(QlsRecordError): + """Record Id not found in enqueue map""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Record Id not found in enqueue map: ' + QlsRecordError.__str__() + +class RecordNotLockedError(QlsRecordError): + """Record in enqueue map is not locked""" + def __init__(self, file_header, record): + QlsRecordError.__init__(self, file_header, record) + def __str__(self): + return 'Record in enqueue map is not locked: ' + QlsRecordError.__str__() + +class UnexpectedEndOfFileError(QlsError): + """The bytes read from a file is less than that expected""" + def __init__(self, size_read, size_expected, file_offset, 
file_name): + QlsError.__init__(self) + self.size_read = size_read + self.size_expected = size_expected + self.file_offset = file_offset + self.file_name = file_name + def __str__(self): + return 'Tried to read %d at offset %d in file "%s"; only read %d' % \ + (self.size_read, self.file_offset, self.file_name, self.size_expected) + +class WritePermissionError(QlsError): + """No write permission""" + def __init__(self, directory): + QlsError.__init__(self) + self.directory = directory + def __str__(self): + return 'No write permission in directory %s' % self.directory + +class XidSizeError(QlsError): + """Error class for Xid size mismatch""" + def __init__(self, expected_size, actual_size, xid_str): + QlsError.__init__(self) + self.expected_size = expected_size + self.actual_size = actual_size + self.xid_str = xid_str + def __str__(self): + return 'Inconsistent xid size: expected:%d; actual:%d; xid="%s"' % \ + (self.expected_size, self.actual_size, self.xid_str) + +# ============================================================================= + +if __name__ == "__main__": + print "This is a library, and cannot be executed." diff --git a/qpid/cpp/management/python/lib/qlslibs/jrnl.py b/qpid/cpp/management/python/lib/qlslibs/jrnl.py new file mode 100644 index 0000000000..5e65890393 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/jrnl.py @@ -0,0 +1,394 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
class RecordHeader(object):
    """Common header shared by every QLS journal record.

    Wire layout (FORMAT '<4s2H2Q', little-endian): 4-byte magic,
    16-bit version, 16-bit user flags, 64-bit serial, 64-bit record id.
    """
    FORMAT = '<4s2H2Q'
    def __init__(self, file_offset, magic, version, user_flags, serial, record_id):
        self.file_offset = file_offset  # offset of this header within its journal file
        self.magic = magic
        self.version = version
        self.user_flags = user_flags
        self.serial = serial
        self.record_id = record_id
        self.warnings = []              # accumulated warning strings for display
        self.truncated_flag = False     # set when record is cut off by end of journal
    def encode(self):
        """Serialize this header to its binary wire format."""
        return struct.pack(RecordHeader.FORMAT, self.magic, self.version, self.user_flags, self.serial,
                           self.record_id)
    def load(self, file_handle):
        """Subclass hook: read any post-header payload. The base header has none."""
        pass
    @staticmethod
    def discriminate(args):
        """Use the last char in the header magic to determine the header type"""
        return CLASSES.get(args[1][-1], RecordHeader)
    def is_empty(self):
        """Return True if this record is empty (ie has a magic of 0x00000000)."""
        return self.magic == '\x00'*4
    def is_header_valid(self, file_header):
        """Check that this record is valid against the file header it was read from."""
        if self.is_empty():
            return False
        if self.magic[:3] != 'QLS' or self.magic[3] not in ['a', 'c', 'd', 'e', 'f', 'x']:
            return False
        if self.magic[-1] != 'x':
            if self.version != qlslibs.utils.DEFAULT_RECORD_VERSION:
                raise qlslibs.err.InvalidRecordVersionError(file_header, self, qlslibs.utils.DEFAULT_RECORD_VERSION)
            if self.serial != file_header.serial:
                return False
        return True
    def to_rh_string(self):
        """Return string representation of this header"""
        if self.is_empty():
            return '0x%08x: <empty>' % (self.file_offset)
        if self.magic[-1] == 'x':
            return '0x%08x: [X]' % (self.file_offset)
        if self.magic[-1] in ['a', 'c', 'd', 'e', 'f', 'x']:
            return '0x%08x: [%c v=%d f=0x%04x rid=0x%x]' % \
                (self.file_offset, self.magic[-1].upper(), self.version, self.user_flags, self.record_id)
        # Bug fix: the fallback formatted two arguments through a single
        # conversion ('0x%08x: ' % (offset, magic)), which raises TypeError.
        # Include the unrecognized magic in the output instead.
        return '0x%08x: <unknown magic %r>' % (self.file_offset, self.magic)
    def _get_warnings(self):
        """Concatenate all warnings as '<w1><w2>...' for display."""
        warn_str = ''
        for warn in self.warnings:
            warn_str += '<%s>' % warn
        return warn_str

class RecordTail(object):
    """Trailing consistency block of a record: inverted magic, Adler-32
    checksum of the record body, and a repeat of serial/record id.
    """
    FORMAT = '<4sL2Q'
    def __init__(self, file_handle): # TODO - clumsy, only allows reading from disk. Move all disk stuff to load()
        self.file_offset = file_handle.tell() if file_handle is not None else 0
        self.complete = False
        self.read_size = struct.calcsize(RecordTail.FORMAT)
        self.fbin = file_handle.read(self.read_size) if file_handle is not None else None
        self.valid_flag = None
        if self.fbin is not None and len(self.fbin) >= self.read_size:
            self.complete = True
            self.xmagic, self.checksum, self.serial, self.record_id = struct.unpack(RecordTail.FORMAT, self.fbin)
    def load(self, file_handle):
        """Used to continue load of RecordTail object if it is split between files"""
        # Bug fix: this previously tested `not self.is_complete` -- but
        # is_complete is a method, so the always-truthy bound method made the
        # condition always False and a split tail was never finished loading.
        if not self.complete:
            self.fbin += file_handle.read(self.read_size - len(self.fbin))
            if (len(self.fbin)) >= self.read_size:
                self.complete = True
                self.xmagic, self.checksum, self.serial, self.record_id = struct.unpack(RecordTail.FORMAT, self.fbin)
    def is_complete(self):
        """Return True once all read_size bytes of the tail have been read."""
        return self.complete
    def is_valid(self, record):
        """Validate this tail against its record; the result is cached in valid_flag."""
        if self.valid_flag is None:
            if not self.complete:
                return False
            self.valid_flag = qlslibs.utils.inv_str(self.xmagic) == record.magic and \
                              self.serial == record.serial and \
                              self.record_id == record.record_id and \
                              qlslibs.utils.adler32(record.checksum_encode()) == self.checksum
        return self.valid_flag
    def to_string(self):
        """Return a string representation of the this RecordTail instance"""
        if self.valid_flag is not None:
            if not self.valid_flag:
                return '[INVALID RECORD TAIL]'
        magic = qlslibs.utils.inv_str(self.xmagic)
        magic_char = magic[-1].upper() if magic[-1] in string.printable else '?'
        return '[%c cs=0x%08x rid=0x%x]' % (magic_char, self.checksum, self.record_id)
class EnqueueRecord(RecordHeader):
    """Journal record for a message enqueue: optional xid, then message data,
    then (when either is present) a RecordTail."""
    # Wire layout after the common RecordHeader: xid_size, data_size (2 x uint64).
    FORMAT = '<2Q'
    MAGIC = 'QLSe'
    EXTERNAL_FLAG_MASK = 0x20   # user_flags bit: message body stored outside the journal
    TRANSIENT_FLAG_MASK = 0x10  # user_flags bit: message need not survive restart
    def init(self, _, xid_size, data_size):
        # Second-stage initializer called after discriminate() has chosen this
        # class; receives the values unpacked from FORMAT.
        self.xid_size = xid_size
        self.data_size = data_size
        self.xid = None
        self.xid_complete = False
        self.data = None
        self.data_complete = False
        self.record_tail = None
    def checksum_encode(self): # encode excluding record tail
        # Byte stream over which the tail's Adler-32 checksum is computed.
        cs_bytes = RecordHeader.encode(self) + struct.pack(self.FORMAT, self.xid_size, self.data_size)
        if self.xid is not None:
            cs_bytes += self.xid
        if self.data is not None:
            cs_bytes += self.data
        return cs_bytes
    def is_external(self):
        # True when the message payload lives outside the journal file.
        return self.user_flags & EnqueueRecord.EXTERNAL_FLAG_MASK > 0
    def is_transient(self):
        # True when the message was enqueued as non-durable.
        return self.user_flags & EnqueueRecord.TRANSIENT_FLAG_MASK > 0
    def is_valid(self, journal_file):
        # Header, payload completeness, declared sizes and (when present) the
        # record tail must all check out.
        if not RecordHeader.is_header_valid(self, journal_file.file_header):
            return False
        if not (self.xid_complete and self.data_complete):
            return False
        if self.xid_size > 0 and len(self.xid) != self.xid_size:
            return False
        if self.data_size > 0 and len(self.data) != self.data_size:
            return False
        if self.xid_size > 0 or self.data_size > 0:
            if self.record_tail is None:
                return False
            if not self.record_tail.is_valid(self):
                return False
        return True
    def load(self, file_handle):
        """Return True when load is incomplete and must be called again with new file handle"""
        # Resumable state machine: each phase (xid, data, tail) may be split
        # across journal files, so partial progress is kept on self.
        self.xid, self.xid_complete = qlslibs.utils.load_data(file_handle, self.xid, self.xid_size)
        if not self.xid_complete:
            return True
        if self.is_external():
            # External messages carry no inline data.
            self.data_complete = True
        else:
            self.data, self.data_complete = qlslibs.utils.load_data(file_handle, self.data, self.data_size)
        if not self.data_complete:
            return True
        if self.xid_size > 0 or self.data_size > 0:
            if self.record_tail is None:
                self.record_tail = RecordTail(file_handle)
            elif not self.record_tail.is_complete():
                self.record_tail.load(file_handle) # Continue loading partially loaded tail
            if self.record_tail.is_complete():
                # Validate eagerly so valid_flag is cached for to_string().
                self.record_tail.is_valid(self)
            else:
                return True
        return False
    def to_string(self, show_xid_flag, show_data_flag, txtest_flag):
        """Return a string representation of the this EnqueueRecord instance"""
        if self.truncated_flag:
            # NOTE(review): RecordHeader defines no __str__, so this falls back to
            # object.__str__ -- to_rh_string() was probably intended; confirm.
            return '%s xid(%d) data(%d) [Truncated, no more files in journal]' % (RecordHeader.__str__(self),
                                                                                 self.xid_size, self.data_size)
        if self.record_tail is None:
            record_tail_str = ''
        else:
            record_tail_str = self.record_tail.to_string()
        return '%s %s %s %s %s %s' % (self.to_rh_string(),
                                      qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag),
                                      qlslibs.utils.format_data(self.data, self.data_size, show_data_flag, txtest_flag),
                                      record_tail_str, self._print_flags(), self._get_warnings())
    def _print_flags(self):
        """Utility function to decode the flags field in the header and print a string representation"""
        fstr = ''
        if self.is_transient():
            fstr = '[TRANSIENT'
        if self.is_external():
            if len(fstr) > 0:
                fstr += ',EXTERNAL'
            else:
                # NOTE(review): '*EXTERNAL' opens with '*' but is closed with ']'
                # below, yielding '*EXTERNAL]' -- '[EXTERNAL' may have been intended.
                fstr = '*EXTERNAL'
        if len(fstr) > 0:
            fstr += ']'
        return fstr
self.record_tail is None: + record_tail_str = '' + else: + record_tail_str = self.record_tail.to_string() + return '%s drid=0x%x %s %s %s %s' % (self.to_rh_string(), self.dequeue_record_id, + qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag), + record_tail_str, self._print_flags(), self._get_warnings()) + def _print_flags(self): + """Utility function to decode the flags field in the header and print a string representation""" + if self.transaction_prepared_list_flag: + if self.is_transaction_complete_commit(): + return '[COMMIT]' + else: + return '[ABORT]' + return '' + +class TransactionRecord(RecordHeader): + FORMAT = ' 0: + if self.record_tail is None: + self.record_tail = RecordTail(file_handle) + elif not self.record_tail.is_complete(): + self.record_tail.load(file_handle) + if self.record_tail.is_complete(): + self.record_tail.is_valid(self) + else: + return True + return False + def to_string(self, show_xid_flag, _u1, _u2): + """Return a string representation of the this TransactionRecord instance""" + if self.truncated_flag: + return '%s xid(%d) [Truncated, no more files in journal]' % (RecordHeader.__str__(self), self.xid_size) + if self.record_tail is None: + record_tail_str = '' + else: + record_tail_str = self.record_tail.to_string() + return '%s %s %s %s' % (self.to_rh_string(), + qlslibs.utils.format_xid(self.xid, self.xid_size, show_xid_flag), + record_tail_str, self._get_warnings()) + +# ============================================================================= + +CLASSES = { + 'a': TransactionRecord, + 'c': TransactionRecord, + 'd': DequeueRecord, + 'e': EnqueueRecord, +} + +if __name__ == '__main__': + print 'This is a library, and cannot be executed.' 
diff --git a/qpid/cpp/management/python/lib/qlslibs/utils.py b/qpid/cpp/management/python/lib/qlslibs/utils.py new file mode 100644 index 0000000000..dfa760a839 --- /dev/null +++ b/qpid/cpp/management/python/lib/qlslibs/utils.py @@ -0,0 +1,216 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +""" +Module: qlslibs.utils + +Contains helper functions for qpid_qls_analyze. 
# Journal geometry defaults.
DEFAULT_DBLK_SIZE = 128
DEFAULT_SBLK_SIZE = 4096 # 32 dblks
DEFAULT_SBLK_SIZE_KB = DEFAULT_SBLK_SIZE / 1024
DEFAULT_RECORD_VERSION = 2
DEFAULT_HEADER_SIZE_SBLKS = 1

def adler32(data):
    """Return the Adler-32 checksum of data as an unsigned 32-bit value."""
    return zlib.adler32(data) & 0xffffffff

def create_record(magic, uflags, journal_file, record_id, dequeue_record_id, xid, data):
    """Helper function to construct a record with xid, data (where applicable) and consistent tail with checksum"""
    record_class = qlslibs.jrnl.CLASSES.get(magic[-1])
    serial = journal_file.file_header.serial
    new_record = record_class(0, magic, DEFAULT_RECORD_VERSION, uflags, serial, record_id)
    xid_len = 0 if xid is None else len(xid)
    # Second-stage init depends on the concrete record type.
    if isinstance(new_record, qlslibs.jrnl.EnqueueRecord):
        data_len = 0 if data is None else len(data)
        new_record.init(None, xid_len, data_len)
    elif isinstance(new_record, qlslibs.jrnl.DequeueRecord):
        new_record.init(None, dequeue_record_id, xid_len)
    elif isinstance(new_record, qlslibs.jrnl.TransactionRecord):
        new_record.init(None, xid_len)
    else:
        raise qlslibs.err.InvalidClassError(new_record.__class__.__name__)
    if xid is not None:
        new_record.xid = xid
        new_record.xid_complete = True
    if data is not None:
        new_record.data = data
        new_record.data_complete = True
    new_record.record_tail = _mk_record_tail(new_record)
    return new_record

def efp_directory_size(directory_name):
    """Decode an EFP directory name of the form 'NNNk' to its numeric size; return 0 when it does not parse."""
    try:
        if directory_name[-1] == 'k':
            return int(directory_name[:-1])
    except ValueError:
        pass
    return 0

def format_data(data, data_size=None, show_data_flag=True, txtest_flag=False):
    """Format binary message data for printing."""
    return _format_binary(data, data_size, show_data_flag, 'data', qlslibs.err.DataSizeError, False, txtest_flag)
def format_xid(xid, xid_size=None, show_xid_flag=True):
    """Format binary XID for printing"""
    return _format_binary(xid, xid_size, show_xid_flag, 'xid', qlslibs.err.XidSizeError, True, False)

def get_avail_disk_space(path):
    """Return available disk space (in df's block units) for the filesystem holding path."""
    # Parses line 2, column 4 of `df path` output (the 'Available' column).
    df_proc = subprocess.Popen(["df", path], stdout=subprocess.PIPE)
    output = df_proc.communicate()[0]
    return int(output.split('\n')[1].split()[3])

def has_write_permission(path):
    """Return True if path is writable by the current process."""
    # Bug fix: this previously tested stat.S_IRGRP -- the group *read* bit --
    # which matches neither the function name nor its callers' intent.
    # os.access() performs the actual write-permission check.
    return os.access(path, os.W_OK)

def inv_str(in_string):
    """Perform a binary 1's compliment (invert all bits) on a binary string"""
    istr = ''
    for index in range(0, len(in_string)):
        istr += chr(~ord(in_string[index]) & 0xff)
    return istr

def load(file_handle, klass):
    """Load a record of class klass from a file"""
    args = load_args(file_handle, klass)
    subclass = klass.discriminate(args)
    result = subclass(*args) # create instance of record
    if subclass != klass:
        result.init(*load_args(file_handle, subclass))
    return result

def load_args(file_handle, klass):
    """Load the arguments from class klass"""
    size = struct.calcsize(klass.FORMAT)
    foffs = file_handle.tell(),  # NB: trailing comma is deliberate -- foffs is a 1-tuple
    fbin = file_handle.read(size)
    if len(fbin) != size:
        raise qlslibs.err.UnexpectedEndOfFileError(len(fbin), size, foffs, file_handle.name)
    return foffs + struct.unpack(klass.FORMAT, fbin)

def load_data(file_handle, element, element_size):
    """Read element_size bytes of binary data from file_handle into element.

    Returns (element, complete) so a partially-read element can be resumed
    from the next journal file.
    """
    if element_size == 0:
        return element, True
    if element is None:
        element = file_handle.read(element_size)
    else:
        read_size = element_size - len(element)
        element += file_handle.read(read_size)
    return element, len(element) == element_size

def skip(file_handle, boundary):
    """Read and discard disk bytes until the next multiple of boundary"""
    if not file_handle.closed:
        file_handle.read(_rem_bytes_in_block(file_handle, boundary))
txtest_flag): + """Format binary XID for printing""" + if bin_str is None and bin_size is not None: + if bin_size > 0: + raise err_class(bin_size, len(bin_str), bin_str) + return '' + if bin_size is None: + bin_size = len(bin_str) + elif bin_size != len(bin_str): + raise err_class(bin_size, len(bin_str), bin_str) + out_str = '%s(%d)' % (prefix, bin_size) + if txtest_flag: + out_str += '=\'%s\'' % _txtest_msg_str(bin_str) + elif show_bin_flag: + if _is_printable(bin_str): + binstr = '"%s"' % _split_str(bin_str) + elif hex_num_flag: + binstr = '0x%s' % _str_to_hex_num(bin_str) + else: + binstr = _hex_split_str(bin_str, 50, 10, 10) + out_str += '=\'%s\'' % binstr + return out_str + +def _hex_str(in_str, begin, end): + """Return a binary string as a hex string""" + hstr = '' + for index in range(begin, end): + if _is_printable(in_str[index]): + hstr += in_str[index] + else: + hstr += '\\%02x' % ord(in_str[index]) + return hstr + +def _hex_split_str(in_str, split_size, head_size, tail_size): + """Split a hex string into two parts separated by an ellipsis""" + if len(in_str) <= split_size: + return _hex_str(in_str, 0, len(in_str)) + return _hex_str(in_str, 0, head_size) + ' ... 
' + _hex_str(in_str, len(in_str)-tail_size, len(in_str)) + +def _txtest_msg_str(bin_str): + """Extract the message number used in qpid-txtest""" + msg_index = bin_str.find('msg') + if msg_index >= 0: + end_index = bin_str.find('\x00', msg_index) + assert end_index >= 0 + return bin_str[msg_index:end_index] + return None + +def _is_printable(in_str): + """Return True if in_str in printable; False otherwise.""" + for this_char in in_str: + if this_char not in string.letters and this_char not in string.digits and this_char not in string.punctuation: + return False + return True + +def _mk_record_tail(record): + record_tail = qlslibs.jrnl.RecordTail(None) + record_tail.xmagic = inv_str(record.magic) + record_tail.checksum = adler32(record.checksum_encode()) + record_tail.serial = record.serial + record_tail.record_id = record.record_id + return record_tail + +def _rem_bytes_in_block(file_handle, block_size): + """Return the remaining bytes in a block""" + foffs = file_handle.tell() + return (_size_in_blocks(foffs, block_size) * block_size) - foffs + +def _size_in_blocks(size, block_size): + """Return the size in terms of data blocks""" + return int((size + block_size - 1) / block_size) + +def _split_str(in_str, split_size = 50): + """Split a string into two parts separated by an ellipsis if it is longer than split_size""" + if len(in_str) < split_size: + return in_str + return in_str[:25] + ' ... ' + in_str[-25:] + +def _str_to_hex_num(in_str): + """Turn a string into a hex number representation, little endian assumed (ie LSB is first, MSB is last)""" + return ''.join(x.encode('hex') for x in reversed(in_str)) diff --git a/qpid/cpp/management/python/lib/qmf/__init__.py b/qpid/cpp/management/python/lib/qmf/__init__.py new file mode 100644 index 0000000000..31d5a2ef58 --- /dev/null +++ b/qpid/cpp/management/python/lib/qmf/__init__.py @@ -0,0 +1,18 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# diff --git a/qpid/cpp/management/python/lib/qmf/console.py b/qpid/cpp/management/python/lib/qmf/console.py new file mode 100644 index 0000000000..405c5dcb62 --- /dev/null +++ b/qpid/cpp/management/python/lib/qmf/console.py @@ -0,0 +1,4054 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
#===================================================================================================
# CONSOLE
#===================================================================================================
class Console:
    """ To access the asynchronous operations, a class must be derived from
    Console with overrides of any combination of the available methods. """
    # All callbacks are no-ops by default; subclasses override only the
    # notifications they care about.

    def brokerConnected(self, broker):
        """ Invoked when a connection is established to a broker """
        pass

    def brokerConnectionFailed(self, broker):
        """ Invoked when a connection to a broker fails """
        pass

    def brokerDisconnected(self, broker):
        """ Invoked when the connection to a broker is lost """
        pass

    def newPackage(self, name):
        """ Invoked when a QMF package is discovered. """
        pass

    def newClass(self, kind, classKey):
        """ Invoked when a new class is discovered.  Session.getSchema can be
        used to obtain details about the class."""
        pass

    def newAgent(self, agent):
        """ Invoked when a QMF agent is discovered. """
        pass

    def delAgent(self, agent):
        """ Invoked when a QMF agent disconnects. """
        pass

    def objectProps(self, broker, record):
        """ Invoked when an object is updated. """
        pass

    def objectStats(self, broker, record):
        """ Invoked when an object is updated. """
        pass

    def event(self, broker, event):
        """ Invoked when an event is raised. """
        pass

    def heartbeat(self, agent, timestamp):
        """ Invoked when an agent heartbeat is received. """
        pass

    def brokerInfo(self, broker):
        """ Invoked when the connection sequence reaches the point where broker information is available. """
        pass

    def methodResponse(self, broker, seq, response):
        """ Invoked when a method response from an asynchronous method call is received. """
        pass


#===================================================================================================
# BrokerURL
#===================================================================================================
class BrokerURL(URL):
    # Extends qpid.util.URL with default AMQP ports and extracted auth
    # credentials (authName/authPass).
    def __init__(self, *args, **kwargs):
        URL.__init__(self, *args, **kwargs)
        if self.port is None:
            # Default ports: 5671 for amqps (TLS), 5672 for plain amqp.
            if self.scheme == URL.AMQPS:
                self.port = 5671
            else:
                self.port = 5672
        self.authName = None
        self.authPass = None
        if self.user:
            self.authName = str(self.user)
        if self.password:
            self.authPass = str(self.password)

    def name(self):
        """ Return the canonical string form of this URL. """
        return str(self)

    def match(self, host, port):
        """ Return True if host:port resolves to the same address as this URL.
        NOTE(review): only the first getaddrinfo() result is compared --
        multi-homed hosts may not match; confirm this is intended. """
        return socket.getaddrinfo(self.host, self.port)[0][4] == socket.getaddrinfo(host, port)[0][4]
+ """ + def __init__(self, agent, schema, codec=None, prop=None, stat=None, v2Map=None, agentName=None, kwargs={}): + self._agent = agent + self._session = None + self._broker = None + if agent: + self._session = agent.session + self._broker = agent.broker + self._schema = schema + self._properties = [] + self._statistics = [] + self._currentTime = None + self._createTime = None + self._deleteTime = 0 + self._objectId = None + if v2Map: + self.v2Init(v2Map, agentName) + return + + if self._agent: + self._currentTime = codec.read_uint64() + self._createTime = codec.read_uint64() + self._deleteTime = codec.read_uint64() + self._objectId = ObjectId(codec) + if codec: + if prop: + notPresent = self._parsePresenceMasks(codec, schema) + for property in schema.getProperties(): + if property.name in notPresent: + self._properties.append((property, None)) + else: + self._properties.append((property, self._session._decodeValue(codec, property.type, self._broker))) + if stat: + for statistic in schema.getStatistics(): + self._statistics.append((statistic, self._session._decodeValue(codec, statistic.type, self._broker))) + else: + for property in schema.getProperties(): + if property.optional: + self._properties.append((property, None)) + else: + self._properties.append((property, self._session._defaultValue(property, self._broker, kwargs))) + for statistic in schema.getStatistics(): + self._statistics.append((statistic, self._session._defaultValue(statistic, self._broker, kwargs))) + + def v2Init(self, omap, agentName): + if omap.__class__ != dict: + raise Exception("QMFv2 object data must be a map/dict") + if '_values' not in omap: + raise Exception("QMFv2 object must have '_values' element") + + values = omap['_values'] + for prop in self._schema.getProperties(): + if prop.name in values: + if prop.type == 10: # Reference + self._properties.append((prop, ObjectId(values[prop.name], agentName=agentName))) + else: + self._properties.append((prop, values[prop.name])) + for 
    def getAgent(self):
        """ Return the agent from which this object was sent """
        return self._agent

    def getBroker(self):
        """ Return the broker from which this object was sent """
        return self._broker

    def getV2RoutingKey(self):
        """ Get the QMFv2 routing key to address this object """
        return self._agent.getV2RoutingKey()

    def getObjectId(self):
        """ Return the object identifier for this object """
        return self._objectId

    def getClassKey(self):
        """ Return the class-key that references the schema describing this object. """
        return self._schema.getKey()

    def getSchema(self):
        """ Return the schema that describes this object. """
        return self._schema

    def getMethods(self):
        """ Return a list of methods available for this object. """
        return self._schema.getMethods()

    def getTimestamps(self):
        """ Return the current, creation, and deletion times for this object. """
        return self._currentTime, self._createTime, self._deleteTime

    def isDeleted(self):
        """ Return True iff this object has been deleted. """
        return self._deleteTime != 0

    def isManaged(self):
        """ Return True iff this object is a proxy for a managed object on an agent. """
        return self._objectId and self._agent

    def getIndex(self):
        """ Return a string describing this object's primary key. """
        if self._objectId.isV2:
            return self._objectId.getObject()
        # QMFv1: join the values of all index properties with ':'.
        result = u""
        for prop, value in self._properties:
            if prop.index:
                if result != u"":
                    result += u":"
                try:
                    valstr = unicode(self._session._displayValue(value, prop.type))
                except Exception, e:
                    # NOTE(review): an undisplayable value contributes an empty
                    # string; placeholder text may have been lost here -- confirm.
                    valstr = u""
                result += valstr
        return result

    def getProperties(self):
        """ Return a list of object properties """
        return self._properties

    def getStatistics(self):
        """ Return a list of object statistics """
        return self._statistics

    def mergeUpdate(self, newer):
        """ Replace properties and/or statistics with a newly received update """
        if not self.isManaged():
            raise Exception("Object is not managed")
        if self._objectId != newer._objectId:
            raise Exception("Objects with different object-ids")
        # Empty lists in the update mean "unchanged": keep the current values.
        if len(newer.getProperties()) > 0:
            self._properties = newer.getProperties()
        if len(newer.getStatistics()) > 0:
            self._statistics = newer.getStatistics()
        self._currentTime = newer._currentTime
        self._deleteTime = newer._deleteTime

    def update(self):
        """ Contact the agent and retrieve the lastest property and statistic values for this object. """
        if not self.isManaged():
            raise Exception("Object is not managed")
        obj = self._agent.getObjects(_objectId=self._objectId)
        if obj:
            self.mergeUpdate(obj[0])
        else:
            raise Exception("Underlying object no longer exists")

    def __repr__(self):
        if self.isManaged():
            id = self.getObjectId().__repr__()
        else:
            id = "unmanaged"
        key = self.getClassKey()
        return key.getPackageName() + ":" + key.getClassName() +\
          "[" + id + "] " + self.getIndex().encode("utf8")
this is a not-present attribute) + # + for prop in self._schema.getProperties(): + if name == prop.name: + return None + for stat in self._schema.getStatistics(): + if name == stat.name: + return None + raise Exception("Type Object has no attribute '%s'" % name) + + def __setattr__(self, name, value): + if name[0] == '_': + super.__setattr__(self, name, value) + return + + for prop, unusedValue in self._properties: + if name == prop.name: + newprop = (prop, value) + newlist = [] + for old, val in self._properties: + if name == old.name: + newlist.append(newprop) + else: + newlist.append((old, val)) + self._properties = newlist + return + super.__setattr__(self, name, value) + + def _parseDefault(self, typ, val): + try: + if typ in (2, 3, 4): # 16, 32, 64 bit numbers + val = int(val, 0) + elif typ == 11: # bool + val = val.lower() in ("t", "true", "1", "yes", "y") + elif typ == 15: # map + val = eval(val) + except: + pass + return val + + def _handleDefaultArguments(self, method, args, kwargs): + count = len([x for x in method.arguments if x.dir.find("I") != -1]) + for kwarg in kwargs.keys(): + if not [x for x in method.arguments if x.dir.find("I") != -1 and \ + x.name == kwarg]: + del kwargs[kwarg] + + # If there were not enough args supplied, add any defaulted arguments + # from the schema (starting at the end) until we either get enough + # arguments or run out of defaults + while count > len(args) + len(kwargs): + for arg in reversed(method.arguments): + if arg.dir.find("I") != -1 and getattr(arg, "default") is not None and \ + arg.name not in kwargs: + # add missing defaulted value to the kwargs dict + kwargs[arg.name] = self._parseDefault(arg.type, arg.default) + break + else: + # no suitable defaulted args found, end the while loop + break + + return count + + def _sendMethodRequest(self, name, args, kwargs, synchronous=False, timeWait=None): + for method in self._schema.getMethods(): + if name == method.name: + aIdx = 0 + sendCodec = Codec() + seq = 
self._session.seqMgr._reserve((method, synchronous)) + + count = self._handleDefaultArguments(method, args, kwargs) + if count != len(args) + len(kwargs): + raise Exception("Incorrect number of arguments: expected %d, got %d" % (count, len(args) + len(kwargs))) + + if self._agent.isV2: + # + # Compose and send a QMFv2 method request + # + call = {} + call['_object_id'] = self._objectId.asMap() + call['_method_name'] = name + argMap = {} + for arg in method.arguments: + if arg.dir.find("I") != -1: + # If any kwargs match this schema arg, insert them in the proper place + if arg.name in kwargs: + argMap[arg.name] = kwargs[arg.name] + elif aIdx < len(args): + argMap[arg.name] = args[aIdx] + aIdx += 1 + call['_arguments'] = argMap + + dp = self._broker.amqpSession.delivery_properties() + dp.routing_key = self.getV2RoutingKey() + mp = self._broker.amqpSession.message_properties() + mp.content_type = "amqp/map" + if self._broker.saslUser: + mp.user_id = self._broker.saslUser + mp.correlation_id = str(seq) + mp.app_id = "qmf2" + mp.reply_to = self._broker.amqpSession.reply_to("qmf.default.direct", self._broker.v2_direct_queue) + mp.application_headers = {'qmf.opcode':'_method_request'} + sendCodec.write_map(call) + smsg = Message(dp, mp, sendCodec.encoded) + exchange = "qmf.default.direct" + + else: + # + # Associate this sequence with the agent hosting the object so we can correctly + # route the method-response + # + agent = self._broker.getAgent(self._broker.getBrokerBank(), self._objectId.getAgentBank()) + self._broker._setSequence(seq, agent) + + # + # Compose and send a QMFv1 method request + # + self._broker._setHeader(sendCodec, 'M', seq) + self._objectId.encode(sendCodec) + self._schema.getKey().encode(sendCodec) + sendCodec.write_str8(name) + + for arg in method.arguments: + if arg.dir.find("I") != -1: + self._session._encodeValue(sendCodec, args[aIdx], arg.type) + aIdx += 1 + smsg = self._broker._message(sendCodec.encoded, "agent.%d.%s" % + 
(self._objectId.getBrokerBank(), self._objectId.getAgentBank())) + exchange = "qpid.management" + + if synchronous: + try: + self._broker.cv.acquire() + self._broker.syncInFlight = True + finally: + self._broker.cv.release() + self._broker._send(smsg, exchange) + return seq + return None + + def _invoke(self, name, args, kwargs): + if not self.isManaged(): + raise Exception("Object is not managed") + if "_timeout" in kwargs: + timeout = kwargs["_timeout"] + else: + timeout = self._broker.SYNC_TIME + + if "_async" in kwargs and kwargs["_async"]: + sync = False + if "_timeout" not in kwargs: + timeout = None + else: + sync = True + + # Remove special "meta" kwargs before handing to _sendMethodRequest() to process + if "_timeout" in kwargs: del kwargs["_timeout"] + if "_async" in kwargs: del kwargs["_async"] + + seq = self._sendMethodRequest(name, args, kwargs, sync, timeout) + if seq: + if not sync: + return seq + self._broker.cv.acquire() + try: + starttime = time() + while self._broker.syncInFlight and self._broker.error == None: + self._broker.cv.wait(timeout) + if time() - starttime > timeout: + raise RuntimeError("Timed out waiting for method to respond") + finally: + self._session.seqMgr._release(seq) + self._broker.cv.release() + if self._broker.error != None: + errorText = self._broker.error + self._broker.error = None + raise Exception(errorText) + return self._broker.syncResult + raise Exception("Invalid Method (software defect) [%s]" % name) + + def _encodeUnmanaged(self, codec): + codec.write_uint8(20) + codec.write_str8(self._schema.getKey().getPackageName()) + codec.write_str8(self._schema.getKey().getClassName()) + codec.write_bin128(self._schema.getKey().getHash()) + + # emit presence masks for optional properties + mask = 0 + bit = 0 + for prop, value in self._properties: + if prop.optional: + if bit == 0: + bit = 1 + if value: + mask |= bit + bit = bit << 1 + if bit == 256: + bit = 0 + codec.write_uint8(mask) + mask = 0 + if bit != 0: + 
codec.write_uint8(mask) + + # encode properties + for prop, value in self._properties: + if value != None: + self._session._encodeValue(codec, value, prop.type) + + # encode statistics + for stat, value in self._statistics: + self._session._encodeValue(codec, value, stat.type) + + def _parsePresenceMasks(self, codec, schema): + excludeList = [] + bit = 0 + for property in schema.getProperties(): + if property.optional: + if bit == 0: + mask = codec.read_uint8() + bit = 1 + if (mask & bit) == 0: + excludeList.append(property.name) + bit *= 2 + if bit == 256: + bit = 0 + return excludeList + + +#=================================================================================================== +# Session +#=================================================================================================== +class Session: + """ + An instance of the Session class represents a console session running + against one or more QMF brokers. A single instance of Session is needed + to interact with the management framework as a console. + """ + _CONTEXT_SYNC = 1 + _CONTEXT_STARTUP = 2 + _CONTEXT_MULTIGET = 3 + + DEFAULT_GET_WAIT_TIME = 60 + + ENCODINGS = { + str: 7, + timestamp: 8, + datetime: 8, + int: 9, + long: 9, + float: 13, + UUID: 14, + Object: 20, + list: 21 + } + + + def __init__(self, console=None, rcvObjects=True, rcvEvents=True, rcvHeartbeats=True, + manageConnections=False, userBindings=False): + """ + Initialize a session. If the console argument is provided, the + more advanced asynchronous features are available. If console is + defaulted, the session will operate in a simpler, synchronous manner. + + The rcvObjects, rcvEvents, and rcvHeartbeats arguments are meaningful only if 'console' + is provided. They control whether object updates, events, and agent-heartbeats are + subscribed to. If the console is not interested in receiving one or more of the above, + setting the argument to False will reduce the bandwidth used by the API.
+ + If manageConnections is set to True, the Session object will manage connections to + the brokers. This means that if a broker is unreachable, it will retry until a connection + can be established. If a connection is lost, the Session will attempt to reconnect. + + If manageConnections is set to False, the user is responsible for handling failures. In + this case, an unreachable broker will cause addBroker to raise an exception. + + If userBindings is set to False (the default) and rcvObjects is True, the console will + receive data for all object classes. If userBindings is set to True, the user must select + which classes the console shall receive by invoking the bindPackage or bindClass methods. + This allows the console to be configured to receive only information that is relevant to + a particular application. If rcvObjects is False, userBindings has no meaning. + """ + self.console = console + self.brokers = [] + self.schemaCache = SchemaCache() + self.seqMgr = SequenceManager() + self.cv = Condition() + self.syncSequenceList = [] + self.getResult = [] + self.getSelect = [] + self.error = None + self.rcvObjects = rcvObjects + self.rcvEvents = rcvEvents + self.rcvHeartbeats = rcvHeartbeats + self.userBindings = userBindings + if self.console == None: + self.rcvObjects = False + self.rcvEvents = False + self.rcvHeartbeats = False + self.v1BindingKeyList, self.v2BindingKeyList = self._bindingKeys() + self.manageConnections = manageConnections + # callback filters: + self.agent_filter = [] # (vendor, product, instance) || v1-agent-label-str + self.class_filter = [] # (pkg, class) + self.event_filter = [] # (pkg, event) + self.agent_heartbeat_min = 10 # minimum agent heartbeat timeout interval + self.agent_heartbeat_miss = 3 # # of heartbeats to miss before deleting agent + + if self.userBindings and not self.console: + raise Exception("userBindings can't be set unless a console is provided.") + + def close(self): + """ Releases all resources held by the
session. Must be called by the + application when it is done with the Session object. + """ + self.cv.acquire() + try: + while len(self.brokers): + b = self.brokers.pop() + try: + b._shutdown() + except: + pass + finally: + self.cv.release() + + def _getBrokerForAgentAddr(self, agent_addr): + try: + self.cv.acquire() + key = (1, agent_addr) + for b in self.brokers: + if key in b.agents: + return b + finally: + self.cv.release() + return None + + + def _getAgentForAgentAddr(self, agent_addr): + try: + self.cv.acquire() + key = agent_addr + for b in self.brokers: + if key in b.agents: + return b.agents[key] + finally: + self.cv.release() + return None + + + def __repr__(self): + return "QMF Console Session Manager (brokers: %d)" % len(self.brokers) + + + def addBroker(self, target="localhost", timeout=None, mechanisms=None, sessTimeout=None, **connectArgs): + """ Connect to a Qpid broker. Returns an object of type Broker. + Will raise an exception if the session is not managing the connection and + the connection setup to the broker fails. + """ + if isinstance(target, BrokerURL): + url = target + else: + url = BrokerURL(target) + broker = Broker(self, url.host, url.port, mechanisms, url.authName, url.authPass, + ssl = url.scheme == URL.AMQPS, connTimeout=timeout, sessTimeout=sessTimeout, **connectArgs) + + self.brokers.append(broker) + return broker + + + def delBroker(self, broker): + """ Disconnect from a broker, and deallocate the broker proxy object. The + 'broker' argument is the object returned from the addBroker call. Errors + are ignored. 
+ """ + broker._shutdown() + self.brokers.remove(broker) + del broker + + + def getPackages(self): + """ Get the list of known QMF packages """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getPackages() + + + def getClasses(self, packageName): + """ Get the list of known classes within a QMF package """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getClasses(packageName) + + + def getSchema(self, classKey): + """ Get the schema for a QMF class """ + for broker in self.brokers: + broker._waitForStable() + return self.schemaCache.getSchema(classKey) + + + def bindPackage(self, packageName): + """ Filter object and event callbacks to only those elements of the + specified package. Also filters newPackage and newClass callbacks to the + given package. Only valid if userBindings is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvObjects and not self.rcvEvents: + raise Exception("Session needs to be configured to receive events or objects.") + v1keys = ["console.obj.*.*.%s.#" % packageName, "console.event.*.*.%s.#" % packageName] + v2keys = ["agent.ind.data.%s.#" % packageName.replace(".", "_"), + "agent.ind.event.%s.#" % packageName.replace(".", "_"),] + if (packageName, None) not in self.class_filter: + self.class_filter.append((packageName, None)) + if (packageName, None) not in self.event_filter: + self.event_filter.append((packageName, None)) + self.v1BindingKeyList.extend(v1keys) + self.v2BindingKeyList.extend(v2keys) + for broker in self.brokers: + if broker.isConnected(): + for v1key in v1keys: + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + for v2key in v2keys: + # data indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", 
queue=broker.v2_topic_queue_ui, binding_key=v2key) + + + def bindClass(self, pname, cname=None): + """ Filter object callbacks to only those objects of the specified package + and optional class. Will also filter newPackage/newClass callbacks to the + specified package and class. Only valid if userBindings is True and + rcvObjects is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvObjects: + raise Exception("Session needs to be configured with rcvObjects=True.") + if cname is not None: + v1key = "console.obj.*.*.%s.%s.#" % (pname, cname) + v2key = "agent.ind.data.%s.%s.#" % (pname.replace(".", "_"), cname.replace(".", "_")) + else: + v1key = "console.obj.*.*.%s.#" % pname + v2key = "agent.ind.data.%s.#" % pname.replace(".", "_") + self.v1BindingKeyList.append(v1key) + self.v2BindingKeyList.append(v2key) + if (pname, cname) not in self.class_filter: + self.class_filter.append((pname, cname)) + for broker in self.brokers: + if broker.isConnected(): + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + # data indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key) + + + def bindClassKey(self, classKey): + """ Filter object callbacks to only those objects of the specified + class. Will also filter newPackage/newClass callbacks to the specified + package and class. Only valid if userBindings is True and rcvObjects is + True. + """ + pname = classKey.getPackageName() + cname = classKey.getClassName() + self.bindClass(pname, cname) + + def bindEvent(self, pname, ename=None): + """ Filter event callbacks only from a particular class by package and + event name, or all events in a package if ename=None. Will also filter + newPackage/newClass callbacks to the specified package and class. 
Only + valid if userBindings is True and rcvEvents is True. + """ + if not self.userBindings: + raise Exception("userBindings option must be set for this Session.") + if not self.rcvEvents: + raise Exception("Session needs to be configured with rcvEvents=True.") + if ename is not None: + v1key = "console.event.*.*.%s.%s.#" % (pname, ename) + v2key = "agent.ind.event.%s.%s.#" % (pname.replace(".", "_"), ename.replace(".", "_")) + else: + v1key = "console.event.*.*.%s.#" % pname + v2key = "agent.ind.event.%s.#" % pname.replace(".", "_") + self.v1BindingKeyList.append(v1key) + self.v2BindingKeyList.append(v2key) + if (pname, ename) not in self.event_filter: + self.event_filter.append((pname, ename)) + for broker in self.brokers: + if broker.isConnected(): + broker.amqpSession.exchange_bind(exchange="qpid.management", queue=broker.topicName, binding_key=v1key) + if broker.brokerSupportsV2: + # event indications should arrive on the unsolicited indication queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", queue=broker.v2_topic_queue_ui, binding_key=v2key) + + def bindEventKey(self, eventKey): + """ Filter event callbacks only from a particular class key. Will also + filter newPackage/newClass callbacks to the specified package and + class. Only valid if userBindings is True and rcvEvents is True. + """ + pname = eventKey.getPackageName() + ename = eventKey.getClassName() + self.bindEvent(pname, ename) + + def bindAgent(self, vendor=None, product=None, instance=None, label=None): + """ Receive heartbeats, newAgent and delAgent callbacks only for those + agent(s) that match the passed identification criteria: + V2 agents: vendor, optionally product and instance strings + V1 agents: the label string. + Only valid if userBindings is True. 
+ """ + if not self.userBindings: + raise Exception("Session not configured for binding specific agents.") + if vendor is None and label is None: + raise Exception("Must specify at least a vendor (V2 agents)" + " or label (V1 agents).") + + if vendor: # V2 agent identification + if product is not None: + v2key = "agent.ind.heartbeat.%s.%s.#" % (vendor.replace(".", "_"), product.replace(".", "_")) + else: + v2key = "agent.ind.heartbeat.%s.#" % vendor.replace(".", "_") + self.v2BindingKeyList.append(v2key) + + # allow wildcards - only add filter if a non-wildcarded component is given + if vendor == "*": + vendor = None + if product == "*": + product = None + if instance == "*": + instance = None + if vendor or product or instance: + if (vendor, product, instance) not in self.agent_filter: + self.agent_filter.append((vendor, product, instance)) + + for broker in self.brokers: + if broker.isConnected(): + if broker.brokerSupportsV2: + # heartbeats should arrive on the heartbeat queue + broker.amqpSession.exchange_bind(exchange="qmf.default.topic", + queue=broker.v2_topic_queue_hb, + binding_key=v2key) + elif label != "*": # non-wildcard V1 agent label + # V1 format heartbeats do not have any agent identifier in the routing + # key, so we cannot filter them by bindings. 
+ if label not in self.agent_filter: + self.agent_filter.append(label) + + + def getAgents(self, broker=None): + """ Get a list of currently known agents """ + brokerList = [] + if broker == None: + for b in self.brokers: + brokerList.append(b) + else: + brokerList.append(broker) + + for b in brokerList: + b._waitForStable() + agentList = [] + for b in brokerList: + for a in b.getAgents(): + agentList.append(a) + return agentList + + + def makeObject(self, classKey, **kwargs): + """ Create a new, unmanaged object of the schema indicated by classKey """ + schema = self.getSchema(classKey) + if schema == None: + raise Exception("Schema not found for classKey") + return Object(None, schema, None, True, True, kwargs) + + + def getObjects(self, **kwargs): + """ Get a list of objects from QMF agents. + All arguments are passed by name(keyword). + + The class for queried objects may be specified in one of the following ways: + + _schema = - supply a schema object returned from getSchema. + _key = - supply a classKey from the list returned by getClasses. + _class = - supply a class name as a string. If the class name exists + in multiple packages, a _package argument may also be supplied. + _objectId = - get the object referenced by the object-id + + If objects should be obtained from only one agent, use the following argument. + Otherwise, the query will go to all agents. + + _agent = - supply an agent from the list returned by getAgents. + + If the get query is to be restricted to one broker (as opposed to all connected brokers), + add the following argument: + + _broker = - supply a broker as returned by addBroker. + + The default timeout for this synchronous operation is 60 seconds. To change the timeout, + use the following argument: + + _timeout =