author:    Kim van der Riet <kpvdr@apache.org>  2013-02-28 16:14:30 +0000
committer: Kim van der Riet <kpvdr@apache.org>  2013-02-28 16:14:30 +0000
commit:    9c73ef7a5ac10acd6a50d5d52bd721fc2faa5919 (patch)
tree:      2a890e1df09e5b896a9b4168a7b22648f559a1f2 /cpp/src/tests
parent:    172d9b2a16cfb817bbe632d050acba7e31401cd2 (diff)
download:  qpid-python-asyncstore.tar.gz

Update from trunk r1375509 through r1450773 (asyncstore branch)
git-svn-id: https://svn.apache.org/repos/asf/qpid/branches/asyncstore@1451244 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'cpp/src/tests')
-rw-r--r--  cpp/src/tests/.valgrind.supp | 23
-rw-r--r--  cpp/src/tests/BrokerMgmtAgent.cpp | 979
-rw-r--r--  cpp/src/tests/CMakeLists.txt | 108
-rw-r--r--  cpp/src/tests/ClusterFailover.cpp | 115
-rw-r--r--  cpp/src/tests/ClusterFixture.cpp | 160
-rw-r--r--  cpp/src/tests/ClusterFixture.h | 115
-rw-r--r--  cpp/src/tests/DispatcherTest.cpp | 5
-rw-r--r--  cpp/src/tests/FieldTable.cpp | 1
-rw-r--r--  cpp/src/tests/ForkedBroker.cpp | 157
-rw-r--r--  cpp/src/tests/ForkedBroker.h | 82
-rw-r--r--  cpp/src/tests/InitialStatusMap.cpp | 239
-rw-r--r--  cpp/src/tests/Makefile.am | 46
-rw-r--r--  cpp/src/tests/MessageUtils.h | 5
-rw-r--r--  cpp/src/tests/MessagingSessionTests.cpp | 53
-rw-r--r--  cpp/src/tests/PartialFailure.cpp | 291
-rw-r--r--  cpp/src/tests/PollerTest.cpp | 9
-rw-r--r--  cpp/src/tests/QueueTest.cpp | 1
-rw-r--r--  cpp/src/tests/StoreStatus.cpp | 117
-rw-r--r--  cpp/src/tests/SystemInfo.cpp | 16
-rw-r--r--  cpp/src/tests/Variant.cpp | 10
-rwxr-xr-x  cpp/src/tests/acl.py | 688
-rwxr-xr-x  cpp/src/tests/benchmark | 95
-rw-r--r--  cpp/src/tests/brokertest.py | 102
-rw-r--r--  cpp/src/tests/cluster_authentication_soak.cpp | 310
-rwxr-xr-x  cpp/src/tests/cluster_failover | 19
-rwxr-xr-x  cpp/src/tests/cluster_python_tests | 28
-rw-r--r--  cpp/src/tests/cluster_python_tests_failing.txt | 4
-rwxr-xr-x  cpp/src/tests/cluster_read_credit | 29
-rw-r--r--  cpp/src/tests/cluster_test.cpp | 1231
-rwxr-xr-x  cpp/src/tests/cluster_test_logs.py | 123
-rw-r--r--  cpp/src/tests/cluster_test_scripts/README.txt | 20
-rwxr-xr-x  cpp/src/tests/cluster_test_scripts/cluster_check | 37
-rwxr-xr-x  cpp/src/tests/cluster_test_scripts/cluster_start | 56
-rwxr-xr-x  cpp/src/tests/cluster_test_scripts/cluster_stop | 38
-rwxr-xr-x  cpp/src/tests/cluster_test_scripts/config_example.sh | 44
-rwxr-xr-x  cpp/src/tests/cluster_test_scripts/perftest | 54
-rw-r--r--  cpp/src/tests/cluster_tests.fail | 3
-rwxr-xr-x  cpp/src/tests/cluster_tests.py | 1834
-rwxr-xr-x  cpp/src/tests/clustered_replication_test | 111
-rwxr-xr-x  cpp/src/tests/dynamic_log_hires_timestamp | 75
-rw-r--r--  cpp/src/tests/failover_soak.cpp | 827
-rwxr-xr-x  cpp/src/tests/federated_cluster_test | 153
-rwxr-xr-x  cpp/src/tests/federation.py | 106
-rwxr-xr-x  cpp/src/tests/federation_sys.py | 1251
-rwxr-xr-x  cpp/src/tests/ha_store_tests.py | 130
-rwxr-xr-x  cpp/src/tests/ha_test.py | 304
-rwxr-xr-x  cpp/src/tests/ha_tests.py | 1097
-rwxr-xr-x  cpp/src/tests/ipv6_test | 40
-rw-r--r--  cpp/src/tests/legacystore/.valgrind.supp | 35
-rw-r--r--  cpp/src/tests/legacystore/.valgrindrc | 7
-rw-r--r--  cpp/src/tests/legacystore/CMakeLists.txt | 117
-rw-r--r--  cpp/src/tests/legacystore/MessageUtils.h | 105
-rw-r--r--  cpp/src/tests/legacystore/OrderingTest.cpp | 168
-rw-r--r--  cpp/src/tests/legacystore/SimpleTest.cpp | 497
-rw-r--r--  cpp/src/tests/legacystore/TestFramework.cpp | 30
-rw-r--r--  cpp/src/tests/legacystore/TestFramework.h | 37
-rw-r--r--  cpp/src/tests/legacystore/TransactionalTest.cpp | 351
-rw-r--r--  cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp | 675
-rw-r--r-- [-rwxr-xr-x]  cpp/src/tests/legacystore/clean.sh (renamed from cpp/src/tests/stop_cluster) | 23
-rw-r--r--  cpp/src/tests/legacystore/persistence.py | 574
-rw-r--r-- [-rwxr-xr-x]  cpp/src/tests/legacystore/run_long_python_tests (renamed from cpp/src/tests/federated_cluster_test_with_node_failure) | 8
-rw-r--r--  cpp/src/tests/legacystore/run_python_tests | 64
-rw-r--r-- [-rwxr-xr-x]  cpp/src/tests/legacystore/run_short_python_tests (renamed from cpp/src/tests/run_long_cluster_tests) | 9
-rw-r--r--  cpp/src/tests/legacystore/run_test | 69
-rw-r--r-- [-rwxr-xr-x]  cpp/src/tests/legacystore/start_broker (renamed from cpp/src/tests/run_cluster_test) | 14
-rw-r--r-- [-rwxr-xr-x]  cpp/src/tests/legacystore/stop_broker (renamed from cpp/src/tests/cpg_check.sh.in) | 36
-rw-r--r--  cpp/src/tests/legacystore/system_test.sh | 51
-rw-r--r--  cpp/src/tests/legacystore/tests_env.sh | 260
-rw-r--r--  cpp/src/tests/legacystore/unit_test.cpp | 28
-rw-r--r--  cpp/src/tests/legacystore/unit_test.h | 69
-rwxr-xr-x  cpp/src/tests/long_cluster_tests.py | 38
-rwxr-xr-x  cpp/src/tests/qpid-cluster-benchmark | 2
-rwxr-xr-x  cpp/src/tests/qpid-cpp-benchmark | 10
-rw-r--r--  cpp/src/tests/qpid-receive.cpp | 2
-rw-r--r--  cpp/src/tests/qpid-send.cpp | 2
-rwxr-xr-x  cpp/src/tests/qpid-test-cluster | 109
-rwxr-xr-x  cpp/src/tests/reliable_replication_test | 84
-rwxr-xr-x  cpp/src/tests/replication_test | 182
-rwxr-xr-x  cpp/src/tests/restart_cluster | 38
-rwxr-xr-x  cpp/src/tests/run_acl_tests | 18
-rwxr-xr-x  cpp/src/tests/run_cluster_authentication_soak | 27
-rwxr-xr-x  cpp/src/tests/run_cluster_authentication_test | 27
-rwxr-xr-x  cpp/src/tests/run_cluster_tests | 39
-rwxr-xr-x  cpp/src/tests/run_failover_soak | 39
-rwxr-xr-x  cpp/src/tests/run_federation_sys_tests | 28
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex | 92
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_dynamic_cluster | 30
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_link_cluster | 29
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_queue_cluster | 29
-rwxr-xr-x  cpp/src/tests/sasl_fed_ex_route_cluster | 29
-rwxr-xr-x  cpp/src/tests/ssl_test | 26
-rwxr-xr-x  cpp/src/tests/start_cluster | 43
-rwxr-xr-x  cpp/src/tests/start_cluster_hosts | 70
-rw-r--r--  cpp/src/tests/storePerftools/asyncPerf/PerfTest.cpp | 2
-rw-r--r--  cpp/src/tests/test_env.ps1.in | 2
-rw-r--r--  cpp/src/tests/test_env.sh.in | 4
-rw-r--r--  cpp/src/tests/test_store.cpp | 6
-rw-r--r--  cpp/src/tests/testagent.mk | 2
-rw-r--r--  cpp/src/tests/testlib.py | 766
-rwxr-xr-x  cpp/src/tests/verify_cluster_objects | 107
100 files changed, 5807 insertions, 10643 deletions
diff --git a/cpp/src/tests/.valgrind.supp b/cpp/src/tests/.valgrind.supp
index 2c6a1509ff..1a24a9178e 100644
--- a/cpp/src/tests/.valgrind.supp
+++ b/cpp/src/tests/.valgrind.supp
@@ -39,21 +39,6 @@
fun:_sasl_load_plugins
fun:sasl_client_init
}
-{
- Benign leak in CPG - patched version.
- Memcheck:Leak
- fun:*
- fun:openais_service_connect
- fun:cpg_initialize
-}
-
-{
- Benign error in libcpg.
- Memcheck:Param
- socketcall.sendmsg(msg.msg_iov[i])
- obj:*/libpthread-2.5.so
- obj:*/libcpg.so.2.0.0
-}
{
Uninitialised value problem in _dl_relocate (F7, F8)
@@ -161,14 +146,6 @@
}
{
- CPG error - seems benign.
- Memcheck:Param
- socketcall.sendmsg(msg.msg_iov[i])
- obj:*
- obj:*/libcpg.so.2.0.0
-}
-
-{
Known leak in boost.thread 1.33.1. Wildcards for 64/32 bit diffs.
Memcheck:Leak
fun:*
diff --git a/cpp/src/tests/BrokerMgmtAgent.cpp b/cpp/src/tests/BrokerMgmtAgent.cpp
index 02aa87f876..29c3faf809 100644
--- a/cpp/src/tests/BrokerMgmtAgent.cpp
+++ b/cpp/src/tests/BrokerMgmtAgent.cpp
@@ -22,6 +22,7 @@
#include "unit_test.h"
#include "MessagingFixture.h"
#include "qpid/management/Buffer.h"
+#include "qpid/management/ManagementAgent.h"
#include "qpid/messaging/Message.h"
#include "qpid/amqp_0_10/Codecs.h"
#include "qpid/log/Logger.h"
@@ -41,751 +42,345 @@ using namespace qpid::types;
namespace qpid {
- namespace tests {
-
- namespace _qmf = qmf::org::apache::qpid::broker::mgmt::test;
- namespace {
-
- typedef boost::shared_ptr<_qmf::TestObject> TestObjectPtr;
- typedef std::vector<TestObjectPtr> TestObjectVector;
-
- // Instantiates a broker and its internal management agent. Provides
- // factories for constructing Receivers for object indication messages.
- //
- class AgentFixture
- {
- MessagingFixture *mFix;
-
- public:
- AgentFixture( unsigned int pubInterval=10,
- bool qmfV2=false,
- qpid::broker::Broker::Options opts = qpid::broker::Broker::Options())
- {
- opts.enableMgmt=true;
- opts.qmf2Support=qmfV2;
- opts.mgmtPubInterval=pubInterval;
- mFix = new MessagingFixture(opts, true);
-
- _qmf::TestObject::registerSelf(getBrokerAgent());
- };
- ~AgentFixture()
- {
- delete mFix;
- };
- ::qpid::management::ManagementAgent *getBrokerAgent() { return mFix->broker->getManagementAgent(); }
- Receiver createV1DataIndRcvr( const std::string package, const std::string klass )
- {
- return mFix->session.createReceiver(std::string("kqueue; {create: always, delete: always, "
- "node: {type: queue, "
- "x-bindings: [{exchange: qpid.management, "
- "key: 'console.obj.1.0.")
- + package + std::string(".") + klass
- + std::string("'}]}}"));
- };
- Receiver createV2DataIndRcvr( const std::string package, const std::string klass )
- {
- std::string p(package);
- std::replace(p.begin(), p.end(), '.', '_');
- std::string k(klass);
- std::replace(k.begin(), k.end(), '.', '_');
-
- return mFix->session.createReceiver(std::string("kqueue; {create: always, delete: always, "
- "node: {type: queue, "
- "x-bindings: [{exchange: qmf.default.topic, "
- "key: 'agent.ind.data.")
- + p + std::string(".") + k
- + std::string("'}]}}"));
- };
- };
-
-
- // A "management object" that supports the TestObject
- //
- class TestManageable : public qpid::management::Manageable
- {
- management::ManagementObject* mgmtObj;
- const std::string key;
- public:
- TestManageable(management::ManagementAgent *agent, std::string _key)
- : key(_key)
- {
- _qmf::TestObject *tmp = new _qmf::TestObject(agent, this);
-
- // seed it with some default values...
- tmp->set_string1(key);
- tmp->set_bool1(true);
- qpid::types::Variant::Map vMap;
- vMap["one"] = qpid::types::Variant(1);
- vMap["two"] = qpid::types::Variant("two");
- vMap["three"] = qpid::types::Variant("whatever");
- tmp->set_map1(vMap);
-
- mgmtObj = tmp;
- };
- ~TestManageable() { mgmtObj = 0; /* deleted by agent on shutdown */ };
- management::ManagementObject* GetManagementObject() const { return mgmtObj; };
- static void validateTestObjectProperties(_qmf::TestObject& to)
- {
- // verify the default values are as expected. We don't check 'string1',
- // as it is the object key, and is unique for each object (no default value).
- BOOST_CHECK(to.get_bool1() == true);
- BOOST_CHECK(to.get_map1().size() == 3);
- qpid::types::Variant::Map mappy = to.get_map1();
- BOOST_CHECK(1 == (unsigned int)mappy["one"]);
- BOOST_CHECK(mappy["two"].asString() == std::string("two"));
- BOOST_CHECK(mappy["three"].asString() == std::string("whatever"));
- };
- };
-
-
- // decode a V1 Content Indication message
- //
- void decodeV1ObjectUpdates(const Message& inMsg, TestObjectVector& objs, const size_t objLen)
- {
- const size_t MAX_BUFFER_SIZE=65536;
- char tmp[MAX_BUFFER_SIZE];
-
- objs.clear();
-
- BOOST_CHECK(inMsg.getContent().size() <= MAX_BUFFER_SIZE);
-
- ::memcpy(tmp, inMsg.getContent().data(), inMsg.getContent().size());
- Buffer buf(tmp, inMsg.getContent().size());
-
- while (buf.available() > 8) { // 8 == qmf v1 header size
- BOOST_CHECK_EQUAL(buf.getOctet(), 'A');
- BOOST_CHECK_EQUAL(buf.getOctet(), 'M');
- BOOST_CHECK_EQUAL(buf.getOctet(), '2');
- BOOST_CHECK_EQUAL(buf.getOctet(), 'c'); // opcode == content indication
- // @@todo: kag: how do we skip 'i' entries???
- buf.getLong(); // ignore sequence
-
- std::string str1; // decode content body as string
- buf.getRawData(str1, objLen);
-
- TestObjectPtr fake(new _qmf::TestObject(0,0));
- fake->readProperties( str1 );
- objs.push_back(fake);
- }
- }
-
-
- // decode a V2 Content Indication message
- //
- void decodeV2ObjectUpdates(const qpid::messaging::Message& inMsg, TestObjectVector& objs)
- {
- objs.clear();
-
- BOOST_CHECK_EQUAL(inMsg.getContentType(), std::string("amqp/list"));
-
- const ::qpid::types::Variant::Map& m = inMsg.getProperties();
- Variant::Map::const_iterator iter = m.find(std::string("qmf.opcode"));
- BOOST_CHECK(iter != m.end());
- BOOST_CHECK_EQUAL(iter->second.asString(), std::string("_data_indication"));
-
- Variant::List vList;
- ::qpid::amqp_0_10::ListCodec::decode(inMsg.getContent(), vList);
-
- for (Variant::List::iterator lIter = vList.begin(); lIter != vList.end(); lIter++) {
- TestObjectPtr fake(new _qmf::TestObject(0,0));
- fake->readTimestamps(lIter->asMap());
- fake->mapDecodeValues((lIter->asMap())["_values"].asMap());
- objs.push_back(fake);
- }
- }
- }
-
- QPID_AUTO_TEST_SUITE(BrokerMgmtAgent)
-
- // verify that an object that is added to the broker's management database is
- // published correctly. Furthermore, verify that it is published once after
- // it has been deleted.
- //
- QPID_AUTO_TEST_CASE(v1ObjPublish)
- {
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("obj1"));
- uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- agent->addObject(tm->GetManagementObject(), 1);
-
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
-
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- BOOST_CHECK(0 == mappy["_delete_ts"].asUint64()); // not deleted
- }
-
- // destroy the object
-
- tm->GetManagementObject()->resourceDestroy();
-
- // wait for the deleted object to be published
-
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
-
- BOOST_CHECK(isDeleted);
-
- r1.close();
- delete fix;
- delete tm;
- }
-
- // Repeat the previous test, but with V2-based object support
- //
- QPID_AUTO_TEST_CASE(v2ObjPublish)
- {
- AgentFixture* fix = new AgentFixture(3, true);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- TestManageable *tm = new TestManageable(agent, std::string("obj2"));
-
- Receiver r1 = fix->createV2DataIndRcvr(tm->GetManagementObject()->getPackageName(), "#");
-
- agent->addObject(tm->GetManagementObject(), "testobj-1");
-
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
-
- TestObjectVector objs;
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- BOOST_CHECK(0 == mappy["_delete_ts"].asUint64());
- }
-
- // destroy the object
-
- tm->GetManagementObject()->resourceDestroy();
-
- // wait for the deleted object to be published
-
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
-
- BOOST_CHECK(isDeleted);
-
- r1.close();
- delete fix;
- delete tm;
- }
-
-
- // verify that a deleted object is exported correctly using the
- // exportDeletedObjects() method. V1 testcase.
- //
- QPID_AUTO_TEST_CASE(v1ExportDelObj)
- {
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("myObj"));
- uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- agent->addObject(tm->GetManagementObject(), 1);
-
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
-
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- // destroy the object, then immediately export (before the next poll cycle)
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
-
- // wait for the deleted object to be published
-
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
-
- BOOST_CHECK(isDeleted);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
-
- r1.close();
- delete fix;
- delete tm;
- }
-
-
- // verify that a deleted object is imported correctly using the
- // importDeletedObjects() method. V1 testcase.
- //
- QPID_AUTO_TEST_CASE(v1ImportDelObj)
- {
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("anObj"));
- uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
+namespace tests {
+
+namespace _qmf = qmf::org::apache::qpid::broker::mgmt::test;
+namespace {
+
+typedef boost::shared_ptr<_qmf::TestObject> TestObjectPtr;
+typedef std::vector<TestObjectPtr> TestObjectVector;
+
+// Instantiates a broker and its internal management agent. Provides
+// factories for constructing Receivers for object indication messages.
+//
+class AgentFixture
+{
+ MessagingFixture *mFix;
+
+ public:
+ AgentFixture( unsigned int pubInterval=10,
+ bool qmfV2=false,
+ qpid::broker::Broker::Options opts = qpid::broker::Broker::Options())
+ {
+ opts.enableMgmt=true;
+ opts.qmf2Support=qmfV2;
+ opts.mgmtPubInterval=pubInterval;
+ mFix = new MessagingFixture(opts, true);
+
+ _qmf::TestObject::registerSelf(getBrokerAgent());
+ };
+ ~AgentFixture()
+ {
+ delete mFix;
+ };
+ ::qpid::management::ManagementAgent *getBrokerAgent() { return mFix->broker->getManagementAgent(); }
+ Receiver createV1DataIndRcvr( const std::string package, const std::string klass )
+ {
+ return mFix->session.createReceiver(std::string("kqueue; {create: always, delete: always, "
+ "node: {type: queue, "
+ "x-bindings: [{exchange: qpid.management, "
+ "key: 'console.obj.1.0.")
+ + package + std::string(".") + klass
+ + std::string("'}]}}"));
+ };
+ Receiver createV2DataIndRcvr( const std::string package, const std::string klass )
+ {
+ std::string p(package);
+ std::replace(p.begin(), p.end(), '.', '_');
+ std::string k(klass);
+ std::replace(k.begin(), k.end(), '.', '_');
+
+ return mFix->session.createReceiver(std::string("kqueue; {create: always, delete: always, "
+ "node: {type: queue, "
+ "x-bindings: [{exchange: qmf.default.topic, "
+ "key: 'agent.ind.data.")
+ + p + std::string(".") + k
+ + std::string("'}]}}"));
+ };
+};
+
+
+// A "management object" that supports the TestObject
+//
+class TestManageable : public qpid::management::Manageable
+{
+ management::ManagementObject::shared_ptr mgmtObj;
+ const std::string key;
+ public:
+ TestManageable(management::ManagementAgent *agent, std::string _key)
+ : key(_key)
+ {
+ _qmf::TestObject::shared_ptr tmp(new _qmf::TestObject(agent, this));
+
+ // seed it with some default values...
+ tmp->set_string1(key);
+ tmp->set_bool1(true);
+ qpid::types::Variant::Map vMap;
+ vMap["one"] = qpid::types::Variant(1);
+ vMap["two"] = qpid::types::Variant("two");
+ vMap["three"] = qpid::types::Variant("whatever");
+ tmp->set_map1(vMap);
+
+ mgmtObj = tmp;
+ };
+ ~TestManageable() { mgmtObj.reset(); }
+ management::ManagementObject::shared_ptr GetManagementObject() const { return mgmtObj; };
+ static void validateTestObjectProperties(_qmf::TestObject& to)
+ {
+ // verify the default values are as expected. We don't check 'string1',
+ // as it is the object key, and is unique for each object (no default value).
+ BOOST_CHECK(to.get_bool1() == true);
+ BOOST_CHECK(to.get_map1().size() == 3);
+ qpid::types::Variant::Map mappy = to.get_map1();
+ BOOST_CHECK(1 == (unsigned int)mappy["one"]);
+ BOOST_CHECK(mappy["two"].asString() == std::string("two"));
+ BOOST_CHECK(mappy["three"].asString() == std::string("whatever"));
+ };
+};
+
+
+// decode a V1 Content Indication message
+//
+void decodeV1ObjectUpdates(const Message& inMsg, TestObjectVector& objs, const size_t objLen)
+{
+ const size_t MAX_BUFFER_SIZE=65536;
+ char tmp[MAX_BUFFER_SIZE];
+
+ objs.clear();
+
+ BOOST_CHECK(inMsg.getContent().size() <= MAX_BUFFER_SIZE);
+
+ ::memcpy(tmp, inMsg.getContent().data(), inMsg.getContent().size());
+ Buffer buf(tmp, inMsg.getContent().size());
+
+ while (buf.available() > 8) { // 8 == qmf v1 header size
+ BOOST_CHECK_EQUAL(buf.getOctet(), 'A');
+ BOOST_CHECK_EQUAL(buf.getOctet(), 'M');
+ BOOST_CHECK_EQUAL(buf.getOctet(), '2');
+ BOOST_CHECK_EQUAL(buf.getOctet(), 'c'); // opcode == content indication
+ // @@todo: kag: how do we skip 'i' entries???
+ buf.getLong(); // ignore sequence
+
+ std::string str1; // decode content body as string
+ buf.getRawData(str1, objLen);
+
+ TestObjectPtr fake(new _qmf::TestObject(0,0));
+ fake->readProperties( str1 );
+ objs.push_back(fake);
+ }
+}
- agent->addObject(tm->GetManagementObject(), 1);
- // wait for the object to be published
- Message m1;
- BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
+// decode a V2 Content Indication message
+//
+void decodeV2ObjectUpdates(const qpid::messaging::Message& inMsg, TestObjectVector& objs)
+{
+ objs.clear();
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
+ BOOST_CHECK_EQUAL(inMsg.getContentType(), std::string("amqp/list"));
- // destroy the object, then immediately export (before the next poll cycle)
+ const ::qpid::types::Variant::Map& m = inMsg.getProperties();
+ Variant::Map::const_iterator iter = m.find(std::string("qmf.opcode"));
+ BOOST_CHECK(iter != m.end());
+ BOOST_CHECK_EQUAL(iter->second.asString(), std::string("_data_indication"));
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
+ Variant::List vList;
+ ::qpid::amqp_0_10::ListCodec::decode(inMsg.getContent(), vList);
- // destroy the broker, and reinstantiate a new one without populating it
- // with a TestObject.
+ for (Variant::List::iterator lIter = vList.begin(); lIter != vList.end(); lIter++) {
+ TestObjectPtr fake(new _qmf::TestObject(0,0));
+ fake->readTimestamps(lIter->asMap());
+ fake->mapDecodeValues((lIter->asMap())["_values"].asMap());
+ objs.push_back(fake);
+ }
+}
+}
- r1.close();
- delete fix;
- delete tm; // should no longer be necessary
+QPID_AUTO_TEST_SUITE(BrokerMgmtAgent)
- fix = new AgentFixture(3);
- r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
+// verify that an object that is added to the broker's management database is
+// published correctly. Furthermore, verify that it is published once after
+// it has been deleted.
+//
+QPID_AUTO_TEST_CASE(v1ObjPublish)
+{
+ AgentFixture* fix = new AgentFixture(3);
+ management::ManagementAgent* agent;
+ agent = fix->getBrokerAgent();
- // wait for the deleted object to be published
+ // create a manageable test object
+ TestManageable *tm = new TestManageable(agent, std::string("obj1"));
+ uint32_t objLen = tm->GetManagementObject()->writePropertiesSize();
- bool isDeleted = false;
- while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
+ Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
+ agent->addObject(tm->GetManagementObject(), 1);
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
+ // wait for the object to be published
+ Message m1;
+ BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
- TestManageable::validateTestObjectProperties(**oIter);
+ TestObjectVector objs;
+ decodeV1ObjectUpdates(m1, objs, objLen);
+ BOOST_CHECK(objs.size() > 0);
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- isDeleted = true;
- }
- }
+ for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- BOOST_CHECK(isDeleted);
+ TestManageable::validateTestObjectProperties(**oIter);
- // verify there are no deleted objects to export now.
+ qpid::types::Variant::Map mappy;
+ (*oIter)->writeTimestamps(mappy);
+ BOOST_CHECK(0 == mappy["_delete_ts"].asUint64()); // not deleted
+ }
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
+ // destroy the object
- r1.close();
- delete fix;
- }
+ tm->GetManagementObject()->resourceDestroy();
+ // wait for the deleted object to be published
- // verify that an object that is added and deleted prior to the
- // first poll cycle is accounted for by the export
- //
- QPID_AUTO_TEST_CASE(v1ExportFastDelObj)
- {
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
+ bool isDeleted = false;
+ while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
- // create a manageable test object
- TestManageable *tm = new TestManageable(agent, std::string("objectifyMe"));
+ decodeV1ObjectUpdates(m1, objs, objLen);
+ BOOST_CHECK(objs.size() > 0);
- // add, then immediately delete and export the object...
+ for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->addObject(tm->GetManagementObject(), 999);
- tm->GetManagementObject()->resourceDestroy();
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 1);
+ TestManageable::validateTestObjectProperties(**oIter);
- delete fix;
- delete tm;
+ qpid::types::Variant::Map mappy;
+ (*oIter)->writeTimestamps(mappy);
+ if (mappy["_delete_ts"].asUint64() != 0)
+ isDeleted = true;
}
+ }
+ BOOST_CHECK(isDeleted);
- // Verify that we can export and import multiple deleted objects correctly.
- //
- QPID_AUTO_TEST_CASE(v1ImportMultiDelObj)
- {
- AgentFixture* fix = new AgentFixture(3);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- Receiver r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- // populate the agent with multiple test objects
- const size_t objCount = 50;
- std::vector<TestManageable *> tmv;
- uint32_t objLen;
-
- for (size_t i = 0; i < objCount; i++) {
- std::stringstream key;
- key << "testobj-" << std::setfill('x') << std::setw(4) << i;
- // (no, seriously, I didn't just do that.)
- // Note well: we have to keep the key string length EXACTLY THE SAME
- // FOR ALL OBJECTS, so objLen will be the same. Otherwise the
- // decodeV1ObjectUpdates() will fail (v1 lacks explicit encoded length).
- TestManageable *tm = new TestManageable(agent, key.str());
- objLen = tm->GetManagementObject()->writePropertiesSize();
- agent->addObject(tm->GetManagementObject(), i + 1);
- tmv.push_back(tm);
- }
+ r1.close();
+ delete fix;
+ delete tm;
+}
- // wait for the objects to be published
- Message m1;
- uint32_t msgCount = 0;
- while(r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- msgCount += objs.size();
- }
+// Repeat the previous test, but with V2-based object support
+//
+QPID_AUTO_TEST_CASE(v2ObjPublish)
+{
+ AgentFixture* fix = new AgentFixture(3, true);
+ management::ManagementAgent* agent;
+ agent = fix->getBrokerAgent();
- BOOST_CHECK_EQUAL(msgCount, objCount);
+ TestManageable *tm = new TestManageable(agent, std::string("obj2"));
- // destroy some of the objects, then immediately export (before the next poll cycle)
+ Receiver r1 = fix->createV2DataIndRcvr(tm->GetManagementObject()->getPackageName(), "#");
- uint32_t delCount = 0;
- for (size_t i = 0; i < objCount; i += 2) {
- tmv[i]->GetManagementObject()->resourceDestroy();
- delCount++;
- }
+ agent->addObject(tm->GetManagementObject(), "testobj-1");
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK_EQUAL(delObjs.size(), delCount);
+ // wait for the object to be published
+ Message m1;
+ BOOST_CHECK(r1.fetch(m1, Duration::SECOND * 6));
- // destroy the broker, and reinstantiate a new one without populating it
- // with TestObjects.
+ TestObjectVector objs;
+ decodeV2ObjectUpdates(m1, objs);
+ BOOST_CHECK(objs.size() > 0);
- r1.close();
- delete fix;
- while (tmv.size()) {
- delete tmv.back();
- tmv.pop_back();
- }
+ for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- fix = new AgentFixture(3);
- r1 = fix->createV1DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
+ TestManageable::validateTestObjectProperties(**oIter);
- // wait for the deleted object to be published, verify the count
+ qpid::types::Variant::Map mappy;
+ (*oIter)->writeTimestamps(mappy);
+ BOOST_CHECK(0 == mappy["_delete_ts"].asUint64());
+ }
- uint32_t countDels = 0;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV1ObjectUpdates(m1, objs, objLen);
- BOOST_CHECK(objs.size() > 0);
+ // destroy the object
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
+ tm->GetManagementObject()->resourceDestroy();
- TestManageable::validateTestObjectProperties(**oIter);
+ // wait for the deleted object to be published
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- countDels++;
- }
- }
+ bool isDeleted = false;
+ while (!isDeleted && r1.fetch(m1, Duration::SECOND * 6)) {
- // make sure we get the correct # of deleted objects
- BOOST_CHECK_EQUAL(countDels, delCount);
+ decodeV2ObjectUpdates(m1, objs);
+ BOOST_CHECK(objs.size() > 0);
- // verify there are no deleted objects to export now.
+ for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
+ TestManageable::validateTestObjectProperties(**oIter);
- r1.close();
- delete fix;
+ qpid::types::Variant::Map mappy;
+ (*oIter)->writeTimestamps(mappy);
+ if (mappy["_delete_ts"].asUint64() != 0)
+ isDeleted = true;
}
+ }
- // Verify that we can export and import multiple deleted objects correctly.
- // QMF V2 variant
- QPID_AUTO_TEST_CASE(v2ImportMultiDelObj)
- {
- AgentFixture* fix = new AgentFixture(3, true);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- Receiver r1 = fix->createV2DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
-
- // populate the agent with multiple test objects
- const size_t objCount = 50;
- std::vector<TestManageable *> tmv;
-
- for (size_t i = 0; i < objCount; i++) {
- std::stringstream key;
- key << "testobj-" << i;
- TestManageable *tm = new TestManageable(agent, key.str());
- if (tm->GetManagementObject()->writePropertiesSize()) {}
- agent->addObject(tm->GetManagementObject(), key.str());
- tmv.push_back(tm);
- }
-
- // wait for the objects to be published
- Message m1;
- uint32_t msgCount = 0;
- while(r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV2ObjectUpdates(m1, objs);
- msgCount += objs.size();
- }
-
- BOOST_CHECK_EQUAL(msgCount, objCount);
-
- // destroy some of the objects, then immediately export (before the next poll cycle)
-
- uint32_t delCount = 0;
- for (size_t i = 0; i < objCount; i += 2) {
- tmv[i]->GetManagementObject()->resourceDestroy();
- delCount++;
- }
-
- ::qpid::management::ManagementAgent::DeletedObjectList delObjs;
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK_EQUAL(delObjs.size(), delCount);
-
- // destroy the broker, and reinstantiate a new one without populating it
- // with TestObjects.
-
- r1.close();
- delete fix;
- while (tmv.size()) {
- delete tmv.back();
- tmv.pop_back();
- }
-
- fix = new AgentFixture(3, true);
- r1 = fix->createV2DataIndRcvr("org.apache.qpid.broker.mgmt.test", "#");
- agent = fix->getBrokerAgent();
- agent->importDeletedObjects( delObjs );
-
- // wait for the deleted object to be published, verify the count
-
- uint32_t countDels = 0;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
- TestObjectVector objs;
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
-
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0)
- countDels++;
- }
- }
-
- // make sure we get the correct # of deleted objects
- BOOST_CHECK_EQUAL(countDels, delCount);
-
- // verify there are no deleted objects to export now.
-
- agent->exportDeletedObjects( delObjs );
- BOOST_CHECK(delObjs.size() == 0);
+ BOOST_CHECK(isDeleted);
- r1.close();
- delete fix;
- }
+ r1.close();
+ delete fix;
+ delete tm;
+}
- // See QPID-2997
- QPID_AUTO_TEST_CASE(v2RapidRestoreObj)
- {
- AgentFixture* fix = new AgentFixture(3, true);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // two objects, same ObjID
- TestManageable *tm1 = new TestManageable(agent, std::string("obj2"));
- TestManageable *tm2 = new TestManageable(agent, std::string("obj2"));
-
- Receiver r1 = fix->createV2DataIndRcvr(tm1->GetManagementObject()->getPackageName(), "#");
-
- // add, then immediately delete and re-add a copy of the object
- agent->addObject(tm1->GetManagementObject(), "testobj-1");
- tm1->GetManagementObject()->resourceDestroy();
- agent->addObject(tm2->GetManagementObject(), "testobj-1");
-
- // expect: a delete notification, then an update notification
- TestObjectVector objs;
- bool isDeleted = false;
- bool isAdvertised = false;
- size_t count = 0;
- Message m1;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- count++;
- TestManageable::validateTestObjectProperties(**oIter);
-
- qpid::types::Variant::Map mappy;
- (*oIter)->writeTimestamps(mappy);
- if (mappy["_delete_ts"].asUint64() != 0) {
- isDeleted = true;
- BOOST_CHECK(isAdvertised == false); // delete must be first
- } else {
- isAdvertised = true;
- BOOST_CHECK(isDeleted == true); // delete must be first
- }
- }
+// See QPID-2997
+QPID_AUTO_TEST_CASE(v2RapidRestoreObj)
+{
+ AgentFixture* fix = new AgentFixture(3, true);
+ management::ManagementAgent* agent;
+ agent = fix->getBrokerAgent();
+
+ // two objects, same ObjID
+ TestManageable *tm1 = new TestManageable(agent, std::string("obj2"));
+ TestManageable *tm2 = new TestManageable(agent, std::string("obj2"));
+
+ Receiver r1 = fix->createV2DataIndRcvr(tm1->GetManagementObject()->getPackageName(), "#");
+
+ // add, then immediately delete and re-add a copy of the object
+ agent->addObject(tm1->GetManagementObject(), "testobj-1");
+ tm1->GetManagementObject()->resourceDestroy();
+ agent->addObject(tm2->GetManagementObject(), "testobj-1");
+
+ // expect: a delete notification, then an update notification
+ TestObjectVector objs;
+ bool isDeleted = false;
+ bool isAdvertised = false;
+ size_t count = 0;
+ Message m1;
+ while (r1.fetch(m1, Duration::SECOND * 6)) {
+
+ decodeV2ObjectUpdates(m1, objs);
+ BOOST_CHECK(objs.size() > 0);
+
+ for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
+ count++;
+ TestManageable::validateTestObjectProperties(**oIter);
+
+ qpid::types::Variant::Map mappy;
+ (*oIter)->writeTimestamps(mappy);
+ if (mappy["_delete_ts"].asUint64() != 0) {
+ isDeleted = true;
+ BOOST_CHECK(isAdvertised == false); // delete must be first
+ } else {
+ isAdvertised = true;
+ BOOST_CHECK(isDeleted == true); // delete must be first
}
-
- BOOST_CHECK(isDeleted);
- BOOST_CHECK(isAdvertised);
- BOOST_CHECK(count == 2);
-
- r1.close();
- delete fix;
- delete tm1;
- delete tm2;
}
+ }
- // See QPID-2997
- QPID_AUTO_TEST_CASE(v2DuplicateErrorObj)
- {
- AgentFixture* fix = new AgentFixture(3, true);
- management::ManagementAgent* agent;
- agent = fix->getBrokerAgent();
-
- // turn off the expected error log message
- qpid::log::Options logOpts;
- logOpts.selectors.clear();
- logOpts.selectors.push_back("critical+");
- qpid::log::Logger::instance().configure(logOpts);
-
- // two objects, same ObjID
- TestManageable *tm1 = new TestManageable(agent, std::string("obj2"));
- TestManageable *tm2 = new TestManageable(agent, std::string("obj2"));
- // Keep a pointer to the ManagementObject. This test simulates a user-caused error
- // case (duplicate objects) where the broker has no choice but to leak a management
- // object (safest assumption). To prevent valgrind from flagging this leak, we
- // manually clean up the object at the end of the test.
- management::ManagementObject *save = tm2->GetManagementObject();
-
- Receiver r1 = fix->createV2DataIndRcvr(tm1->GetManagementObject()->getPackageName(), "#");
-
- // add, then immediately delete and re-add a copy of the object
- agent->addObject(tm1->GetManagementObject(), "testobj-1");
- agent->addObject(tm2->GetManagementObject(), "testobj-1");
-
- TestObjectVector objs;
- size_t count = 0;
- Message m1;
- while (r1.fetch(m1, Duration::SECOND * 6)) {
-
- decodeV2ObjectUpdates(m1, objs);
- BOOST_CHECK(objs.size() > 0);
-
- for (TestObjectVector::iterator oIter = objs.begin(); oIter != objs.end(); oIter++) {
- count++;
- TestManageable::validateTestObjectProperties(**oIter);
- }
- }
-
- BOOST_CHECK(count == 1); // only one should be accepted.
+ BOOST_CHECK(isDeleted);
+ BOOST_CHECK(isAdvertised);
+ BOOST_CHECK(count == 2);
- r1.close();
- delete fix;
- delete tm1;
- delete tm2;
- delete save;
- }
+ r1.close();
+ delete fix;
+ delete tm1;
+ delete tm2;
+}
- QPID_AUTO_TEST_SUITE_END()
- }
+QPID_AUTO_TEST_SUITE_END()
+}
}
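
Most of the churn in the BrokerMgmtAgent.cpp diff above is re-indentation; the substantive change is that TestManageable now holds its management object through management::ManagementObject::shared_ptr rather than a raw pointer, so its destructor simply drops its reference instead of leaving deletion to the agent at shutdown. A minimal sketch of that ownership shift, using a placeholder type rather than the real qpid classes:

    // Sketch only: FakeManagementObject stands in for qpid's ManagementObject.
    #include <boost/shared_ptr.hpp>
    #include <iostream>

    struct FakeManagementObject {
        ~FakeManagementObject() { std::cout << "object destroyed" << std::endl; }
    };

    // Mirrors the new TestManageable: shared ownership instead of a raw pointer.
    struct Fixture {
        boost::shared_ptr<FakeManagementObject> mgmtObj;
        Fixture() : mgmtObj(new FakeManagementObject) {}
        ~Fixture() { mgmtObj.reset(); }  // safe even if an agent still holds a reference
    };

    int main() {
        boost::shared_ptr<FakeManagementObject> agentRef;  // simulated agent's reference
        {
            Fixture fix;
            agentRef = fix.mgmtObj;  // the agent registers the object
        }                            // fixture destroyed; object survives via agentRef
        std::cout << "fixture gone, object still referenced" << std::endl;
        agentRef.reset();            // last reference dropped: "object destroyed" prints here
        return 0;
    }

With shared ownership the teardown order of fixture and agent no longer matters: the object is destroyed when the last holder releases it, which is what lets the old "deleted by agent on shutdown" comment disappear.
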
diff --git a/cpp/src/tests/CMakeLists.txt b/cpp/src/tests/CMakeLists.txt
index be7864fcb4..12e9cbed95 100644
--- a/cpp/src/tests/CMakeLists.txt
+++ b/cpp/src/tests/CMakeLists.txt
@@ -98,66 +98,78 @@ endif (CMAKE_SYSTEM_NAME STREQUAL Windows)
# when running the tests. If you want to build a subset of the tests run
# ccmake and set unit_tests_to_build to the set you want to build.
-set(unit_tests_to_build
- exception_test
- RefCounted
- SessionState
- logging
- AsyncCompletion
- Url
- Uuid
- Shlib
- FieldValue
- FieldTable
+set(all_unit_tests
+ AccumulatedAckTest
Array
- QueueOptionsTest
- InlineAllocator
- InlineVector
- ClientSessionTest
- MessagingSessionTests
- SequenceSet
- StringUtils
- RangeSet
+ AsyncCompletion
AtomicValue
- QueueTest
- AccumulatedAckTest
- DtxWorkRecordTest
+ ClientMessage
+ ClientMessageTest
+ ClientSessionTest
+ ConsoleTest
DeliveryRecordTest
+ DtxWorkRecordTest
+ exception_test
ExchangeTest
+ FieldTable
+ FieldValue
+ FrameDecoder
+ FramingTest
HeadersExchangeTest
+ HeaderTest
+ InlineAllocator
+ InlineVector
+ logging
+ ManagementTest
+ MessageReplayTracker
MessageTest
+ MessagingSessionTests
+ PollableCondition
+ ProxyTest
QueueDepth
- QueueRegistryTest
- QueuePolicyTest
QueueFlowLimitTest
- FramingTest
- HeaderTest
+ QueueOptionsTest
+ QueuePolicyTest
+ QueueRegistryTest
+ QueueTest
+ RangeSet
+ RefCounted
+ RetryList
SequenceNumberTest
+ SequenceSet
+ SessionState
+ Shlib
+ StringUtils
+ SystemInfo
TimerTest
TopicExchangeTest
TxBufferTest
- ManagementTest
- MessageReplayTracker
- ConsoleTest
- ProxyTest
- RetryList
- FrameDecoder
- ClientMessageTest
- PollableCondition
+ Url
+ Uuid
Variant
- ClientMessage
- SystemInfo
${xml_tests}
+ )
+
+set(unit_tests_to_build
+ ""
CACHE STRING "Which unit tests to build"
)
mark_as_advanced(unit_tests_to_build)
+# If unit_tests_to_build is not explicitly set, build all unit tests
+if (unit_tests_to_build)
+set(actual_unit_tests ${unit_tests_to_build})
+else()
+set(actual_unit_tests ${all_unit_tests})
+endif()
+
add_executable (unit_test unit_test
- ${unit_tests_to_build} ${platform_test_additions})
+ ${actual_unit_tests} ${platform_test_additions})
target_link_libraries (unit_test
${qpid_test_boost_libs}
qpidmessaging qpidbroker qmfconsole)
+set_target_properties (unit_test PROPERTIES COMPILE_DEFINITIONS _IN_QPID_BROKER)
remember_location(unit_test)
add_library (shlibtest MODULE shlibtest.cpp)
@@ -274,8 +286,12 @@ remember_location(msg_group_test)
# qpid-perftest and qpid-latency-test are generally useful so install them
-install (TARGETS qpid-perftest qpid-latency-test RUNTIME
- DESTINATION ${QPID_INSTALL_BINDIR})
+install (TARGETS
+ qpid-perftest qpid-latency-test qpid-client-test
+ qpid-ping
+ qpid-receive qpid-send qpid-topic-listener qpid-topic-publisher receiver sender
+ qpid-txtest
+ RUNTIME DESTINATION ${QPID_INSTALL_TESTDIR})
# This should ideally be done as part of the test run, but I don't know a way
# to get these arguments and the working directory set like Makefile.am does,
@@ -309,10 +325,12 @@ if (PYTHON_EXECUTABLE)
add_test (ha_tests ${test_wrap} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/ha_tests.py)
add_test (ipv6_test ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/ipv6_test${test_script_suffix})
add_test (federation_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_tests${test_script_suffix})
+ add_test (federation_sys_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_federation_sys_tests${test_script_suffix})
if (BUILD_ACL)
add_test (acl_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_acl_tests${test_script_suffix})
endif (BUILD_ACL)
add_test (dynamic_log_level_test ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/dynamic_log_level_test${test_script_suffix})
+add_test (dynamic_log_hires_timestamp ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/dynamic_log_hires_timestamp${test_script_suffix})
if (BUILD_MSSQL)
add_test (store_tests ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_store_tests${test_script_suffix} MSSQL)
endif (BUILD_MSSQL)
@@ -323,7 +341,9 @@ endif (PYTHON_EXECUTABLE)
add_library(test_store MODULE test_store.cpp)
target_link_libraries (test_store qpidbroker qpidcommon)
-set_target_properties (test_store PROPERTIES PREFIX "")
+set_target_properties (test_store PROPERTIES
+ COMPILE_DEFINITIONS _IN_QPID_BROKER
+ PREFIX "")
add_library (dlclose_noop MODULE dlclose_noop.c)
@@ -333,10 +353,16 @@ add_library (dlclose_noop MODULE dlclose_noop.c)
#
## Longer running stability tests, not run by default check: target.
## Not run under valgrind, too slow
-#LONG_TESTS=fanout_perftest shared_perftest multiq_perftest topic_perftest run_failover_soak
+#LONG_TESTS=fanout_perftest shared_perftest multiq_perftest topic_perftest
#EXTRA_DIST+=$(LONG_TESTS) run_perftest
#check-long:
# $(MAKE) check TESTS="start_broker $(LONG_TESTS) stop_broker" VALGRIND=
# Include async store tests
include(asyncstore.cmake)
+
+#
+# legacystore
+#
+#add_subdirectory(legacystore)
+
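
The test-selection change above turns unit_tests_to_build into an empty cache entry: when it is left unset, the full all_unit_tests list is compiled into the unit_test executable, and setting it restricts the build to the named tests. A sketch of a subset configure follows (test names are semicolon-separated CMake list items drawn from all_unit_tests above; the invocation is illustrative, not taken from the qpid docs):

    cmake -Dunit_tests_to_build="Url;Uuid;FieldTable" <path-to-qpid-cpp-source>
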
diff --git a/cpp/src/tests/ClusterFailover.cpp b/cpp/src/tests/ClusterFailover.cpp
deleted file mode 100644
index bf5c147f19..0000000000
--- a/cpp/src/tests/ClusterFailover.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/**@file Tests for partial failure in a cluster.
- * Partial failure means some nodes experience a failure while others do not.
- * In this case the failed nodes must shut down.
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ClusterFixture.h"
-#include "qpid/client/FailoverManager.h"
-#include <boost/assign.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/bind.hpp>
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(ClusterFailoverTestSuite)
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace qpid::client::arg;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=sys::TIME_SEC/4;
-
-ClusterFixture::Args getArgs(bool durable=std::getenv("STORE_LIB"))
-{
- ClusterFixture::Args args;
- args += "--auth", "no", "--no-module-dir",
- "--load-module", getLibPath("CLUSTER_LIB");
- if (durable)
- args += "--load-module", getLibPath("STORE_LIB"), "TMP_DATA_DIR";
- else
- args += "--no-data-dir";
- return args;
-}
-
-// Test re-connecting with same session name after a failure.
-QPID_AUTO_TEST_CASE(testReconnectSameSessionName) {
- ClusterFixture cluster(2, getArgs(), -1);
- // Specify a timeout to make sure it is ignored, session resume is
- // not implemented so sessions belonging to dead brokers should
- // not be kept.
- Client c0(cluster[0], "foo", 5);
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size()); // wait for both.
- c0.session.queueDeclare("q");
- c0.session.messageTransfer(arg::content=Message("sendme", "q"));
- BOOST_CHECK_EQUAL(c0.subs.get("q").getData(), "sendme");
- cluster.killWithSilencer(0, c0.connection, 9);
- Client c1(cluster[1], "foo", 5);
- c1.session.queueQuery(); // Try to use the session.
-}
-
-QPID_AUTO_TEST_CASE(testReconnectExclusiveQueue) {
- // Regression test. Session timeouts should be ignored
- // by the broker as session resume is not implemented.
- ClusterFixture cluster(2, getArgs(), -1);
- Client c0(cluster[0], "foo", 5);
- c0.session.queueDeclare("exq", arg::exclusive=true);
- SubscriptionSettings settings;
- settings.exclusive = true;
- settings.autoAck = 0;
- Subscription s0 = c0.subs.subscribe(c0.lq, "exq", settings, "exsub");
- c0.session.messageTransfer(arg::content=Message("sendme", "exq"));
- BOOST_CHECK_EQUAL(c0.lq.get().getData(), "sendme");
-
- // Regression: core dump on exit if unacked messages were left in
- // a session with a timeout.
- cluster.killWithSilencer(0, c0.connection);
-
- // Regression: session timeouts prevented re-connecting to
- // exclusive queue.
- Client c1(cluster[1]);
- c1.session.queueDeclare("exq", arg::exclusive=true);
- Subscription s1 = c1.subs.subscribe(c1.lq, "exq", settings, "exsub");
- s1.cancel();
-
- // Regression: session timeouts prevented new member joining
- // cluster with exclusive queues.
- cluster.add();
- Client c2(cluster[2]);
- c2.session.queueQuery();
-}
-
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
diff --git a/cpp/src/tests/ClusterFixture.cpp b/cpp/src/tests/ClusterFixture.cpp
deleted file mode 100644
index 6b62cb6fc7..0000000000
--- a/cpp/src/tests/ClusterFixture.cpp
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/log/Logger.h"
-
-#include <boost/bind.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/assign.hpp>
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using qpid::sys::TIME_SEC;
-using qpid::broker::Broker;
-using boost::shared_ptr;
-using qpid::cluster::Cluster;
-using boost::assign::list_of;
-
-
-#include "ClusterFixture.h"
-
-namespace qpid {
-namespace tests {
-
-ClusterFixture::ClusterFixture(size_t n, const Args& args_, int localIndex_)
- : name(Uuid(true).str()), localIndex(localIndex_), userArgs(args_)
-{
- add(n);
-}
-
-ClusterFixture::ClusterFixture(size_t n, boost::function<void (Args&, size_t)> updateArgs_, int localIndex_)
- : name(Uuid(true).str()), localIndex(localIndex_), updateArgs(updateArgs_)
-{
- add(n);
-}
-
-ClusterFixture::Args ClusterFixture::makeArgs(const std::string& prefix, size_t index) {
- Args args = list_of<string>("qpidd ")
- ("--cluster-name")(name)
- ("--log-prefix")(prefix);
- args.insert(args.end(), userArgs.begin(), userArgs.end());
- if (updateArgs) updateArgs(args, index);
- return args;
-}
-
-void ClusterFixture::add() {
- if (size() != size_t(localIndex)) { // fork a broker process.
- std::ostringstream os; os << "fork" << size();
- std::string prefix = os.str();
- forkedBrokers.push_back(shared_ptr<ForkedBroker>(new ForkedBroker(makeArgs(prefix, size()))));
- push_back(forkedBrokers.back()->getPort());
- }
- else { // Run in this process
- addLocal();
- }
-}
-
-namespace {
-/** Parse broker & cluster options */
-Broker::Options parseOpts(size_t argc, const char* argv[]) {
- Broker::Options opts;
- Plugin::addOptions(opts); // Pick up cluster options.
- opts.parse(argc, argv, "", true); // Allow-unknown for --load-module
- return opts;
-}
-}
-
-void ClusterFixture::addLocal() {
- assert(int(size()) == localIndex);
- ostringstream os; os << "local" << localIndex;
- string prefix = os.str();
- Args args(makeArgs(prefix, localIndex));
- vector<const char*> argv(args.size());
- transform(args.begin(), args.end(), argv.begin(), boost::bind(&string::c_str, _1));
- qpid::log::Logger::instance().setPrefix(prefix);
- localBroker.reset(new BrokerFixture(parseOpts(argv.size(), &argv[0])));
- push_back(localBroker->getPort());
- forkedBrokers.push_back(shared_ptr<ForkedBroker>());
-}
-
-bool ClusterFixture::hasLocal() const { return localIndex >= 0 && size_t(localIndex) < size(); }
-
-/** Kill a forked broker with sig, or shutdown localBroker if n==0. */
-void ClusterFixture::kill(size_t n, int sig) {
- if (n == size_t(localIndex))
- localBroker->broker->shutdown();
- else
- forkedBrokers[n]->kill(sig);
-}
-
-/** Kill a broker and suppress errors from closing connection c. */
-void ClusterFixture::killWithSilencer(size_t n, client::Connection& c, int sig) {
- ScopedSuppressLogging sl;
- try { c.close(); } catch(...) {}
- kill(n,sig);
-}
-
-/**
- * Get the known broker ports from a Connection.
- *@param n if specified wait for the cluster size to be n, up to a timeout.
- */
-std::set<int> knownBrokerPorts(qpid::client::Connection& c, int n) {
- FailoverListener fl(c, false);
- std::vector<qpid::Url> urls = fl.getKnownBrokers();
- if (n >= 0 && unsigned(n) != urls.size()) {
- // Retry up to 10 secs in .1 second intervals.
- for (size_t retry=100; urls.size() != unsigned(n) && retry != 0; --retry) {
- qpid::sys::usleep(1000*100); // 0.1 secs
- urls = fl.getKnownBrokers();
- }
- }
- std::set<int> s;
- for (std::vector<qpid::Url>::const_iterator i = urls.begin(); i != urls.end(); ++i)
- s.insert((*i)[0].port);
- return s;
-}
-
-}} // namespace qpid::tests
diff --git a/cpp/src/tests/ClusterFixture.h b/cpp/src/tests/ClusterFixture.h
deleted file mode 100644
index f548ff9376..0000000000
--- a/cpp/src/tests/ClusterFixture.h
+++ /dev/null
@@ -1,115 +0,0 @@
-#ifndef CLUSTER_FIXTURE_H
-#define CLUSTER_FIXTURE_H
-
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/log/Logger.h"
-
-#include <boost/bind.hpp>
-#include <boost/function.hpp>
-#include <boost/shared_ptr.hpp>
-
-#include <string>
-#include <iostream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using qpid::sys::TIME_SEC;
-using qpid::broker::Broker;
-using boost::shared_ptr;
-using qpid::cluster::Cluster;
-
-namespace qpid {
-namespace tests {
-
-/** Cluster fixture is a vector of ports for the replicas.
- *
- * At most one replica (by default replica 0) is in the current
- * process, all others are forked as children.
- */
-class ClusterFixture : public vector<uint16_t> {
- public:
- typedef std::vector<std::string> Args;
-
- /** @param localIndex can be -1 meaning don't automatically start a local broker.
- * A local broker can be started with addLocal().
- */
- ClusterFixture(size_t n, const Args& args, int localIndex=-1);
-
- /**@param updateArgs function is passed the index of the cluster member and can update the arguments. */
- ClusterFixture(size_t n, boost::function<void (Args&, size_t)> updateArgs, int localIndex=-1);
-
- void add(size_t n) { for (size_t i=0; i < n; ++i) add(); }
- void add(); // Add a broker.
- void setup();
-
- bool hasLocal() const;
-
- /** Kill a forked broker with sig, or shutdown localBroker. */
- void kill(size_t n, int sig=SIGINT);
-
- /** Kill a broker and suppress errors from closing connection c. */
- void killWithSilencer(size_t n, client::Connection& c, int sig=SIGINT);
-
- private:
-
- void addLocal(); // Add a local broker.
- Args makeArgs(const std::string& prefix, size_t index);
- string name;
- std::auto_ptr<BrokerFixture> localBroker;
- int localIndex;
- std::vector<shared_ptr<ForkedBroker> > forkedBrokers;
- Args userArgs;
- boost::function<void (Args&, size_t)> updateArgs;
-};
-
-/**
- * Get the known broker ports from a Connection.
- *@param n if specified wait for the cluster size to be n, up to a timeout.
- */
-std::set<int> knownBrokerPorts(qpid::client::Connection& source, int n=-1);
-
-}} // namespace qpid::tests
-
-#endif /*!CLUSTER_FIXTURE_H*/
diff --git a/cpp/src/tests/DispatcherTest.cpp b/cpp/src/tests/DispatcherTest.cpp
index e1691db584..7312fe8d2e 100644
--- a/cpp/src/tests/DispatcherTest.cpp
+++ b/cpp/src/tests/DispatcherTest.cpp
@@ -20,7 +20,6 @@
*/
#include "qpid/sys/Poller.h"
-#include "qpid/sys/IOHandle.h"
#include "qpid/sys/Dispatcher.h"
#include "qpid/sys/DispatchHandle.h"
#include "qpid/sys/posix/PrivatePosix.h"
@@ -147,8 +146,8 @@ int main(int /*argc*/, char** /*argv*/)
for (int i = 0; i < 8; i++)
testString += testString;
- PosixIOHandle f0(sv[0]);
- PosixIOHandle f1(sv[1]);
+ IOHandle f0(sv[0]);
+ IOHandle f1(sv[1]);
rh = new DispatchHandleRef(f0, boost::bind(reader, _1, sv[0]), 0, 0);
wh = new DispatchHandleRef(f1, 0, boost::bind(writer, _1, sv[1], testString), 0);
diff --git a/cpp/src/tests/FieldTable.cpp b/cpp/src/tests/FieldTable.cpp
index 8aeeb031b3..81372d87c5 100644
--- a/cpp/src/tests/FieldTable.cpp
+++ b/cpp/src/tests/FieldTable.cpp
@@ -20,7 +20,6 @@
*/
#include <iostream>
#include <algorithm>
-#include "qpid/sys/alloca.h"
#include "qpid/framing/Array.h"
#include "qpid/framing/FieldTable.h"
#include "qpid/framing/FieldValue.h"
diff --git a/cpp/src/tests/ForkedBroker.cpp b/cpp/src/tests/ForkedBroker.cpp
deleted file mode 100644
index de1b42d40f..0000000000
--- a/cpp/src/tests/ForkedBroker.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include "ForkedBroker.h"
-#include "qpid/log/Statement.h"
-#include <boost/bind.hpp>
-#include <algorithm>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <signal.h>
-
-using namespace std;
-using qpid::ErrnoException;
-
-namespace std {
-static ostream& operator<<(ostream& o, const qpid::tests::ForkedBroker::Args& a) {
-    copy(a.begin(), a.end(), ostream_iterator<string>(o, " "));
-    return o;
-}
-}
-
-namespace qpid {
-namespace tests {
-
-ForkedBroker::ForkedBroker(const Args& constArgs) : running(false), exitStatus(0) {
- Args args(constArgs);
- // Substitute the special value "TMP_DATA_DIR" with a temporary data dir.
- Args::iterator i = find(args.begin(), args.end(), string("TMP_DATA_DIR"));
- if (i != args.end()) {
- args.erase(i);
- char dd[] = "/tmp/ForkedBroker.XXXXXX";
- if (!mkdtemp(dd))
- throw qpid::ErrnoException("Can't create data dir");
- dataDir = dd;
- args.push_back("--data-dir");
- args.push_back(dataDir);
- }
-    // Never use the default data directory; add --no-data-dir if no other data-dir argument is given.
- Args::iterator j = find(args.begin(), args.end(), string("--data-dir"));
- Args::iterator k = find(args.begin(), args.end(), string("--no-data-dir"));
- if (j == args.end() && k == args.end())
- args.push_back("--no-data-dir");
- init(args);
-}
-
-ForkedBroker::~ForkedBroker() {
- try { kill(); }
- catch (const std::exception& e) {
- QPID_LOG(error, QPID_MSG("Killing forked broker: " << e.what()));
- }
- if (!dataDir.empty())
- {
- if(::system(("rm -rf "+dataDir).c_str())) {}
- }
-}
-
-void ForkedBroker::kill(int sig) {
- if (pid == 0) return;
- int savePid = pid;
- pid = 0; // Reset pid here in case of an exception.
- using qpid::ErrnoException;
- if (::kill(savePid, sig) < 0)
- throw ErrnoException("kill failed");
- int status;
- if (::waitpid(savePid, &status, 0) < 0 && sig != 9)
- throw ErrnoException("wait for forked process failed");
- if (WEXITSTATUS(status) != 0 && sig != 9)
- throw qpid::Exception(QPID_MSG("Forked broker exited with: " << WEXITSTATUS(status)));
- running = false;
- exitStatus = status;
-}
-
-bool isLogOption(const std::string& s) {
- const char * log_enable = "--log-enable",
- * trace = "--trace";
- return( (! strncmp(s.c_str(), log_enable, strlen(log_enable))) ||
- (! strncmp(s.c_str(), trace, strlen(trace)))
- );
-}
-
-namespace {
- void ignore_signal(int)
- {
- }
-}
-
-void ForkedBroker::init(const Args& userArgs) {
- using qpid::ErrnoException;
- port = 0;
- int pipeFds[2];
- if(::pipe(pipeFds) < 0) throw ErrnoException("Can't create pipe");
-
-    // Ignore the SIGCHLD signal generated by an exiting child.
-    // Exiting children are cleaned up by the waitpid call in kill() above.
-    // This should really be done once, not once per fork.
- struct ::sigaction sa;
- sa.sa_handler = ignore_signal;
- ::sigemptyset(&sa.sa_mask);
- ::sigaddset(&sa.sa_mask, SIGCHLD);
- sa.sa_flags = SA_NOCLDSTOP | SA_RESTART;
- ::sigaction(SIGCHLD, &sa, 0);
-
- pid = ::fork();
- if (pid < 0) throw ErrnoException("Fork failed");
- if (pid) { // parent
- ::close(pipeFds[1]);
- FILE* f = ::fdopen(pipeFds[0], "r");
-        if (!f) throw ErrnoException("fdopen failed");
- if (::fscanf(f, "%d", &port) != 1) {
- if (ferror(f)) throw ErrnoException("Error reading port number from child.");
- else throw qpid::Exception("EOF reading port number from child.");
- }
- ::fclose(f);
- running = true;
- }
- else { // child
- ::close(pipeFds[0]);
- int fd = ::dup2(pipeFds[1], 1); // pipe stdout to the parent.
- if (fd < 0) throw ErrnoException("dup2 failed");
- const char* prog = ::getenv("QPIDD_EXEC");
-        if (!prog) prog = "../qpidd"; // This only works from within an svn checkout
- Args args(userArgs);
- args.push_back("--port=0");
- // Keep quiet except for errors.
- if (!::getenv("QPID_TRACE") && !::getenv("QPID_LOG_ENABLE")
- && find_if(userArgs.begin(), userArgs.end(), isLogOption) == userArgs.end())
- args.push_back("--log-enable=error+");
- std::vector<const char*> argv(args.size());
- std::transform(args.begin(), args.end(), argv.begin(), boost::bind(&std::string::c_str, _1));
- argv.push_back(0);
- QPID_LOG(debug, "ForkedBroker exec " << prog << ": " << args);
-
- execv(prog, const_cast<char* const*>(&argv[0]));
- QPID_LOG(critical, "execv failed to start broker: prog=\"" << prog << "\"; args=\"" << args << "\"; errno=" << errno << " (" << std::strerror(errno) << ")");
- ::exit(1);
- }
-}
-
-}} // namespace qpid::tests
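The init() above uses a standard fork/pipe/exec handshake: the child dups the pipe's write end onto stdout, so the parent can read back the port that qpidd prints when started with --port=0. A stripped-down sketch of just that pattern (hypothetical helper; it assumes the exec'd program prints its chosen port to stdout):

    #include <stdio.h>
    #include <unistd.h>

    int forkAndReadPort(const char* prog) {      // hypothetical name
        int fds[2];
        if (::pipe(fds) < 0) return -1;
        pid_t child = ::fork();
        if (child < 0) return -1;
        if (child == 0) {                        // child: stdout -> pipe, then exec
            ::close(fds[0]);
            ::dup2(fds[1], 1);
            char* const argv[] = { const_cast<char*>(prog),
                                   const_cast<char*>("--port=0"), 0 };
            ::execv(prog, argv);
            ::_exit(1);                          // reached only if execv fails
        }
        ::close(fds[1]);                         // parent: read the child's port
        FILE* f = ::fdopen(fds[0], "r");
        int port = -1;
        if (f && ::fscanf(f, "%d", &port) != 1) port = -1;
        if (f) ::fclose(f);
        return port;
    }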
diff --git a/cpp/src/tests/ForkedBroker.h b/cpp/src/tests/ForkedBroker.h
deleted file mode 100644
index 87e141a425..0000000000
--- a/cpp/src/tests/ForkedBroker.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#ifndef TESTS_FORKEDBROKER_H
-#define TESTS_FORKEDBROKER_H
-
-
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-#include "qpid/Exception.h"
-#include "qpid/log/Statement.h"
-#include "qpid/broker/Broker.h"
-#include <boost/lexical_cast.hpp>
-#include <string>
-#include <stdio.h>
-#include <sys/wait.h>
-
-namespace qpid {
-namespace tests {
-
-/**
- * Class to fork a broker child process.
- *
- * For most tests a BrokerFixture may be more convenient as it starts
- * a broker in the same process which allows you to easily debug into
- * the broker.
- *
- * This is useful for tests that need to start multiple brokers where
- * those brokers can't coexist in the same process (e.g. for cluster
- * tests, where CPG doesn't allow multiple group members in a single
- * process).
- *
- */
-class ForkedBroker {
- public:
- typedef std::vector<std::string> Args;
-
- // argv args are passed to broker.
- //
- // Special value "TMP_DATA_DIR" is substituted with a temporary
- // data directory for the broker.
- //
- ForkedBroker(const Args& argv);
- ~ForkedBroker();
-
- void kill(int sig=SIGINT);
- int wait(); // Wait for exit, return exit status.
- uint16_t getPort() { return port; }
- pid_t getPID() { return pid; }
- bool isRunning() { return running; }
-
- private:
-
- void init(const Args& args);
-
- bool running;
- int exitStatus;
-
- pid_t pid;
- int port;
- std::string dataDir;
-};
-
-}} // namespace qpid::tests
-
-#endif /*!TESTS_FORKEDBROKER_H*/
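A short, hypothetical usage sketch of the interface above (the argument values are illustrative):

    #include "ForkedBroker.h"
    #include <signal.h>

    namespace qpid { namespace tests {
    void exampleForkedBrokerUsage() {            // hypothetical helper
        ForkedBroker::Args args;
        args.push_back("--auth");
        args.push_back("no");
        args.push_back("TMP_DATA_DIR");          // replaced by a fresh temp dir
        ForkedBroker broker(args);               // forks, then reads the port
        uint16_t port = broker.getPort();        // port chosen by the child
        (void)port;                              // a client would connect here
        broker.kill(SIGTERM);                    // also invoked by the destructor
    }
    }} // namespace qpid::tests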
diff --git a/cpp/src/tests/InitialStatusMap.cpp b/cpp/src/tests/InitialStatusMap.cpp
deleted file mode 100644
index 95806737e3..0000000000
--- a/cpp/src/tests/InitialStatusMap.cpp
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-
-#include "unit_test.h"
-#include "test_tools.h"
-#include "qpid/cluster/InitialStatusMap.h"
-#include "qpid/framing/Uuid.h"
-#include <boost/assign.hpp>
-
-using namespace std;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::framing::cluster;
-using namespace boost::assign;
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(InitialStatusMapTestSuite)
-
-typedef InitialStatusMap::Status Status;
-
-Status activeStatus(const Uuid& id=Uuid(), const MemberSet& ms=MemberSet(),
- const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, true, id, STORE_STATE_NO_STORE, Uuid(),
- encodeMemberSet(ms), urls);
-}
-
-Status newcomerStatus(const Uuid& id=Uuid(), const MemberSet& ms=MemberSet(),
- const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, false, id, STORE_STATE_NO_STORE, Uuid(),
- encodeMemberSet(ms), urls);
-}
-
-Status storeStatus(bool active, StoreState state, Uuid start=Uuid(), Uuid stop=Uuid(),
- const MemberSet& ms=MemberSet(), const framing::Array& urls=framing::Array())
-{
- return Status(ProtocolVersion(), 0, active, start, state, stop,
- encodeMemberSet(ms), urls);
-}
-
-QPID_AUTO_TEST_CASE(testFirstInCluster) {
- // Single member is first in cluster.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- BOOST_CHECK(!map.isComplete());
- MemberSet members = list_of(MemberId(0));
- map.configChange(members);
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(0), newcomerStatus(id, list_of<MemberId>(0)));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.getElders().empty());
- BOOST_CHECK(!map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(id, map.getClusterId());
-}
-
-QPID_AUTO_TEST_CASE(testJoinExistingCluster) {
- // Single member 0 joins existing cluster 1,2
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(0), newcomerStatus());
- map.received(MemberId(1), activeStatus(id));
- BOOST_CHECK(!map.isComplete());
- map.received(MemberId(2), activeStatus(id));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(1)(2));
- BOOST_CHECK(map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-
- // Check that transitionToComplete is reset.
- map.configChange(list_of<MemberId>(0)(1));
- BOOST_CHECK(!map.transitionToComplete());
-}
-
-QPID_AUTO_TEST_CASE(testMultipleFirstInCluster) {
- // Multiple members 0,1,2 join at same time.
- InitialStatusMap map(MemberId(1), 1); // self is 1
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
-
- // All new members
- map.received(MemberId(0), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- map.received(MemberId(1), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- map.received(MemberId(2), newcomerStatus(id, list_of<MemberId>(0)(1)(2)));
- BOOST_CHECK(!map.isResendNeeded());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of(MemberId(2)));
- BOOST_CHECK(!map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testMultipleJoinExisting) {
-    // Multiple members 2,3 simultaneously join a cluster containing 0,1.
- InitialStatusMap map(MemberId(2), 1); // self is 2
- Uuid id(true);
- MemberSet members = list_of(MemberId(0))(MemberId(1))(MemberId(2))(MemberId(3));
- map.configChange(members);
- BOOST_CHECK(map.isResendNeeded());
- map.received(MemberId(0), activeStatus(id, list_of<MemberId>(0)));
- map.received(MemberId(1), newcomerStatus(id, list_of<MemberId>(0)(1)));
- map.received(MemberId(2), newcomerStatus(id, list_of<MemberId>(0)(1)(2)(3)));
- map.received(MemberId(3), newcomerStatus(id, list_of<MemberId>(0)(1)(2)(3)));
- BOOST_CHECK(!map.isResendNeeded());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(0)(1)(3));
- BOOST_CHECK(map.isUpdateNeeded());
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testMembersLeave) {
-    // Test that the map completes if members leave rather than sending status.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
- map.configChange(list_of(MemberId(0))(MemberId(1))(MemberId(2)));
- map.received(MemberId(0), newcomerStatus());
- map.received(MemberId(1), activeStatus(id));
- BOOST_CHECK(!map.isComplete());
- map.configChange(list_of(MemberId(0))(MemberId(1))); // 2 left
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of(MemberId(1)));
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testInteveningConfig) {
-    // Multiple config changes arrive before we complete the map.
- InitialStatusMap map(MemberId(0), 1);
- Uuid id(true);
-
- map.configChange(list_of<MemberId>(0)(1));
- BOOST_CHECK(map.isResendNeeded());
- map.received(MemberId(0), newcomerStatus());
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(!map.isResendNeeded());
- // New member 2 joins before we receive 1
- map.configChange(list_of<MemberId>(0)(1)(2));
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(map.isResendNeeded());
- map.received(1, activeStatus(id));
- map.received(2, newcomerStatus());
-    // We should not be complete, as we haven't received 0 since the new member joined.
- BOOST_CHECK(!map.isComplete());
- BOOST_CHECK(!map.isResendNeeded());
-
- map.received(0, newcomerStatus());
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK_EQUAL(map.getElders(), list_of<MemberId>(1));
- BOOST_CHECK_EQUAL(map.getClusterId(), id);
-}
-
-QPID_AUTO_TEST_CASE(testAllCleanNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testAllEmptyNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_EMPTY_STORE));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testAllNoStoreNoUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_NO_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_NO_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_NO_STORE));
- BOOST_CHECK(map.isComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testDirtyNeedUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_DIRTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testEmptyNeedUpdate) {
- InitialStatusMap map(MemberId(0), 3);
- map.configChange(list_of<MemberId>(0)(1)(2));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- map.received(MemberId(1), storeStatus(false, STORE_STATE_CLEAN_STORE));
- map.received(MemberId(2), storeStatus(false, STORE_STATE_CLEAN_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_CASE(testEmptyAlone) {
- InitialStatusMap map(MemberId(0), 1);
- map.configChange(list_of<MemberId>(0));
- map.received(MemberId(0), storeStatus(false, STORE_STATE_EMPTY_STORE));
- BOOST_CHECK(map.transitionToComplete());
- BOOST_CHECK(!map.isUpdateNeeded());
-}
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
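The tests above exercise a small join protocol: each config change makes members multicast their status, the map collects the statuses, and on completion a newcomer fetches an update from an elder. A hedged driver sketch (the handler functions are hypothetical; only the InitialStatusMap calls mirror the tests above):

    #include "qpid/cluster/InitialStatusMap.h"

    namespace qpid { namespace tests {
    using namespace qpid::cluster;

    void onConfigChange(InitialStatusMap& map, const MemberSet& members) {
        map.configChange(members);
        if (map.isResendNeeded()) { /* multicast our own status again */ }
    }

    void onStatus(InitialStatusMap& map, const MemberId& from,
                  const InitialStatusMap::Status& status) {
        map.received(from, status);
        if (map.transitionToComplete()) {        // fires once per completion
            // Elders are pre-existing members; a newcomer joining an
            // established cluster needs an update from one of them.
            if (map.isUpdateNeeded()) { /* request update from map.getElders() */ }
        }
    }
    }} // namespace qpid::tests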
diff --git a/cpp/src/tests/Makefile.am b/cpp/src/tests/Makefile.am
index f9eed9270b..3513e0a6d6 100644
--- a/cpp/src/tests/Makefile.am
+++ b/cpp/src/tests/Makefile.am
@@ -17,7 +17,7 @@
# under the License.
#
-AM_CXXFLAGS = $(WARNING_CFLAGS) -DBOOST_TEST_DYN_LINK
+AM_CXXFLAGS = $(WARNING_CFLAGS) -DBOOST_TEST_DYN_LINK -D_IN_QPID_BROKER
INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include -I$(top_srcdir)/src -I$(top_builddir)/src
PUBLIC_INCLUDES = -I$(top_srcdir)/include -I$(top_builddir)/include # Use public API only
QMF_GEN=$(top_srcdir)/managementgen/qmf-gen
@@ -28,6 +28,7 @@ abs_srcdir=@abs_srcdir@
extra_libs =
lib_client = $(abs_builddir)/../libqpidclient.la
lib_messaging = $(abs_builddir)/../libqpidmessaging.la
+lib_types = $(abs_builddir)/../libqpidtypes.la
lib_common = $(abs_builddir)/../libqpidcommon.la
lib_broker = $(abs_builddir)/../libqpidbroker.la
lib_console = $(abs_builddir)/../libqmfconsole.la
@@ -107,8 +108,6 @@ unit_test_SOURCES= unit_test.cpp unit_test.h \
TopicExchangeTest.cpp \
TxBufferTest.cpp \
ConnectionOptions.h \
- ForkedBroker.h \
- ForkedBroker.cpp \
ManagementTest.cpp \
MessageReplayTracker.cpp \
ConsoleTest.cpp \
@@ -154,7 +153,7 @@ receiver_SOURCES = \
receiver.cpp \
TestOptions.h \
ConnectionOptions.h
-receiver_LDADD = $(lib_client)
+receiver_LDADD = $(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS += sender
sender_SOURCES = \
@@ -162,7 +161,7 @@ sender_SOURCES = \
TestOptions.h \
ConnectionOptions.h \
Statistics.cpp
-sender_LDADD = $(lib_messaging)
+sender_LDADD = $(lib_messaging) -lboost_program_options $(lib_common) $(lib_types) $(lib_client)
qpidexectest_PROGRAMS += qpid-receive
qpid_receive_SOURCES = \
@@ -171,7 +170,7 @@ qpid_receive_SOURCES = \
ConnectionOptions.h \
Statistics.h \
Statistics.cpp
-qpid_receive_LDADD = $(lib_messaging)
+qpid_receive_LDADD = $(lib_messaging) -lboost_program_options $(lib_common) $(lib_types)
qpidexectest_PROGRAMS += qpid-send
qpid_send_SOURCES = \
@@ -180,42 +179,42 @@ qpid_send_SOURCES = \
ConnectionOptions.h \
Statistics.h \
Statistics.cpp
-qpid_send_LDADD = $(lib_messaging)
+qpid_send_LDADD = $(lib_messaging) -lboost_program_options $(lib_common) $(lib_types)
qpidexectest_PROGRAMS+=qpid-perftest
qpid_perftest_SOURCES=qpid-perftest.cpp test_tools.h TestOptions.h ConnectionOptions.h
qpid_perftest_INCLUDES=$(PUBLIC_INCLUDES)
-qpid_perftest_LDADD=$(lib_client)
+qpid_perftest_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-txtest
qpid_txtest_INCLUDES=$(PUBLIC_INCLUDES)
qpid_txtest_SOURCES=qpid-txtest.cpp TestOptions.h ConnectionOptions.h
-qpid_txtest_LDADD=$(lib_client)
+qpid_txtest_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-latency-test
qpid_latency_test_INCLUDES=$(PUBLIC_INCLUDES)
qpid_latency_test_SOURCES=qpid-latency-test.cpp TestOptions.h ConnectionOptions.h
-qpid_latency_test_LDADD=$(lib_client)
+qpid_latency_test_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-client-test
qpid_client_test_INCLUDES=$(PUBLIC_INCLUDES)
qpid_client_test_SOURCES=qpid-client-test.cpp TestOptions.h ConnectionOptions.h
-qpid_client_test_LDADD=$(lib_client)
+qpid_client_test_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-topic-listener
qpid_topic_listener_INCLUDES=$(PUBLIC_INCLUDES)
qpid_topic_listener_SOURCES=qpid-topic-listener.cpp TestOptions.h ConnectionOptions.h
-qpid_topic_listener_LDADD=$(lib_client)
+qpid_topic_listener_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-topic-publisher
qpid_topic_publisher_INCLUDES=$(PUBLIC_INCLUDES)
qpid_topic_publisher_SOURCES=qpid-topic-publisher.cpp TestOptions.h ConnectionOptions.h
-qpid_topic_publisher_LDADD=$(lib_client)
+qpid_topic_publisher_LDADD=$(lib_client) -lboost_program_options $(lib_common)
qpidexectest_PROGRAMS+=qpid-ping
qpid_ping_INCLUDES=$(PUBLIC_INCLUDES)
qpid_ping_SOURCES=qpid-ping.cpp test_tools.h TestOptions.h ConnectionOptions.h
-qpid_ping_LDADD=$(lib_client)
+qpid_ping_LDADD=$(lib_client) -lboost_program_options $(lib_common)
#
# Other test programs
@@ -241,11 +240,6 @@ header_test_INCLUDES=$(PUBLIC_INCLUDES)
header_test_SOURCES=header_test.cpp TestOptions.h ConnectionOptions.h
header_test_LDADD=$(lib_client)
-check_PROGRAMS+=failover_soak
-failover_soak_INCLUDES=$(PUBLIC_INCLUDES)
-failover_soak_SOURCES=failover_soak.cpp ForkedBroker.h ForkedBroker.cpp
-failover_soak_LDADD=$(lib_client) $(lib_broker)
-
check_PROGRAMS+=declare_queues
declare_queues_INCLUDES=$(PUBLIC_INCLUDES)
declare_queues_SOURCES=declare_queues.cpp
@@ -303,8 +297,8 @@ system_tests = qpid-client-test quick_perftest quick_topictest run_header_test q
run_msg_group_tests
TESTS += start_broker $(system_tests) python_tests stop_broker \
run_ha_tests run_federation_tests run_federation_sys_tests \
- run_acl_tests run_cli_tests replication_test dynamic_log_level_test \
- run_queue_flow_limit_tests ipv6_test
+ run_acl_tests run_cli_tests dynamic_log_level_test \
+ dynamic_log_hires_timestamp run_queue_flow_limit_tests ipv6_test
EXTRA_DIST += \
run_test vg_check \
@@ -316,8 +310,8 @@ EXTRA_DIST += \
run_header_test \
header_test.py \
ssl_test \
+ ping_broker \
config.null \
- cpg_check.sh.in \
run_federation_tests \
run_federation_sys_tests \
run_long_federation_sys_tests \
@@ -327,11 +321,11 @@ EXTRA_DIST += \
MessageUtils.h \
TestMessageStore.h \
TxMocks.h \
- replication_test \
run_perftest \
ring_queue_test \
run_ring_queue_test \
dynamic_log_level_test \
+ dynamic_log_hires_timestamp \
qpid-ctrl \
CMakeLists.txt \
windows/DisableWin32ErrorWindows.cpp \
@@ -349,7 +343,10 @@ EXTRA_DIST += \
run_msg_group_tests \
ipv6_test \
run_ha_tests \
+ ha_test.py \
ha_tests.py \
+ brokertest.py \
+ ha_store_tests.py \
test_env.ps1.in
check_LTLIBRARIES += libdlclose_noop.la
@@ -372,9 +369,6 @@ EXTRA_DIST+= \
shared_perftest \
multiq_perftest \
topic_perftest \
- run_failover_soak \
- reliable_replication_test \
- federated_cluster_test_with_node_failure \
sasl_test_setup.sh \
run_msg_group_tests_soak \
qpidd-empty.conf
diff --git a/cpp/src/tests/MessageUtils.h b/cpp/src/tests/MessageUtils.h
index c2eabd804d..5024f5b77c 100644
--- a/cpp/src/tests/MessageUtils.h
+++ b/cpp/src/tests/MessageUtils.h
@@ -1,3 +1,6 @@
+#ifndef TESTS_MESSAGEUTILS_H
+#define TESTS_MESSAGEUTILS_H
+
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
@@ -98,3 +101,5 @@ struct MessageUtils
};
}} // namespace qpid::tests
+
+#endif /*!TESTS_MESSAGEUTILS_H*/
diff --git a/cpp/src/tests/MessagingSessionTests.cpp b/cpp/src/tests/MessagingSessionTests.cpp
index c8ee3aa401..55cff046e2 100644
--- a/cpp/src/tests/MessagingSessionTests.cpp
+++ b/cpp/src/tests/MessagingSessionTests.cpp
@@ -1164,6 +1164,59 @@ QPID_AUTO_TEST_CASE(testAlternateExchangeInLinkDeclare)
}
}
+QPID_AUTO_TEST_CASE(testBrowseOnly)
+{
+ /* Set up a queue browse-only, and try to receive
+ the same messages twice with two different receivers.
+ This works because the browse-only queue does not
+ allow message acquisition. */
+
+ QueueFixture fix;
+ std::string addr = "q; {create:always, node:{type:queue, durable:false, x-declare:{arguments:{qpid.browse-only:1}}}}";
+ Sender sender = fix.session.createSender(addr);
+ Message out("test-message");
+
+ int count = 10;
+ for ( int i = 0; i < count; ++ i ) {
+ sender.send(out);
+ }
+
+ Message m;
+
+ Receiver receiver_1 = fix.session.createReceiver(addr);
+ for ( int i = 0; i < count; ++ i ) {
+ BOOST_CHECK(receiver_1.fetch(m, Duration::SECOND));
+ }
+
+ Receiver receiver_2 = fix.session.createReceiver(addr);
+ for ( int i = 0; i < count; ++ i ) {
+ BOOST_CHECK(receiver_2.fetch(m, Duration::SECOND));
+ }
+
+ fix.session.acknowledge();
+}
+
+QPID_AUTO_TEST_CASE(testLinkBindingCleanup)
+{
+ MessagingFixture fix;
+
+ Sender sender = fix.session.createSender("test.ex;{create:always,node:{type:topic}}");
+
+ Connection connection = fix.newConnection();
+ connection.open();
+
+ Session session(connection.createSession());
+ Receiver receiver1 = session.createReceiver("test.q;{create:always, node:{type:queue, x-bindings:[{exchange:test.ex,queue:test.q,key:#,arguments:{x-scope:session}}]}}");
+ Receiver receiver2 = fix.session.createReceiver("test.q;{create:never, delete:always}");
+ connection.close();
+
+ sender.send(Message("test-message"), true);
+
+ // The session-scoped binding should be removed when receiver1's network connection is lost
+ Message in;
+ BOOST_CHECK(!receiver2.fetch(in, Duration::IMMEDIATE));
+}
+
QPID_AUTO_TEST_SUITE_END()
}} // namespace qpid::tests
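The browse-only behaviour exercised by testBrowseOnly above is driven entirely by the address string. A standalone sketch using the messaging API (the broker URL is hypothetical; the address mirrors the one in the test):

    #include <qpid/messaging/Connection.h>
    #include <qpid/messaging/Session.h>
    #include <qpid/messaging/Receiver.h>
    #include <qpid/messaging/Message.h>
    #include <qpid/messaging/Duration.h>

    using namespace qpid::messaging;

    int main() {
        Connection c("localhost:5672");          // hypothetical broker address
        c.open();
        Session s = c.createSession();
        // qpid.browse-only:1 => consumers only browse; nothing is acquired,
        // so a second receiver sees the same messages again.
        Receiver r = s.createReceiver(
            "q; {create:always, node:{type:queue,"
            " x-declare:{arguments:{qpid.browse-only:1}}}}");
        Message m;
        while (r.fetch(m, Duration::IMMEDIATE)) { /* inspect m */ }
        c.close();
        return 0;
    }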
diff --git a/cpp/src/tests/PartialFailure.cpp b/cpp/src/tests/PartialFailure.cpp
deleted file mode 100644
index 63ee28017a..0000000000
--- a/cpp/src/tests/PartialFailure.cpp
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-/**@file Tests for partial failure in a cluster.
- * Partial failure means some nodes experience a failure while others do not.
- * In this case the failed nodes must shut down.
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ClusterFixture.h"
-#include <boost/assign.hpp>
-#include <boost/algorithm/string.hpp>
-#include <boost/bind.hpp>
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(PartialFailureTestSuite)
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace qpid::client::arg;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=sys::TIME_SEC/4;
-
-static bool isLogOption(const std::string& s) { return boost::starts_with(s, "--log-enable"); }
-
-void updateArgs(ClusterFixture::Args& args, size_t index) {
- ostringstream clusterLib, testStoreLib, storeName;
- clusterLib << getLibPath("CLUSTER_LIB");
- testStoreLib << getLibPath("TEST_STORE_LIB");
- storeName << "s" << index;
- args.push_back("--auth");
- args.push_back("no");
- args.push_back("--no-module-dir");
- args.push_back("--load-module");
- args.push_back(clusterLib.str());
- args.push_back("--load-module");
- args.push_back(testStoreLib.str());
- args.push_back("--test-store-name");
- args.push_back(storeName.str());
- args.push_back("TMP_DATA_DIR");
-
-    // These tests generate errors deliberately; disable error logging unless a log env var is set.
- if (!::getenv("QPID_TRACE") && !::getenv("QPID_LOG_ENABLE")) {
-        args.erase(remove_if(args.begin(), args.end(), isLogOption), args.end());
- args.push_back("--log-enable=critical+:DISABLED"); // hacky way to disable logs.
- }
-}
-
-Message pMessage(string data, string q) {
- Message msg(data, q);
- msg.getDeliveryProperties().setDeliveryMode(PERSISTENT);
- return msg;
-}
-
-void queueAndSub(Client& c) {
- c.session.queueDeclare(c.name, durable=true);
- c.subs.subscribe(c.lq, c.name);
-}
-
-// Handle near-simultaneous errors
-QPID_AUTO_TEST_CASE(testCoincidentErrors) {
- ClusterFixture cluster(2, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
-
- c0.session.queueDeclare("q", durable=true);
- {
- ScopedSuppressLogging allQuiet;
- async(c0.session).messageTransfer(content=pMessage("TEST_STORE_DO: s0[exception]", "q"));
- async(c1.session).messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception]", "q"));
-
- int alive=0;
- try { Client c00(cluster[0], "c00"); ++alive; c00.close(); } catch (...) {}
- try { Client c11(cluster[1], "c11"); ++alive; c11.close(); } catch (...) {}
-
- BOOST_CHECK_EQUAL(alive, 1);
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- }
-}
-
-// Verify normal cluster-wide errors.
-QPID_AUTO_TEST_CASE(testNormalErrors) {
- // FIXME aconway 2009-04-10: Would like to put a scope just around
-    // the statements expected to fail (in BOOST_CHECK_THROW) but that
-    // sporadically lets out messages, possibly because they're in the
-    // Connection thread.
-
- ClusterFixture cluster(3, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
-
- {
- ScopedSuppressLogging allQuiet;
- queueAndSub(c0);
- c0.session.messageTransfer(content=Message("x", "c0"));
- BOOST_CHECK_EQUAL(c0.lq.get(TIMEOUT).getData(), "x");
-
- // Session error.
- BOOST_CHECK_THROW(c0.session.exchangeBind(), SessionException);
- c1.session.messageTransfer(content=Message("stay", "c0")); // Will stay on queue, session c0 is dead.
-
- // Connection error, kill c1 on all members.
- queueAndSub(c1);
- BOOST_CHECK_THROW(
- c1.session.messageTransfer(
- content=pMessage("TEST_STORE_DO: s0[exception] s1[exception] s2[exception] testNormalErrors", "c1")),
- ConnectionException);
- c2.session.messageTransfer(content=Message("stay", "c1")); // Will stay on queue, session/connection c1 is dead.
-
- BOOST_CHECK_EQUAL(3u, knownBrokerPorts(c2.connection, 3).size());
- BOOST_CHECK_EQUAL(c2.subs.get("c0", TIMEOUT).getData(), "stay");
- BOOST_CHECK_EQUAL(c2.subs.get("c1", TIMEOUT).getData(), "stay");
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-
-// Test errors after a new member joins to verify frame-sequence-numbers are ok in update.
-QPID_AUTO_TEST_CASE(testErrorAfterJoin) {
- ClusterFixture cluster(1, updateArgs, -1);
- Client c0(cluster[0]);
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Kill the new guy
- cluster.add();
- Client c1(cluster[1]);
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] testErrorAfterJoin", "q"));
- BOOST_CHECK_THROW(c1.session.messageTransfer(content=pMessage("xxx", "q")), TransportFailure);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c0.connection, 1).size());
-
- // Kill the old guy
- cluster.add();
- Client c2(cluster[2]);
- c2.session.messageTransfer(content=pMessage("TEST_STORE_DO: s0[exception] testErrorAfterJoin2", "q"));
- BOOST_CHECK_THROW(c0.session.messageTransfer(content=pMessage("xxx", "q")), TransportFailure);
-
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c2.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-// Test that if one member fails and others do not, the failed member leaves the cluster.
-QPID_AUTO_TEST_CASE(testSinglePartialFailure) {
- ClusterFixture cluster(3, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
- // Cause partial failure on c1
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] testSinglePartialFailure", "q"));
- BOOST_CHECK_THROW(c1.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("b", "q"));
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 3u);
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size());
-
- // Cause partial failure on c2
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s2[exception] testSinglePartialFailure2", "q"));
- BOOST_CHECK_THROW(c2.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("c", "q"));
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 5u);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c0.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- }
-}
-
-// Test multiple partial failures: 2 fail, 2 pass.
-QPID_AUTO_TEST_CASE(testMultiPartialFailure) {
- ClusterFixture cluster(4, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- Client c2(cluster[2], "c2");
- Client c3(cluster[3], "c3");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Cause partial failure on c1, c2
- c0.session.messageTransfer(content=pMessage("TEST_STORE_DO: s1[exception] s2[exception] testMultiPartialFailure", "q"));
- BOOST_CHECK_THROW(c1.session.queueQuery("q"), TransportFailure);
- BOOST_CHECK_THROW(c2.session.queueQuery("q"), TransportFailure);
-
- c0.session.messageTransfer(content=pMessage("b", "q"));
- c3.session.messageTransfer(content=pMessage("c", "q"));
- BOOST_CHECK_EQUAL(c3.session.queueQuery("q").getMessageCount(), 4u);
- // FIXME aconway 2009-06-30: This check fails sporadically with 2 != 3.
- // It should pass reliably.
- // BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c0.connection, 2).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- c1.close();
- c2.close();
- c3.close();
- }
-}
-
-/** FIXME aconway 2009-04-10:
- * The current approach to shutting down a process in test_store
- * sometimes leads to assertion failures and errors in the shut-down
- * process. Need a cleaner solution.
- */
-#if 0
-QPID_AUTO_TEST_CASE(testPartialFailureMemberLeaves) {
- ClusterFixture cluster(2, updateArgs, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
-
- {
- ScopedSuppressLogging allQuiet;
-
- c0.session.queueDeclare("q", durable=true);
- c0.session.messageTransfer(content=pMessage("a", "q"));
-
- // Cause failure on member 0 and simultaneous crash on member 1.
- BOOST_CHECK_THROW(
- c0.session.messageTransfer(
- content=pMessage("TEST_STORE_DO: s0[exception] s1[exit_process] testPartialFailureMemberLeaves", "q")),
- ConnectionException);
- cluster.wait(1);
-
- Client c00(cluster[0], "c00"); // Old connection is dead.
- BOOST_CHECK_EQUAL(c00.session.queueQuery("q").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(1u, knownBrokerPorts(c00.connection, 1).size());
-
- // Close inside ScopedSuppressLogging to avoid warnings
- c0.close();
- }
-}
-#endif
-
-QPID_AUTO_TEST_SUITE_END()
-
-}} // namespace qpid::tests
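The tests above inject faults through a message convention rather than an API: a persistent message whose content is "TEST_STORE_DO: sN[action] ..." tells the test store named sN (see --test-store-name in updateArgs) to throw an exception or kill its process. A hedged sketch of how such a directive could be parsed (hypothetical helper, not the actual test_store code):

    #include <string>
    #include <stdexcept>
    #include <cstdlib>

    void checkTestStoreDirective(const std::string& content,
                                 const std::string& storeName) {
        static const std::string PREFIX = "TEST_STORE_DO:";
        if (content.compare(0, PREFIX.size(), PREFIX) != 0) return;
        std::string::size_type i = content.find(storeName + "[");
        if (i == std::string::npos) return;      // directive is for another store
        std::string::size_type j = content.find(']', i);
        if (j == std::string::npos) return;
        std::string action = content.substr(i + storeName.size() + 1,
                                            j - i - storeName.size() - 1);
        if (action == "exception") throw std::runtime_error("injected store failure");
        if (action == "exit_process") std::exit(1);
    }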
diff --git a/cpp/src/tests/PollerTest.cpp b/cpp/src/tests/PollerTest.cpp
index 9fa5689c5f..5a1d02964c 100644
--- a/cpp/src/tests/PollerTest.cpp
+++ b/cpp/src/tests/PollerTest.cpp
@@ -23,7 +23,6 @@
* Use socketpair to test the poller
*/
-#include "qpid/sys/IOHandle.h"
#include "qpid/sys/Poller.h"
#include "qpid/sys/posix/PrivatePosix.h"
@@ -106,8 +105,8 @@ int main(int /*argc*/, char** /*argv*/)
auto_ptr<Poller> poller(new Poller);
- PosixIOHandle f0(sv[0]);
- PosixIOHandle f1(sv[1]);
+ IOHandle f0(sv[0]);
+ IOHandle f1(sv[1]);
PollerHandle h0(f0);
PollerHandle h1(f1);
@@ -225,8 +224,8 @@ int main(int /*argc*/, char** /*argv*/)
auto_ptr<Poller> poller1(new Poller);
- PosixIOHandle f2(sv[0]);
- PosixIOHandle f3(sv[1]);
+ IOHandle f2(sv[0]);
+ IOHandle f3(sv[1]);
PollerHandle h2(f2);
PollerHandle h3(f3);
diff --git a/cpp/src/tests/QueueTest.cpp b/cpp/src/tests/QueueTest.cpp
index d86c18c38d..60ba036a7e 100644
--- a/cpp/src/tests/QueueTest.cpp
+++ b/cpp/src/tests/QueueTest.cpp
@@ -40,6 +40,7 @@
#include "qpid/framing/reply_exceptions.h"
#include "qpid/broker/QueueFlowLimit.h"
#include "qpid/broker/QueueSettings.h"
+#include "qpid/sys/Timer.h"
#include <iostream>
#include <vector>
diff --git a/cpp/src/tests/StoreStatus.cpp b/cpp/src/tests/StoreStatus.cpp
deleted file mode 100644
index 43d4cfd920..0000000000
--- a/cpp/src/tests/StoreStatus.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-
-#include "unit_test.h"
-#include "test_tools.h"
-#include "qpid/cluster/StoreStatus.h"
-#include "qpid/framing/Uuid.h"
-#include <boost/assign.hpp>
-#include <boost/filesystem/operations.hpp>
-
-using namespace std;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::framing::cluster;
-using namespace boost::assign;
-using namespace boost::filesystem;
-
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(StoreStatusTestSuite)
-
-const char* TEST_DIR = "StoreStatus.tmp";
-
-struct TestDir {
- TestDir() {
- remove_all(TEST_DIR);
- create_directory(TEST_DIR);
- }
- ~TestDir() {
- remove_all(TEST_DIR);
- }
-};
-
-QPID_AUTO_TEST_CASE(testLoadEmpty) {
- TestDir td;
- StoreStatus ss(TEST_DIR);
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_NO_STORE);
- BOOST_CHECK(!ss.getClusterId());
- BOOST_CHECK(!ss.getShutdownId());
- ss.load();
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_EMPTY_STORE);
- BOOST_CHECK(!ss.getShutdownId());
-}
-
-QPID_AUTO_TEST_CASE(testSaveLoadDirty) {
- TestDir td;
- Uuid clusterId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.dirty();
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_DIRTY_STORE);
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_DIRTY_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK(!ss2.getShutdownId());
-}
-
-QPID_AUTO_TEST_CASE(testSaveLoadClean) {
- TestDir td;
- Uuid clusterId = Uuid(true);
- Uuid shutdownId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.clean(shutdownId);
- BOOST_CHECK_EQUAL(ss.getState(), STORE_STATE_CLEAN_STORE);
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_CLEAN_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK_EQUAL(ss2.getShutdownId(), shutdownId);
-}
-
-QPID_AUTO_TEST_CASE(testMarkDirty) {
-    // Save clean, then mark dirty again.
- TestDir td;
- Uuid clusterId = Uuid(true);
- Uuid shutdownId = Uuid(true);
- StoreStatus ss(TEST_DIR);
- ss.load();
- ss.setClusterId(clusterId);
- ss.dirty();
- ss.clean(shutdownId);
- ss.dirty();
-
- StoreStatus ss2(TEST_DIR);
- ss2.load();
- BOOST_CHECK_EQUAL(ss2.getState(), STORE_STATE_DIRTY_STORE);
- BOOST_CHECK_EQUAL(ss2.getClusterId(), clusterId);
- BOOST_CHECK(!ss2.getShutdownId());
-}
-
-QPID_AUTO_TEST_SUITE_END()
-
- }} // namespace qpid::tests
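The tests above pin down a small persisted state machine: load() of an empty directory yields EMPTY, dirty() yields DIRTY, clean(shutdownId) yields CLEAN and records the shutdown id, and a later dirty() clears it again. A hedged walkthrough, assuming the StoreStatus interface used above:

    #include "qpid/cluster/StoreStatus.h"
    #include "qpid/framing/Uuid.h"
    #include <string>

    namespace qpid { namespace tests {
    void exampleStoreLifecycle(const std::string& dir) {   // hypothetical helper
        using qpid::cluster::StoreStatus;
        using qpid::framing::Uuid;
        StoreStatus ss(dir);
        ss.load();                   // empty dir => STORE_STATE_EMPTY_STORE
        ss.setClusterId(Uuid(true));
        ss.dirty();                  // => STORE_STATE_DIRTY_STORE (persisted)
        ss.clean(Uuid(true));        // => STORE_STATE_CLEAN_STORE + shutdown id
        ss.dirty();                  // back to DIRTY; shutdown id is cleared
    }
    }} // namespace qpid::tests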
diff --git a/cpp/src/tests/SystemInfo.cpp b/cpp/src/tests/SystemInfo.cpp
index 12d8d3dba8..34f1ac408e 100644
--- a/cpp/src/tests/SystemInfo.cpp
+++ b/cpp/src/tests/SystemInfo.cpp
@@ -31,22 +31,6 @@ namespace tests {
QPID_AUTO_TEST_SUITE(SystemInfoTestSuite)
-QPID_AUTO_TEST_CASE(TestIsLocalHost) {
- // Test that local hostname and addresses are considered local
- Address a;
- BOOST_ASSERT(SystemInfo::getLocalHostname(a));
- BOOST_ASSERT(SystemInfo::isLocalHost(a.host));
- std::vector<Address> addrs;
- SystemInfo::getLocalIpAddresses(0, addrs);
- for (std::vector<Address>::iterator i = addrs.begin(); i != addrs.end(); ++i)
- BOOST_ASSERT(SystemInfo::isLocalHost(i->host));
- // Check some non-local addresses
- BOOST_ASSERT(!SystemInfo::isLocalHost("123.4.5.6"));
- BOOST_ASSERT(!SystemInfo::isLocalHost("nosuchhost"));
- BOOST_ASSERT(SystemInfo::isLocalHost("127.0.0.1"));
- BOOST_ASSERT(SystemInfo::isLocalHost("::1"));
-}
-
QPID_AUTO_TEST_SUITE_END()
}} // namespace qpid::tests
diff --git a/cpp/src/tests/Variant.cpp b/cpp/src/tests/Variant.cpp
index 40f1c0cf75..6d629bbb4a 100644
--- a/cpp/src/tests/Variant.cpp
+++ b/cpp/src/tests/Variant.cpp
@@ -135,6 +135,16 @@ QPID_AUTO_TEST_CASE(testConversionsFromString)
BOOST_CHECK_EQUAL(0, value.asInt16());
BOOST_CHECK_EQUAL(0u, value.asUint16());
+ value = "-Blah";
+ BOOST_CHECK_THROW(value.asUint16(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt16(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asUint32(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt32(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asUint64(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asInt64(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asFloat(), InvalidConversion);
+ BOOST_CHECK_THROW(value.asDouble(), InvalidConversion);
+
value = "-000";
BOOST_CHECK_EQUAL(0, value.asInt16());
BOOST_CHECK_EQUAL(0u, value.asUint16());
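The new checks above extend an existing rule: a string converts to a number only when the text after an optional sign is numeric; otherwise every numeric conversion throws InvalidConversion. A small illustration (a sketch assuming the qpid::types::Variant API):

    #include <qpid/types/Variant.h>
    #include <iostream>

    int main() {
        qpid::types::Variant v;
        v = "-000";                  // numeric: converts to 0
        std::cout << v.asInt16() << std::endl;
        v = "-Blah";                 // non-numeric: conversions throw
        try { v.asInt64(); }
        catch (const qpid::types::InvalidConversion&) {
            std::cout << "InvalidConversion, as expected" << std::endl;
        }
        return 0;
    }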
diff --git a/cpp/src/tests/acl.py b/cpp/src/tests/acl.py
index 102796cba6..48723bfde9 100755
--- a/cpp/src/tests/acl.py
+++ b/cpp/src/tests/acl.py
@@ -53,6 +53,9 @@ class ACLTests(TestBase010):
def port_u(self):
return int(self.defines["port-u"])
+ def port_q(self):
+ return int(self.defines["port-q"])
+
def get_session_by_port(self, user, passwd, byPort):
socket = connect(self.broker.host, byPort)
connection = Connection (sock=socket, username=user, password=passwd,
@@ -542,6 +545,123 @@ class ACLTests(TestBase010):
self.fail(result)
+ def test_illegal_filemaxsize_upper_limit_spec(self):
+ """
+ Test illegal file policy
+ """
+ #
+ # Use filemaxsizeupperlimit
+ #
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxsizeupperlimit=-1\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "-1 is not a valid value for 'filemaxsizeupperlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxsizeupperlimit=9223372036854775808\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "9223372036854775808 is not a valid value for 'filemaxsizeupperlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+
+
+ def test_illegal_filemaxcount_upper_limit_spec(self):
+ """
+ Test illegal file policy
+ """
+ #
+ # use maxfilecountupperlimit
+ #
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxcountupperlimit=-1\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "-1 is not a valid value for 'filemaxcountupperlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxcountupperlimit=9223372036854775808\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "9223372036854775808 is not a valid value for 'filemaxcountupperlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+
+ def test_illegal_filemaxsize_lower_limit_spec(self):
+ """
+ Test illegal file policy
+ """
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxsizelowerlimit=-1\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "-1 is not a valid value for 'filemaxsizelowerlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxsizelowerlimit=9223372036854775808\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "9223372036854775808 is not a valid value for 'filemaxsizelowerlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+
+
+ def test_illegal_filemaxcount_lower_limit_spec(self):
+ """
+ Test illegal file policy
+ """
+
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxcountlowerlimit=-1\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "-1 is not a valid value for 'filemaxcountlowerlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID create queue name=q2 filemaxcountlowerlimit=9223372036854775808\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ expected = "9223372036854775808 is not a valid value for 'filemaxcountlowerlimit', " \
+ "values should be between 0 and 9223372036854775807";
+ if (result.find(expected) == -1):
+ self.fail(result)
+
+
#=====================================
# ACL queue tests
#=====================================
@@ -831,6 +951,171 @@ class ACLTests(TestBase010):
self.fail("ACL should allow queue delete request for q4");
#=====================================
+ # ACL file tests
+ #=====================================
+
+ def test_file_allow_mode(self):
+ """
+ Test cases for file acl in allow mode
+ """
+ aclf = self.get_acl_file()
+ aclf.write('acl deny bob@QPID access queue name=qf1\n')
+ aclf.write('acl deny bob@QPID create queue name=qf1 durable=true\n')
+ aclf.write('acl deny bob@QPID create queue name=qf2 exclusive=true policytype=ring\n')
+ aclf.write('acl deny bob@QPID access queue name=qf3\n')
+ aclf.write('acl deny bob@QPID purge queue name=qf3\n')
+ aclf.write('acl deny bob@QPID delete queue name=qf4\n')
+ aclf.write('acl deny bob@QPID create queue name=qf5 filemaxsizeupperlimit=1000 filemaxcountupperlimit=100\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 200
+ queue_options["qpid.file_size"] = 500
+ session.queue_declare(queue="qf5", exclusive=True, arguments=queue_options)
+ self.fail("ACL should deny queue create request with name=qf5, qpid.file_size=500 and qpid.file_count=200");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 200
+ queue_options["qpid.file_size"] = 100
+ session.queue_declare(queue="qf2", exclusive=True, arguments=queue_options)
+ except qpid.session.SessionException, e:
+ if (403 == e.args[0].error_code):
+ self.fail("ACL should allow queue create request with name=qf2, qpid.file_size=100 and qpid.file_count=200 ");
+
+
+ def test_file_deny_mode(self):
+ """
+        Test cases for file acl in deny mode
+ """
+ aclf = self.get_acl_file()
+ aclf.write('acl allow bob@QPID access queue name=qfd1\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd1 durable=true\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd2 exclusive=true policytype=ring\n')
+ aclf.write('acl allow bob@QPID access queue name=qfd3\n')
+ aclf.write('acl allow bob@QPID purge queue name=qfd3\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd3\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd4\n')
+ aclf.write('acl allow bob@QPID delete queue name=qfd4\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd5 filemaxsizeupperlimit=1000 filemaxcountupperlimit=100\n')
+ aclf.write('acl allow bob@QPID create queue name=qfd6 filemaxsizelowerlimit=50 filemaxsizeupperlimit=100 filemaxcountlowerlimit=50 filemaxcountupperlimit=100\n')
+ aclf.write('acl allow anonymous all all\n')
+ aclf.write('acl deny all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
+ session = self.get_session('bob','bob')
+
+ try:
+ session.queue_declare(queue="qfd1", durable=True)
+ except qpid.session.SessionException, e:
+ if (403 == e.args[0].error_code):
+ self.fail("ACL should allow queue create request with name=qfd1 durable=true");
+
+ try:
+ session.queue_declare(queue="qfd1", durable=True, passive=True)
+ except qpid.session.SessionException, e:
+ if (403 == e.args[0].error_code):
+ self.fail("ACL should allow queue passive declare request with name=qfd1 durable=true passive=true");
+
+ try:
+ session.queue_declare(queue="qfd1", durable=False, passive=False)
+ self.fail("ACL should deny queue create request with name=qfd1 durable=true passive=false");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ session.queue_declare(queue="qfd2", exclusive=False)
+ self.fail("ACL should deny queue create request with name=qfd2 exclusive=false");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 200
+ queue_options["qpid.file_size"] = 500
+ session.queue_declare(queue="qfd5", arguments=queue_options)
+ self.fail("ACL should deny queue create request with name=qfd5 filemaxsizeupperlimit=500 filemaxcountupperlimit=200");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 100
+ queue_options["qpid.file_size"] = 500
+ session.queue_declare(queue="qfd5", arguments=queue_options)
+ except qpid.session.SessionException, e:
+ if (403 == e.args[0].error_code):
+ self.fail("ACL should allow queue create request with name=qfd5 filemaxsizeupperlimit=500 filemaxcountupperlimit=200");
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 49
+ queue_options["qpid.file_size"] = 100
+ session.queue_declare(queue="qfd6", arguments=queue_options)
+ self.fail("ACL should deny queue create request with name=qfd6 filemaxsizeupperlimit=100 filemaxcountupperlimit=49");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 101
+ queue_options["qpid.file_size"] = 100
+ session.queue_declare(queue="qfd6", arguments=queue_options)
+ self.fail("ACL should allow queue create request with name=qfd6 filemaxsizeupperlimit=100 filemaxcountupperlimit=101");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 100
+ queue_options["qpid.file_size"] = 49
+ session.queue_declare(queue="qfd6", arguments=queue_options)
+ self.fail("ACL should deny queue create request with name=qfd6 filemaxsizeupperlimit=49 filemaxcountupperlimit=100");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 100
+ queue_options["qpid.file_size"] =101
+ session.queue_declare(queue="qfd6", arguments=queue_options)
+ self.fail("ACL should deny queue create request with name=qfd6 filemaxsizeupperlimit=101 filemaxcountupperlimit=100");
+ except qpid.session.SessionException, e:
+ self.assertEqual(403,e.args[0].error_code)
+ session = self.get_session('bob','bob')
+
+ try:
+ queue_options = {}
+ queue_options["qpid.file_count"] = 50
+ queue_options["qpid.file_size"] = 50
+ session.queue_declare(queue="qfd6", arguments=queue_options)
+ except qpid.session.SessionException, e:
+ if (403 == e.args[0].error_code):
+ self.fail("ACL should allow queue create request with name=qfd6 filemaxsizeupperlimit=50 filemaxcountupperlimit=50");
+
+
+ #=====================================
# ACL exchange tests
#=====================================
@@ -1538,10 +1823,10 @@ class ACLTests(TestBase010):
#=====================================
- # QMF Topic Exchange tests
+    # Routing key lookup using Topic Exchange tests
#=====================================
- def test_qmf_topic_exchange_tests(self):
+ def test_topic_exchange_publish_tests(self):
"""
Test using QMF method hooks into ACL logic
"""
@@ -1655,40 +1940,367 @@ class ACLTests(TestBase010):
self.LookupPublish("dev@QPID", "X", "a.M.N", "allow-log")
self.LookupPublish("dev@QPID", "X", "a.M.p.qq.N", "allow-log")
+ def test_topic_exchange_other_tests(self):
+ """
+ Test using QMF method hooks into ACL logic
+ """
+ action_list = ['access','bind','unbind']
+
+ aclf = self.get_acl_file()
+ aclf.write('# begin hack alert: allow anonymous to access the lookup debug functions\n')
+ aclf.write('acl allow-log anonymous create queue\n')
+ aclf.write('acl allow-log anonymous all exchange name=qmf.*\n')
+ aclf.write('acl allow-log anonymous all exchange name=amq.direct\n')
+ aclf.write('acl allow-log anonymous all exchange name=qpid.management\n')
+ aclf.write('acl allow-log anonymous access method name=*\n')
+ aclf.write('# end hack alert\n')
+ for action in action_list:
+ aclf.write('acl allow-log uPlain1@COMPANY ' + action + ' exchange name=X routingkey=ab.cd.e\n')
+ aclf.write('acl allow-log uPlain2@COMPANY ' + action + ' exchange name=X routingkey=.\n')
+ aclf.write('acl allow-log uStar1@COMPANY ' + action + ' exchange name=X routingkey=a.*.b\n')
+ aclf.write('acl allow-log uStar2@COMPANY ' + action + ' exchange name=X routingkey=*.x\n')
+ aclf.write('acl allow-log uStar3@COMPANY ' + action + ' exchange name=X routingkey=x.x.*\n')
+ aclf.write('acl allow-log uHash1@COMPANY ' + action + ' exchange name=X routingkey=a.#.b\n')
+ aclf.write('acl allow-log uHash2@COMPANY ' + action + ' exchange name=X routingkey=a.#\n')
+ aclf.write('acl allow-log uHash3@COMPANY ' + action + ' exchange name=X routingkey=#.a\n')
+ aclf.write('acl allow-log uHash4@COMPANY ' + action + ' exchange name=X routingkey=a.#.b.#.c\n')
+ aclf.write('acl allow-log uMixed1@COMPANY ' + action + ' exchange name=X routingkey=*.x.#.y\n')
+ aclf.write('acl allow-log uMixed2@COMPANY ' + action + ' exchange name=X routingkey=a.#.b.*\n')
+ aclf.write('acl allow-log uMixed3@COMPANY ' + action + ' exchange name=X routingkey=*.*.*.#\n')
+
+ aclf.write('acl allow-log all ' + action + ' exchange name=X routingkey=MN.OP.Q\n')
+ aclf.write('acl allow-log all ' + action + ' exchange name=X routingkey=M.*.N\n')
+ aclf.write('acl allow-log all ' + action + ' exchange name=X routingkey=M.#.N\n')
+ aclf.write('acl allow-log all ' + action + ' exchange name=X routingkey=*.M.#.N\n')
+
+ aclf.write('acl deny-log all all\n')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
+ for action in action_list:
+ # aclKey: "ab.cd.e"
+            self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"ab.cd.e"}, "allow-log")
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"abx.cd.e"}, "deny-log")
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"ab.cd"}, "deny-log")
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"ab.cd..e."}, "deny-log")
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"ab.cd.e."}, "deny-log")
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":".ab.cd.e"}, "deny-log")
+ # aclKey: "."
+ self.Lookup("uPlain2@COMPANY", action, "exchange", "X", {"routingkey":"."}, "allow-log")
+
+ # aclKey: "a.*.b"
+ self.Lookup("uStar1@COMPANY", action, "exchange", "X", {"routingkey":"a.xx.b"}, "allow-log")
+ self.Lookup("uStar1@COMPANY", action, "exchange", "X", {"routingkey":"a.b"}, "deny-log")
+ # aclKey: "*.x"
+ self.Lookup("uStar2@COMPANY", action, "exchange", "X", {"routingkey":"y.x"}, "allow-log")
+ self.Lookup("uStar2@COMPANY", action, "exchange", "X", {"routingkey":".x"}, "allow-log")
+ self.Lookup("uStar2@COMPANY", action, "exchange", "X", {"routingkey":"x"}, "deny-log")
+ # aclKey: "x.x.*"
+ self.Lookup("uStar3@COMPANY", action, "exchange", "X", {"routingkey":"x.x.y"}, "allow-log")
+ self.Lookup("uStar3@COMPANY", action, "exchange", "X", {"routingkey":"x.x."}, "allow-log")
+ self.Lookup("uStar3@COMPANY", action, "exchange", "X", {"routingkey":"x.x"}, "deny-log")
+ self.Lookup("uStar3@COMPANY", action, "exchange", "X", {"routingkey":"q.x.y"}, "deny-log")
+
+ # aclKey: "a.#.b"
+ self.Lookup("uHash1@COMPANY", action, "exchange", "X", {"routingkey":"a.b"}, "allow-log")
+ self.Lookup("uHash1@COMPANY", action, "exchange", "X", {"routingkey":"a.x.b"}, "allow-log")
+ self.Lookup("uHash1@COMPANY", action, "exchange", "X", {"routingkey":"a..x.y.zz.b"}, "allow-log")
+ self.Lookup("uHash1@COMPANY", action, "exchange", "X", {"routingkey":"a.b."}, "deny-log")
+ self.Lookup("uHash1@COMPANY", action, "exchange", "X", {"routingkey":"q.x.b"}, "deny-log")
+
+ # aclKey: "a.#"
+ self.Lookup("uHash2@COMPANY", action, "exchange", "X", {"routingkey":"a"}, "allow-log")
+ self.Lookup("uHash2@COMPANY", action, "exchange", "X", {"routingkey":"a.b"}, "allow-log")
+ self.Lookup("uHash2@COMPANY", action, "exchange", "X", {"routingkey":"a.b.c"}, "allow-log")
+
+ # aclKey: "#.a"
+ self.Lookup("uHash3@COMPANY", action, "exchange", "X", {"routingkey":"a"}, "allow-log")
+ self.Lookup("uHash3@COMPANY", action, "exchange", "X", {"routingkey":"x.y.a"}, "allow-log")
+
+ # aclKey: "a.#.b.#.c"
+ self.Lookup("uHash4@COMPANY", action, "exchange", "X", {"routingkey":"a.b.c"}, "allow-log")
+ self.Lookup("uHash4@COMPANY", action, "exchange", "X", {"routingkey":"a.x.b.y.c"}, "allow-log")
+ self.Lookup("uHash4@COMPANY", action, "exchange", "X", {"routingkey":"a.x.x.b.y.y.c"}, "allow-log")
+
+ # aclKey: "*.x.#.y"
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"a.x.y"}, "allow-log")
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"a.x.p.qq.y"}, "allow-log")
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"a.a.x.y"}, "deny-log")
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"aa.x.b.c"}, "deny-log")
+
+ # aclKey: "a.#.b.*"
+ self.Lookup("uMixed2@COMPANY", action, "exchange", "X", {"routingkey":"a.b.x"}, "allow-log")
+ self.Lookup("uMixed2@COMPANY", action, "exchange", "X", {"routingkey":"a.x.x.x.b.x"}, "allow-log")
+
+ # aclKey: "*.*.*.#"
+ self.Lookup("uMixed3@COMPANY", action, "exchange", "X", {"routingkey":"x.y.z"}, "allow-log")
+ self.Lookup("uMixed3@COMPANY", action, "exchange", "X", {"routingkey":"x.y.z.a.b.c"}, "allow-log")
+ self.Lookup("uMixed3@COMPANY", action, "exchange", "X", {"routingkey":"x.y"}, "deny-log")
+ self.Lookup("uMixed3@COMPANY", action, "exchange", "X", {"routingkey":"x"}, "deny-log")
+
+            # Repeat the lookups for keys that were granted to every user via the wildcard 'all' rules
+ self.Lookup("uPlain1@COMPANY", action, "exchange", "X", {"routingkey":"MN.OP.Q"}, "allow-log")
+ self.Lookup("uStar1@COMPANY" , action, "exchange", "X", {"routingkey":"M.xx.N"}, "allow-log")
+ self.Lookup("uHash1@COMPANY" , action, "exchange", "X", {"routingkey":"M.N"}, "allow-log")
+ self.Lookup("uHash1@COMPANY" , action, "exchange", "X", {"routingkey":"M.x.N"}, "allow-log")
+ self.Lookup("uHash1@COMPANY" , action, "exchange", "X", {"routingkey":"M..x.y.zz.N"}, "allow-log")
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"a.M.N"}, "allow-log")
+ self.Lookup("uMixed1@COMPANY", action, "exchange", "X", {"routingkey":"a.M.p.qq.N"}, "allow-log")
+
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "MN.OP.Q"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "M.xx.N"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "M.N"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "M.x.N"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "M..x.y.zz.N"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "a.M.N"}, "allow-log")
+ self.Lookup("dev@QPID", action, "exchange", "X", {"routingkey": "a.M.p.qq.N"}, "allow-log")
+
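+    # The lookups above follow AMQP topic-key matching: '*' matches exactly
+    # one dot-separated field, '#' matches zero or more fields. A rough
+    # sketch of that recursion (an illustration, not the broker's code):
+    #
+    #   def topic_match(pattern, key):
+    #       if not pattern:
+    #           return not key
+    #       head, rest = pattern[0], pattern[1:]
+    #       if head == '#':  # try consuming 0..len(key) fields
+    #           return any(topic_match(rest, key[i:])
+    #                      for i in range(len(key) + 1))
+    #       # '*' or a literal field must consume exactly one field
+    #       return (bool(key) and (head == '*' or head == key[0])
+    #               and topic_match(rest, key[1:]))
+    #
+    #   topic_match('a.#.b'.split('.'), 'a.x.b'.split('.'))  # -> True
+    #   topic_match('a.*.b'.split('.'), 'a.b'.split('.'))    # -> False
+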
#=====================================
# Connection limits
#=====================================
- def test_connection_limits(self):
+    def test_connection_limits_cli_sets_all(self):
+        """
+        Test connection limits set on the broker command line (port_u);
+        no quota rules are loaded from an ACL file in this case.
+        """
+
+ try:
+ sessiona1 = self.get_session_by_port('alice','alice', self.port_u())
+ sessiona2 = self.get_session_by_port('alice','alice', self.port_u())
+ except Exception, e:
+ self.fail("Could not create two connections for user alice: " + str(e))
+
+ # Third session should fail
+ try:
+ sessiona3 = self.get_session_by_port('alice','alice', self.port_u())
+ self.fail("Should not be able to create third connection for user alice")
+ except Exception, e:
+ result = None
+
+
+ def test_connection_limits_by_named_user(self):
"""
Test ACL control connection limits
"""
+ aclf = self.get_acl_file()
+ aclf.write('quota connections 2 alice bob\n')
+ aclf.write('quota connections 0 evildude\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
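+        # Effective quotas from the file above: alice and bob get 2 connections
+        # each, evildude gets 0 (always denied), and users with no named quota
+        # (charlie below) are denied outright since no 'all' quota is defined.
+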
# By username should be able to connect twice per user
try:
- sessiona1 = self.get_session_by_port('alice','alice', self.port_u())
- sessiona2 = self.get_session_by_port('alice','alice', self.port_u())
+ sessiona1 = self.get_session('alice','alice')
+ sessiona2 = self.get_session('alice','alice')
except Exception, e:
self.fail("Could not create two connections for user alice: " + str(e))
# Third session should fail
try:
- sessiona3 = self.get_session_by_port('alice','alice', self.port_u())
+ sessiona3 = self.get_session('alice','alice')
+ self.fail("Should not be able to create third connection for user alice")
+ except Exception, e:
+ result = None
+
+ # Disconnecting should allow another session.
+ sessiona1.close()
+ try:
+ sessiona3 = self.get_session('alice','alice')
+ except Exception, e:
+ self.fail("Could not recreate second connection for user alice: " + str(e))
+
+ # By username should be able to connect twice per user
+ try:
+ sessionb1 = self.get_session('bob','bob')
+ sessionb2 = self.get_session('bob','bob')
+ except Exception, e:
+ self.fail("Could not create two connections for user bob: " + str(e))
+
+ # Third session should fail
+ try:
+ sessionb3 = self.get_session('bob','bob')
+ self.fail("Should not be able to create third connection for user bob")
+ except Exception, e:
+ result = None
+
+
+ # User with quota of 0 is denied
+ try:
+ sessione1 = self.get_session('evildude','evildude')
+ self.fail("Should not be able to create a connection for user evildude")
+ except Exception, e:
+ result = None
+
+
+ # User not named in quotas is denied
+ try:
+ sessionc1 = self.get_session('charlie','charlie')
+ self.fail("Should not be able to create a connection for user charlie")
+ except Exception, e:
+ result = None
+
+ # Clean up the sessions
+ sessiona2.close()
+ sessiona3.close()
+ sessionb1.close()
+ sessionb2.close()
+
+
+
+ def test_connection_limits_by_unnamed_all(self):
+        """
+        Test ACL connection-limit quotas with a catch-all 'all' quota.
+        """
+ aclf = self.get_acl_file()
+ aclf.write('quota connections 2 alice bob\n')
+ aclf.write('quota connections 1 all\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
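+        # Effective quotas from the file above: alice and bob get 2 each;
+        # every other user (charlie below) falls under the 'all' quota of 1.
+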
+ # By username should be able to connect twice per user
+ try:
+ sessiona1 = self.get_session('alice','alice')
+ sessiona2 = self.get_session('alice','alice')
+ except Exception, e:
+ self.fail("Could not create two connections for user alice: " + str(e))
+
+ # Third session should fail
+ try:
+ sessiona3 = self.get_session('alice','alice')
self.fail("Should not be able to create third connection for user alice")
except Exception, e:
result = None
+ # By username should be able to connect twice per user
try:
- sessionb1 = self.get_session_by_port('bob','bob', self.port_u())
- sessionb2 = self.get_session_by_port('bob','bob', self.port_u())
+ sessionb1 = self.get_session('bob','bob')
+ sessionb2 = self.get_session('bob','bob')
except Exception, e:
self.fail("Could not create two connections for user bob: " + str(e))
+ # Third session should fail
try:
- sessionb3 = self.get_session_by_port('bob','bob', self.port_u())
+ sessionb3 = self.get_session('bob','bob')
self.fail("Should not be able to create third connection for user bob")
except Exception, e:
result = None
+ # User not named in quotas gets 'all' quota
+ try:
+ sessionc1 = self.get_session('charlie','charlie')
+ except Exception, e:
+ self.fail("Could not create one connection for user charlie: " + str(e))
+
+ # Next session should fail
+ try:
+ sessionc2 = self.get_session('charlie','charlie')
+ self.fail("Should not be able to create second connection for user charlie")
+ except Exception, e:
+ result = None
+
+ # Clean up the sessions
+ sessiona1.close()
+ sessiona2.close()
+ sessionb1.close()
+ sessionb2.close()
+ sessionc1.close()
+
+
+ def test_connection_limits_by_group(self):
+        """
+        Test ACL connection-limit quotas applied to groups.
+        """
+ aclf = self.get_acl_file()
+ aclf.write('group stooges moe@QPID larry@QPID curly@QPID\n')
+ aclf.write('quota connections 2 alice bob\n')
+ aclf.write('quota connections 2 stooges charlie\n')
+        aclf.write('# users and groups may be overwritten; the last value wins\n')
+ aclf.write('quota connections 3 bob stooges\n')
+ aclf.write('acl allow all all')
+ aclf.close()
+
+ result = self.reload_acl()
+ if (result):
+ self.fail(result)
+
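+        # Effective quotas (the last value written wins): alice -> 2, bob -> 3,
+        # the stooges group (moe/larry/curly) -> 3, charlie -> 2, and unnamed
+        # users (shemp) are denied.
+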
+ # Alice gets 2
+ try:
+ sessiona1 = self.get_session('alice','alice')
+ sessiona2 = self.get_session('alice','alice')
+ except Exception, e:
+ self.fail("Could not create two connections for user alice: " + str(e))
+
+ # Third session should fail
+ try:
+ sessiona3 = self.get_session('alice','alice')
+ self.fail("Should not be able to create third connection for user alice")
+ except Exception, e:
+ result = None
+
+ # Bob gets 3
+ try:
+ sessionb1 = self.get_session('bob','bob')
+ sessionb2 = self.get_session('bob','bob')
+ sessionb3 = self.get_session('bob','bob')
+ except Exception, e:
+ self.fail("Could not create three connections for user bob: " + str(e))
+
+ # Fourth session should fail
+ try:
+ sessionb4 = self.get_session('bob','bob')
+ self.fail("Should not be able to create fourth connection for user bob")
+ except Exception, e:
+ result = None
+
+ # Moe gets 3
+ try:
+ sessionm1 = self.get_session('moe','moe')
+ sessionm2 = self.get_session('moe','moe')
+ sessionm3 = self.get_session('moe','moe')
+ except Exception, e:
+ self.fail("Could not create three connections for user moe: " + str(e))
+
+ # Fourth session should fail
+ try:
+            sessionm4 = self.get_session('moe','moe')
+            self.fail("Should not be able to create fourth connection for user moe")
+ except Exception, e:
+ result = None
+
+ # User not named in quotas is denied
+ try:
+ sessions1 = self.get_session('shemp','shemp')
+ self.fail("Should not be able to create a connection for user shemp")
+ except Exception, e:
+ result = None
+
+ # Clean up the sessions
+ sessiona1.close()
+ sessiona2.close()
+ sessionb1.close()
+ sessionb2.close()
+ sessionb3.close()
+ sessionm1.close()
+ sessionm2.close()
+ sessionm3.close()
+
+
+ def test_connection_limits_by_ip_address(self):
+ """
+        Test ACL connection limits by IP address.
+ """
# By IP address should be able to connect twice per client address
try:
sessionb1 = self.get_session_by_port('alice','alice', self.port_i())
@@ -1703,6 +2315,8 @@ class ACLTests(TestBase010):
except Exception, e:
result = None
+ sessionb1.close()
+ sessionb2.close()
#=====================================
# User name substitution
@@ -2243,6 +2857,62 @@ class ACLTests(TestBase010):
self.LookupPublish("joe@QPID", "QPID-work", "QPID", "allow")
self.LookupPublish("joe@QPID", "QPID-work2", "QPID", "allow")
+ #=====================================
+ # Queue per-user quota
+ #=====================================
+
+ def test_queue_per_user_quota(self):
+ """
+        Test ACL queue-count limits.
+        The broker at port_q is configured with a per-user queue limit of 2.
+ """
+ # bob should be able to create two queues
+ session = self.get_session_by_port('bob','bob', self.port_q())
+
+ try:
+ session.queue_declare(queue="queue1")
+ session.queue_declare(queue="queue2")
+ except qpid.session.SessionException, e:
+            self.fail("Error during queue create request")
+
+ # third queue should fail
+ try:
+ session.queue_declare(queue="queue3")
+ self.fail("Should not be able to create third queue")
+ except Exception, e:
+ result = None
+ session = self.get_session_by_port('bob','bob', self.port_q())
+
+ # alice should be able to create two queues
+ session2 = self.get_session_by_port('alice','alice', self.port_q())
+
+ try:
+ session2.queue_declare(queue="queuea1")
+ session2.queue_declare(queue="queuea2")
+ except qpid.session.SessionException, e:
+            self.fail("Error during queue create request")
+
+ # third queue should fail
+ try:
+ session2.queue_declare(queue="queuea3")
+ self.fail("Should not be able to create third queue")
+ except Exception, e:
+ result = None
+ session2 = self.get_session_by_port('alice','alice', self.port_q())
+
+ # bob should be able to delete a queue and create another
+ try:
+ session.queue_delete(queue="queue1")
+ session.queue_declare(queue="queue3")
+ except qpid.session.SessionException, e:
+            self.fail("Error during queue delete/create request")
+
+ # alice should be able to delete a queue and create another
+ try:
+ session2.queue_delete(queue="queuea1")
+ session2.queue_declare(queue="queuea3")
+ except qpid.session.SessionException, e:
+            self.fail("Error during queue delete/create request")
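+
+    # Note: the delete-then-declare sequences above rely on queue deletion
+    # freeing the per-user quota immediately, letting a third queue be created.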
class BrokerAdmin:
def __init__(self, broker, username=None, password=None):
diff --git a/cpp/src/tests/benchmark b/cpp/src/tests/benchmark
deleted file mode 100755
index c075837847..0000000000
--- a/cpp/src/tests/benchmark
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-# A basic "benchmark" to generate performacne samples of throughput
-# and latency against a single cluster member while they are replicating.
-#
-# Must be run in the qpid src/tests build directory.
-#
-
-usage() {
-cat <<EOF
-Usage: $0 [options] -- client hosts --- broker hosts
-Read the script for options.
-EOF
-}
-# Defaults
-TESTDIR=${TESTDIR:-$PWD} # Absolute path to test exes on all hosts.
-SCRIPTDIR=${SCRIPTDIR:-`dirname $0`} # Path to local test scripts directory.
-SAMPLES=10 # Runs of each test.
-COUNT=${COUNT:-10000} # Count for pub/sub tests.
-SIZE=${SIZE:-600} # Size of messages
-ECHO=${ECHO:-1000} # Count for echo test.
-NSUBS=${NSUBS:-4}
-NPUBS=${NPUBS:-4}
-
-collect() { eval $COLLECT=\""\$$COLLECT $*"\"; }
-COLLECT=ARGS
-while test $# -gt 0; do
- case $1 in
- --testdir) TESTDIR=$2 ; shift 2 ;;
- --samples) SAMPLES=$2 ; shift 2 ;;
- --count) COUNT=$2 ; shift 2 ;;
- --echos) ECHO=$2 ; shift 2 ;;
- --size) SIZE=$2 ; shift 2 ;;
- --nsubs) NSUBS=$2 ; shift 2 ;;
- --npubs) NPUBS=$2 ; shift 2 ;;
- --) COLLECT=CLIENTARG; shift ;;
- ---) COLLECT=BROKERARG; shift;;
- *) collect $1; shift ;;
- esac
-done
-
-CLIENTS=${CLIENTARG:-$CLIENTS}
-BROKERS=${BROKERARG:-$BROKERS}
-test -z "$CLIENTS" && { echo "Must specify at least one client host."; exit 1; }
-test -z "$BROKERS" && { echo "Must specify at least one broker host."; exit 1; }
-
-export TESTDIR # For perfdist
-CLIENTS=($CLIENTS) # Convert to array
-BROKERS=($BROKERS)
-trap "rm -f $FILES" EXIT
-
-dosamples() {
- FILE=`mktemp`
- FILES="$FILES $FILE"
- TABS=`echo "$HEADING" | sed s'/[^ ]//g'`
- {
- echo "\"$*\"$TABS"
- echo "$HEADING"
- for (( i=0; i<$SAMPLES; ++i)) ; do echo "`$*`" ; done
- echo
- } | tee $FILE
-}
-
-HEADING="pub sub total Mb"
-dosamples $SCRIPTDIR/perfdist --size $SIZE --count $COUNT --nsubs $NSUBS --npubs $NPUBS -s -- ${CLIENTS[*]} --- ${BROKERS[*]}
-HEADING="pub"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/publish --routing-key perftest0 --size $SIZE --count $COUNT -s -b ${BROKERS[0]}
-HEADING="sub"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/consume --queue perftest0 -s --count $COUNT -b ${BROKERS[0]}
-HEADING="min max avg"
-dosamples ssh -A ${CLIENTS[0]} $TESTDIR/echotest --count $ECHO -s -b ${BROKERS[0]}
-
-echo
-echo "Tab separated spreadsheet (also saved as benchmark.tab):"
-echo
-
-echo "benchmark -- ${CLIENTS[*]} --- ${BROKERS[*]} " | tee benchmark.tab
-paste $FILES | tee -a benchmark.tab
diff --git a/cpp/src/tests/brokertest.py b/cpp/src/tests/brokertest.py
index aea4460e5a..70c145a51b 100644
--- a/cpp/src/tests/brokertest.py
+++ b/cpp/src/tests/brokertest.py
@@ -17,8 +17,7 @@
# under the License.
#
-# Support library for tests that start multiple brokers, e.g. cluster
-# or federation
+# Support library for tests that start multiple brokers, e.g. HA or federation
import os, signal, string, tempfile, subprocess, socket, threading, time, imp, re
import qpid, traceback, signal
@@ -203,8 +202,10 @@ class Popen(subprocess.Popen):
self.wait()
def kill(self):
- try:
- subprocess.Popen.kill(self)
+        # Set expect to EXPECT_UNKNOWN: with EXPECT_EXIT_FAIL there is a race
+        # if the process exits normally concurrently with the call to kill.
+ self.expect = EXPECT_UNKNOWN
+ try: subprocess.Popen.kill(self)
except AttributeError: # No terminate method
try:
os.kill( self.pid , signal.SIGKILL)
@@ -380,8 +381,7 @@ class Broker(Popen):
if not retry(self.log_ready, timeout=timeout):
raise Exception(
"Timed out waiting for broker %s%s"%(self.name, error_line(self.log,5)))
- # Create a connection and a session. For a cluster broker this will
- # return after cluster init has finished.
+ # Create a connection and a session.
try:
c = self.connect(**kwargs)
try: c.session()
@@ -389,54 +389,6 @@ class Broker(Popen):
except Exception,e: raise RethrownException(
"Broker %s not responding: (%s)%s"%(self.name,e,error_line(self.log, 5)))
- def store_state(self):
- f = open(os.path.join(self.datadir, "cluster", "store.status"))
- try: uuids = f.readlines()
- finally: f.close()
- null_uuid="00000000-0000-0000-0000-000000000000\n"
- if len(uuids) < 2: return "unknown" # we looked while the file was being updated.
- if uuids[0] == null_uuid: return "empty"
- if uuids[1] == null_uuid: return "dirty"
- return "clean"
-
-class Cluster:
- """A cluster of brokers in a test."""
- # Client connection options for use in failover tests.
- CONNECTION_OPTIONS = "reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
-
- _cluster_count = 0
-
- def __init__(self, test, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
- self.test = test
- self._brokers=[]
- self.name = "cluster%d" % Cluster._cluster_count
- Cluster._cluster_count += 1
- # Use unique cluster name
- self.args = copy(args)
- self.args += [ "--cluster-name", "%s-%s:%d" % (self.name, socket.gethostname(), os.getpid()) ]
- self.args += [ "--log-enable=info+", "--log-enable=debug+:cluster"]
- assert BrokerTest.cluster_lib, "Cannot locate cluster plug-in"
- self.args += [ "--load-module", BrokerTest.cluster_lib ]
- self.start_n(count, expect=expect, wait=wait, show_cmd=show_cmd)
-
- def start(self, name=None, expect=EXPECT_RUNNING, wait=True, args=[], port=0, show_cmd=False):
- """Add a broker to the cluster. Returns the index of the new broker."""
- if not name: name="%s-%d" % (self.name, len(self._brokers))
- self._brokers.append(self.test.broker(self.args+args, name, expect, wait, port=port, show_cmd=show_cmd))
- return self._brokers[-1]
-
- def ready(self):
- for b in self: b.ready()
-
- def start_n(self, count, expect=EXPECT_RUNNING, wait=True, args=[], show_cmd=False):
- for i in range(count): self.start(expect=expect, wait=wait, args=args, show_cmd=show_cmd)
-
- # Behave like a list of brokers.
- def __len__(self): return len(self._brokers)
- def __getitem__(self,index): return self._brokers[index]
- def __iter__(self): return self._brokers.__iter__()
-
-
def browse(session, queue, timeout=0, transform=lambda m: m.content):
"""Return a list with the contents of each message on queue."""
r = session.receiver("%s;{mode:browse}"%(queue))
@@ -473,7 +425,6 @@ class BrokerTest(TestCase):
# Environment settings.
qpidd_exec = os.path.abspath(checkenv("QPIDD_EXEC"))
- cluster_lib = os.getenv("CLUSTER_LIB")
ha_lib = os.getenv("HA_LIB")
xml_lib = os.getenv("XML_LIB")
qpid_config_exec = os.getenv("QPID_CONFIG_EXEC")
@@ -525,11 +476,6 @@ class BrokerTest(TestCase):
raise RethrownException("Failed to start broker %s(%s): %s" % (b.name, b.log, e))
return b
- def cluster(self, count=0, args=[], expect=EXPECT_RUNNING, wait=True, show_cmd=False):
- """Create and return a cluster ready for use"""
- cluster = Cluster(self, count, args, expect=expect, wait=wait, show_cmd=show_cmd)
- return cluster
-
def browse(self, *args, **kwargs): browse(*args, **kwargs)
def assert_browse(self, *args, **kwargs): assert_browse(*args, **kwargs)
def assert_browse_retry(self, *args, **kwargs): assert_browse_retry(*args, **kwargs)
@@ -558,14 +504,17 @@ class StoppableThread(Thread):
join(self)
if self.error: raise self.error
+# Options for a client that wants to reconnect automatically.
+RECONNECT_OPTIONS="reconnect:true,reconnect-timeout:10,reconnect-urls-replace:true"
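+# Usage sketch: NumberedSender/NumberedReceiver below pass these options to
+# qpid-send/qpid-receive as --connection-options "{%s}" % connection_options,
+# so test clients reconnect transparently when a broker is killed.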
+
class NumberedSender(Thread):
"""
Thread to run a sender client and send numbered messages until stopped.
"""
def __init__(self, broker, max_depth=None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS,
- failover_updates=True, url=None):
+ connection_options=RECONNECT_OPTIONS,
+ failover_updates=True, url=None, args=[]):
"""
max_depth: enable flow control, ensure sent - received <= max_depth.
Requires self.notify_received(n) to be called each time messages are received.
@@ -576,7 +525,7 @@ class NumberedSender(Thread):
"--address", "%s;{create:always}"%queue,
"--connection-options", "{%s}"%(connection_options),
"--content-stdin"
- ]
+ ] + args
if failover_updates: cmd += ["--failover-updates"]
self.sender = broker.test.popen(
cmd, expect=EXPECT_RUNNING, stdin=PIPE)
@@ -627,7 +576,7 @@ class NumberedReceiver(Thread):
sequentially numbered messages.
"""
def __init__(self, broker, sender=None, queue="test-queue",
- connection_options=Cluster.CONNECTION_OPTIONS,
+ connection_options=RECONNECT_OPTIONS,
failover_updates=True, url=None):
"""
sender: enable flow control. Call sender.received(n) for each message received.
@@ -676,31 +625,6 @@ class NumberedReceiver(Thread):
join(self)
self.check()
-class ErrorGenerator(StoppableThread):
- """
- Thread that continuously generates errors by trying to consume from
- a non-existent queue. For cluster regression tests, error handling
- caused issues in the past.
- """
-
- def __init__(self, broker):
- StoppableThread.__init__(self)
- self.broker=broker
- broker.test.cleanup_stop(self)
- self.start()
-
- def run(self):
- c = self.broker.connect_old()
- try:
- while not self.stopped:
- try:
- c.session(str(qpid.datatypes.uuid4())).message_subscribe(
- queue="non-existent-queue")
- assert(False)
- except qpid.session.SessionException: pass
- time.sleep(0.01)
- except: pass # Normal if broker is killed.
-
def import_script(path):
"""
Import executable script at path as a module.
diff --git a/cpp/src/tests/cluster_authentication_soak.cpp b/cpp/src/tests/cluster_authentication_soak.cpp
deleted file mode 100644
index a3271701c3..0000000000
--- a/cpp/src/tests/cluster_authentication_soak.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <signal.h>
-#include <fcntl.h>
-
-#include <sys/wait.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-
-#include <string>
-#include <iostream>
-#include <sstream>
-#include <vector>
-
-#include <boost/assign.hpp>
-
-#include "qpid/framing/Uuid.h"
-
-#include <ForkedBroker.h>
-#include <qpid/client/Connection.h>
-
-#include <sasl/sasl.h>
-
-#ifdef HAVE_CONFIG_H
-#include "config.h"
-#endif
-
-
-
-
-using namespace std;
-using boost::assign::list_of;
-using namespace qpid::framing;
-using namespace qpid::client;
-
-
-namespace qpid {
-namespace tests {
-
-vector<pid_t> brokerPids;
-
-typedef vector<ForkedBroker *> brokerVector;
-
-
-
-
-
-int runSilent = 1;
-int newbiePort = 0;
-
-
-void
-makeClusterName ( string & s ) {
- stringstream ss;
- ss << "authenticationSoakCluster_" << Uuid(true).str();
- s = ss.str();
-}
-
-
-
-void
-startBroker ( brokerVector & brokers , int brokerNumber, string const & clusterName ) {
- stringstream prefix, clusterArg;
- prefix << "soak-" << brokerNumber;
- clusterArg << "--cluster-name=" << clusterName;
-
- std::vector<std::string> argv;
-
- argv.push_back ("../qpidd");
- argv.push_back ("--no-module-dir");
- argv.push_back ("--load-module=../.libs/cluster.so");
- argv.push_back (clusterArg.str());
- argv.push_back ("--cluster-username=zig");
- argv.push_back ("--cluster-password=zig");
- argv.push_back ("--cluster-mechanism=PLAIN");
- argv.push_back ("--sasl-config=./sasl_config");
- argv.push_back ("--auth=yes");
- argv.push_back ("--mgmt-enable=yes");
- argv.push_back ("--log-prefix");
- argv.push_back (prefix.str());
- argv.push_back ("--log-to-file");
- argv.push_back (prefix.str()+".log");
- argv.push_back ("TMP_DATA_DIR");
-
- ForkedBroker * newbie = new ForkedBroker (argv);
- newbiePort = newbie->getPort();
- brokers.push_back ( newbie );
-}
-
-
-
-
-bool
-runPerftest ( bool hangTest ) {
- stringstream portSs;
- portSs << newbiePort;
- string portStr = portSs.str();
- char const * path = "./qpid-perftest";
-
- vector<char const *> argv;
- argv.push_back ( "./qpid-perftest" );
- argv.push_back ( "-p" );
- argv.push_back ( portStr.c_str() );
- argv.push_back ( "--username" );
- argv.push_back ( "zig" );
- argv.push_back ( "--password" );
- argv.push_back ( "zig" );
- argv.push_back ( "--mechanism" );
- argv.push_back ( "DIGEST-MD5" );
- argv.push_back ( "--count" );
- argv.push_back ( "20000" );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
-
- if ( ! pid ) {
- int i=open("/dev/null",O_RDWR);
- dup2 ( i, fileno(stdout) );
- dup2 ( i, fileno(stderr) );
-
- execv ( path, const_cast<char * const *>(&argv[0]) );
- // The exec failed: we are still in parent process.
- perror ( "error running qpid-perftest: " );
- return false;
- }
- else {
- if ( hangTest ) {
- if ( ! runSilent )
- cerr << "Pausing perftest " << pid << endl;
- kill ( pid, 19 );
- }
-
- struct timeval startTime,
- currentTime,
- duration;
-
- gettimeofday ( & startTime, 0 );
-
- while ( 1 ) {
- sleep ( 2 );
- int status;
- int returned_pid = waitpid ( pid, &status, WNOHANG );
- if ( returned_pid == pid ) {
- int exit_status = WEXITSTATUS(status);
- if ( exit_status ) {
- cerr << "qpid-perftest failed. exit_status was: " << exit_status << endl;
- return false;
- }
- else {
- return true; // qpid-perftest succeeded.
- }
- }
- else { // qpid-perftest has not yet completed.
- gettimeofday ( & currentTime, 0 );
- timersub ( & currentTime, & startTime, & duration );
- if ( duration.tv_sec > 60 ) {
- kill ( pid, 9 );
- cerr << "qpid-perftest pid " << pid << " hanging: killed.\n";
- return false;
- }
- }
- }
-
- }
-}
-
-
-
-bool
-allBrokersAreAlive ( brokerVector & brokers ) {
- for ( unsigned int i = 0; i < brokers.size(); ++ i )
- if ( ! brokers[i]->isRunning() )
- return false;
-
- return true;
-}
-
-
-
-
-
-void
-killAllBrokers ( brokerVector & brokers ) {
- for ( unsigned int i = 0; i < brokers.size(); ++ i ) {
- brokers[i]->kill ( 9 );
- }
-}
-
-
-
-
-void
-killOneBroker ( brokerVector & brokers ) {
- int doomedBroker = getpid() % brokers.size();
- cout << "Killing broker " << brokers[doomedBroker]->getPID() << endl;
- brokers[doomedBroker]->kill ( 9 );
- sleep ( 2 );
-}
-
-
-
-
-}} // namespace qpid::tests
-
-using namespace qpid::tests;
-
-
-
-/*
- * Please note that this test has self-test capability.
- * It is intended to detect
- * 1. perftest hangs.
- * 2. broker deaths
- * Both of these condtions can be forced when running manually
- * to ensure that the test really does detect them.
- * See command-line arguments 3 and 4.
- */
-int
-main ( int argc, char ** argv )
-{
- // I need the SASL_PATH_TYPE_CONFIG feature, which did not appear until SASL 2.1.22
-#if (SASL_VERSION_FULL < ((2<<16)|(1<<8)|22))
- cout << "Skipping SASL test, SASL version too low." << endl;
- return 0;
-#endif
-
- int n_iterations = argc > 1 ? atoi(argv[1]) : 1;
- runSilent = argc > 2 ? atoi(argv[2]) : 1; // default to silent
- int killBroker = argc > 3 ? atoi(argv[3]) : 0; // Force the kill of one broker.
- int hangTest = argc > 4 ? atoi(argv[4]) : 0; // Force the first perftest to hang.
- int n_brokers = 3;
- brokerVector brokers;
-
- srand ( getpid() );
- string clusterName;
- makeClusterName ( clusterName );
- for ( int i = 0; i < n_brokers; ++ i ) {
- startBroker ( brokers, i, clusterName );
- }
-
- sleep ( 3 );
-
- /* Run all qpid-perftest iterations, and only then check for brokers
- * still being up. If you just want a quick check for the failure
- * mode in which a single iteration would kill all brokers except
- * the client-connected one, just run it with the iterations arg
- * set to 1.
- */
- for ( int iteration = 0; iteration < n_iterations; ++ iteration ) {
- if ( ! runPerftest ( hangTest ) ) {
- if ( ! runSilent )
- cerr << "qpid-perftest " << iteration << " failed.\n";
- return 1;
- }
- if ( ! ( iteration % 10 ) ) {
- if ( ! runSilent )
- cerr << "qpid-perftest " << iteration << " complete. -------------- \n";
- }
- }
- if ( ! runSilent )
- cerr << "\nqpid-perftest " << n_iterations << " iterations complete. -------------- \n\n";
-
- /* If the command-line tells us to kill a broker, do
- * it now. Use this option to prove that this test
- * really can detect broker-deaths.
- */
- if ( killBroker ) {
- killOneBroker ( brokers );
- }
-
- if ( ! allBrokersAreAlive ( brokers ) ) {
- if ( ! runSilent )
- cerr << "not all brokers are alive.\n";
- killAllBrokers ( brokers );
- return 2;
- }
-
- killAllBrokers ( brokers );
- if ( ! runSilent )
- cout << "success.\n";
-
- return 0;
-}
-
-
-
diff --git a/cpp/src/tests/cluster_failover b/cpp/src/tests/cluster_failover
deleted file mode 100755
index 43170c731a..0000000000
--- a/cpp/src/tests/cluster_failover
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# A simple manual failover test, sends a stream of numbered messages.
-# You can kill the connected broker and verify that the clients reconnect
-# and no messages are lost.
-
-URL=$1
-test -n "$URL" || { echo Usage: $0 URL ; exit 1; }
-SEND=$(mktemp /tmp/send.XXXXXXXXXX)
-RECV=$(mktemp /tmp/recv.XXXXXXXXXX)
-echo $SEND $RECV
-
-seq 1000000 > $SEND
-
-qpid-send -a 'cluster_failover;{create:always}' -b $URL --connection-options "{reconnect:true}" --send-rate 10 --content-stdin < $SEND &
-
-while msg=$(qpid-receive -m1 -f -a 'cluster_failover;{create:always}' -b $URL --connection-options "{reconnect:true,heartbeat:1}"); do
- echo -n $msg; date
-done
-wait
diff --git a/cpp/src/tests/cluster_python_tests b/cpp/src/tests/cluster_python_tests
deleted file mode 100755
index 25c7889246..0000000000
--- a/cpp/src/tests/cluster_python_tests
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Skip if cluster services not running.
-. cpg_check.sh
-cpg_enabled || exit 0
-
-FAILING=`dirname $0`/cluster_python_tests_failing.txt
-source `dirname $0`/python_tests
-
diff --git a/cpp/src/tests/cluster_python_tests_failing.txt b/cpp/src/tests/cluster_python_tests_failing.txt
deleted file mode 100644
index f8639d7b59..0000000000
--- a/cpp/src/tests/cluster_python_tests_failing.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-qpid_tests.broker_0_10.management.ManagementTest.test_purge_queue
-qpid_tests.broker_0_10.management.ManagementTest.test_connection_close
-qpid_tests.broker_0_10.message.MessageTests.test_ttl
-qpid_tests.broker_0_10.management.ManagementTest.test_broker_connectivity_oldAPI
diff --git a/cpp/src/tests/cluster_read_credit b/cpp/src/tests/cluster_read_credit
deleted file mode 100755
index 552ffee53b..0000000000
--- a/cpp/src/tests/cluster_read_credit
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Regression test for http://issues.apache.org/jira/browse/QPID-2086
-
-srcdir=`dirname $0`
-source cpg_check.sh
-cpg_enabled || exit 0
-
-$srcdir/start_cluster 1 --cluster-read-max=2 || exit 1
-trap $srcdir/stop_cluster EXIT
-seq 1 10000 | ./sender --port `cat cluster.ports` --routing-key no-such-queue
diff --git a/cpp/src/tests/cluster_test.cpp b/cpp/src/tests/cluster_test.cpp
deleted file mode 100644
index f2ccd0ba84..0000000000
--- a/cpp/src/tests/cluster_test.cpp
+++ /dev/null
@@ -1,1231 +0,0 @@
-/*
- *
- * Copyright (c) 2006 The Apache Software Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include "test_tools.h"
-#include "unit_test.h"
-#include "ForkedBroker.h"
-#include "BrokerFixture.h"
-#include "ClusterFixture.h"
-
-#include "qpid/client/Connection.h"
-#include "qpid/client/ConnectionSettings.h"
-#include "qpid/client/ConnectionAccess.h"
-#include "qpid/client/Session.h"
-#include "qpid/client/FailoverListener.h"
-#include "qpid/client/FailoverManager.h"
-#include "qpid/client/QueueOptions.h"
-#include "qpid/cluster/Cluster.h"
-#include "qpid/cluster/Cpg.h"
-#include "qpid/cluster/UpdateClient.h"
-#include "qpid/framing/AMQBody.h"
-#include "qpid/framing/Uuid.h"
-#include "qpid/framing/reply_exceptions.h"
-#include "qpid/framing/enum.h"
-#include "qpid/framing/MessageTransferBody.h"
-#include "qpid/log/Logger.h"
-#include "qpid/sys/Monitor.h"
-#include "qpid/sys/Thread.h"
-
-#include <boost/bind.hpp>
-#include <boost/shared_ptr.hpp>
-#include <boost/assign.hpp>
-
-#include <string>
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <vector>
-#include <set>
-#include <algorithm>
-#include <iterator>
-
-
-using namespace std;
-using namespace qpid;
-using namespace qpid::cluster;
-using namespace qpid::framing;
-using namespace qpid::client;
-using namespace boost::assign;
-using broker::Broker;
-using boost::shared_ptr;
-
-namespace qpid {
-namespace tests {
-
-QPID_AUTO_TEST_SUITE(cluster_test)
-
-bool durableFlag = std::getenv("STORE_LIB") != 0;
-
-void prepareArgs(ClusterFixture::Args& args, const bool durableFlag = false) {
- ostringstream clusterLib;
- clusterLib << getLibPath("CLUSTER_LIB");
- args += "--auth", "no", "--no-module-dir", "--load-module", clusterLib.str();
- if (durableFlag)
- args += "--load-module", getLibPath("STORE_LIB"), "TMP_DATA_DIR";
- else
- args += "--no-data-dir";
-}
-
-ClusterFixture::Args prepareArgs(const bool durableFlag = false) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- return args;
-}
-
-// Timeout for tests that wait for messages
-const sys::Duration TIMEOUT=2*sys::TIME_SEC;
-
-
-ostream& operator<<(ostream& o, const cpg_name* n) {
- return o << Cpg::str(*n);
-}
-
-ostream& operator<<(ostream& o, const cpg_address& a) {
- return o << "(" << a.nodeid <<","<<a.pid<<","<<a.reason<<")";
-}
-
-template <class T>
-ostream& operator<<(ostream& o, const pair<T*, int>& array) {
- o << "{ ";
- ostream_iterator<cpg_address> i(o, " ");
- copy(array.first, array.first+array.second, i);
- o << "}";
- return o;
-}
-
-template <class C> set<int> makeSet(const C& c) {
- set<int> s;
- copy(c.begin(), c.end(), inserter(s, s.begin()));
- return s;
-}
-
-class Sender {
- public:
- Sender(boost::shared_ptr<ConnectionImpl> ci, uint16_t ch) : connection(ci), channel(ch) {}
- void send(const AMQBody& body, bool firstSeg, bool lastSeg, bool firstFrame, bool lastFrame) {
- AMQFrame f(body);
- f.setChannel(channel);
- f.setFirstSegment(firstSeg);
- f.setLastSegment(lastSeg);
- f.setFirstFrame(firstFrame);
- f.setLastFrame(lastFrame);
- connection->expand(f.encodedSize(), false);
- connection->handle(f);
- }
-
- private:
- boost::shared_ptr<ConnectionImpl> connection;
- uint16_t channel;
-};
-
-int64_t getMsgSequence(const Message& m) {
- return m.getMessageProperties().getApplicationHeaders().getAsInt64("qpid.msg_sequence");
-}
-
-Message ttlMessage(const string& data, const string& key, uint64_t ttl, bool durable = false) {
- Message m(data, key);
- m.getDeliveryProperties().setTtl(ttl);
- if (durable) m.getDeliveryProperties().setDeliveryMode(framing::PERSISTENT);
- return m;
-}
-
-Message makeMessage(const string& data, const string& key, bool durable = false) {
- Message m(data, key);
- if (durable) m.getDeliveryProperties().setDeliveryMode(framing::PERSISTENT);
- return m;
-}
-
-vector<string> browse(Client& c, const string& q, int n) {
- SubscriptionSettings browseSettings(
- FlowControl::messageCredit(n),
- ACCEPT_MODE_NONE,
- ACQUIRE_MODE_NOT_ACQUIRED,
- 0 // No auto-ack.
- );
- LocalQueue lq;
- c.subs.subscribe(lq, q, browseSettings);
- c.session.messageFlush(q);
- vector<string> result;
- for (int i = 0; i < n; ++i) {
- Message m;
- if (!lq.get(m, TIMEOUT))
- break;
- result.push_back(m.getData());
- }
- c.subs.getSubscription(q).cancel();
- return result;
-}
-
-ConnectionSettings aclSettings(int port, const std::string& id) {
- ConnectionSettings settings;
- settings.port = port;
- settings.mechanism = "PLAIN";
- settings.username = id;
- settings.password = id;
- return settings;
-}
-
-// An illegal frame body
-struct PoisonPill : public AMQBody {
- virtual uint8_t type() const { return 0xFF; }
- virtual void encode(Buffer& ) const {}
- virtual void decode(Buffer& , uint32_t=0) {}
- virtual uint32_t encodedSize() const { return 0; }
-
- virtual void print(std::ostream&) const {};
- virtual void accept(AMQBodyConstVisitor&) const {};
-
- virtual AMQMethodBody* getMethod() { return 0; }
- virtual const AMQMethodBody* getMethod() const { return 0; }
-
- /** Match if same type and same class/method ID for methods */
- static bool match(const AMQBody& , const AMQBody& ) { return false; }
- virtual boost::intrusive_ptr<AMQBody> clone() const { return new PoisonPill; }
-};
-
-QPID_AUTO_TEST_CASE(testBadClientData) {
- // Ensure that bad data on a client connection closes the
- // connection but does not stop the broker.
- ClusterFixture::Args args;
- prepareArgs(args, false);
- args += "--log-enable=critical"; // Supress expected errors
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0]);
- Client c1(cluster[1]);
- boost::shared_ptr<client::ConnectionImpl> ci =
- client::ConnectionAccess::getImpl(c0.connection);
- AMQFrame poison(boost::intrusive_ptr<AMQBody>(new PoisonPill));
- ci->expand(poison.encodedSize(), false);
- ci->handle(poison);
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(c0.session.queueQuery("q0"), Exception);
- }
- Client c00(cluster[0]);
- BOOST_CHECK_EQUAL(c00.session.queueQuery("q00").getQueue(), "");
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getQueue(), "");
-}
-
-QPID_AUTO_TEST_CASE(testAcl) {
- ofstream policyFile("cluster_test.acl");
- policyFile << "acl allow foo@QPID create queue name=foo" << endl
- << "acl allow foo@QPID create queue name=foo2" << endl
- << "acl deny foo@QPID create queue name=bar" << endl
- << "acl allow all all" << endl;
- policyFile.close();
- char cwd[1024];
- BOOST_CHECK(::getcwd(cwd, sizeof(cwd)));
- ostringstream aclLib;
- aclLib << getLibPath("ACL_LIB");
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- args += "--log-enable=critical"; // Supress expected errors
- args += "--acl-file", string(cwd) + "/cluster_test.acl",
- "--cluster-mechanism", "PLAIN",
- "--cluster-username", "cluster",
- "--cluster-password", "cluster",
- "--load-module", aclLib.str();
- ClusterFixture cluster(2, args, -1);
-
- Client c0(aclSettings(cluster[0], "c0"), "c0");
- Client c1(aclSettings(cluster[1], "c1"), "c1");
- Client foo(aclSettings(cluster[1], "foo"), "foo");
-
- foo.session.queueDeclare("foo", arg::durable=durableFlag);
- BOOST_CHECK_EQUAL(c0.session.queueQuery("foo").getQueue(), "foo");
-
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(foo.session.queueDeclare("bar", arg::durable=durableFlag), framing::UnauthorizedAccessException);
- }
- BOOST_CHECK(c0.session.queueQuery("bar").getQueue().empty());
- BOOST_CHECK(c1.session.queueQuery("bar").getQueue().empty());
-
- cluster.add();
- Client c2(aclSettings(cluster[2], "c2"), "c2");
- {
- ScopedSuppressLogging sl;
- BOOST_CHECK_THROW(foo.session.queueDeclare("bar", arg::durable=durableFlag), framing::UnauthorizedAccessException);
- }
- BOOST_CHECK(c2.session.queueQuery("bar").getQueue().empty());
-}
-
-QPID_AUTO_TEST_CASE(testMessageTimeToLive) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0], "c0");
- Client c1(cluster[1], "c1");
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=ttlMessage("a", "q", 200, durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("b", "q", durableFlag));
- c0.session.messageTransfer(arg::content=ttlMessage("x", "p", 100000, durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("y", "p", durableFlag));
- cluster.add();
- Client c2(cluster[1], "c2");
-
- BOOST_CHECK_EQUAL(browse(c0, "p", 1), list_of<string>("x"));
- BOOST_CHECK_EQUAL(browse(c1, "p", 1), list_of<string>("x"));
- BOOST_CHECK_EQUAL(browse(c2, "p", 1), list_of<string>("x"));
-
- sys::usleep(200*1000);
- BOOST_CHECK_EQUAL(browse(c0, "q", 1), list_of<string>("b"));
- BOOST_CHECK_EQUAL(browse(c1, "q", 1), list_of<string>("b"));
- BOOST_CHECK_EQUAL(browse(c2, "q", 1), list_of<string>("b"));
-}
-
-QPID_AUTO_TEST_CASE(testSequenceOptions) {
- // Make sure the exchange qpid.msg_sequence property is properly replicated.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- FieldTable ftargs;
- ftargs.setInt("qpid.msg_sequence", 1);
- c0.session.queueDeclare(arg::queue="q", arg::durable=durableFlag);
- c0.session.exchangeDeclare(arg::exchange="ex", arg::type="direct", arg::arguments=ftargs);
- c0.session.exchangeBind(arg::exchange="ex", arg::queue="q", arg::bindingKey="k");
- c0.session.messageTransfer(arg::content=makeMessage("1", "k", durableFlag), arg::destination="ex");
- c0.session.messageTransfer(arg::content=makeMessage("2", "k", durableFlag), arg::destination="ex");
- BOOST_CHECK_EQUAL(1, getMsgSequence(c0.subs.get("q", TIMEOUT)));
- BOOST_CHECK_EQUAL(2, getMsgSequence(c0.subs.get("q", TIMEOUT)));
-
- cluster.add();
- Client c1(cluster[1]);
- c1.session.messageTransfer(arg::content=makeMessage("3", "k", durableFlag), arg::destination="ex");
- BOOST_CHECK_EQUAL(3, getMsgSequence(c1.subs.get("q", TIMEOUT)));
-}
-
-QPID_AUTO_TEST_CASE(testTxTransaction) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare(arg::queue="q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("A", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("B", "q", durableFlag));
-
- // Start a transaction that will commit.
- Session commitSession = c0.connection.newSession("commit");
- SubscriptionManager commitSubs(commitSession);
- commitSession.txSelect();
- commitSession.messageTransfer(arg::content=makeMessage("a", "q", durableFlag));
- commitSession.messageTransfer(arg::content=makeMessage("b", "q", durableFlag));
- BOOST_CHECK_EQUAL(commitSubs.get("q", TIMEOUT).getData(), "A");
-
- // Start a transaction that will roll back.
- Session rollbackSession = c0.connection.newSession("rollback");
- SubscriptionManager rollbackSubs(rollbackSession);
- rollbackSession.txSelect();
- rollbackSession.messageTransfer(arg::content=makeMessage("1", "q", durableFlag));
- Message rollbackMessage = rollbackSubs.get("q", TIMEOUT);
- BOOST_CHECK_EQUAL(rollbackMessage.getData(), "B");
-
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
- // Add new member mid transaction.
- cluster.add();
- Client c1(cluster[1], "c1");
-
- // More transactional work
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- rollbackSession.messageTransfer(arg::content=makeMessage("2", "q", durableFlag));
- commitSession.messageTransfer(arg::content=makeMessage("c", "q", durableFlag));
- rollbackSession.messageTransfer(arg::content=makeMessage("3", "q", durableFlag));
-
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
-
- // Commit/roll back.
- commitSession.txCommit();
- rollbackSession.txRollback();
- rollbackSession.messageRelease(rollbackMessage.getId());
-
- // Verify queue status: just the comitted messages and dequeues should remain.
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 4u);
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "B");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "a");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "b");
- BOOST_CHECK_EQUAL(c1.subs.get("q", TIMEOUT).getData(), "c");
-
- commitSession.close();
- rollbackSession.close();
-}
-
-QPID_AUTO_TEST_CASE(testUnacked) {
- // Verify replication of unacknowledged messages.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- Message m;
-
- // Create unacked message: acquired but not accepted.
- SubscriptionSettings manualAccept(FlowControl::unlimited(), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_PRE_ACQUIRED, 0);
- c0.session.queueDeclare("q1", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("11","q1", durableFlag));
- LocalQueue q1;
- c0.subs.subscribe(q1, "q1", manualAccept);
- BOOST_CHECK_EQUAL(q1.get(TIMEOUT).getData(), "11"); // Acquired but not accepted
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q1").getMessageCount(), 0u); // Gone from queue
-
- // Create unacked message: not acquired, accepted or completeed.
- SubscriptionSettings manualAcquire(FlowControl::unlimited(), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_NOT_ACQUIRED, 0);
- c0.session.queueDeclare("q2", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("21","q2", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("22","q2", durableFlag));
- LocalQueue q2;
- c0.subs.subscribe(q2, "q2", manualAcquire);
- m = q2.get(TIMEOUT); // Not acquired or accepted, still on queue
- BOOST_CHECK_EQUAL(m.getData(), "21");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 2u); // Not removed
- c0.subs.getSubscription("q2").acquire(m); // Acquire manually
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 1u); // Removed
- BOOST_CHECK_EQUAL(q2.get(TIMEOUT).getData(), "22"); // Not acquired or accepted, still on queue
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q2").getMessageCount(), 1u); // 1 not acquired.
-
- // Create empty credit record: acquire and accept but don't complete.
- SubscriptionSettings manualComplete(FlowControl::messageWindow(1), ACCEPT_MODE_EXPLICIT, ACQUIRE_MODE_PRE_ACQUIRED, 1, MANUAL_COMPLETION);
- c0.session.queueDeclare("q3", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("31", "q3", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("32", "q3", durableFlag));
- LocalQueue q3;
- c0.subs.subscribe(q3, "q3", manualComplete);
- Message m31=q3.get(TIMEOUT);
- BOOST_CHECK_EQUAL(m31.getData(), "31"); // Automatically acquired & accepted but not completed.
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q3").getMessageCount(), 1u);
-
- // Add new member while there are unacked messages.
- cluster.add();
- Client c1(cluster[1], "c1");
-
- // Check queue counts
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q2").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q3").getMessageCount(), 1u);
-
- // Complete the empty credit message, should unblock the message behind it.
- BOOST_CHECK_THROW(q3.get(0), Exception);
- c0.session.markCompleted(SequenceSet(m31.getId()), true);
- BOOST_CHECK_EQUAL(q3.get(TIMEOUT).getData(), "32");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q3").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q3").getMessageCount(), 0u);
-
- // Close the original session - unacked messages should be requeued.
- c0.session.close();
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q1").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q2").getMessageCount(), 2u);
-
- BOOST_CHECK_EQUAL(c1.subs.get("q1", TIMEOUT).getData(), "11");
- BOOST_CHECK_EQUAL(c1.subs.get("q2", TIMEOUT).getData(), "21");
- BOOST_CHECK_EQUAL(c1.subs.get("q2", TIMEOUT).getData(), "22");
-}
-
-// FIXME aconway 2009-06-17: test for unimplemented feature, enable when implemented.
-void testUpdateTxState() {
- // Verify that we update transaction state correctly to new members.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- // Do work in a transaction.
- c0.session.txSelect();
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("1","q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("2","q", durableFlag));
- Message m;
- BOOST_CHECK(c0.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "1");
-
- // New member, TX not comitted, c1 should see nothing.
- cluster.add();
- Client c1(cluster[1], "c1");
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 0u);
-
- // After commit c1 shoudl see results of tx.
- c0.session.txCommit();
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 1u);
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "2");
-
- // Another transaction with both members active.
- c0.session.messageTransfer(arg::content=makeMessage("3","q", durableFlag));
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 0u);
- c0.session.txCommit();
- BOOST_CHECK_EQUAL(c1.session.queueQuery(arg::queue="q").getMessageCount(), 1u);
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "3");
-}
-
-QPID_AUTO_TEST_CASE(testUpdateMessageBuilder) {
- // Verify that we update a partially recieved message to a new member.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- Sender sender(ConnectionAccess::getImpl(c0.connection), c0.session.getChannel());
-
- // Send first 2 frames of message.
- MessageTransferBody transfer(
- ProtocolVersion(), string(), // default exchange.
- framing::message::ACCEPT_MODE_NONE,
- framing::message::ACQUIRE_MODE_PRE_ACQUIRED);
- sender.send(transfer, true, false, true, true);
- AMQHeaderBody header;
- header.get<DeliveryProperties>(true)->setRoutingKey("q");
- if (durableFlag)
- header.get<DeliveryProperties>(true)->setDeliveryMode(DELIVERY_MODE_PERSISTENT);
- else
- header.get<DeliveryProperties>(true)->setDeliveryMode(DELIVERY_MODE_NON_PERSISTENT);
- sender.send(header, false, false, true, true);
-
- // No reliable way to ensure the partial message has arrived
- // before we start the new broker, so we sleep.
- sys::usleep(2500);
- cluster.add();
-
- // Send final 2 frames of message.
- sender.send(AMQContentBody("ab"), false, true, true, false);
- sender.send(AMQContentBody("cd"), false, true, false, true);
-
- // Verify message is enqued correctly on second member.
- Message m;
- Client c1(cluster[1], "c1");
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "abcd");
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());
-}
-
-QPID_AUTO_TEST_CASE(testConnectionKnownHosts) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
- set<int> kb0 = knownBrokerPorts(c0.connection, 1);
- BOOST_CHECK_EQUAL(kb0.size(), 1u);
- BOOST_CHECK_EQUAL(kb0, makeSet(cluster));
-
- cluster.add();
- Client c1(cluster[1], "c1");
- set<int> kb1 = knownBrokerPorts(c1.connection, 2);
- kb0 = knownBrokerPorts(c0.connection, 2);
- BOOST_CHECK_EQUAL(kb1.size(), 2u);
- BOOST_CHECK_EQUAL(kb1, makeSet(cluster));
- BOOST_CHECK_EQUAL(kb1,kb0);
-
- cluster.add();
- Client c2(cluster[2], "c2");
- set<int> kb2 = knownBrokerPorts(c2.connection, 3);
- kb1 = knownBrokerPorts(c1.connection, 3);
- kb0 = knownBrokerPorts(c0.connection, 3);
- BOOST_CHECK_EQUAL(kb2.size(), 3u);
- BOOST_CHECK_EQUAL(kb2, makeSet(cluster));
- BOOST_CHECK_EQUAL(kb2,kb0);
- BOOST_CHECK_EQUAL(kb2,kb1);
-
- cluster.killWithSilencer(1,c1.connection,9);
- kb0 = knownBrokerPorts(c0.connection, 2);
- kb2 = knownBrokerPorts(c2.connection, 2);
- BOOST_CHECK_EQUAL(kb0.size(), 2u);
- BOOST_CHECK_EQUAL(kb0, kb2);
-}
-
-QPID_AUTO_TEST_CASE(testUpdateConsumers) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
-
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.subs.subscribe(c0.lq, "q", FlowControl::zero());
- LocalQueue lp;
- c0.subs.subscribe(lp, "p", FlowControl::messageCredit(1));
- c0.session.sync();
-
- // Start new members
- cluster.add(); // Local
- Client c1(cluster[1], "c1");
- cluster.add();
- Client c2(cluster[2], "c2");
-
- // Transfer messages
- c0.session.messageTransfer(arg::content=makeMessage("aaa", "q", durableFlag));
-
- c0.session.messageTransfer(arg::content=makeMessage("bbb", "p", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("ccc", "p", durableFlag));
-
- // Activate the subscription, ensure message removed on all queues.
- c0.subs.setFlowControl("q", FlowControl::unlimited());
- Message m;
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "aaa");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c2.session.queueQuery("q").getMessageCount(), 0u);
-
- // Check second subscription's flow control: gets first message, not second.
- BOOST_CHECK(lp.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "bbb");
- BOOST_CHECK_EQUAL(c0.session.queueQuery("p").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c1.session.queueQuery("p").getMessageCount(), 1u);
- BOOST_CHECK_EQUAL(c2.session.queueQuery("p").getMessageCount(), 1u);
-
- BOOST_CHECK(c0.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "ccc");
-
- // Kill the subscribing member, ensure further messages are not removed.
- cluster.killWithSilencer(0,c0.connection,9);
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c1.connection, 2).size(), 2u);
- for (int i = 0; i < 10; ++i) {
- c1.session.messageTransfer(arg::content=makeMessage("xxx", "q", durableFlag));
- BOOST_REQUIRE(c1.subs.get(m, "q", TIMEOUT));
- BOOST_REQUIRE_EQUAL(m.getData(), "xxx");
- }
-}
-
-// Test that message data and delivery properties are updated properly.
-QPID_AUTO_TEST_CASE(testUpdateMessages) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- // Create messages with different delivery properties
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.exchangeBind(arg::exchange="amq.fanout", arg::queue="q");
- c0.session.messageTransfer(arg::content=makeMessage("foo","q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar","q", durableFlag),
- arg::destination="amq.fanout");
-
- while (c0.session.queueQuery("q").getMessageCount() != 2)
-        sys::usleep(1000);    // Wait for messages to show up on broker 0.
-
- // Add a new broker, it will catch up.
- cluster.add();
-
- // Do some work post-add
- c0.session.queueDeclare("p", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("pfoo","p", durableFlag));
-
- // Do some work post-join
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c0.connection, 2).size(), 2u);
- c0.session.messageTransfer(arg::content=makeMessage("pbar","p", durableFlag));
-
- // Verify new brokers have state.
- Message m;
-
- Client c1(cluster[1], "c1");
-
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "foo");
- BOOST_CHECK(m.getDeliveryProperties().hasExchange());
- BOOST_CHECK_EQUAL(m.getDeliveryProperties().getExchange(), "");
- BOOST_CHECK(c1.subs.get(m, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "bar");
- BOOST_CHECK(m.getDeliveryProperties().hasExchange());
- BOOST_CHECK_EQUAL(m.getDeliveryProperties().getExchange(), "amq.fanout");
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
-
- // Add another broker, don't wait for join - should be stalled till ready.
- cluster.add();
- Client c2(cluster[2], "c2");
- BOOST_CHECK(c2.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "pfoo");
- BOOST_CHECK(c2.subs.get(m, "p", TIMEOUT));
- BOOST_CHECK_EQUAL(m.getData(), "pbar");
- BOOST_CHECK_EQUAL(c2.session.queueQuery("p").getMessageCount(), 0u);
-}
-
-QPID_AUTO_TEST_CASE(testWiringReplication) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0]);
- BOOST_CHECK(c0.session.queueQuery("q").getQueue().empty());
- BOOST_CHECK(c0.session.exchangeQuery("ex").getType().empty());
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.exchangeDeclare("ex", arg::type="direct");
- c0.session.close();
- c0.connection.close();
- // Verify all brokers get wiring update.
- for (size_t i = 0; i < cluster.size(); ++i) {
- BOOST_MESSAGE("i == "<< i);
- Client c(cluster[i]);
- BOOST_CHECK_EQUAL("q", c.session.queueQuery("q").getQueue());
- BOOST_CHECK_EQUAL("direct", c.session.exchangeQuery("ex").getType());
- }
-}
-
-QPID_AUTO_TEST_CASE(testMessageEnqueue) {
- // Enqueue on one broker, dequeue on another.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- Client c0(cluster[0]);
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
- c0.session.close();
- Client c1(cluster[1]);
- Message msg;
- BOOST_CHECK(c1.subs.get(msg, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(string("foo"), msg.getData());
- BOOST_CHECK(c1.subs.get(msg, "q", TIMEOUT));
- BOOST_CHECK_EQUAL(string("bar"), msg.getData());
-}
-
-QPID_AUTO_TEST_CASE(testMessageDequeue) {
- // Enqueue on one broker, dequeue on two others.
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c0.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
-
- Message msg;
-
- // Dequeue on 2 others, ensure correct order.
- Client c1(cluster[1], "c1");
- BOOST_CHECK(c1.subs.get(msg, "q"));
- BOOST_CHECK_EQUAL("foo", msg.getData());
-
- Client c2(cluster[2], "c2");
-    BOOST_CHECK(c2.subs.get(msg, "q"));
- BOOST_CHECK_EQUAL("bar", msg.getData());
-
- // Queue should be empty on all cluster members.
- BOOST_CHECK_EQUAL(0u, c0.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c1.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c2.session.queueQuery("q").getMessageCount());
-}
-
-QPID_AUTO_TEST_CASE(testDequeueWaitingSubscription) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(3, args, -1);
- Client c0(cluster[0]);
- BOOST_REQUIRE_EQUAL(knownBrokerPorts(c0.connection, 3).size(), 3u); // Wait for brokers.
-
- // First start a subscription.
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- c0.subs.subscribe(c0.lq, "q", FlowControl::messageCredit(2));
-
- // Now send messages
- Client c1(cluster[1]);
- c1.session.messageTransfer(arg::content=makeMessage("foo", "q", durableFlag));
- c1.session.messageTransfer(arg::content=makeMessage("bar", "q", durableFlag));
-
- // Check they arrived
- Message m;
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL("foo", m.getData());
- BOOST_CHECK(c0.lq.get(m, TIMEOUT));
- BOOST_CHECK_EQUAL("bar", m.getData());
-
- // Queue should be empty on all cluster members.
- Client c2(cluster[2]);
- BOOST_CHECK_EQUAL(0u, c0.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c1.session.queueQuery("q").getMessageCount());
- BOOST_CHECK_EQUAL(0u, c2.session.queueQuery("q").getMessageCount());
-}
-
-QPID_AUTO_TEST_CASE(queueDurabilityPropagationToNewbie)
-{
- /*
- Start with a single broker.
- Set up two queues: one durable, and one not.
- Add a new broker to the cluster.
- Make sure it has one durable and one non-durable queue.
- */
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0]);
- c0.session.queueDeclare("durable_queue", arg::durable=true);
- c0.session.queueDeclare("non_durable_queue", arg::durable=false);
- cluster.add();
- Client c1(cluster[1]);
- QueueQueryResult durable_query = c1.session.queueQuery ( "durable_queue" );
- QueueQueryResult non_durable_query = c1.session.queueQuery ( "non_durable_queue" );
- BOOST_CHECK_EQUAL(durable_query.getQueue(), std::string("durable_queue"));
- BOOST_CHECK_EQUAL(non_durable_query.getQueue(), std::string("non_durable_queue"));
-
- BOOST_CHECK_EQUAL ( durable_query.getDurable(), true );
- BOOST_CHECK_EQUAL ( non_durable_query.getDurable(), false );
-}
-
-
-QPID_AUTO_TEST_CASE(testHeartbeatCancelledOnFailover)
-{
-
- struct Sender : FailoverManager::Command
- {
- std::string queue;
- std::string content;
-
- Sender(const std::string& q, const std::string& c) : queue(q), content(c) {}
-
- void execute(AsyncSession& session, bool)
- {
- session.messageTransfer(arg::content=makeMessage(content, queue, durableFlag));
- }
- };
-
- struct Receiver : FailoverManager::Command, MessageListener, qpid::sys::Runnable
- {
- FailoverManager& mgr;
- std::string queue;
- std::string expectedContent;
- qpid::client::Subscription subscription;
- qpid::sys::Monitor lock;
- bool ready, failed;
-
- Receiver(FailoverManager& m, const std::string& q, const std::string& c) : mgr(m), queue(q), expectedContent(c), ready(false), failed(false) {}
-
- void received(Message& message)
- {
- BOOST_CHECK_EQUAL(expectedContent, message.getData());
- subscription.cancel();
- }
-
- void execute(AsyncSession& session, bool)
- {
- session.queueDeclare(arg::queue=queue, arg::durable=durableFlag);
- SubscriptionManager subs(session);
- subscription = subs.subscribe(*this, queue);
- session.sync();
- setReady();
- subs.run();
- //cleanup:
- session.queueDelete(arg::queue=queue);
- }
-
- void run()
- {
- try {
- mgr.execute(*this);
- }
- catch (const std::exception& e) {
- BOOST_MESSAGE("Exception in mgr.execute: " << e.what());
- failed = true;
- }
- }
-
- void waitForReady()
- {
- qpid::sys::Monitor::ScopedLock l(lock);
- while (!ready) {
- lock.wait();
- }
- }
-
- void setReady()
- {
- qpid::sys::Monitor::ScopedLock l(lock);
- ready = true;
- lock.notify();
- }
- };
-
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(2, args, -1);
- ConnectionSettings settings;
- settings.port = cluster[1];
- settings.heartbeat = 1;
- FailoverManager fmgr(settings);
- Sender sender("my-queue", "my-data");
- Receiver receiver(fmgr, "my-queue", "my-data");
- qpid::sys::Thread runner(receiver);
- receiver.waitForReady();
- {
- ScopedSuppressLogging allQuiet; // suppress connection closed messages
- cluster.kill(1);
- //sleep for 2 secs to allow the heartbeat task to fire on the now dead connection:
- ::usleep(2*1000*1000);
- }
- fmgr.execute(sender);
- runner.join();
- BOOST_CHECK(!receiver.failed);
- fmgr.close();
-}
-
-QPID_AUTO_TEST_CASE(testPolicyUpdate) {
-    //tests that the policy's internal state is accurate on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(REJECT, 0, 2);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- c1.session.messageTransfer(arg::content=makeMessage("one", "q", durableFlag));
- cluster.add();
- Client c2(cluster[1], "c2");
- c2.session.messageTransfer(arg::content=makeMessage("two", "q", durableFlag));
-
- BOOST_CHECK_THROW(c2.session.messageTransfer(arg::content=makeMessage("three", "q", durableFlag)), framing::ResourceLimitExceededException);
-
- Message received;
- BOOST_CHECK(c1.subs.get(received, "q"));
- BOOST_CHECK_EQUAL(received.getData(), std::string("one"));
- BOOST_CHECK(c1.subs.get(received, "q"));
- BOOST_CHECK_EQUAL(received.getData(), std::string("two"));
- BOOST_CHECK(!c1.subs.get(received, "q"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testExclusiveQueueUpdate) {
- //tests that exclusive queues are accurately replicated on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- c1.session.queueDeclare("q", arg::exclusive=true, arg::autoDelete=true, arg::alternateExchange="amq.fanout");
- cluster.add();
- Client c2(cluster[1], "c2");
- QueueQueryResult result = c2.session.queueQuery("q");
- BOOST_CHECK_EQUAL(result.getQueue(), std::string("q"));
- BOOST_CHECK(result.getExclusive());
- BOOST_CHECK(result.getAutoDelete());
- BOOST_CHECK(!result.getDurable());
- BOOST_CHECK_EQUAL(result.getAlternateExchange(), std::string("amq.fanout"));
- BOOST_CHECK_THROW(c2.session.queueDeclare(arg::queue="q", arg::exclusive=true, arg::passive=true), framing::ResourceLockedException);
- c1.session.close();
- c1.connection.close();
- c2.session = c2.connection.newSession();
- BOOST_CHECK_THROW(c2.session.queueDeclare(arg::queue="q", arg::passive=true), framing::NotFoundException);
- }
-}
-
-/**
- * Subscribes to the specified queue and acquires up to the specified
- * number of messages but does not accept or release them. These
- * messages are therefore 'locked' by the client's session.
- */
-Subscription lockMessages(Client& client, const std::string& queue, int count)
-{
- LocalQueue q;
- SubscriptionSettings settings(FlowControl::messageCredit(count));
- settings.autoAck = 0;
- Subscription sub = client.subs.subscribe(q, queue, settings);
- client.session.messageFlush(sub.getName());
- return sub;
-}
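-
-// Typical use (see testRingQueueUpdate and testRelease below): lock a
-// message before adding a node, then release or close to requeue it, e.g.
-//   Subscription sub = lockMessages(client, "q", 1);
-//   cluster.add();
-//   sub.release(sub.getUnaccepted()); // requeue the locked message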
-
-/**
- * check that the specified queue contains the expected set of
- * messages (matched on content) for all nodes in the cluster
- */
-void checkQueue(ClusterFixture& cluster, const std::string& queue, const std::vector<std::string>& messages)
-{
- for (size_t i = 0; i < cluster.size(); i++) {
- Client client(cluster[i], (boost::format("%1%_%2%") % "c" % (i+1)).str());
- BOOST_CHECK_EQUAL(browse(client, queue, messages.size()), messages);
- client.close();
- }
-}
-
-void send(Client& client, const std::string& queue, int count, int start=1, const std::string& base="m",
- const std::string& lvqKey="")
-{
- for (int i = 0; i < count; i++) {
- Message message = makeMessage((boost::format("%1%_%2%") % base % (i+start)).str(), queue, durableFlag);
- if (!lvqKey.empty()) message.getHeaders().setString(QueueOptions::strLVQMatchProperty, lvqKey);
- client.session.messageTransfer(arg::content=message);
- }
-}
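-
-// For example, send(c, "q", 3) transfers "m_1", "m_2", "m_3" to "q", and
-// send(c, "q", 2, 4, "b", "b") transfers "b_4", "b_5" tagged with LVQ key "b".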
-
-QPID_AUTO_TEST_CASE(testRingQueueUpdate) {
- //tests that ring queues are accurately replicated on newly
- //joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(RING, 0, 5);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- send(c1, "q", 5);
- lockMessages(c1, "q", 1);
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
- //send one more message
- send(c1, "q", 1, 6);
- //release locked message
- c1.close();
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("m_2")("m_3")("m_4")("m_5")("m_6"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testRingQueueUpdate2) {
- //tests that ring queues are accurately replicated on newly joined
- //nodes; just like testRingQueueUpdate, but new node joins after
- //the sixth message has been sent.
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setSizePolicy(RING, 0, 5);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
- send(c1, "q", 5);
- lockMessages(c1, "q", 1);
- //send sixth message
- send(c1, "q", 1, 6);
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
- //release locked message
- c1.close();
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("m_2")("m_3")("m_4")("m_5")("m_6"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testLvqUpdate) {
- //tests that lvqs are accurately replicated on newly joined nodes
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setOrdering(LVQ);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
-
- send(c1, "q", 5, 1, "a", "a");
- send(c1, "q", 2, 1, "b", "b");
- send(c1, "q", 1, 1, "c", "c");
- send(c1, "q", 1, 3, "b", "b");
-
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
-
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("a_5")("b_3")("c_1"));
- }
-}
-
-
-QPID_AUTO_TEST_CASE(testBrowsedLvqUpdate) {
- //tests that lvqs are accurately replicated on newly joined nodes
- //if the lvq state has been affected by browsers
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- QueueOptions options;
- options.setOrdering(LVQ);
- c1.session.queueDeclare("q", arg::arguments=options, arg::durable=durableFlag);
-
- send(c1, "q", 1, 1, "a", "a");
- send(c1, "q", 2, 1, "b", "b");
- send(c1, "q", 1, 1, "c", "c");
- checkQueue(cluster, "q", list_of<string>("a_1")("b_2")("c_1"));
- send(c1, "q", 4, 2, "a", "a");
- send(c1, "q", 1, 3, "b", "b");
-
- //add new node
- cluster.add();
- BOOST_CHECK_EQUAL(2u, knownBrokerPorts(c1.connection, 2).size());//wait till joined
-
- //check state of queue on both nodes
- checkQueue(cluster, "q", list_of<string>("a_1")("b_2")("c_1")("a_5")("b_3"));
- }
-}
-
-QPID_AUTO_TEST_CASE(testRelease) {
-    //tests that releasing a message that was unacked when a new node
- //joined works correctly
- ClusterFixture::Args args;
- args += "--log-enable", "critical";
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c1(cluster[0], "c1");
- {
- ScopedSuppressLogging allQuiet;
- c1.session.queueDeclare("q", arg::durable=durableFlag);
- for (int i = 0; i < 5; i++) {
- c1.session.messageTransfer(arg::content=makeMessage((boost::format("%1%_%2%") % "m" % (i+1)).str(), "q", durableFlag));
- }
- //receive but don't ack a message
- LocalQueue lq;
- SubscriptionSettings lqSettings(FlowControl::messageCredit(1));
- lqSettings.autoAck = 0;
- Subscription lqSub = c1.subs.subscribe(lq, "q", lqSettings);
- c1.session.messageFlush("q");
- Message received;
- BOOST_CHECK(lq.get(received));
- BOOST_CHECK_EQUAL(received.getData(), std::string("m_1"));
-
- //add new node
- cluster.add();
-
- lqSub.release(lqSub.getUnaccepted());
-
- //check state of queue on both nodes
- vector<string> expected = list_of<string>("m_1")("m_2")("m_3")("m_4")("m_5");
- Client c3(cluster[0], "c3");
- BOOST_CHECK_EQUAL(browse(c3, "q", 5), expected);
- Client c2(cluster[1], "c2");
- BOOST_CHECK_EQUAL(browse(c2, "q", 5), expected);
- }
-}
-
-
-// Browse for 1 message with byte credit; return true if a message was
-// received, false if not.
-bool browseByteCredit(Client& c, const string& q, int n, Message& m) {
- SubscriptionSettings browseSettings(
- FlowControl(1, n, false), // 1 message, n bytes credit, no window
- ACCEPT_MODE_NONE,
- ACQUIRE_MODE_NOT_ACQUIRED,
- 0 // No auto-ack.
- );
- LocalQueue lq;
- Subscription s = c.subs.subscribe(lq, q, browseSettings);
- c.session.messageFlush(arg::destination=q, arg::sync=true);
- c.session.sync();
- c.subs.getSubscription(q).cancel();
- return lq.get(m, 0); // No timeout, flush should push message thru.
-}
-
-// Ensure cluster update preserves exact message size; use byte credit as the test.
-QPID_AUTO_TEST_CASE(testExactByteCredit) {
- ClusterFixture cluster(1, prepareArgs(), -1);
- Client c0(cluster[0], "c0");
- c0.session.queueDeclare("q");
- c0.session.messageTransfer(arg::content=Message("MyMessage", "q"));
- cluster.add();
-
- int size=36; // Size of message on broker: headers+body
- Client c1(cluster[1], "c1");
- Message m;
-
- // Ensure we get the message with exact credit.
- BOOST_CHECK(browseByteCredit(c0, "q", size, m));
- BOOST_CHECK(browseByteCredit(c1, "q", size, m));
- // and not with one byte less.
- BOOST_CHECK(!browseByteCredit(c0, "q", size-1, m));
- BOOST_CHECK(!browseByteCredit(c1, "q", size-1, m));
-}
-
-// Test that consumer positions are updated correctly.
-// Regression test for https://bugzilla.redhat.com/show_bug.cgi?id=541927
-//
-QPID_AUTO_TEST_CASE(testUpdateConsumerPosition) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- c0.session.queueDeclare("q", arg::durable=durableFlag);
- SubscriptionSettings settings;
- settings.autoAck = 0;
-    // Set the acquire mode to 'not-acquired': the consumer moves along the queue
- // but does not acquire (remove) messages.
- settings.acquireMode = ACQUIRE_MODE_NOT_ACQUIRED;
- Subscription s = c0.subs.subscribe(c0.lq, "q", settings);
- c0.session.messageTransfer(arg::content=makeMessage("1", "q", durableFlag));
- BOOST_CHECK_EQUAL("1", c0.lq.get(TIMEOUT).getData());
-
- // Add another member, send/receive another message and acquire
- // the messages. With the bug, this creates an inconsistency
- // because the browse position was not updated to the new member.
- cluster.add();
- c0.session.messageTransfer(arg::content=makeMessage("2", "q", durableFlag));
- BOOST_CHECK_EQUAL("2", c0.lq.get(TIMEOUT).getData());
- s.acquire(s.getUnacquired());
- s.accept(s.getUnaccepted());
-
- // In the bug we now have 0 messages on cluster[0] and 1 message on cluster[1]
- // Subscribing on cluster[1] provokes an error that shuts down cluster[0]
- Client c1(cluster[1], "c1");
- Subscription s1 = c1.subs.subscribe(c1.lq, "q"); // Default auto-ack=1
- Message m;
- BOOST_CHECK(!c1.lq.get(m, TIMEOUT/10));
- BOOST_CHECK_EQUAL(c1.session.queueQuery("q").getMessageCount(), 0u);
- BOOST_CHECK_EQUAL(c0.session.queueQuery("q").getMessageCount(), 0u);
-}
-
-QPID_AUTO_TEST_CASE(testFairsharePriorityDelivery) {
- ClusterFixture::Args args;
- prepareArgs(args, durableFlag);
- ClusterFixture cluster(1, args, -1);
- Client c0(cluster[0], "c0");
-
- FieldTable arguments;
- arguments.setInt("x-qpid-priorities", 10);
- arguments.setInt("x-qpid-fairshare", 5);
- c0.session.queueDeclare("q", arg::durable=durableFlag, arg::arguments=arguments);
-
- //send messages of different priorities
- for (int i = 0; i < 20; i++) {
- Message msg = makeMessage((boost::format("msg-%1%") % i).str(), "q", durableFlag);
- msg.getDeliveryProperties().setPriority(i % 2 ? 9 : 5);
- c0.session.messageTransfer(arg::content=msg);
- }
-
-    //pull off the first four messages (these should be the top-priority messages)
- for (int i = 0; i < 4; i++) {
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % ((i*2)+1)).str(), c0.subs.get("q", TIMEOUT).getData());
- }
-
- // Add another member
- cluster.add();
- Client c1(cluster[1], "c1");
-
- //pull off some more messages
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 9).str(), c0.subs.get("q", TIMEOUT).getData());
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 0).str(), c1.subs.get("q", TIMEOUT).getData());
- BOOST_CHECK_EQUAL((boost::format("msg-%1%") % 2).str(), c0.subs.get("q", TIMEOUT).getData());
-
- //check queue has same content on both nodes
- BOOST_CHECK_EQUAL(browse(c0, "q", 12), browse(c1, "q", 12));
-}
-
-QPID_AUTO_TEST_SUITE_END()
-}} // namespace qpid::tests
diff --git a/cpp/src/tests/cluster_test_logs.py b/cpp/src/tests/cluster_test_logs.py
deleted file mode 100755
index 22f2470590..0000000000
--- a/cpp/src/tests/cluster_test_logs.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Functions for comparing broker log files, used by cluster_tests.py.
-
-import os, os.path, re, glob
-from itertools import izip
-
-def split_log(log):
- """Split a broker log at checkpoints where a member joins.
-    Return the list of checkpoints discovered."""
- checkpoint_re = re.compile("Member joined, frameSeq=([0-9]+), queue snapshot:")
- outfile = None
- checkpoints = []
- for l in open(log):
- match = checkpoint_re.search(l)
- if match:
- checkpoint = match.groups()[0]
- checkpoints.append(checkpoint)
- if outfile: outfile.close()
- outfile = open("%s.%s"%(log, checkpoint), 'w')
-
- if outfile: outfile.write(l)
- if outfile: outfile.close()
- return checkpoints
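-
-# For example, splitting "broker0.log" containing joins at frameSeq 10 and 42
-# writes broker0.log.10 and broker0.log.42 and returns ['10', '42'].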
-
-def filter_log(log):
- """Filter the contents of a log file to remove data that is expected
- to differ between brokers in a cluster. Filtered log contents between
- the same checkpoints should match across the cluster."""
- out = open("%s.filter"%(log), 'w')
- # Lines to skip entirely, expected differences
- skip = "|".join([
- 'local connection', # Only on local broker
- 'UPDATER|UPDATEE', # Ignore update process
- 'stall for update|unstall, ignore update|cancelled offer .* unstall',
- 'caught up',
- 'active for links|Passivating links|Activating links',
- 'info Connecting: .*', # UpdateClient connection
- 'info Connection.* connected to', # UpdateClient connection
- 'warning Connection \\[[-0-9.: ]+\\] closed', # UpdateClient connection
- 'warning Broker closed connection: 200, OK',
- 'task late',
- 'task overran',
- 'warning CLOSING .* unsent data',
- 'Inter-broker link ', # ignore link state changes
- 'Updated link key from ', # ignore link state changes
- 'Running in a cluster, marking store',
- 'debug Sending keepalive signal to watchdog', # Watchdog timer thread
- 'last broker standing joined by 1 replicas, updating queue policies.',
- 'Connection .* timed out: closing', # heartbeat connection close
- "org.apache.qpid.broker:bridge:", # ignore bridge index
- "closed connection"
- ])
- # Regex to match a UUID
- uuid='\w\w\w\w\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w-\w\w\w\w\w\w\w\w\w\w\w\w'
- # Substitutions to remove expected differences
- subs = [
- (r'\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d ', ''), # Remove timestamp
- (r'cluster\([0-9.: ]*', 'cluster('), # Remove cluster node id
- (r' local\)| shadow\)', ')'), # Remove local/shadow indication
- (r'CATCHUP', 'READY'), # Treat catchup as equivalent to ready.
- (r'OFFER', 'READY'), # Treat offer as equivalent to ready.
- # System UUID expected to be different
- (r'(org.apache.qpid.broker:system[:(])%s(\)?)'%(uuid), r'\1UUID\2'),
-
- # TODO aconway 2010-12-20: review if these should be expected:
- (r' len=\d+', ' len=NN'), # buffer lengths
- (r' map={.*_object_name:([^,}]*)[,}].*', r' \1'), # V2 map - just keep name
- (r'\d+-\d+-\d+--\d+', 'X-X-X--X'), # V1 Object IDs
- ]
- # Substitutions to mask known issue: durable test shows inconsistent "changed stats for com.redhat.rhm.store:journal" messages.
- skip += '|Changed V[12] statistics com.redhat.rhm.store:journal'
- subs += [(r'to=console.obj.1.0.com.redhat.rhm.store.journal props=\d+ stats=\d+',
- 'to=console.obj.1.0.com.redhat.rhm.store.journal props=NN stats=NN')]
-
- skip_re = re.compile(skip)
- subs = [(re.compile(pattern), subst) for pattern, subst in subs]
- for l in open(log):
- if skip_re.search(l): continue
- for pattern,subst in subs: l = re.sub(pattern,subst,l)
- out.write(l)
- out.close()
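-
-# Illustration: after filtering, a line such as
-#   2010-12-20 12:00:00 cluster(10.0.0.1:1234 local) CATCHUP ...
-# appears in the .filter file as
-#   cluster() READY ...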
-
-def verify_logs():
- """Compare log files from cluster brokers, verify that they correspond correctly."""
- for l in glob.glob("*.log"): filter_log(l)
- checkpoints = set()
- for l in glob.glob("*.filter"): checkpoints = checkpoints.union(set(split_log(l)))
- errors=[]
- for c in checkpoints:
- fragments = glob.glob("*.filter.%s"%(c))
- fragments.sort(reverse=True, key=os.path.getsize)
- while len(fragments) >= 2:
- a = fragments.pop(0)
- b = fragments[0]
- for ab in izip(open(a), open(b)):
- if ab[0] != ab[1]:
- errors.append("\n %s %s"%(a, b))
- break
- if errors:
- raise Exception("Files differ in %s"%(os.getcwd())+"".join(errors))
-
-# Can be run as a script.
-if __name__ == "__main__":
- verify_logs()
diff --git a/cpp/src/tests/cluster_test_scripts/README.txt b/cpp/src/tests/cluster_test_scripts/README.txt
deleted file mode 100644
index e861a2f397..0000000000
--- a/cpp/src/tests/cluster_test_scripts/README.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Cluster test scripts.
-
-A set of scripts to start and stop a cluster and test clients on
-multiple hosts using ssh.
-
-Prerequisites: You must be
- - set up for password-free ssh access to the test hosts.
- - a member of the ais group on all the test hosts.
-
-Configuration:
-
-Copy config_example.sh to config.sh and edit the values as necessary.
-
-Test scripts:
-
-Test scripts use the functions in functions.sh to start and monitor
-the cluster and clients.
-A test script can call other scripts.
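-
-Example (once config.sh is set up):
-
-  ./cluster_start    # start brokers on the hosts in BROKER_HOSTS
-  ./cluster_check    # verify all cluster members are running
-  ./perftest 4 4     # run 4 publishers and 4 subscribers
-  ./cluster_stop     # stop all brokers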
-
-
diff --git a/cpp/src/tests/cluster_test_scripts/cluster_check b/cpp/src/tests/cluster_test_scripts/cluster_check
deleted file mode 100755
index 05fcc1bcd2..0000000000
--- a/cpp/src/tests/cluster_test_scripts/cluster_check
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Check that all members of a cluster are running
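-#
-# Asks each broker recorded by cluster_start whether it is still running
-# ("qpidd --check --port PORT", abbreviated -cp); exits non-zero if any is not.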
-
-source config.sh
-
-ret=0
-HOSTS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-for ((i=0; i<${#HOSTS[*]}; ++i)); do
- host=${HOSTS[$i]}
- port=${PORTS[$i]}
- ssh $host "$QPIDD -cp $port" > /dev/null || {
- ret=1
- echo "ERROR: broker not running $host:$port"
- }
-done
-exit $ret
diff --git a/cpp/src/tests/cluster_test_scripts/cluster_start b/cpp/src/tests/cluster_test_scripts/cluster_start
deleted file mode 100755
index 8911358f7e..0000000000
--- a/cpp/src/tests/cluster_test_scripts/cluster_start
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Start a cluster
-#
-# Brokers are started on the hosts listed in BROKER_HOSTS (see
-# config.sh); repeat a host name to run multiple brokers on one host.
-# A cluster name is generated from the user name and the current time,
-# and brokers use dynamic ports.
-#
-# Log files, data directories and the hosts/ports files are all stored
-# under $CLUSTER_HOME.
-#
-
-source config.sh
-
-CLUSTER_NAME=`date +"${USER}_%F_%T"`
-HOSTS=($BROKER_HOSTS)
-for ((i = 0; i < ${#HOSTS[*]}; ++i)) ; do
- host=${HOSTS[$i]}
- datadir=$CLUSTER_HOME/broker$i
- log=$datadir/qpidd.log
- ssh $host "rm -rf $datadir; mkdir -p $datadir" || {
- echo "ERROR: can't make data dir $datadir"; exit 1
- }
- port=`ssh $host "echo $QPIDD -dp0 --cluster-name=$CLUSTER_NAME \
- --data-dir=$datadir \
- --log-to-file=$log --log-prefix=broker$i \
- $QPIDD_OPTS | newgrp ais"` || {
- error "ERROR: can't start broker $i on $host"; exit 1;
- }
- PORTS="$PORTS $port"
-done
-
-echo "$BROKER_HOSTS" > $CLUSTER_HOME/hosts
-echo "$PORTS" > $CLUSTER_HOME/ports
-
-`dirname $0`/cluster_check
diff --git a/cpp/src/tests/cluster_test_scripts/cluster_stop b/cpp/src/tests/cluster_test_scripts/cluster_stop
deleted file mode 100755
index 09aa8f3b21..0000000000
--- a/cpp/src/tests/cluster_test_scripts/cluster_stop
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Stop the cluster.
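-#
-# Tells each broker recorded by cluster_start to shut down
-# ("qpidd --quit --port PORT", abbreviated -qp).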
-
-source config.sh
-
-ret=0
-HOSTS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-for ((i=0; i<${#HOSTS[*]}; ++i)); do
- host=${HOSTS[$i]}
- port=${PORTS[$i]}
- ssh $host "$QPIDD -qp $port" > /dev/null || {
- ret=1
- echo "ERROR: stopping broker at $host:$port"
- }
-done
-
-exit $ret
diff --git a/cpp/src/tests/cluster_test_scripts/config_example.sh b/cpp/src/tests/cluster_test_scripts/config_example.sh
deleted file mode 100755
index d47c9a9c77..0000000000
--- a/cpp/src/tests/cluster_test_scripts/config_example.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-# Cluster configuration.
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# All output stored under $HOME/$CLUSTER_HOME.
-CLUSTER_HOME=$HOME/cluster_test
-
-# Hosts where brokers will be run. Repeat hostname to run multiple brokers on 1 host.
-BROKER_HOSTS="mrg22 mrg23 mrg24 mrg25 mrg26"
-
-# Hosts where clients will be run.
-CLIENT_HOSTS="$BROKER_HOSTS"
-
-# Paths to executables
-QPIDD=qpidd
-PERFTEST=perftest
-
-# Directory containing tests
-TESTDIR=/usr/bin
-
-# Options for qpidd, must be sufficient to load the cluster plugin.
-# Scripts will add --cluster-name, --daemon, --port and --log-to-file options here.
-QPIDD_OPTS=" \
---auth=no \
---log-enable=notice+ \
---log-enable=debug+:cluster \
-"
diff --git a/cpp/src/tests/cluster_test_scripts/perftest b/cpp/src/tests/cluster_test_scripts/perftest
deleted file mode 100755
index 984761eb5f..0000000000
--- a/cpp/src/tests/cluster_test_scripts/perftest
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run a distributed perftest against a cluster.
-# Args: npubs nsubs [perftest-options]
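-#
-# For example, "perftest 4 4 --count 10000" runs 4 publishers and 4
-# subscribers; clients and brokers are assigned round-robin from
-# CLIENT_HOSTS and the cluster's hosts/ports files.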
-
-source config.sh
-
-NPUBS=${1:-4} ; shift
-NSUBS=${1:-4} ; shift
-OPTS="--npubs $NPUBS --nsubs $NSUBS $*"
-
-CLIENTS=($CLIENT_HOSTS)
-BROKERS=(`cat $CLUSTER_HOME/hosts`)
-PORTS=(`cat $CLUSTER_HOME/ports`)
-
-start() {
- client=${CLIENTS[i % ${#CLIENTS[*]}]}
- broker=${BROKERS[i % ${#BROKERS[*]}]}
- port=${PORTS[i % ${#PORTS[*]}]}
- ssh -n $client $PERFTEST $OPTS $* -b $broker -p $port &
- PIDS="$PIDS $!"
-}
-
-ssh ${CLIENTS[0]} $PERFTEST $OPTS --setup -b ${BROKERS[0]} -p${PORTS[0]}
-for (( i=0 ; i < $NPUBS ; ++i)); do start --publish; done
-for (( ; i < $NPUBS+$NSUBS ; ++i)); do start --subscribe; done
-ssh ${CLIENTS[0]} $PERFTEST $OPTS --control -b ${BROKERS[0]} -p${PORTS[0]}
-
-for pid in $PIDS; do
- wait $pid || echo "ERROR: client process $pid failed"
-done
-
-`dirname $0`/cluster_check
-
-
diff --git a/cpp/src/tests/cluster_tests.fail b/cpp/src/tests/cluster_tests.fail
deleted file mode 100644
index b28b04f643..0000000000
--- a/cpp/src/tests/cluster_tests.fail
+++ /dev/null
@@ -1,3 +0,0 @@
-
-
-
diff --git a/cpp/src/tests/cluster_tests.py b/cpp/src/tests/cluster_tests.py
deleted file mode 100755
index 3c96b252df..0000000000
--- a/cpp/src/tests/cluster_tests.py
+++ /dev/null
@@ -1,1834 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, signal, sys, time, imp, re, subprocess, glob, random, logging
-import cluster_test_logs
-from qpid import datatypes, messaging
-from brokertest import *
-from qpid.harness import Skipped
-from qpid.messaging import Message, Empty, Disposition, REJECTED, util
-from threading import Thread, Lock, Condition
-from logging import getLogger
-from itertools import chain
-from tempfile import NamedTemporaryFile
-
-log = getLogger("qpid.cluster_tests")
-
-# Note: brokers that shut themselves down due to critical error during
-# normal operation will still have an exit code of 0. Brokers that
-# shut down because of an error found during initialization will exit with
-# a non-0 code. Hence the apparently inconsistent use of EXPECT_EXIT_OK
-# and EXPECT_EXIT_FAIL in some of the tests below.
-
-# TODO aconway 2010-03-11: resolve this - ideally any exit due to an error
-# should give non-0 exit status.
-
-# Import scripts as modules
-qpid_cluster=import_script(checkenv("QPID_CLUSTER_EXEC"))
-
-def readfile(filename):
- """Returns te content of file named filename as a string"""
- f = file(filename)
- try: return f.read()
- finally: f.close()
-
-class ShortTests(BrokerTest):
- """Short cluster functionality tests."""
-
- def test_message_replication(self):
- """Test basic cluster message replication."""
- # Start a cluster, send some messages to member 0.
- cluster = self.cluster(2)
- s0 = cluster[0].connect().session()
- s0.sender("q; {create:always}").send(Message("x"))
- s0.sender("q; {create:always}").send(Message("y"))
- s0.connection.close()
-
- # Verify messages available on member 1.
- s1 = cluster[1].connect().session()
- m = s1.receiver("q", capacity=1).fetch(timeout=1)
- s1.acknowledge()
- self.assertEqual("x", m.content)
- s1.connection.close()
-
- # Start member 2 and verify messages available.
- s2 = cluster.start().connect().session()
- m = s2.receiver("q", capacity=1).fetch(timeout=1)
- s2.acknowledge()
- self.assertEqual("y", m.content)
- s2.connection.close()
-
- def test_store_direct_update_match(self):
- """Verify that brokers stores an identical message whether they receive it
- direct from clients or during an update, no header or other differences"""
- cluster = self.cluster(0, args=["--load-module", self.test_store_lib])
- cluster.start(args=["--test-store-dump", "direct.dump"])
- # Try messages with various headers
- cluster[0].send_message("q", Message(durable=True, content="foobar",
- subject="subject",
- reply_to="reply_to",
- properties={"n":10}))
- # Try messages of different sizes
- for size in range(0,10000,100):
- cluster[0].send_message("q", Message(content="x"*size, durable=True))
- # Try sending via named exchange
- c = cluster[0].connect_old()
- s = c.session(str(qpid.datatypes.uuid4()))
- s.exchange_bind(exchange="amq.direct", binding_key="foo", queue="q")
- props = s.delivery_properties(routing_key="foo", delivery_mode=2)
- s.message_transfer(
- destination="amq.direct",
- message=qpid.datatypes.Message(props, "content"))
-
-        # Try messages with TTL and different headers/properties
- cluster[0].send_message("q", Message(durable=True, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={}, ttl=100000))
- cluster[0].send_message("q", Message(durable=True, properties={"x":10}, ttl=100000))
-
- # Now update a new member and compare their dumps.
- cluster.start(args=["--test-store-dump", "updatee.dump"])
- assert readfile("direct.dump") == readfile("updatee.dump")
-
- os.remove("direct.dump")
- os.remove("updatee.dump")
-
- def test_sasl(self):
- """Test SASL authentication and encryption in a cluster"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- acl=os.path.join(os.getcwd(), "policy.acl")
- aclf=file(acl,"w")
- # Must allow cluster-user (zag) access to credentials exchange.
- aclf.write("""
-acl allow zag@QPID publish exchange name=qpid.cluster-credentials
-acl allow zig@QPID all all
-acl deny all all
-""")
- aclf.close()
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--load-module", os.getenv("ACL_LIB"),
- "--acl-file", acl,
- "--cluster-username=zag",
- "--cluster-password=zag",
- "--cluster-mechanism=PLAIN"
- ])
-
- # Valid user/password, ensure queue is created.
- c = cluster[0].connect(username="zig", password="zig")
- c.session().sender("ziggy;{create:always,node:{x-declare:{exclusive:true}}}")
- c.close()
- cluster.start() # Start second node.
-
- # Check queue is created on second node.
- c = cluster[1].connect(username="zig", password="zig")
- c.session().receiver("ziggy;{assert:always}")
- c.close()
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Valid user, bad password
- try:
- cluster[0].connect(username="zig", password="foo").close()
- self.fail("Expected exception")
- except messaging.exceptions.ConnectionError: pass
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Bad user ID
- try:
- cluster[0].connect(username="foo", password="bar").close()
- self.fail("Expected exception")
- except messaging.exceptions.ConnectionError: pass
- for b in cluster: b.ready() # Make sure all brokers still running.
-
- # Action disallowed by ACL
- c = cluster[0].connect(username="zag", password="zag")
- try:
- s = c.session()
- s.sender("zaggy;{create:always}")
- s.close()
- self.fail("Expected exception")
- except messaging.exceptions.UnauthorizedAccess: pass
- # make sure the queue was not created at the other node.
- c = cluster[1].connect(username="zig", password="zig")
- try:
- s = c.session()
- s.sender("zaggy;{assert:always}")
- s.close()
- self.fail("Expected exception")
- except messaging.exceptions.NotFound: pass
-
- def test_sasl_join_good(self):
- """Verify SASL authentication between brokers when joining a cluster."""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- # Test with a valid username/password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", sasl_config,
- "--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=PLAIN"
- ])
- cluster.start()
- c = cluster[1].connect(username="zag", password="zag", mechanism="PLAIN")
-
- def test_sasl_join_bad_password(self):
- # Test with an invalid password
- cluster = self.cluster(1, args=["--auth", "yes",
- "--sasl-config", os.path.join(self.rootdir, "sasl_config"),
- "--cluster-username=zig",
- "--cluster-password=bad",
- "--cluster-mechanism=PLAIN"
- ])
- cluster.start(wait=False, expect=EXPECT_EXIT_FAIL)
- assert cluster[1].log_contains("critical Unexpected error: connection-forced: Authentication failed")
-
- def test_sasl_join_wrong_user(self):
- # Test with a valid user that is not the cluster user.
- cluster = self.cluster(0, args=["--auth", "yes",
- "--sasl-config", os.path.join(self.rootdir, "sasl_config")])
- cluster.start(args=["--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=PLAIN"
- ])
-
- cluster.start(wait=False, expect=EXPECT_EXIT_FAIL,
- args=["--cluster-username=zag",
- "--cluster-password=zag",
- "--cluster-mechanism=PLAIN"
- ])
- assert cluster[1].log_contains("critical Unexpected error: unauthorized-access: unauthorized-access: Unauthorized user zag@QPID for qpid.cluster-credentials, should be zig")
-
- def test_user_id_update(self):
- """Ensure that user-id of an open session is updated to new cluster members"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- cluster = self.cluster(1, args=["--auth", "yes", "--sasl-config", sasl_config,
- "--cluster-mechanism=ANONYMOUS"])
- c = cluster[0].connect(username="zig", password="zig")
- s = c.session().sender("q;{create:always}")
- s.send(Message("x", user_id="zig")) # Message sent before start new broker
- cluster.start()
- s.send(Message("y", user_id="zig")) # Messsage sent after start of new broker
- # Verify brokers are healthy and messages are on the queue.
- self.assertEqual("x", cluster[0].get_message("q").content)
- self.assertEqual("y", cluster[1].get_message("q").content)
-
- def test_other_mech(self):
- """Test using a mechanism other than PLAIN/ANONYMOUS for cluster update authentication.
- Regression test for https://issues.apache.org/jira/browse/QPID-3849"""
- sasl_config=os.path.join(self.rootdir, "sasl_config")
- cluster = self.cluster(2, args=["--auth", "yes", "--sasl-config", sasl_config,
- "--cluster-username=zig",
- "--cluster-password=zig",
- "--cluster-mechanism=DIGEST-MD5"])
- cluster[0].connect()
-        cluster.start() # Before the fix this broker failed to join the cluster.
- cluster[2].connect()
-
- def test_link_events(self):
- """Regression test for https://bugzilla.redhat.com/show_bug.cgi?id=611543"""
- args = ["--mgmt-pub-interval", 1] # Publish management information every second.
- broker1 = self.cluster(1, args)[0]
- broker2 = self.cluster(1, args)[0]
- qp = self.popen(["qpid-printevents", broker1.host_port()], EXPECT_RUNNING)
- qr = self.popen(["qpid-route", "route", "add",
- broker1.host_port(), broker2.host_port(),
- "amq.fanout", "key"
- ], EXPECT_EXIT_OK)
- # Look for link event in printevents output.
- retry(lambda: find_in_file("brokerLinkUp", qp.outfile("out")))
- broker1.ready()
- broker2.ready()
- qr.wait()
-
- def test_queue_cleaner(self):
- """ Regression test to ensure that cleanup of expired messages works correctly """
- cluster = self.cluster(2, args=["--queue-purge-interval", 3])
-
- s0 = cluster[0].connect().session()
- sender = s0.sender("my-lvq; {create: always, node:{x-declare:{arguments:{'qpid.last_value_queue':1}}}}")
-        #send 9 messages that will all expire and be cleaned up
- for i in range(1, 10):
- msg = Message("message-%s" % i)
- msg.properties["qpid.LVQ_key"] = "a"
- msg.ttl = 0.1
- sender.send(msg)
- #wait for queue cleaner to run
- time.sleep(3)
-
- #test all is ok by sending and receiving a message
- msg = Message("non-expiring")
- msg.properties["qpid.LVQ_key"] = "b"
- sender.send(msg)
- s0.connection.close()
- s1 = cluster[1].connect().session()
- m = s1.receiver("my-lvq", capacity=1).fetch(timeout=1)
- s1.acknowledge()
- self.assertEqual("non-expiring", m.content)
- s1.connection.close()
-
- for b in cluster: b.ready() # Make sure all brokers still running.
-
-
- def test_amqfailover_visible(self):
- """Verify that the amq.failover exchange can be seen by
- QMF-based tools - regression test for BZ615300."""
- broker1 = self.cluster(1)[0]
- broker2 = self.cluster(1)[0]
- qs = subprocess.Popen(["qpid-stat", "-e", "-b", broker1.host_port()], stdout=subprocess.PIPE)
- out = qs.communicate()[0]
- assert out.find("amq.failover") > 0
-
- def evaluate_address(self, session, address):
- """Create a receiver just to evaluate an address for its side effects"""
- r = session.receiver(address)
- r.close()
-
- def test_expire_fanout(self):
- """Regression test for QPID-2874: Clustered broker crashes in assertion in
- cluster/ExpiryPolicy.cpp.
- Caused by a fan-out message being updated as separate messages"""
- cluster = self.cluster(1)
- session0 = cluster[0].connect().session()
- # Create 2 queues bound to fanout exchange.
- self.evaluate_address(session0, "q1;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:q1}]}}")
- self.evaluate_address(session0, "q2;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:q2}]}}")
- queues = ["q1", "q2"]
- # Send a fanout message with a long timeout
- s = session0.sender("amq.fanout")
- s.send(Message("foo", ttl=100), sync=False)
- # Start a new member, check the messages
- cluster.start()
- session1 = cluster[1].connect().session()
-        for q in queues: self.assert_browse(session1, q, ["foo"])
-
- def test_route_update(self):
- """Regression test for https://issues.apache.org/jira/browse/QPID-2982
- Links and bridges associated with routes were not replicated on update.
- This meant extra management objects and caused an exit if a management
- client was attached.
- """
- args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
-
- # First broker will be killed.
- cluster0 = self.cluster(1, args=args)
- cluster1 = self.cluster(1, args=args)
- assert 0 == subprocess.call(
- ["qpid-route", "route", "add", cluster0[0].host_port(),
- cluster1[0].host_port(), "dummy-exchange", "dummy-key", "-d"])
- cluster0.start()
-
- # Wait for qpid-tool:list on cluster0[0] to generate expected output.
- pattern = re.compile("org.apache.qpid.broker.*link")
- qpid_tool = subprocess.Popen(["qpid-tool", cluster0[0].host_port()],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE)
- class Scanner(Thread):
- def __init__(self): self.found = False; Thread.__init__(self)
- def run(self):
- for l in qpid_tool.stdout:
- if pattern.search(l): self.found = True; return
- scanner = Scanner()
- scanner.start()
- start = time.time()
- try:
- # Wait up to 5 second timeout for scanner to find expected output
- while not scanner.found and time.time() < start + 5:
- qpid_tool.stdin.write("list\n") # Ask qpid-tool to list
- for b in cluster0: b.ready() # Raise if any brokers are down
- finally:
- qpid_tool.stdin.write("quit\n")
- qpid_tool.wait()
- scanner.join()
- assert scanner.found
- # Regression test for https://issues.apache.org/jira/browse/QPID-3235
- # Inconsistent stats when changing elder.
-
- # Force a change of elder
- cluster0.start()
- for b in cluster0: b.ready()
- cluster0[0].expect=EXPECT_EXIT_FAIL # About to die.
- cluster0[0].kill()
- time.sleep(2) # Allow a management interval to pass.
- for b in cluster0[1:]: b.ready()
- # Verify logs are consistent
- cluster_test_logs.verify_logs()
-
- def test_redelivered(self):
- """Verify that redelivered flag is set correctly on replayed messages"""
- cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- url = "amqp:tcp:%s,tcp:%s" % (cluster[0].host_port(), cluster[1].host_port())
- queue = "my-queue"
- cluster[0].declare_queue(queue)
- self.sender = self.popen(
- ["qpid-send",
- "--broker", url,
- "--address", queue,
- "--sequence=true",
- "--send-eos=1",
- "--messages=100000",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS)
- ])
- self.receiver = self.popen(
- ["qpid-receive",
- "--broker", url,
- "--address", queue,
- "--ignore-duplicates",
- "--check-redelivered",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--forever"
- ])
-        time.sleep(1)  # give the sender enough time to have some messages to replay
- cluster[0].kill()
- self.sender.wait()
- self.receiver.wait()
- cluster[1].kill()
-
- class BlockedSend(Thread):
- """Send a message, send is expected to block.
- Verify that it does block (for a given timeout), then allow
- waiting till it unblocks when it is expected to do so."""
- def __init__(self, sender, msg):
- self.sender, self.msg = sender, msg
- self.blocked = True
- self.condition = Condition()
- self.timeout = 0.1 # Time to wait for expected results.
- Thread.__init__(self)
- def run(self):
- try:
- self.sender.send(self.msg, sync=True)
- self.condition.acquire()
- try:
- self.blocked = False
- self.condition.notify()
- finally: self.condition.release()
- except Exception,e: print "BlockedSend exception: %s"%e
- def start(self):
- Thread.start(self)
- time.sleep(self.timeout)
- assert self.blocked # Expected to block
- def assert_blocked(self): assert self.blocked
- def wait(self): # Now expecting to unblock
- self.condition.acquire()
- try:
- while self.blocked:
- self.condition.wait(self.timeout)
- if self.blocked: raise Exception("Timed out waiting for send to unblock")
- finally: self.condition.release()
- self.join()
-
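-    # Typical use (see queue_flowlimit_test below): construct with a sender
-    # and message, start() asserts the send blocks, then wait() confirms it
-    # unblocks once the queue drains.
-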
- def queue_flowlimit_test(self, brokers):
- """Verify that the queue's flowlimit configuration and state are
- correctly replicated.
-        The brokers argument allows this test to run on a single broker, a
-        cluster of 2 pre-started brokers, or a cluster where the second
-        broker starts after the queue is in flow control.
- """
- # configure a queue with a specific flow limit on first broker
- ssn0 = brokers.first().connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- brokers.first().startQmf()
- q1 = [q for q in brokers.first().qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
-        # Now verify that both brokers in the cluster have the same configuration
- brokers.second().startQmf()
- qs = brokers.second().qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now drain the queue using a session to the other broker
- ssn1 = brokers.second().connect().session()
- r1 = ssn1.receiver("flq", capacity=6)
- for x in range(4):
- r1.fetch(timeout=0)
- ssn1.acknowledge()
- sender.wait() # Verify no longer blocked.
-
- # and re-verify state of queue on both brokers
- q1.update()
- assert not q1.flowStopped
- q2.update()
- assert not q2.flowStopped
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
- def test_queue_flowlimit(self):
- """Test flow limits on a standalone broker"""
- broker = self.broker()
- class Brokers:
- def first(self): return broker
- def second(self): return broker
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster(self):
- cluster = self.cluster(2)
- class Brokers:
- def first(self): return cluster[0]
- def second(self): return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_cluster_join(self):
- cluster = self.cluster(1)
- class Brokers:
- def first(self): return cluster[0]
- def second(self):
- if len(cluster) == 1: cluster.start()
- return cluster[1]
- self.queue_flowlimit_test(Brokers())
-
- def test_queue_flowlimit_replicate(self):
-        """ Verify that a queue which is in flow control BUT has drained BELOW
-        the flow control 'stop' threshold is correctly replicated when a new
-        broker is added to the cluster.
-        """
-
- class AsyncSender(Thread):
- """Send a fixed number of msgs from a sender in a separate thread
- so it may block without blocking the test.
- """
- def __init__(self, broker, address, count=1, size=4):
- Thread.__init__(self)
- self.daemon = True
- self.broker = broker
- self.queue = address
- self.count = count
- self.size = size
- self.done = False
-
- def run(self):
- self.sender = subprocess.Popen(["qpid-send",
- "--capacity=1",
- "--content-size=%s" % self.size,
- "--messages=%s" % self.count,
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--address=%s" % self.queue,
- "--broker=%s" % self.broker.host_port()])
- self.sender.wait()
- self.done = True
-
- cluster = self.cluster(2)
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':100, 'qpid.flow_resume_count':20}}}}")
-
- # fire off the sending thread to broker[0], and wait until the queue
- # hits flow control on broker[1]
- sender = AsyncSender(cluster[0], "flq", count=110);
- sender.start();
-
- cluster[1].startQmf()
- q_obj = [q for q in cluster[1].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- deadline = time.time() + 10
- while not q_obj.flowStopped and time.time() < deadline:
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- assert q_obj.msgDepth < 110
-
- # Now drain enough messages on broker[1] to drop below the flow stop
- # threshold, but not relieve flow control...
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=15",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[1].host_port()])
- receiver.wait()
- q_obj.update()
- assert q_obj.flowStopped
- assert not sender.done
- current_depth = q_obj.msgDepth
-
- # add a new broker to the cluster, and verify that the queue is in flow
- # control on that broker
- cluster.start()
- cluster[2].startQmf()
- q_obj = [q for q in cluster[2].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- assert q_obj.flowStopped
- assert q_obj.msgDepth == current_depth
-
- # now drain the queue on broker[2], and verify that the sender becomes
- # unblocked
- receiver = subprocess.Popen(["qpid-receive",
- "--messages=95",
- "--timeout=1",
- "--print-content=no",
- "--failover-updates",
- "--connection-options={%s}"%(Cluster.CONNECTION_OPTIONS),
- "--ack-frequency=1",
- "--address=flq",
- "--broker=%s" % cluster[2].host_port()])
- receiver.wait()
- q_obj.update()
- assert not q_obj.flowStopped
- self.assertEqual(q_obj.msgDepth, 0)
-
- # verify that the sender has become unblocked
- sender.join(timeout=5)
- assert not sender.isAlive()
- assert sender.done
-
- def test_blocked_queue_delete(self):
- """Verify that producers which are blocked on a queue due to flow
- control are unblocked when that queue is deleted.
- """
-
- cluster = self.cluster(2)
- cluster[0].startQmf()
- cluster[1].startQmf()
-
- # configure a queue with a specific flow limit on first broker
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("flq; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':5, 'qpid.flow_resume_count':3}}}}")
- q1 = [q for q in cluster[0].qmf_session.getObjects(_class="queue") if q.name == "flq"][0]
- oid = q1.getObjectId()
- self.assertEqual(q1.name, "flq")
- self.assertEqual(q1.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert not q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 0)
-
- # fill the queue on one broker until flow control is active
- for x in range(5): s0.send(Message(str(x)))
- sender = ShortTests.BlockedSend(s0, Message(str(6)))
- sender.start() # Tests that sender does block
- # Verify the broker queue goes into a flowStopped state
- deadline = time.time() + 1
- while not q1.flowStopped and time.time() < deadline: q1.update()
- assert q1.flowStopped
- self.assertEqual(q1.flowStoppedCount, 1)
- sender.assert_blocked() # Still blocked
-
-        # Now verify that both brokers in the cluster have the same configuration
- qs = cluster[1].qmf_session.getObjects(_objectId=oid)
- self.assertEqual(len(qs), 1)
- q2 = qs[0]
- self.assertEqual(q2.name, "flq")
- self.assertEqual(q2.arguments, {u'qpid.flow_stop_count': 5L, u'qpid.flow_resume_count': 3L})
- assert q2.flowStopped
- self.assertEqual(q2.flowStoppedCount, 1)
-
- # now delete the blocked queue from other broker
- ssn1 = cluster[1].connect().session()
- self.evaluate_address(ssn1, "flq;{delete:always}")
- sender.wait() # Verify no longer blocked.
-
- ssn0.connection.close()
- ssn1.connection.close()
- cluster_test_logs.verify_logs()
-
-
- def test_alternate_exchange_update(self):
- """Verify that alternate-exchange on exchanges and queues is propagated to new members of a cluster. """
- cluster = self.cluster(1)
- s0 = cluster[0].connect().session()
- # create alt queue bound to amq.fanout exchange, will be destination for alternate exchanges
- self.evaluate_address(s0, "alt;{create:always,node:{x-bindings:[{exchange:'amq.fanout',queue:alt}]}}")
- # create direct exchange ex with alternate-exchange amq.fanout and no queues bound
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'amq.fanout'}}}")
- # create queue q with alternate-exchange amq.fanout
- self.evaluate_address(s0, "q;{create:always,node:{type:queue, x-declare:{alternate-exchange:'amq.fanout'}}}")
-
- def verify(broker):
- s = broker.connect().session()
- # Verify unmatched message goes to ex's alternate.
- s.sender("ex").send("foo")
- self.assertEqual("foo", s.receiver("alt").fetch(timeout=0).content)
- # Verify rejected message goes to q's alternate.
- s.sender("q").send("bar")
- msg = s.receiver("q").fetch(timeout=0)
- self.assertEqual("bar", msg.content)
- s.acknowledge(msg, Disposition(REJECTED)) # Reject the message
- self.assertEqual("bar", s.receiver("alt").fetch(timeout=0).content)
-
- verify(cluster[0])
- cluster.start()
- verify(cluster[1])
-
- def test_binding_order(self):
- """Regression test for binding order inconsistency in cluster"""
- cluster = self.cluster(1)
- c0 = cluster[0].connect()
- s0 = c0.session()
- # Declare multiple queues bound to same key on amq.topic
- def declare(q,max=0):
- if max: declare = 'x-declare:{arguments:{"qpid.max_count":%d, "qpid.flow_stop_count":0}}'%max
- else: declare = 'x-declare:{}'
- bind='x-bindings:[{queue:%s,key:key,exchange:"amq.topic"}]'%(q)
- s0.sender("%s;{create:always,node:{%s,%s}}" % (q,declare,bind))
- declare('d',max=4) # Only one with a limit
- for q in ['c', 'b','a']: declare(q)
- # Add a cluster member, send enough messages to exceed the max count
- cluster.start()
- try:
- s = s0.sender('amq.topic/key')
- for m in xrange(1,6): s.send(Message(str(m)))
- self.fail("Expected capacity exceeded exception")
- except messaging.exceptions.TargetCapacityExceeded: pass
- c1 = cluster[1].connect()
- s1 = c1.session()
- s0 = c0.session() # Old session s0 is broken by exception.
- # Verify queue contents are consistent.
- for q in ['a','b','c','d']:
- self.assertEqual(self.browse(s0, q), self.browse(s1, q))
- # Verify queue contents are "best effort"
- for q in ['a','b','c']: self.assert_browse(s1,q,[str(n) for n in xrange(1,6)])
- self.assert_browse(s1,'d',[str(n) for n in xrange(1,5)])
-
- def test_deleted_exchange(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
- Verify stand-alone case
- """
- cluster = self.cluster()
- # Verify we do not route message via an exchange that has been destroyed.
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar") # Should fail, exchange is deleted.
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
- self.assert_browse(cluster[0].connect().session(), "q", ["foo"])
-
- def test_deleted_exchange_inconsistent(self):
- """QPID-3215: cached exchange reference can cause cluster inconsistencies
- if exchange is deleted/recreated
-
- Verify cluster inconsistency.
- """
- cluster = self.cluster()
- cluster.start()
- s0 = cluster[0].connect().session()
- self.evaluate_address(s0, "ex;{create:always,node:{type:topic}}")
- self.evaluate_address(s0, "q;{create:always,node:{x-bindings:[{exchange:'ex',queue:q,key:foo}]}}")
- send0 = s0.sender("ex/foo")
- send0.send("foo")
- self.assert_browse(s0, "q", ["foo"])
-
- cluster.start()
- s1 = cluster[1].connect().session()
- self.evaluate_address(s0, "ex;{delete:always}")
- try:
- send0.send("bar")
- self.fail("Expected not-found exception")
- except qpid.messaging.NotFound: pass
-
- self.assert_browse(s1, "q", ["foo"])
-
-
- def test_ttl_consistent(self):
-        """Ensure we don't get inconsistent errors with messages that have TTLs very close together"""
- messages = [ Message(str(i), ttl=i/1000.0) for i in xrange(0,1000)]
- messages.append(Message("x"))
- cluster = self.cluster(2)
- sender = cluster[0].connect().session().sender("q;{create:always}")
-
- def fetch(b):
- receiver = b.connect().session().receiver("q;{create:always}")
- while receiver.fetch().content != "x": pass
-
- for m in messages: sender.send(m, sync=False)
- for m in messages: sender.send(m, sync=False)
- fetch(cluster[0])
- fetch(cluster[1])
- for m in messages: sender.send(m, sync=False)
- cluster.start()
- fetch(cluster[2])
-
-
- def _verify_federation(self, src_broker, src, dst_broker, dst, timeout=30):
- """ Prove that traffic can pass between two federated brokers.
- """
- tot_time = 0
- active = False
- send_session = src_broker.connect().session()
- sender = send_session.sender(src)
- receive_session = dst_broker.connect().session()
- receiver = receive_session.receiver(dst)
- while not active and tot_time < timeout:
- sender.send(Message("Hello from Source!"))
- try:
- receiver.fetch(timeout = 1)
- receive_session.acknowledge()
- # Get this far without Empty exception, and the link is good!
- active = True
- while True:
- # Keep receiving msgs, as several may have accumulated
- receiver.fetch(timeout = 1)
- receive_session.acknowledge()
- except Empty:
- if not active:
- tot_time += 1
- receiver.close()
- receive_session.close()
- sender.close()
- send_session.close()
- return active
-
- def test_federation_failover(self):
- """
-        Verify that federation operates across failures occurring in a cluster.
- Specifically:
- 1) Destination cluster learns of membership changes in the source
- cluster
- 2) Destination cluster replicates the current state of the source
- cluster to newly-added members
- """
-
- # 2 node cluster source, 2 node cluster destination
- src_cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- src_cluster.ready();
- dst_cluster = self.cluster(2, expect=EXPECT_EXIT_FAIL)
- dst_cluster.ready();
-
- cmd = self.popen(["qpid-config",
- "--broker", src_cluster[0].host_port(),
- "add", "queue", "srcQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "exchange", "fanout", "destX"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "destX", "destQ"], EXPECT_EXIT_OK)
- cmd.wait()
-
- # federate the srcQ to the destination exchange
- dst_cluster[0].startQmf()
- dst_broker = dst_cluster[0].qmf_session.getObjects(_class="broker")[0]
- result = dst_broker.connect(src_cluster[0].host(), src_cluster[0].port(), False, "PLAIN",
- "guest", "guest", "tcp")
- self.assertEqual(result.status, 0, result);
-
- link = dst_cluster[0].qmf_session.getObjects(_class="link")[0]
- result = link.bridge(False, "srcQ", "destX", "", "", "", True, False, False, 10)
- self.assertEqual(result.status, 0, result)
-
- # check that traffic passes
- assert self._verify_federation(src_cluster[0], "srcQ", dst_cluster[0], "destQ")
-
- # add src[2] broker to source cluster
- src_cluster.start(expect=EXPECT_EXIT_FAIL);
- src_cluster.ready();
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[0], "destQ")
-
- # Kill src[0]. dst[0] should fail over to src[1]
- src_cluster[0].kill()
- for b in src_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[1], "srcQ", dst_cluster[0], "destQ")
-
- # Kill src[1], dst[0] should fail over to src[2]
- src_cluster[1].kill()
- for b in src_cluster[2:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[0], "destQ")
-
- # Kill dest[0], force failover to dest[1]
- dst_cluster[0].kill()
- for b in dst_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[1], "destQ")
-
- # Add dest[2]
- # dest[1] syncs dest[2] to current remote state
- dst_cluster.start(expect=EXPECT_EXIT_FAIL);
- for b in dst_cluster[1:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[1], "destQ")
-
- # Kill dest[1], force failover to dest[2]
- dst_cluster[1].kill()
- for b in dst_cluster[2:]: b.ready()
- assert self._verify_federation(src_cluster[2], "srcQ", dst_cluster[2], "destQ")
-
- for i in range(2, len(src_cluster)): src_cluster[i].kill()
- for i in range(2, len(dst_cluster)): dst_cluster[i].kill()
-
-
- def test_federation_multilink_failover(self):
- """
-        Verify that multi-link federation operates across failures occurring in
- a cluster.
- """
-
- # 1 node cluster source, 1 node cluster destination
- src_cluster = self.cluster(1, expect=EXPECT_EXIT_FAIL)
- src_cluster.ready();
- dst_cluster = self.cluster(1, expect=EXPECT_EXIT_FAIL)
- dst_cluster.ready();
-
- # federate a direct binding across two separate links
-
- # first, create a direct exchange bound to two queues using different
- # bindings
- cmd = self.popen(["qpid-config",
- "--broker", src_cluster[0].host_port(),
- "add", "exchange", "direct", "FedX"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "exchange", "direct", "FedX"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ1"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "FedX", "destQ1", "one"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "add", "queue", "destQ2"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- cmd = self.popen(["qpid-config",
- "--broker", dst_cluster[0].host_port(),
- "bind", "FedX", "destQ2", "two"],
- EXPECT_EXIT_OK)
- cmd.wait()
-
- # Create two separate links between the dst and source brokers, bind
- # each to different keys
- dst_cluster[0].startQmf()
- dst_broker = dst_cluster[0].qmf_session.getObjects(_class="broker")[0]
-
- for _l in [("link1", "bridge1", "one"),
- ("link2", "bridge2", "two")]:
- result = dst_broker.create("link", _l[0],
- {"host":src_cluster[0].host(),
- "port":src_cluster[0].port()},
- False)
- self.assertEqual(result.status, 0, result);
- result = dst_broker.create("bridge", _l[1],
- {"link":_l[0],
- "src":"FedX",
- "dest":"FedX",
- "key":_l[2]}, False)
- self.assertEqual(result.status, 0);
-
- # check that traffic passes
- assert self._verify_federation(src_cluster[0], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[0], "FedX/two", dst_cluster[0], "destQ2")
-
- # add new member, verify traffic
- src_cluster.start(expect=EXPECT_EXIT_FAIL);
- src_cluster.ready();
-
- dst_cluster.start(expect=EXPECT_EXIT_FAIL);
- dst_cluster.ready();
-
- assert self._verify_federation(src_cluster[0], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[0], "FedX/two", dst_cluster[0], "destQ2")
-
- src_cluster[0].kill()
- for b in src_cluster[1:]: b.ready()
-
- assert self._verify_federation(src_cluster[1], "FedX/one", dst_cluster[0], "destQ1")
- assert self._verify_federation(src_cluster[1], "FedX/two", dst_cluster[0], "destQ2")
-
- dst_cluster[0].kill()
- for b in dst_cluster[1:]: b.ready()
-
- assert self._verify_federation(src_cluster[1], "FedX/one", dst_cluster[1], "destQ1")
- assert self._verify_federation(src_cluster[1], "FedX/two", dst_cluster[1], "destQ2")
-
- for i in range(1, len(src_cluster)): src_cluster[i].kill()
- for i in range(1, len(dst_cluster)): dst_cluster[i].kill()
-
-
-
-# Some utility code for transaction tests
-XA_RBROLLBACK = 1
-XA_RBTIMEOUT = 2
-XA_OK = 0
-dtx_branch_counter = 0
-
-class DtxStatusException(Exception):
- def __init__(self, expect, actual):
- self.expect = expect
- self.actual = actual
-
-    def __str__(self):
-        return "DtxStatusException(expect=%s, actual=%s)"%(self.expect, self.actual)
-
-class DtxTestFixture:
- """Bundle together some common requirements for dtx tests."""
- def __init__(self, test, broker, name, exclusive=False):
- self.test = test
- self.broker = broker
- self.name = name
- # Use old API. DTX is not supported in messaging API.
- self.connection = broker.connect_old()
- self.session = self.connection.session(name, 1) # 1 second timeout
- self.queue = self.session.queue_declare(name, exclusive=exclusive)
- self.session.dtx_select()
- self.consumer = None
-
- def xid(self, id=None):
- if id is None: id = self.name
- return self.session.xid(format=0, global_id=id)
-
- def check_status(self, expect, actual):
- if expect != actual: raise DtxStatusException(expect, actual)
-
- def start(self, id=None, resume=False):
- self.check_status(XA_OK, self.session.dtx_start(xid=self.xid(id), resume=resume).status)
-
- def end(self, id=None, suspend=False):
- self.check_status(XA_OK, self.session.dtx_end(xid=self.xid(id), suspend=suspend).status)
-
- def prepare(self, id=None):
- self.check_status(XA_OK, self.session.dtx_prepare(xid=self.xid(id)).status)
-
- def commit(self, id=None, one_phase=True):
- self.check_status(
- XA_OK, self.session.dtx_commit(xid=self.xid(id), one_phase=one_phase).status)
-
- def rollback(self, id=None):
- self.check_status(XA_OK, self.session.dtx_rollback(xid=self.xid(id)).status)
-
- def set_timeout(self, timeout, id=None):
- self.session.dtx_set_timeout(xid=self.xid(id),timeout=timeout)
-
- def send(self, messages):
- for m in messages:
- dp=self.session.delivery_properties(routing_key=self.name)
- mp=self.session.message_properties()
- self.session.message_transfer(message=qpid.datatypes.Message(dp, mp, m))
-
-    def accept(self):
-        """Accept 1 message from the queue"""
-        consumer_tag="%s-consumer"%(self.name)
-        self.session.message_subscribe(queue=self.name, destination=consumer_tag)
-        # Grant credit for exactly one message (and unlimited bytes) so the
-        # broker delivers a single message to this subscription.
-        self.session.message_flow(unit = self.session.credit_unit.message, value = 1, destination = consumer_tag)
-        self.session.message_flow(unit = self.session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
-        msg = self.session.incoming(consumer_tag).get(timeout=1)
-        # Cancel the subscription, then accept (acknowledge) the message.
-        self.session.message_cancel(destination=consumer_tag)
-        self.session.message_accept(qpid.datatypes.RangedSet(msg.id))
-        return msg
-
-
- def verify(self, sessions, messages):
- for s in sessions:
- self.test.assert_browse(s, self.name, messages)
-
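-# A minimal sketch (illustrative only) of the XA verb sequence DtxTestFixture
-# wraps: a one-phase commit skips dtx_prepare, while a two-phase commit
-# prepares first and then commits with one_phase=False.
-#
-#   t = DtxTestFixture(self, broker, "demo")
-#   t.start(); t.send(["m1"]); t.end()
-#   t.prepare()                 # omit for a one-phase commit
-#   t.commit(one_phase=False)   # or t.rollback()
-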
-class DtxTests(BrokerTest):
-
- def test_dtx_update(self):
- """Verify that DTX transaction state is updated to a new broker.
- Start a collection of transactions, then add a new cluster member,
- then verify they commit/rollback correctly on the new broker."""
-
-        # Note: multiple tests have been bundled into one to avoid the need to start/stop
- # multiple brokers per test.
-
- cluster=self.cluster(1)
- sessions = [cluster[0].connect().session()] # For verify
-
- # Transaction that will be open when new member joins, then committed.
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.send(["1", "2"])
- t1.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be open when new member joins, then rolled back.
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.start()
- t2.send(["1", "2"])
-
- # Transaction that will be prepared when new member joins, then committed.
- t3 = DtxTestFixture(self, cluster[0], "t3")
- t3.start()
- t3.send(["1", "2"])
- t3.end()
- t3.prepare()
-        t3.verify(sessions, []) # Not visible outside of transaction
-
- # Transaction that will be prepared when new member joins, then rolled back.
- t4 = DtxTestFixture(self, cluster[0], "t4")
- t4.start()
- t4.send(["1", "2"])
- t4.end()
- t4.prepare()
-
- # Transaction using an exclusive queue
- t5 = DtxTestFixture(self, cluster[0], "t5", exclusive=True)
- t5.start()
- t5.send(["1", "2"])
-
- # Accept messages in a transaction before/after join then commit
- # Note: Message sent outside transaction, we're testing transactional acceptance.
- t6 = DtxTestFixture(self, cluster[0], "t6")
- t6.send(["a","b","c"])
- t6.start()
- self.assertEqual(t6.accept().body, "a");
- t6.verify(sessions, ["b", "c"])
-
- # Accept messages in a transaction before/after join then roll back
- # Note: Message sent outside transaction, we're testing transactional acceptance.
- t7 = DtxTestFixture(self, cluster[0], "t7")
- t7.send(["a","b","c"])
- t7.start()
- self.assertEqual(t7.accept().body, "a");
- t7.verify(sessions, ["b", "c"])
-
- # Ended, suspended transactions across join.
- t8 = DtxTestFixture(self, cluster[0], "t8")
- t8.start(id="1")
- t8.send(["x"])
- t8.end(id="1", suspend=True)
- t8.start(id="2")
- t8.send(["y"])
- t8.end(id="2")
- t8.start()
-        t8.send(["z"])
-
-
- # Start new cluster member
- cluster.start()
- sessions.append(cluster[1].connect().session())
-
- # Commit t1
- t1.send(["3","4"])
- t1.verify(sessions, [])
- t1.end()
- t1.commit(one_phase=True)
- t1.verify(sessions, ["1","2","3","4"])
-
- # Rollback t2
- t2.send(["3","4"])
- t2.end()
- t2.rollback()
- t2.verify(sessions, [])
-
- # Commit t3
- t3.commit(one_phase=False)
- t3.verify(sessions, ["1","2"])
-
- # Rollback t4
- t4.rollback()
- t4.verify(sessions, [])
-
- # Commit t5
- t5.send(["3","4"])
- t5.verify(sessions, [])
- t5.end()
- t5.commit(one_phase=True)
- t5.verify(sessions, ["1","2","3","4"])
-
- # Commit t6
- self.assertEqual(t6.accept().body, "b");
- t6.verify(sessions, ["c"])
- t6.end()
- t6.commit(one_phase=True)
- t6.session.close() # Make sure they're not requeued by the session.
- t6.verify(sessions, ["c"])
-
- # Rollback t7
- self.assertEqual(t7.accept().body, "b");
- t7.verify(sessions, ["c"])
- t7.end()
- t7.rollback()
- t7.verify(sessions, ["a", "b", "c"])
-
- # Resume t8
- t8.end()
- t8.commit(one_phase=True)
- t8.start("1", resume=True)
- t8.end("1")
- t8.commit("1", one_phase=True)
- t8.commit("2", one_phase=True)
- t8.verify(sessions, ["z", "x","y"])
-
-
- def test_dtx_failover_rollback(self):
- """Kill a broker during a transaction, verify we roll back correctly"""
- cluster=self.cluster(1, expect=EXPECT_EXIT_FAIL)
- cluster.start(expect=EXPECT_RUNNING)
-
- # Test unprepared at crash
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.send(["a"]) # Not in transaction
- t1.start()
- t1.send(["b"]) # In transaction
-
- # Test prepared at crash
- t2 = DtxTestFixture(self, cluster[0], "t2")
- t2.send(["a"]) # Not in transaction
- t2.start()
- t2.send(["b"]) # In transaction
- t2.end()
- t2.prepare()
-
- # Crash the broker
- cluster[0].kill()
-
- # Transactional changes should not appear
- s = cluster[1].connect().session();
- self.assert_browse(s, "t1", ["a"])
- self.assert_browse(s, "t2", ["a"])
-
- def test_dtx_timeout(self):
- """Verify that dtx timeout works"""
- cluster = self.cluster(1)
- t1 = DtxTestFixture(self, cluster[0], "t1")
- t1.start()
- t1.set_timeout(1)
- time.sleep(1.1)
- try:
- t1.end()
- self.fail("Expected rollback timeout.")
- except DtxStatusException, e:
- self.assertEqual(e.actual, XA_RBTIMEOUT)
-
-class TxTests(BrokerTest):
-
- def test_tx_update(self):
- """Verify that transaction state is updated to a new broker"""
-
- def make_message(session, body=None, key=None, id=None):
- dp=session.delivery_properties(routing_key=key)
- mp=session.message_properties(correlation_id=id)
- return qpid.datatypes.Message(dp, mp, body)
-
- cluster=self.cluster(1)
- # Use old API. TX is not supported in messaging API.
- c = cluster[0].connect_old()
- s = c.session("tx-session", 1)
- s.queue_declare(queue="q")
- # Start transaction
- s.tx_select()
- s.message_transfer(message=make_message(s, "1", "q"))
- # Start new member mid-transaction
- cluster.start()
- # Do more work
- s.message_transfer(message=make_message(s, "2", "q"))
- # Commit the transaction and verify the results.
- s.tx_commit()
- for b in cluster: self.assert_browse(b.connect().session(), "q", ["1","2"])
-
-
-class LongTests(BrokerTest):
- """Tests that can run for a long time if -DDURATION=<minutes> is set"""
- def duration(self):
- d = self.config.defines.get("DURATION")
- if d: return float(d)*60
- else: return 3 # Default is to be quick
-
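-    # DURATION comes from the harness's -D defines; assuming the
-    # qpid-python-test runner, a 10-minute soak would be invoked as:
-    #   qpid-python-test -DDURATION=10 'cluster_tests.LongTests.*'
-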
- def test_failover(self):
- """Test fail-over during continuous send-receive with errors"""
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
- for b in cluster: ErrorGenerator(b)
-
- # Start sender and receiver threads
- cluster[0].declare_queue("test-queue")
- sender = NumberedSender(cluster[0], max_depth=1000)
- receiver = NumberedReceiver(cluster[0], sender=sender)
- receiver.start()
- sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration()
- i = 0
- while time.time() < endtime:
- sender.sender.assert_running()
- receiver.receiver.assert_running()
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- for b in cluster[i:]: b.ready()
- ErrorGenerator(b)
- time.sleep(5)
- sender.stop()
- receiver.stop()
- for i in range(i, len(cluster)): cluster[i].kill()
-
- def test_management(self, args=[]):
- """
- Stress test: Run management clients and other clients concurrently
- while killing and restarting brokers.
- """
-
- class ClientLoop(StoppableThread):
- """Run a client executable in a loop."""
- def __init__(self, broker, cmd):
- StoppableThread.__init__(self)
- self.broker=broker
- self.cmd = cmd # Client command.
- self.lock = Lock()
- self.process = None # Client process.
- self.start()
-
- def run(self):
- try:
- while True:
- self.lock.acquire()
- try:
- if self.stopped: break
- self.process = self.broker.test.popen(
- self.cmd, expect=EXPECT_UNKNOWN)
- finally:
- self.lock.release()
- try:
- exit = self.process.wait()
- except OSError, e:
- # Process may already have been killed by self.stop()
- break
- except Exception, e:
- self.process.unexpected(
- "client of %s: %s"%(self.broker.name, e))
- self.lock.acquire()
- try:
- if self.stopped: break
- if exit != 0:
- self.process.unexpected(
- "client of %s exit code %s"%(self.broker.name, exit))
- finally:
- self.lock.release()
- except Exception, e:
- self.error = RethrownException("Error in ClientLoop.run")
-
- def stop(self):
- """Stop the running client and wait for it to exit"""
- self.lock.acquire()
- try:
- if self.stopped: return
- self.stopped = True
- if self.process:
- try: self.process.kill() # Kill the client.
- except OSError: pass # The client might not be running.
- finally: self.lock.release()
- StoppableThread.stop(self)
-
- # body of test_management()
-
-        args = args + ["--mgmt-pub-interval", 1] # Copy: avoid mutating the mutable default argument
- args += ["--log-enable=trace+:management"]
- # Use store if present.
- if BrokerTest.store_lib: args +=["--load-module", BrokerTest.store_lib]
- cluster = self.cluster(3, args, expect=EXPECT_EXIT_FAIL) # brokers will be killed
-
- clients = [] # Per-broker list of clients that only connect to one broker.
- mclients = [] # Management clients that connect to every broker in the cluster.
-
- def start_clients(broker):
- """Start ordinary clients for a broker."""
- cmds=[
- ["qpid-tool", "localhost:%s"%(broker.port())],
- ["qpid-perftest", "--count=5000", "--durable=yes",
- "--base-name", str(qpid.datatypes.uuid4()), "--port", broker.port()],
- ["qpid-txtest", "--queue-base-name", "tx-%s"%str(qpid.datatypes.uuid4()),
- "--port", broker.port()],
- ["qpid-queue-stats", "-a", "localhost:%s" %(broker.port())]
- ]
- clients.append([ClientLoop(broker, cmd) for cmd in cmds])
-
- def start_mclients(broker):
- """Start management clients that make multiple connections."""
- cmd = ["qpid-cluster", "-C", "localhost:%s" %(broker.port())]
- mclients.append(ClientLoop(broker, cmd))
-
- endtime = time.time() + self.duration()
-        # First run is a third of the duration, capped at 5 seconds.
-        runtime = min(5.0, self.duration() / 3.0)
- alive = 0 # First live cluster member
- for i in range(len(cluster)): start_clients(cluster[i])
- start_mclients(cluster[alive])
-
- while time.time() < endtime:
- time.sleep(runtime)
- runtime = 5 # Remaining runs 5 seconds, frequent broker kills
- for b in cluster[alive:]: b.ready() # Check if a broker crashed.
- # Kill the first broker, expect the clients to fail.
- b = cluster[alive]
- b.ready()
- b.kill()
-            # Stop the broker's clients and all the mclients.
- for c in clients[alive] + mclients:
- try: c.stop()
- except: pass # Ignore expected errors due to broker shutdown.
- clients[alive] = []
- mclients = []
- # Start another broker and clients
- alive += 1
- cluster.start(expect=EXPECT_EXIT_FAIL)
-            cluster[-1].ready() # Wait till it's ready
- start_clients(cluster[-1])
- start_mclients(cluster[alive])
- for c in chain(mclients, *clients):
- c.stop()
- for b in cluster[alive:]:
- b.ready() # Verify still alive
- b.kill()
- # Verify that logs are consistent
- cluster_test_logs.verify_logs()
-
- def test_management_qmf2(self):
- self.test_management(args=["--mgmt-qmf2=yes"])
-
- def test_connect_consistent(self):
- args=["--mgmt-pub-interval=1","--log-enable=trace+:management"]
- cluster = self.cluster(2, args=args)
- end = time.time() + self.duration()
- while (time.time() < end): # Get a management interval
- for i in xrange(1000): cluster[0].connect().close()
- cluster_test_logs.verify_logs()
-
- def test_flowlimit_failover(self):
- """Test fail-over during continuous send-receive with flow control
- active.
- """
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL)
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- s0 = ssn0.sender("test-queue; {create:always, node:{type:queue, x-declare:{arguments:{'qpid.flow_stop_count':2000, 'qpid.flow_resume_count':100}}}}")
-
- receiver = NumberedReceiver(cluster[0])
- receiver.start()
- sender = NumberedSender(cluster[0])
- sender.start()
- # Wait for senders & receiver to get up and running
- retry(lambda: receiver.received > 10)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- sender.sender.assert_running()
- receiver.receiver.assert_running()
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(5)
- sender.stop()
- receiver.stop()
- for i in range(i, len(cluster)): cluster[i].kill()
-
- def test_ttl_failover(self):
- """Test that messages with TTL don't cause problems in a cluster with failover"""
-
- class Client(StoppableThread):
-
- def __init__(self, broker):
- StoppableThread.__init__(self)
- self.connection = broker.connect(reconnect=True)
- self.auto_fetch_reconnect_urls(self.connection)
- self.session = self.connection.session()
-
- def auto_fetch_reconnect_urls(self, conn):
-                """Replacement for the qpid.messaging.util version, which is noisy"""
- ssn = conn.session("auto-fetch-reconnect-urls")
- rcv = ssn.receiver("amq.failover")
- rcv.capacity = 10
-
- def main():
- while True:
- try:
- msg = rcv.fetch()
- qpid.messaging.util.set_reconnect_urls(conn, msg)
- ssn.acknowledge(msg, sync=False)
- except messaging.exceptions.LinkClosed: return
- except messaging.exceptions.ConnectionError: return
-
- thread = Thread(name="auto-fetch-reconnect-urls", target=main)
- thread.setDaemon(True)
- thread.start()
-
- def stop(self):
- StoppableThread.stop(self)
- self.connection.detach()
-
- class Sender(Client):
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.sent = 0 # Number of messages _reliably_ sent.
- self.sender = self.session.sender(address, capacity=1000)
-
- def send_counted(self, ttl):
- self.sender.send(Message(str(self.sent), ttl=ttl))
- self.sent += 1
-
- def run(self):
- while not self.stopped:
- choice = random.randint(0,4)
- if choice == 0: self.send_counted(None) # No ttl
- elif choice == 1: self.send_counted(100000) # Large ttl
- else: # Small ttl, might expire
- self.sender.send(Message("", ttl=random.random()/10))
- self.sender.send(Message("z"), sync=True) # Chaser.
-
- class Receiver(Client):
-
- def __init__(self, broker, address):
- Client.__init__(self, broker)
- self.received = 0 # Number of non-empty (reliable) messages received.
- self.receiver = self.session.receiver(address, capacity=1000)
- def run(self):
- try:
- while True:
- m = self.receiver.fetch(1)
- if m.content == "z": break
- if m.content: # Ignore unreliable messages
- # Ignore duplicates
- if int(m.content) == self.received: self.received += 1
- except Exception,e: self.error = e
-
- # def test_ttl_failover
-
- # Original cluster will all be killed so expect exit with failure
- # Set small purge interval.
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["--queue-purge-interval=1"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # Python client failover produces noisy WARN logs, disable temporarily
- logger = logging.getLogger()
- log_level = logger.getEffectiveLevel()
- logger.setLevel(logging.ERROR)
- sender = None
- receiver = None
- try:
- # Start sender and receiver threads
- receiver = Receiver(cluster[0], "q;{create:always}")
- receiver.start()
- sender = Sender(cluster[0], "q;{create:always}")
- sender.start()
- # Wait for sender & receiver to get up and running
- retry(lambda: receiver.received > 0)
-
- # Kill brokers in a cycle.
- endtime = time.time() + self.duration()
- runtime = min(5.0, self.duration() / 4.0)
- i = 0
- while time.time() < endtime:
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- b.ready()
- time.sleep(runtime)
- sender.stop()
- receiver.stop()
- for b in cluster[i:]:
- b.ready() # Check it didn't crash
- b.kill()
- self.assertEqual(sender.sent, receiver.received)
- cluster_test_logs.verify_logs()
-
- finally:
- # Detach to avoid slow reconnect attempts during shut-down if test fails.
- if sender: sender.connection.detach()
- if receiver: receiver.connection.detach()
- logger.setLevel(log_level)
-
- def test_msg_group_failover(self):
- """Test fail-over during continuous send-receive of grouped messages.
- """
-
- class GroupedTrafficGenerator(Thread):
- def __init__(self, url, queue, group_key):
- Thread.__init__(self)
- self.url = url
- self.queue = queue
- self.group_key = group_key
- self.status = -1
-
- def run(self):
-                # Generate traffic for approx 10 seconds (2011 msgs / 200 per sec)
- cmd = ["msg_group_test",
- "--broker=%s" % self.url,
- "--address=%s" % self.queue,
- "--connection-options={%s}" % (Cluster.CONNECTION_OPTIONS),
- "--group-key=%s" % self.group_key,
- "--receivers=2",
- "--senders=3",
- "--messages=2011",
- "--send-rate=200",
- "--capacity=11",
- "--ack-frequency=23",
- "--allow-duplicates",
- "--group-size=37",
- "--randomize-group-size",
- "--interleave=13"]
- # "--trace"]
- self.generator = Popen( cmd );
- self.status = self.generator.wait()
- return self.status
-
- def results(self):
- self.join(timeout=30) # 3x assumed duration
- if self.isAlive(): return -1
- return self.status
-
- # Original cluster will all be killed so expect exit with failure
- cluster = self.cluster(3, expect=EXPECT_EXIT_FAIL, args=["-t"])
- for b in cluster: b.ready() # Wait for brokers to be ready
-
- # create a queue with rather draconian flow control settings
- ssn0 = cluster[0].connect().session()
- q_args = "{'qpid.group_header_key':'group-id', 'qpid.shared_msg_group':1}"
- s0 = ssn0.sender("test-group-q; {create:always, node:{type:queue, x-declare:{arguments:%s}}}" % q_args)
-
- # Kill original brokers, start new ones for the duration.
- endtime = time.time() + self.duration();
- i = 0
- while time.time() < endtime:
- traffic = GroupedTrafficGenerator( cluster[i].host_port(),
- "test-group-q", "group-id" )
- traffic.start()
- time.sleep(1)
-
- for x in range(2):
- for b in cluster[i:]: b.ready() # Check if any broker crashed.
- cluster[i].kill()
- i += 1
- b = cluster.start(expect=EXPECT_EXIT_FAIL)
- time.sleep(1)
-
- # wait for traffic to finish, verify success
- self.assertEqual(0, traffic.results())
-
- for i in range(i, len(cluster)): cluster[i].kill()
-
-
-class StoreTests(BrokerTest):
- """
- Cluster tests that can only be run if there is a store available.
- """
- def args(self):
- assert BrokerTest.store_lib
- return ["--load-module", BrokerTest.store_lib]
-
- def test_store_loaded(self):
- """Ensure we are indeed loading a working store"""
- broker = self.broker(self.args(), name="recoverme", expect=EXPECT_EXIT_FAIL)
- m = Message("x", durable=True)
- broker.send_message("q", m)
- broker.kill()
- broker = self.broker(self.args(), name="recoverme")
- self.assertEqual("x", broker.get_message("q").content)
-
- def test_kill_restart(self):
-        """Verify we can kill/restart a broker with a store in a cluster"""
- cluster = self.cluster(1, self.args())
- cluster.start("restartme", expect=EXPECT_EXIT_FAIL).kill()
-
- # Send a message, retrieve from the restarted broker
- cluster[0].send_message("q", "x")
- m = cluster.start("restartme").get_message("q")
- self.assertEqual("x", m.content)
-
- def stop_cluster(self,broker):
- """Clean shut-down of a cluster"""
- self.assertEqual(0, qpid_cluster.main(
- ["-kf", broker.host_port()]))
-
- def test_persistent_restart(self):
- """Verify persistent cluster shutdown/restart scenarios"""
- cluster = self.cluster(0, args=self.args() + ["--cluster-size=3"])
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_OK, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_FAIL, wait=True)
- a.send_message("q", Message("1", durable=True))
- # Kill & restart one member.
- c.kill()
- self.assertEqual(a.get_message("q").content, "1")
- a.send_message("q", Message("2", durable=True))
- c = cluster.start("c", expect=EXPECT_EXIT_OK)
- self.assertEqual(c.get_message("q").content, "2")
- # Shut down the entire cluster cleanly and bring it back up
- a.send_message("q", Message("3", durable=True))
- self.stop_cluster(a)
- a = cluster.start("a", wait=False)
- b = cluster.start("b", wait=False)
- c = cluster.start("c", wait=True)
- self.assertEqual(a.get_message("q").content, "3")
-
- def test_persistent_partial_failure(self):
- # Kill 2 members, shut down the last cleanly then restart
- # Ensure we use the clean database
- cluster = self.cluster(0, args=self.args() + ["--cluster-size=3"])
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_FAIL, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_OK, wait=True)
- a.send_message("q", Message("4", durable=True))
- a.kill()
- b.kill()
- self.assertEqual(c.get_message("q").content, "4")
- c.send_message("q", Message("clean", durable=True))
- self.stop_cluster(c)
- a = cluster.start("a", wait=False)
- b = cluster.start("b", wait=False)
- c = cluster.start("c", wait=True)
- self.assertEqual(a.get_message("q").content, "clean")
-
- def test_wrong_cluster_id(self):
- # Start a cluster1 broker, then try to restart in cluster2
- cluster1 = self.cluster(0, args=self.args())
- a = cluster1.start("a", expect=EXPECT_EXIT_OK)
- a.terminate()
- cluster2 = self.cluster(1, args=self.args())
- try:
- a = cluster2.start("a", expect=EXPECT_EXIT_FAIL)
- a.ready()
- self.fail("Expected exception")
- except: pass
-
- def test_wrong_shutdown_id(self):
- # Start 2 members and shut down.
- cluster = self.cluster(0, args=self.args()+["--cluster-size=2"])
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- b = cluster.start("b", expect=EXPECT_EXIT_OK, wait=False)
- self.stop_cluster(a)
- self.assertEqual(a.wait(), 0)
- self.assertEqual(b.wait(), 0)
-
- # Restart with a different member and shut down.
- a = cluster.start("a", expect=EXPECT_EXIT_OK, wait=False)
- c = cluster.start("c", expect=EXPECT_EXIT_OK, wait=False)
- self.stop_cluster(a)
- self.assertEqual(a.wait(), 0)
- self.assertEqual(c.wait(), 0)
- # Mix members from both shutdown events, they should fail
- # TODO aconway 2010-03-11: can't predict the exit status of these
- # as it depends on the order of delivery of initial-status messages.
- # See comment at top of this file.
- a = cluster.start("a", expect=EXPECT_UNKNOWN, wait=False)
- b = cluster.start("b", expect=EXPECT_UNKNOWN, wait=False)
- self.assertRaises(Exception, lambda: a.ready())
- self.assertRaises(Exception, lambda: b.ready())
-
- def test_solo_store_clean(self):
- # A single node cluster should always leave a clean store.
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL)
- a.send_message("q", Message("x", durable=True))
- a.kill()
- a = cluster.start("a")
- self.assertEqual(a.get_message("q").content, "x")
-
- def test_last_store_clean(self):
-        # Verify that only the last node in a cluster to shut down has
-        # a clean store. Start with a cluster of 3, reduce to 1, then
-        # increase again to ensure that a node that was once alone but
-        # did not finish as the last node does not end up with a clean
-        # store.
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL)
- self.assertEqual(a.store_state(), "clean")
- b = cluster.start("b", expect=EXPECT_EXIT_FAIL)
- c = cluster.start("c", expect=EXPECT_EXIT_FAIL)
- self.assertEqual(b.store_state(), "dirty")
- self.assertEqual(c.store_state(), "dirty")
- retry(lambda: a.store_state() == "dirty")
-
- a.send_message("q", Message("x", durable=True))
- a.kill()
- b.kill() # c is last man, will mark store clean
- retry(lambda: c.store_state() == "clean")
- a = cluster.start("a", expect=EXPECT_EXIT_FAIL) # c no longer last man
- retry(lambda: c.store_state() == "dirty")
- c.kill() # a is now last man
- retry(lambda: a.store_state() == "clean")
- a.kill()
- self.assertEqual(a.store_state(), "clean")
- self.assertEqual(b.store_state(), "dirty")
- self.assertEqual(c.store_state(), "dirty")
-
- def test_restart_clean(self):
-        """Verify that we can restart brokers one by one in a
-        persistent cluster after a clean shutdown"""
- cluster = self.cluster(0, self.args())
- a = cluster.start("a", expect=EXPECT_EXIT_OK)
- b = cluster.start("b", expect=EXPECT_EXIT_OK)
- c = cluster.start("c", expect=EXPECT_EXIT_OK)
- a.send_message("q", Message("x", durable=True))
- self.stop_cluster(a)
- a = cluster.start("a")
- b = cluster.start("b")
- c = cluster.start("c")
- self.assertEqual(c.get_message("q").content, "x")
-
- def test_join_sub_size(self):
- """Verify that after starting a cluster with cluster-size=N,
- we can join new members even if size < N-1"""
- cluster = self.cluster(0, self.args()+["--cluster-size=3"])
- a = cluster.start("a", wait=False, expect=EXPECT_EXIT_FAIL)
- b = cluster.start("b", wait=False, expect=EXPECT_EXIT_FAIL)
- c = cluster.start("c")
- a.send_message("q", Message("x", durable=True))
- a.send_message("q", Message("y", durable=True))
- a.kill()
- b.kill()
- a = cluster.start("a")
- self.assertEqual(c.get_message("q").content, "x")
- b = cluster.start("b")
- self.assertEqual(c.get_message("q").content, "y")
diff --git a/cpp/src/tests/clustered_replication_test b/cpp/src/tests/clustered_replication_test
deleted file mode 100755
index 5a9f143eb4..0000000000
--- a/cpp/src/tests/clustered_replication_test
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Test reliability of the replication feature in the face of link
-# failures:
-source ./test_env.sh
-
-trap stop_brokers INT EXIT
-
-fail() {
- echo $1
- exit 1
-}
-
-stop_brokers() {
- if [[ $PRIMARY1 ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $PRIMARY1
- unset PRIMARY1
- fi
- if [[ $PRIMARY2 ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $PRIMARY2
- unset PRIMARY2
- fi
- if [[ $DR1 ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $DR1
- unset DR1
- fi
- if [[ $DR2 ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $DR2
- unset DR2
- fi
-}
-
-if test -d $PYTHON_DIR; then
- . cpg_check.sh
- cpg_enabled || exit 0
-
-    # TODO: these cluster names need to be unique to prevent clashes
- PRIMARY_CLUSTER=PRIMARY_$(hostname)_$$
- DR_CLUSTER=DR_$(hostname)_$$
-
- GENERAL_OPTS="--auth no --no-module-dir --no-data-dir --daemon --port 0 --log-to-stderr false"
- PRIMARY_OPTS="--load-module $REPLICATING_LISTENER_LIB --create-replication-queue true --replication-queue REPLICATION_QUEUE --load-module $CLUSTER_LIB --cluster-name $PRIMARY_CLUSTER"
- DR_OPTS="--load-module $REPLICATION_EXCHANGE_LIB --load-module $CLUSTER_LIB --cluster-name $DR_CLUSTER"
-
- rm -f repl*.tmp #cleanup any files left from previous run
-
- #start first node of primary cluster and set up test queue
- echo Starting primary cluster
- PRIMARY1=$(with_ais_group $QPIDD_EXEC $GENERAL_OPTS $PRIMARY_OPTS --log-to-file repl.primary.1.tmp) || fail "Could not start PRIMARY1"
- $PYTHON_COMMANDS/qpid-config -b "localhost:$PRIMARY1" add queue test-queue --generate-queue-events 2
- $PYTHON_COMMANDS/qpid-config -b "localhost:$PRIMARY1" add queue control-queue --generate-queue-events 1
-
- #send 10 messages, consume 5 of them
- for i in `seq 1 10`; do echo Message$i; done | ./sender --port $PRIMARY1
- ./receiver --port $PRIMARY1 --messages 5 > /dev/null
-
- #add new node to primary cluster, testing correct transfer of state:
- echo Adding node to primary cluster
- PRIMARY2=$(with_ais_group $QPIDD_EXEC $GENERAL_OPTS $PRIMARY_OPTS --log-to-file repl.primary.2.tmp) || fail "Could not start PRIMARY2 "
-
- #start DR cluster, set up test queue there and establish replication bridge
- echo Starting DR cluster
- DR1=$(with_ais_group $QPIDD_EXEC $GENERAL_OPTS $DR_OPTS --log-to-file repl.dr.1.tmp) || fail "Could not start DR1"
- DR2=$(with_ais_group $QPIDD_EXEC $GENERAL_OPTS $DR_OPTS --log-to-file repl.dr.2.tmp) || fail "Could not start DR2"
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$DR1" add queue test-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$DR1" add queue control-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$DR1" add exchange replication REPLICATION_EXCHANGE
- $PYTHON_COMMANDS/qpid-route queue add localhost:$DR2 localhost:$PRIMARY2 REPLICATION_EXCHANGE REPLICATION_QUEUE || fail "Could not add route."
-
- #send more messages to primary
- for i in `seq 11 20`; do echo Message$i; done | ./sender --port $PRIMARY1 --send-eos 1
-
- #wait for replication events to all be processed:
- echo Waiting for replication to complete
- echo Done | ./sender --port $PRIMARY1 --routing-key control-queue --send-eos 1
- ./receiver --queue control-queue --port $DR1 > /dev/null
-
- #verify contents of test queue on dr cluster:
- echo Verifying...
- ./receiver --port $DR2 > repl.out.tmp
- for i in `seq 6 20`; do echo Message$i; done | diff repl.out.tmp - || FAIL=1
-
- if [[ $FAIL ]]; then
- echo Clustered replication test failed: expectations not met!
- exit 1
- else
- echo Clustered replication test passed
- rm -f repl*.tmp
- fi
-
-fi
diff --git a/cpp/src/tests/dynamic_log_hires_timestamp b/cpp/src/tests/dynamic_log_hires_timestamp
new file mode 100755
index 0000000000..afe79cc9b3
--- /dev/null
+++ b/cpp/src/tests/dynamic_log_hires_timestamp
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Run a simple test to verify dynamic log highres timestamp changes
+source ./test_env.sh
+test -d $PYTHON_DIR || { echo "Skipping python tests, no python dir."; exit 0; }
+
+LOG_FILE=hires_test.log
+trap cleanup EXIT
+
+cleanup() {
+ test -n "$PORT" && $QPIDD_EXEC --no-module-dir --quit --port $PORT
+}
+
+error() {
+ echo $*;
+ exit 1;
+}
+
+rm -rf $LOG_FILE
+PORT=$($QPIDD_EXEC --auth=no --no-module-dir --daemon --port=0 --log-to-file $LOG_FILE) || error "Could not start broker"
+
+echo Broker for log highres timestamp test started on $PORT, pid is $($QPIDD_EXEC --no-module-dir --check --port $PORT)
+
+$srcdir/qpid-ctrl -b localhost:$PORT setLogLevel level='debug+:Broker' > /dev/null
+$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=1 body=LOWRES > /dev/null
+$srcdir/qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='true' > /dev/null
+$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=2 body=HI_RES > /dev/null
+$srcdir/qpid-ctrl -b localhost:$PORT setLogHiresTimestamp logHires='false' > /dev/null
+$srcdir/qpid-ctrl -b localhost:$PORT echo sequence=3 body=LOWRES > /dev/null
+
+# Expect 3 log entries with 'echo' in them
+if [[ $(grep echo $LOG_FILE | wc -l) -ne 3 ]]; then
+ cat $LOG_FILE
+ error "Log content error - expected 3 echo log entries"
+fi
+
+# Lines 1 and 3 are length X
+# Line 2 is length X+10 because of timestamp addition
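+# (Illustratively, and only as an assumption about qpidd's output format, a
+# stamp like "16:14:30" gains subsecond digits in hi-res mode; the test
+# relies only on the fixed +10 character difference.)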
+LEN1=$(grep echo $LOG_FILE | grep \(1 | wc -m)
+LEN2=$(grep echo $LOG_FILE | grep \(2 | wc -m)
+LEN3=$(grep echo $LOG_FILE | grep \(3 | wc -m)
+EXPECTED_LEN2=$(( $LEN1 + 10 ))
+
+if [ $LEN1 -ne $LEN3 ]; then
+ cat $LOG_FILE
+ error "Log content error - expected echo 3 to be same line length as echo 1"
+fi
+
+if [ $LEN2 -ne $EXPECTED_LEN2 ]; then
+ cat $LOG_FILE
+ error "Log content error - expected echo 2 to be 10 characters longer than echo 1"
+fi
+
+rm -rf $LOG_FILE
+echo OK
+
diff --git a/cpp/src/tests/failover_soak.cpp b/cpp/src/tests/failover_soak.cpp
deleted file mode 100644
index c2ac36a757..0000000000
--- a/cpp/src/tests/failover_soak.cpp
+++ /dev/null
@@ -1,827 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- *
- */
-
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/wait.h>
-#include <sys/time.h>
-#include <string.h>
-#include <sys/types.h>
-#include <signal.h>
-
-#include <string>
-#include <iostream>
-#include <sstream>
-#include <vector>
-
-#include <boost/assign.hpp>
-
-#include "qpid/framing/Uuid.h"
-
-#include <ForkedBroker.h>
-#include <qpid/client/Connection.h>
-
-
-
-
-
-using namespace std;
-using boost::assign::list_of;
-using namespace qpid::framing;
-using namespace qpid::client;
-
-
-namespace qpid {
-namespace tests {
-
-vector<pid_t> pids;
-
-typedef vector<ForkedBroker *> brokerVector;
-
-typedef enum
-{
- NO_STATUS,
- RUNNING,
- COMPLETED
-}
-childStatus;
-
-
-typedef enum
-{
- NO_TYPE,
- DECLARING_CLIENT,
- SENDING_CLIENT,
- RECEIVING_CLIENT
-}
-childType;
-
-
-ostream& operator<< ( ostream& os, const childType& ct ) {
- switch ( ct ) {
- case DECLARING_CLIENT: os << "Declaring Client"; break;
- case SENDING_CLIENT: os << "Sending Client"; break;
- case RECEIVING_CLIENT: os << "Receiving Client"; break;
- default: os << "No Client"; break;
- }
-
- return os;
-}
-
-
-
-
-struct child
-{
- child ( string & name, pid_t pid, childType type )
- : name(name), pid(pid), retval(-999), status(RUNNING), type(type)
- {
- gettimeofday ( & startTime, 0 );
- }
-
-
- void
- done ( int _retval )
- {
- retval = _retval;
- status = COMPLETED;
- gettimeofday ( & stopTime, 0 );
- }
-
-
- void
- setType ( childType t )
- {
- type = t;
- }
-
-
- string name;
- pid_t pid;
- int retval;
- childStatus status;
- childType type;
- struct timeval startTime,
- stopTime;
-};
-
-
-
-
-struct children : public vector<child *>
-{
-
- void
- add ( string & name, pid_t pid, childType type )
- {
- push_back ( new child ( name, pid, type ) );
- }
-
-
- child *
- get ( pid_t pid )
- {
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- if ( pid == (*i)->pid )
- return *i;
-
- return 0;
- }
-
-
- void
- exited ( pid_t pid, int retval )
- {
- child * kid = get ( pid );
- if(! kid)
- {
- if ( verbosity > 1 )
- {
- cerr << "children::exited warning: Can't find child with pid "
- << pid
- << endl;
- }
- return;
- }
-
- kid->done ( retval );
- }
-
-
- int
- unfinished ( )
- {
- int count = 0;
-
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- if ( COMPLETED != (*i)->status )
- ++ count;
-
- return count;
- }
-
-
- int
- checkChildren ( )
- {
- for ( unsigned int i = 0; i < pids.size(); ++ i )
- {
- int pid = pids[i];
- int returned_pid;
- int status;
-
- child * kid = get ( pid );
-
- if ( kid->status != COMPLETED )
- {
- returned_pid = waitpid ( pid, &status, WNOHANG );
-
- if ( returned_pid == pid )
- {
- int exit_status = WEXITSTATUS(status);
- exited ( pid, exit_status );
- if ( exit_status ) // this is a child error.
- return exit_status;
- }
- }
- }
-
- return 0;
- }
-
-
- void
- killEverybody ( )
- {
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- kill ( (*i)->pid, 9 );
- }
-
-
-
- void
- print ( )
- {
- cout << "--- status of all children --------------\n";
- vector<child *>::iterator i;
- for ( i = begin(); i != end(); ++ i )
- cout << "child: " << (*i)->name
- << " status: " << (*i)->status
- << endl;
- cout << "\n\n\n\n";
- }
-
- int verbosity;
-};
-
-
-children allMyChildren;
-
-
-void
-childExit ( int )
-{
- int childReturnCode;
- pid_t pid = waitpid ( 0, & childReturnCode, WNOHANG);
-
- if ( pid > 0 )
- allMyChildren.exited ( pid, childReturnCode );
-}
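childExit above is the test's SIGCHLD hook: reap whichever child just exited, without blocking, and record its exit status. The same pattern in Python, offered only as a sketch (the exit_codes map is illustrative, not part of the test):

    import os
    import signal

    exit_codes = {}  # pid -> exit status, filled in as children terminate

    def child_exit(signum, frame):
        try:
            # WNOHANG: returns (0, 0) immediately if no child has exited yet.
            pid, status = os.waitpid(-1, os.WNOHANG)
        except OSError:
            return  # no children left to reap
        if pid > 0:
            exit_codes[pid] = os.WEXITSTATUS(status)

    signal.signal(signal.SIGCHLD, child_exit)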
-
-
-
-int
-mrand ( int maxDesiredVal ) {
- double zeroToOne = (double) rand() / (double) RAND_MAX;
- return (int) (zeroToOne * (double) maxDesiredVal);
-}
-
-
-
-int
-mrand ( int minDesiredVal, int maxDesiredVal ) {
- int interval = maxDesiredVal - minDesiredVal;
- return minDesiredVal + mrand ( interval );
-}
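The two mrand overloads just rescale the PRNG: divide rand() by RAND_MAX for a value in [0, 1], multiply up to the requested span, and shift by the minimum. A rough Python equivalent (with one subtlety noted: random.random() is strictly below 1.0, while rand() can equal RAND_MAX, so the C version can occasionally return the maximum itself):

    import random

    def mrand(max_desired):
        # uniform draw in [0, 1) scaled down to an int in [0, max_desired)
        return int(random.random() * max_desired)

    def mrand_range(min_desired, max_desired):
        return min_desired + mrand(max_desired - min_desired)

    # e.g. the soak loop's sleep, mrand_range(2, 6), yields 2, 3, 4 or 5.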
-
-
-
-void
-makeClusterName ( string & s ) {
- stringstream ss;
- ss << "soakTestCluster_" << Uuid(true).str();
- s = ss.str();
-}
-
-
-
-
-
-void
-printBrokers ( brokerVector & brokers )
-{
- cout << "Broker List ------------ size: " << brokers.size() << "\n";
- for ( brokerVector::iterator i = brokers.begin(); i != brokers.end(); ++ i) {
- cout << "pid: "
- << (*i)->getPID()
- << " port: "
- << (*i)->getPort()
- << endl;
- }
- cout << "end Broker List ------------\n";
-}
-
-
-
-
-ForkedBroker * newbie = 0;
-int newbie_port = 0;
-
-
-
-bool
-wait_for_newbie ( )
-{
- if ( ! newbie )
- return true;
-
- try
- {
- Connection connection;
- connection.open ( "127.0.0.1", newbie_port );
- connection.close();
- newbie = 0; // He's no newbie anymore!
- return true;
- }
- catch ( const std::exception& error )
- {
- std::cerr << "wait_for_newbie error: "
- << error.what()
- << endl;
- return false;
- }
-}
-
-bool endsWith(const char* str, const char* suffix) {
- return (strlen(suffix) < strlen(str) && 0 == strcmp(str+strlen(str)-strlen(suffix), suffix));
-}
-
-
-void
-startNewBroker ( brokerVector & brokers,
- char const * moduleOrDir,
- string const clusterName,
- int verbosity,
- int durable )
-{
- static int brokerId = 0;
- stringstream path, prefix;
- prefix << "soak-" << brokerId;
- std::vector<std::string> argv = list_of<string>
- ("qpidd")
- ("--cluster-name")(clusterName)
- ("--auth=no")
- ("--mgmt-enable=no")
- ("--log-prefix")(prefix.str())
- ("--log-to-file")(prefix.str()+".log")
- ("--log-enable=info+")
- ("--log-enable=debug+:cluster")
- ("TMP_DATA_DIR");
-
- if (endsWith(moduleOrDir, "cluster.so")) {
- // Module path specified, load only that module.
- argv.push_back(string("--load-module=")+moduleOrDir);
- argv.push_back("--no-module-dir");
- if ( durable ) {
-            std::cerr << "failover_soak warning: durable arg has no effect. Use the \"dir\" option of \"moduleOrDir\".\n";
- }
- }
- else {
- // Module directory specified, load all modules in dir.
- argv.push_back(string("--module-dir=")+moduleOrDir);
- }
-
- newbie = new ForkedBroker (argv);
- newbie_port = newbie->getPort();
- ForkedBroker * broker = newbie;
-
- if ( verbosity > 0 )
- std::cerr << "new broker created: pid == "
- << broker->getPID()
- << " log-prefix == "
- << "soak-" << brokerId
- << endl;
- brokers.push_back ( broker );
-
- ++ brokerId;
-}
-
-
-
-
-
-bool
-killFrontBroker ( brokerVector & brokers, int verbosity )
-{
- cerr << "killFrontBroker: waiting for newbie sync...\n";
- if ( ! wait_for_newbie() )
- return false;
- cerr << "killFrontBroker: newbie synced.\n";
-
- if ( verbosity > 0 )
- cout << "killFrontBroker pid: " << brokers[0]->getPID() << " on port " << brokers[0]->getPort() << endl;
- try { brokers[0]->kill(9); }
- catch ( const exception& error ) {
- if ( verbosity > 0 )
- {
- cout << "error killing broker: "
- << error.what()
- << endl;
- }
-
- return false;
- }
- delete brokers[0];
- brokers.erase ( brokers.begin() );
- return true;
-}
-
-
-
-
-
-/*
- * The optional delay is to avoid killing newbie brokers that have just
- * been added and are still in the process of updating. This causes
- * spurious, test-generated errors that scare everybody.
- */
-void
-killAllBrokers ( brokerVector & brokers, int delay )
-{
- if ( delay > 0 )
- {
- std::cerr << "Killing all brokers after delay of " << delay << endl;
- sleep ( delay );
- }
-
- for ( uint i = 0; i < brokers.size(); ++ i )
- try { brokers[i]->kill(9); }
- catch ( const exception& error )
- {
- std::cerr << "killAllBrokers Warning: exception during kill on broker "
- << i
- << " "
- << error.what()
- << endl;
- }
-}
-
-
-
-
-
-pid_t
-runDeclareQueuesClient ( brokerVector brokers,
- char const * host,
- char const * path,
- int verbosity,
- int durable,
- char const * queue_prefix,
- int n_queues
- )
-{
- string name("declareQueues");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1 )
- cout << "startDeclareQueuesClient: host: "
- << host
- << " port: "
- << port
- << endl;
- stringstream portSs;
- portSs << port;
-
- vector<const char*> argv;
- argv.push_back ( "declareQueues" );
- argv.push_back ( host );
- string portStr = portSs.str();
- argv.push_back ( portStr.c_str() );
- if ( durable )
- argv.push_back ( "1" );
- else
- argv.push_back ( "0" );
-
- argv.push_back ( queue_prefix );
-
- char n_queues_str[20];
- sprintf ( n_queues_str, "%d", n_queues );
- argv.push_back ( n_queues_str );
-
- argv.push_back ( 0 );
- pid_t pid = fork();
-
- if ( ! pid ) {
- execv ( path, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing declareQueues: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, DECLARING_CLIENT );
- return pid;
-}
-
-
-
-
-
-pid_t
-startReceivingClient ( brokerVector brokers,
- char const * host,
- char const * receiverPath,
- char const * reportFrequency,
- int verbosity,
- char const * queue_name
- )
-{
- string name("receiver");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1 )
- cout << "startReceivingClient: port " << port << endl;
-
- // verbosity has to be > 1 to let clients talk.
- int client_verbosity = (verbosity > 1 ) ? 1 : 0;
-
- char portStr[100];
- char verbosityStr[100];
- sprintf(portStr, "%d", port);
- sprintf(verbosityStr, "%d", client_verbosity);
-
-
- vector<const char*> argv;
- argv.push_back ( "resumingReceiver" );
- argv.push_back ( host );
- argv.push_back ( portStr );
- argv.push_back ( reportFrequency );
- argv.push_back ( verbosityStr );
- argv.push_back ( queue_name );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
- pids.push_back ( pid );
-
- if ( ! pid ) {
- execv ( receiverPath, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing receiver: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, RECEIVING_CLIENT );
- return pid;
-}
-
-
-
-
-
-pid_t
-startSendingClient ( brokerVector brokers,
- char const * host,
- char const * senderPath,
- char const * nMessages,
- char const * reportFrequency,
- int verbosity,
- int durability,
- char const * queue_name
- )
-{
- string name("sender");
- int port = brokers[0]->getPort ( );
-
- if ( verbosity > 1)
- cout << "startSenderClient: port " << port << endl;
- char portStr[100];
- char verbosityStr[100];
- //
- // verbosity has to be > 1 to let clients talk.
- int client_verbosity = (verbosity > 1 ) ? 1 : 0;
-
- sprintf ( portStr, "%d", port);
- sprintf ( verbosityStr, "%d", client_verbosity);
-
- vector<const char*> argv;
- argv.push_back ( "replayingSender" );
- argv.push_back ( host );
- argv.push_back ( portStr );
- argv.push_back ( nMessages );
- argv.push_back ( reportFrequency );
- argv.push_back ( verbosityStr );
- if ( durability )
- argv.push_back ( "1" );
- else
- argv.push_back ( "0" );
- argv.push_back ( queue_name );
- argv.push_back ( 0 );
-
- pid_t pid = fork();
- pids.push_back ( pid );
-
- if ( ! pid ) {
- execv ( senderPath, const_cast<char * const *>(&argv[0]) );
- perror ( "error executing sender: " );
- return 0;
- }
-
- allMyChildren.add ( name, pid, SENDING_CLIENT );
- return pid;
-}
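runDeclareQueuesClient, startReceivingClient and startSendingClient all share one skeleton: build an argv whose first element is the program name, fork, execv in the child, and register the pid with allMyChildren in the parent. A compact Python sketch of that skeleton (the path, argv and children list are hypothetical stand-ins):

    import os

    def start_client(path, argv, children, kind):
        pid = os.fork()
        if pid == 0:
            # Child: replace this process image. _exit() guarantees a
            # failed exec never falls back into the parent's control flow.
            try:
                os.execv(path, argv)  # argv[0] is the program name
            finally:
                os._exit(1)
        children.append((pid, kind))  # parent: track the child
        return pid

    # e.g. start_client("./replayingSender",
    #                   ["replayingSender", "127.0.0.1", "5672"],
    #                   children, "SENDING_CLIENT")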
-
-
-
-#define HUNKY_DORY 0
-#define BAD_ARGS 1
-#define CANT_FORK_DQ 2
-#define CANT_FORK_RECEIVER 3
-#define CANT_FORK_SENDER 4
-#define DQ_FAILED 5
-#define ERROR_ON_CHILD 6
-#define HANGING 7
-#define ERROR_KILLING_BROKER 8
-
-}} // namespace qpid::tests
-
-using namespace qpid::tests;
-
-// If you want durability, use the "dir" option of "moduleOrDir".
-int
-main ( int argc, char const ** argv )
-{
- int brokerKills = 0;
- if ( argc != 11 ) {
- cerr << "Usage: "
- << argv[0]
- << "moduleOrDir declareQueuesPath senderPath receiverPath nMessages reportFrequency verbosity durable n_queues n_brokers"
- << endl;
- cerr << "\tverbosity is an integer, durable is 0 or 1\n";
- return BAD_ARGS;
- }
- signal ( SIGCHLD, childExit );
-
- int i = 1;
- char const * moduleOrDir = argv[i++];
- char const * declareQueuesPath = argv[i++];
- char const * senderPath = argv[i++];
- char const * receiverPath = argv[i++];
- char const * nMessages = argv[i++];
- char const * reportFrequency = argv[i++];
- int verbosity = atoi(argv[i++]);
- int durable = atoi(argv[i++]);
- int n_queues = atoi(argv[i++]);
- int n_brokers = atoi(argv[i++]);
-
- char const * host = "127.0.0.1";
-
- allMyChildren.verbosity = verbosity;
-
- string clusterName;
-
- srand ( getpid() );
-
- makeClusterName ( clusterName );
-
- brokerVector brokers;
-
- if ( verbosity > 1 )
- cout << "Starting initial cluster...\n";
-
- for ( int i = 0; i < n_brokers; ++ i ) {
- startNewBroker ( brokers,
- moduleOrDir,
- clusterName,
- verbosity,
- durable );
- }
-
-
- if ( verbosity > 0 )
- printBrokers ( brokers );
-
- // Get prefix for each queue name.
- stringstream queue_prefix;
- queue_prefix << "failover_soak_" << getpid();
- string queue_prefix_str(queue_prefix.str());
-
- // Run the declareQueues child.
- int childStatus;
- pid_t dqClientPid =
- runDeclareQueuesClient ( brokers,
- host,
- declareQueuesPath,
- verbosity,
- durable,
- queue_prefix_str.c_str(),
- n_queues
- );
- if ( -1 == dqClientPid ) {
- cerr << "END_OF_TEST ERROR_START_DECLARE_1\n";
- return CANT_FORK_DQ;
- }
-
- // Don't continue until declareQueues is finished.
- pid_t retval = waitpid ( dqClientPid, & childStatus, 0);
- if ( retval != dqClientPid) {
- cerr << "END_OF_TEST ERROR_START_DECLARE_2\n";
- return DQ_FAILED;
- }
- allMyChildren.exited ( dqClientPid, childStatus );
-
-
- /*
- Start one receiving and one sending client for each queue.
- */
- for ( int i = 0; i < n_queues; ++ i ) {
-
- stringstream queue_name;
- queue_name << queue_prefix.str() << '_' << i;
- string queue_name_str(queue_name.str());
-
- // Receiving client ---------------------------
- pid_t receivingClientPid =
- startReceivingClient ( brokers,
- host,
- receiverPath,
- reportFrequency,
- verbosity,
- queue_name_str.c_str() );
- if ( -1 == receivingClientPid ) {
- cerr << "END_OF_TEST ERROR_START_RECEIVER\n";
- return CANT_FORK_RECEIVER;
- }
-
-
- // Sending client ---------------------------
- pid_t sendingClientPid =
- startSendingClient ( brokers,
- host,
- senderPath,
- nMessages,
- reportFrequency,
- verbosity,
- durable,
- queue_name_str.c_str() );
- if ( -1 == sendingClientPid ) {
- cerr << "END_OF_TEST ERROR_START_SENDER\n";
- return CANT_FORK_SENDER;
- }
- }
-
-
- int minSleep = 2,
- maxSleep = 6;
-
- int totalBrokers = n_brokers;
-
- int loop = 0;
-
- while ( 1 )
- {
- ++ loop;
-
- /*
- if ( verbosity > 1 )
- std::cerr << "------- loop " << loop << " --------\n";
-
- if ( verbosity > 0 )
- cout << totalBrokers << " brokers have been added to the cluster.\n\n\n";
- */
-
- // Sleep for a while. -------------------------
- int sleepyTime = mrand ( minSleep, maxSleep );
- sleep ( sleepyTime );
-
- int bullet = mrand ( 100 );
- if ( bullet >= 95 )
- {
- fprintf ( stderr, "Killing oldest broker...\n" );
-
- // Kill the oldest broker. --------------------------
- if ( ! killFrontBroker ( brokers, verbosity ) )
- {
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
- std::cerr << "END_OF_TEST ERROR_BROKER\n";
- return ERROR_KILLING_BROKER;
- }
- ++ brokerKills;
-
- // Start a new broker. --------------------------
- if ( verbosity > 0 )
- cout << "Starting new broker.\n\n";
-
- startNewBroker ( brokers,
- moduleOrDir,
- clusterName,
- verbosity,
- durable );
- ++ totalBrokers;
- printBrokers ( brokers );
- cerr << brokerKills << " brokers have been killed.\n\n\n";
- }
-
- int retval = allMyChildren.checkChildren();
- if ( retval )
- {
- std::cerr << "END_OF_TEST ERROR_CLIENT\n";
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
- return ERROR_ON_CHILD;
- }
-
- // If all children have exited, quit.
- int unfinished = allMyChildren.unfinished();
- if ( unfinished == 0 ) {
- killAllBrokers ( brokers, 5 );
-
- if ( verbosity > 1 )
- cout << "failoverSoak: all children have exited.\n";
-
- std::cerr << "END_OF_TEST SUCCESSFUL\n";
- return HUNKY_DORY;
- }
-
- }
-
- allMyChildren.killEverybody();
- killAllBrokers ( brokers, 5 );
-
- std::cerr << "END_OF_TEST SUCCESSFUL\n";
-
- return HUNKY_DORY;
-}
-
-
-
diff --git a/cpp/src/tests/federated_cluster_test b/cpp/src/tests/federated_cluster_test
deleted file mode 100755
index f42b7501b8..0000000000
--- a/cpp/src/tests/federated_cluster_test
+++ /dev/null
@@ -1,153 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Test reliability of the replication feature in the face of link
-# failures:
-srcdir=`dirname $0`
-source ./test_env.sh
-
-trap stop_brokers EXIT
-
-fail() {
- echo $1
- exit 1
-}
-
-stop_brokers() {
- if [[ $BROKER_A ]] ; then
- ../qpidd --no-module-dir -q --port $BROKER_A
- unset BROKER_A
- fi
- if [[ $NODE_1 ]] ; then
- ../qpidd --no-module-dir -q --port $NODE_1
- unset NODE_1
- fi
- if [[ $NODE_2 ]] ; then
- ../qpidd --no-module-dir -q --port $NODE_2
- unset NODE_2
- fi
- if [ -f cluster.ports ]; then
- rm cluster.ports
- fi
-}
-
-start_brokers() {
- #start single node...
- BROKER_A=`../qpidd --daemon --port 0 --no-data-dir --no-module-dir --auth no --log-enable info+` || fail "BROKER_A failed to start"
-
- #...and start cluster
- $srcdir/start_cluster 2 || fail "Could not start cluster"
- NODE_1=$(head -1 cluster.ports)
- NODE_2=$(tail -1 cluster.ports)
- test -n "$NODE_1" || fail "NODE_1 failed to start"
- test -n "$NODE_2" || fail "NODE_2 failed to start"
-}
-
-setup() {
- #create exchange on both cluster and single broker
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add exchange direct test-exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" add exchange direct test-exchange
-
- #create dynamic routes for test exchange
- $PYTHON_COMMANDS/qpid-route dynamic add "localhost:$NODE_2" "localhost:$BROKER_A" test-exchange
- $PYTHON_COMMANDS/qpid-route dynamic add "localhost:$BROKER_A" "localhost:$NODE_2" test-exchange
-
- #create test queue on cluster and bind it to the test exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" add queue test-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$NODE_1" bind test-exchange test-queue to-cluster
-
- #create test queue on single broker and bind it to the test exchange
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue test-queue
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" bind test-exchange test-queue from-cluster
-}
-
-run_test_pull_to_cluster_two_consumers() {
- #start consumers on each of the two nodes of the cluster
- ./receiver --port $NODE_1 --queue test-queue --credit-window 1 > fed1.out.tmp &
- ./receiver --port $NODE_2 --queue test-queue --credit-window 1 > fed2.out.tmp &
-
- #send stream of messages to test exchange on single broker
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $BROKER_A --exchange test-exchange --routing-key to-cluster --send-eos 2 < fed.in.tmp
-
- #combine output of the two consumers, sort it and compare with the expected stream
- wait
- sort -g -k 2 fed1.out.tmp fed2.out.tmp > fed.out.tmp
- diff fed.in.tmp fed.out.tmp || fail "federated link to cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-run_test_pull_to_cluster() {
- #send stream of messages to test exchange on single broker
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $BROKER_A --exchange test-exchange --routing-key to-cluster --send-eos 1 < fed.in.tmp
-
- #consume from remaining node of the cluster
- ./receiver --port $NODE_2 --queue test-queue > fed.out.tmp
-
- #verify all messages are received
- diff fed.in.tmp fed.out.tmp || fail "federated link to cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-run_test_pull_from_cluster() {
- #start consumer on single broker
- ./receiver --port $BROKER_A --queue test-queue --credit-window 1 > fed.out.tmp &
-
- #send stream of messages to test exchange on cluster
- for i in `seq 1 1000`; do echo Message $i >> fed.in.tmp; done
- ./sender --port $NODE_2 --exchange test-exchange --routing-key from-cluster --send-eos 1 < fed.in.tmp
-
- #verify all messages are received
- wait
- diff fed.in.tmp fed.out.tmp || fail "federated link from cluster failed: expectations not met!"
-
- rm -f fed*.tmp #cleanup
-}
-
-
-if test -d ${PYTHON_DIR}; then
- . cpg_check.sh
- cpg_enabled || exit 0
-
- rm -f fed*.tmp #cleanup any files left from previous run
- start_brokers
- echo "brokers started"
- setup
- echo "setup completed"
- run_test_pull_to_cluster_two_consumers
- echo "federated link to cluster verified"
- run_test_pull_from_cluster
- echo "federated link from cluster verified"
- if [[ $TEST_NODE_FAILURE ]] ; then
- #kill first cluster node and retest
- kill -9 $(../qpidd --check --port $NODE_1) && unset NODE_1
- echo "killed first cluster node; waiting for links to re-establish themselves..."
- sleep 5
- echo "retesting..."
- run_test_pull_to_cluster
- echo "federated link to cluster verified"
- run_test_pull_from_cluster
- echo "federated link from cluster verified"
- fi
-fi
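For reference, the deleted script's setup() drives everything through the qpid-config and qpid-route command-line tools. The same sequence from Python, as an illustrative sketch only (the subprocess calls mirror the script's own commands; the ports are hypothetical):

    import subprocess

    def run(*cmd):
        subprocess.check_call(cmd)

    broker_a = "localhost:5672"  # single broker (hypothetical port)
    node = "localhost:5673"      # a cluster node (hypothetical port)

    # create the exchange on both the single broker and the cluster
    for b in (broker_a, node):
        run("qpid-config", "-b", b, "add", "exchange", "direct", "test-exchange")

    # create dynamic routes in both directions
    run("qpid-route", "dynamic", "add", node, broker_a, "test-exchange")
    run("qpid-route", "dynamic", "add", broker_a, node, "test-exchange")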
diff --git a/cpp/src/tests/federation.py b/cpp/src/tests/federation.py
index dcd074eda9..6477c6effd 100755
--- a/cpp/src/tests/federation.py
+++ b/cpp/src/tests/federation.py
@@ -2604,3 +2604,109 @@ class FederationTests(TestBase010):
self.verify_cleanup()
+ def test_dynamic_bounce_unbinds_named_queue(self):
+ """ Verify that a propagated binding is removed when the connection is
+ bounced
+ """
+ session = self.session
+
+ # create the federation
+
+ self.startQmf()
+ qmf = self.qmf
+
+ self._setup_brokers()
+
+ # create exchange on each broker, and retrieve the corresponding
+ # management object for that exchange
+
+ exchanges=[]
+ for _b in self._brokers[0:2]:
+ _b.client_session.exchange_declare(exchange="fedX", type="direct")
+ self.assertEqual(_b.client_session.exchange_query(name="fedX").type,
+ "direct", "exchange_declare failed!")
+ # pull the exchange out of qmf...
+            my_exchange = None
+ timeout = time() + 10
+ while my_exchange is None and time() <= timeout:
+ objs = qmf.getObjects(_broker=_b.qmf_broker, _class="exchange")
+ for ooo in objs:
+ if ooo.name == "fedX":
+ my_exchange = ooo
+ break
+ if my_exchange is None:
+ self.fail("QMF failed to find new exchange!")
+ exchanges.append(my_exchange)
+
+ # on the destination broker, create a binding for propagation
+ self._brokers[0].client_session.queue_declare(queue="fedDstQ")
+ self._brokers[0].client_session.exchange_bind(queue="fedDstQ", exchange="fedX", binding_key="spud")
+
+ # on the source broker, create a bridge queue
+ self._brokers[1].client_session.queue_declare(queue="fedSrcQ")
+
+ # connect B1 --> B0
+ result = self._brokers[0].qmf_object.create( "link",
+ "Link-dynamic",
+ {"host":self._brokers[1].host,
+ "port":self._brokers[1].port}, False)
+ self.assertEqual(result.status, 0)
+
+ # bridge the "fedX" exchange:
+ result = self._brokers[0].qmf_object.create("bridge",
+ "Bridge-dynamic",
+ {"link":"Link-dynamic",
+ "src":"fedX",
+ "dest":"fedX",
+ "dynamic":True,
+ "queue":"fedSrcQ"}, False)
+ self.assertEqual(result.status, 0)
+
+ # wait for the inter-broker links to become operational
+ operational = False
+ timeout = time() + 10
+ while not operational and time() <= timeout:
+ operational = True
+ for _l in qmf.getObjects(_class="link"):
+ #print("Link=%s:%s %s" % (_l.host, _l.port, str(_l.state)))
+ if _l.state != "Operational":
+ operational = False
+ self.failUnless(operational, "inter-broker links failed to become operational.")
+
+ # wait until the binding key has propagated to the src broker
+ exchanges[1].update()
+ timeout = time() + 10
+ while exchanges[1].bindingCount < 1 and time() <= timeout:
+ exchanges[1].update()
+ self.failUnless(exchanges[1].bindingCount == 1)
+
+ #
+ # Tear down the bridges between the two exchanges, then wait
+ # for the bindings to be cleaned up
+ #
+ for _b in qmf.getObjects(_class="bridge"):
+ result = _b.close()
+ self.assertEqual(result.status, 0)
+ exchanges[1].update()
+ timeout = time() + 10
+ while exchanges[1].bindingCount != 0 and time() <= timeout:
+ exchanges[1].update()
+ self.failUnless(exchanges[1].bindingCount == 0)
+
+ self._brokers[1].client_session.queue_delete(queue="fedSrcQ")
+
+ for _b in qmf.getObjects(_class="bridge"):
+ result = _b.close()
+ self.assertEqual(result.status, 0)
+
+ for _l in qmf.getObjects(_class="link"):
+ result = _l.close()
+ self.assertEqual(result.status, 0)
+
+ for _b in self._brokers[0:2]:
+ _b.client_session.exchange_delete(exchange="fedX")
+
+ self._teardown_brokers()
+
+ self.verify_cleanup()
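The new test repeats one idiom several times: compute a deadline, then poll QMF state (link status, binding counts) until it converges or the deadline passes. A small helper capturing that idiom, offered only as a sketch rather than part of the suite (the real loops also call update() on the QMF object between polls):

    from time import time, sleep

    def wait_until(predicate, timeout=10, interval=0.25):
        # Poll predicate() until it returns True or the deadline passes.
        deadline = time() + timeout
        while time() <= deadline:
            if predicate():
                return True
            sleep(interval)
        return False

    # e.g. self.failUnless(wait_until(lambda: exchanges[1].bindingCount == 0))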
diff --git a/cpp/src/tests/federation_sys.py b/cpp/src/tests/federation_sys.py
index 11590f684e..e2553e4cf3 100755
--- a/cpp/src/tests/federation_sys.py
+++ b/cpp/src/tests/federation_sys.py
@@ -7,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -37,12 +37,12 @@ class Enum(object):
class QmfTestBase010(TestBase010):
-
+
_brokers = []
_links = []
_bridges = []
_alt_exch_ops = Enum(none=0, create=1, delete=2)
-
+
class _Broker(object):
"""
This broker proxy object holds the Qmf proxy to a broker of known address as well as the QMF broker
@@ -64,7 +64,7 @@ class QmfTestBase010(TestBase010):
for session in self.sessions:
try: # Session may have been closed by broker error
session.close()
- except Exception, e: print "WARNING: %s: Unable to close session %s (%s): %s %s" % (self, session, hex(id(session)), type(e), e)
+ except Exception, e: print "WARNING: %s: Unable to close session %s (%s): %s %s" % (self, session, hex(id(session)), type(e), e)
try: # Connection may have been closed by broker error
self.connection.close()
except Exception, e: print "WARNING: %s: Unable to close connection %s (%s): %s %s" % (self, self.connection, hex(id(self.connection)), type(e), e)
@@ -72,7 +72,7 @@ class QmfTestBase010(TestBase010):
session = self.connection.session(name, transactional_flag)
self.sessions.append(session)
return session
-
+
def setUp(self):
"""
             Called once before each test starts
@@ -96,34 +96,22 @@ class QmfTestBase010(TestBase010):
b.destroy(self.qmf)
TestBase010.tearDown(self)
self.qmf.close()
-
+
#--- General test utility functions
-
+
def _get_name(self):
"""
             Return the name of the method that called this method, stripped of its "test_" prefix. Used for naming
             queues and exchanges on a per-test basis.
"""
return stack()[1][3][5:]
-
+
def _get_broker_port(self, key):
"""
Get the port of a broker defined in the environment using -D<key>=portno
"""
return int(self.defines[key])
-
- def _get_cluster_ports(self, key):
- """
- Get the cluster ports from the parameters of the test which place it in the environment using
- -D<key>="port0 port1 ... portN" (space-separated)
- """
- ports = []
- ports_str = self.defines[key]
- if ports_str:
- for p in ports_str.split():
- ports.append(int(p))
- return ports
-
+
def _get_send_address(self, exch_name, queue_name):
"""
Get an address to which to send messages based on the exchange name and queue name, but taking into account
@@ -132,18 +120,15 @@ class QmfTestBase010(TestBase010):
if len(exch_name) == 0: # Default exchange
return queue_name
return "%s/%s" % (exch_name, queue_name)
-
- def _get_broker(self, cluster_flag, broker_port_key, cluster_ports_key):
+
+ def _get_broker(self, broker_port_key):
"""
Read the port numbers for pre-started brokers from the environment using keys, then find or create and return
the Qmf broker proxy for the appropriate broker
"""
- if cluster_flag:
- port = self._get_cluster_ports(cluster_ports_key)[0] # Always use the first node in the cluster
- else:
- port = self._get_broker_port(broker_port_key)
+ port = self._get_broker_port(broker_port_key)
return self._find_create_broker("localhost:%s" % port)
-
+
def _get_msg_subject(self, topic_key):
"""
Return an appropriate subject for sending a message to a known topic. Return None if there is no topic.
@@ -152,7 +137,7 @@ class QmfTestBase010(TestBase010):
if "*" in topic_key: return topic_key.replace("*", "test")
if "#" in topic_key: return topic_key.replace("#", "multipart.test")
return topic_key
-
+
def _send_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", topic_key = "",
msg_durable_flag = False, enq_txn_size = 0):
"""
@@ -172,7 +157,7 @@ class QmfTestBase010(TestBase010):
send_session.commit()
sender.close()
send_session.close()
-
+
def _receive_msgs(self, session_name, broker, addr, msg_count, msg_content = "Message_%03d", deq_txn_size = 0,
timeout = 0):
"""
@@ -214,9 +199,9 @@ class QmfTestBase010(TestBase010):
receive_session.commit()
receiver.close()
receive_session.close()
-
+
#--- QMF-specific utility functions
-
+
def _get_qmf_property(self, props, key):
"""
Get the value of a named property key kj from a property list [(k0, v0), (k1, v1), ... (kn, vn)].
@@ -225,13 +210,13 @@ class QmfTestBase010(TestBase010):
if k.name == key:
return v
return None
-
+
def _check_qmf_return(self, method_result):
"""
Check the result of a Qmf-defined method call
"""
self.assertTrue(method_result.status == 0, method_result.text)
-
+
def _check_optional_qmf_property(self, qmf_broker, type, qmf_object, key, expected_val, obj_ref_flag):
"""
Optional Qmf properties don't show up in the properties list when they are not specified. Checks for
@@ -253,9 +238,9 @@ class QmfTestBase010(TestBase010):
(type, qmf_object.name, key, val, expected_val))
else:
self.fail("%s %s exists, but has an unexpected %s property \"%s\" set." % (type, qmf_object.name, key, val))
-
+
#--- Find/create Qmf broker objects
-
+
def _find_qmf_broker(self, url):
"""
Find the Qmf broker object for the given broker URL. The broker must have been previously added to Qmf through
@@ -265,7 +250,7 @@ class QmfTestBase010(TestBase010):
if b.getBroker().getUrl() == url:
return b
return None
-
+
def _find_create_broker(self, url):
"""
Find a running broker through Qmf. If it does not exist, add it (assuming the broker is already running).
@@ -280,9 +265,9 @@ class QmfTestBase010(TestBase010):
else:
broker.qmf_broker = qmf_broker
return broker
-
+
#--- Find/create/delete exchanges
-
+
def _find_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
"""
Find Qmf exchange object
@@ -302,7 +287,7 @@ class QmfTestBase010(TestBase010):
(name, e.autoDelete, auto_delete))
return e
return None
-
+
def _find_create_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete, args):
"""
Find Qmf exchange object if exchange exists, create exchange and return its Qmf object if not
@@ -315,7 +300,7 @@ class QmfTestBase010(TestBase010):
e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
self.assertNotEqual(e, None, "Creation of exchange %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
return e
-
+
def _find_delete_qmf_exchange(self, qmf_broker, name, type, alternate, durable, auto_delete):
"""
Find and delete Qmf exchange object if it exists
@@ -323,9 +308,9 @@ class QmfTestBase010(TestBase010):
e = self._find_qmf_exchange(qmf_broker, name, type, alternate, durable, auto_delete)
if e is not None and not auto_delete:
self._check_qmf_return(qmf_broker.delete(type="exchange", name=name, options={}))
-
+
#--- Find/create/delete queues
-
+
def _find_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete):
"""
Find a Qmf queue object
@@ -344,7 +329,7 @@ class QmfTestBase010(TestBase010):
(name, q.autoDelete, auto_delete))
return q
return None
-
+
def _find_create_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
"""
Find Qmf queue object if queue exists, create queue and return its Qmf object if not
@@ -357,7 +342,7 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
self.assertNotEqual(q, None, "Creation of queue %s on broker %s failed" % (name, qmf_broker.getBroker().getUrl()))
return q
-
+
def _find_delete_qmf_queue(self, qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete, args):
"""
Find and delete Qmf queue object if it exists
@@ -365,9 +350,9 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, name, alternate_exchange, durable, exclusive, auto_delete)
if q is not None and not auto_delete:
self._check_qmf_return(qmf_broker.delete(type="queue", name=name, options={}))
-
+
#--- Find/create/delete bindings (between an exchange and a queue)
-
+
def _find_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find a Qmf binding object
@@ -383,7 +368,7 @@ class QmfTestBase010(TestBase010):
(qmf_exchange.name, qmf_queue.name, b.arguments, binding_args))
return b
return None
-
+
def _find_create_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find Qmf binding object if it exists, create binding and return its Qmf object if not
@@ -396,7 +381,7 @@ class QmfTestBase010(TestBase010):
self.assertNotEqual(b, None, "Creation of binding between exchange %s and queue %s with key %s failed" %
(qmf_exchange.name, qmf_queue.name, binding_key))
return b
-
+
def _find_delete_qmf_binding(self, qmf_broker, qmf_exchange, qmf_queue, binding_key, binding_args):
"""
Find and delete Qmf binding object if it exists
@@ -405,7 +390,7 @@ class QmfTestBase010(TestBase010):
if b is not None:
if len(qmf_exchange.name) > 0: # not default exchange
self._check_qmf_return(qmf_broker.delete(type="binding", name="%s/%s/%s" % (qmf_exchange.name, qmf_queue.name, binding_key), options={}))
-
+
#--- Find/create a link
def _find_qmf_link(self, qmf_from_broker_proxy, host, port):
@@ -416,7 +401,7 @@ class QmfTestBase010(TestBase010):
if l.host == host and l.port == port:
return l
return None
-
+
def _find_create_qmf_link(self, qmf_from_broker, qmf_to_broker_proxy, link_durable_flag, auth_mechanism, user_id,
password, transport, pause_interval, link_ready_timeout):
"""
@@ -433,24 +418,19 @@ class QmfTestBase010(TestBase010):
(qmf_from_broker.getBroker().getUrl(), qmf_to_broker_proxy.getUrl()))
self._wait_for_link(l, pause_interval, link_ready_timeout)
return l
-
+
def _wait_for_link(self, link, pause_interval, link_ready_timeout):
"""
Wait for link to become active (state=Operational)
"""
tot_time = 0
link.update()
- if link.state == "":
- # Link mgmt updates for the c++ link object are disabled when in a cluster because of inconsistent state:
- # one is "Operational", the other "Passive". In this case, wait a bit and hope for the best...
- sleep(2*pause_interval)
- else:
- while link.state != "Operational" and tot_time < link_ready_timeout:
- sleep(pause_interval)
- tot_time += pause_interval
- link.update()
- self.assertEqual(link.state, "Operational", "Timeout: Link not operational, state=%s" % link.state)
-
+ while link.state != "Operational" and tot_time < link_ready_timeout:
+ sleep(pause_interval)
+ tot_time += pause_interval
+ link.update()
+ self.assertEqual(link.state, "Operational", "Timeout: Link not operational, state=%s" % link.state)
+
#--- Find/create a bridge
def _find_qmf_bridge(self, qmf_broker_proxy, qmf_link, source, destination, key):
@@ -461,7 +441,7 @@ class QmfTestBase010(TestBase010):
if b.linkRef == qmf_link.getObjectId() and b.src == source and b.dest == destination and b.key == key:
return b
return None
-
+
def _find_create_qmf_bridge(self, qmf_broker_proxy, qmf_link, queue_name, exch_name, topic_key,
queue_route_type_flag, bridge_durable_flag):
"""
@@ -486,7 +466,7 @@ class QmfTestBase010(TestBase010):
b = self._find_qmf_bridge(qmf_broker_proxy, qmf_link, src, dest, key)
self.assertNotEqual(b, None, "Bridge creation failed: src=%s dest=%s key=%s" % (src, dest, key))
return b
-
+
def _wait_for_bridge(self, bridge, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval,
bridge_ready_timeout):
"""
@@ -522,9 +502,9 @@ class QmfTestBase010(TestBase010):
sender.close()
send_session.close()
self.assertTrue(active, "Bridge failed to become active after %ds: %s" % (bridge_ready_timeout, bridge))
-
+
#--- Find/create/delete utility functions
-
+
def _create_and_bind(self, qmf_broker, exchange_args, queue_args, binding_args):
"""
Create a binding between a named exchange and queue on a broker
@@ -532,7 +512,7 @@ class QmfTestBase010(TestBase010):
e = self._find_create_qmf_exchange(qmf_broker, **exchange_args)
q = self._find_create_qmf_queue(qmf_broker, **queue_args)
return self._find_create_qmf_binding(qmf_broker, e, q, **binding_args)
-
+
def _check_alt_exchange(self, qmf_broker, alt_exch_name, alt_exch_type, alt_exch_op):
"""
Check for existence of alternate exchange. Return the Qmf exchange proxy object for the alternate exchange
@@ -546,7 +526,7 @@ class QmfTestBase010(TestBase010):
alternate="", durable=False, auto_delete=False)
return self._find_qmf_exchange(qmf_broker=qmf_broker, name=alt_exchange_name, type=alt_exchange_type,
alternate="", durable=False, auto_delete=False)
-
+
def _delete_queue_binding(self, qmf_broker, exchange_args, queue_args, binding_args):
"""
Delete a queue and the binding between it and the exchange
@@ -555,7 +535,7 @@ class QmfTestBase010(TestBase010):
q = self._find_qmf_queue(qmf_broker, queue_args["name"], queue_args["alternate_exchange"], queue_args["durable"], queue_args["exclusive"], queue_args["auto_delete"])
self._find_delete_qmf_binding(qmf_broker, e, q, **binding_args)
self._find_delete_qmf_queue(qmf_broker, **queue_args)
-
+
def _create_route(self, queue_route_type_flag, src_broker, dest_broker, exch_name, queue_name, topic_key,
link_durable_flag, bridge_durable_flag, auth_mechanism, user_id, password, transport,
pause_interval = 1, link_ready_timeout = 20, bridge_ready_timeout = 20):
@@ -571,7 +551,7 @@ class QmfTestBase010(TestBase010):
self._wait_for_bridge(b, src_broker, dest_broker, exch_name, queue_name, topic_key, pause_interval, bridge_ready_timeout)
# Parameterized test - entry point for tests
-
+
def _do_test(self,
test_name, # Name of test
exch_name = "amq.direct", # Remote exchange name
@@ -597,8 +577,6 @@ class QmfTestBase010(TestBase010):
queue_route_type_flag = False, # Route type: false = bridge route, true = queue route
enq_txn_size = 0, # Enqueue transaction size, 0 = no transactions
deq_txn_size = 0, # Dequeue transaction size, 0 = no transactions
- local_cluster_flag = False, # Use a node from the local cluster, otherwise use single local broker
- remote_cluster_flag = False, # Use a node from the remote cluster, otherwise use single remote broker
alt_exch_op = _alt_exch_ops.create,# Op on alt exch [create (ensure present), delete (ensure not present), none (neither create nor delete)]
auth_mechanism = "", # Authorization mechanism for linked broker
user_id = "", # User ID for authorization on linked broker
@@ -609,32 +587,32 @@ class QmfTestBase010(TestBase010):
Parameterized federation test. Sets up a federated link between a source broker and a destination broker and
checks that messages correctly pass over the link to the destination. Where appropriate (non-queue-routes), also
checks for the presence of messages on the source broker.
-
+
In these tests, the concept is to create a LOCAL broker, then create a link to a REMOTE broker using federation.
In other words, the messages sent to the LOCAL broker will be replicated on the REMOTE broker, and tests are
performed on the REMOTE broker to check that the required messages are present. In the case of regular routes,
the LOCAL broker will also retain the messages, and a similar test is performed on this broker.
-
+
TODO: There are several items to improve here:
1. _do_test() is rather general. Rather create a version for each exchange type and test the exchange/queue
interaction in more detail based on the exchange type
2. Add a headers and an xml exchange type
- 3. Restructure the tests to start and stop brokers and clusters directly rather than relying on previously
- started brokers. Then persistence can be checked by stopping and restarting the brokers/clusters. In particular,
+ 3. Restructure the tests to start and stop brokers directly rather than relying on previously
+ started brokers. Then persistence can be checked by stopping and restarting the brokers. In particular,
test the persistence of links and bridges, both of which take a persistence flag.
4. Test the behavior of the alternate exchanges when messages are sourced through a link. Also check behavior
when the alternate exchange is not present or is deleted after the reference is made.
5. Test special queue types (eg LVQ)
"""
- local_broker = self._get_broker(local_cluster_flag, "local-port", "local-cluster-ports")
- remote_broker = self._get_broker(remote_cluster_flag, "remote-port", "remote-cluster-ports")
-
+ local_broker = self._get_broker("local-port")
+ remote_broker = self._get_broker("remote-port")
+
# Check alternate exchanges exist (and create them if not) on both local and remote brokers
self._check_alt_exchange(local_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
self._check_alt_exchange(local_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
self._check_alt_exchange(remote_broker.qmf_broker, exch_alt_exch, exch_alt_exch_type, alt_exch_op)
self._check_alt_exchange(remote_broker.qmf_broker, queue_alt_exch, queue_alt_exch_type, alt_exch_op)
-
+
queue_name = "queue_%s" % test_name
exchange_args = {"name": exch_name, "type": exch_type, "alternate": exch_alt_exch,
"durable": exch_durable_flag, "auto_delete": exch_auto_delete_flag, "args": exch_x_args}
@@ -658,70 +636,70 @@ class QmfTestBase010(TestBase010):
if not queue_route_type_flag:
self._receive_msgs("local_receive_session", local_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size)
self._receive_msgs("remote_receive_session", remote_broker, addr = queue_name, msg_count = msg_count, deq_txn_size = deq_txn_size, timeout = 5)
-
+
# Clean up
self._delete_queue_binding(qmf_broker=local_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
self._delete_queue_binding(qmf_broker=remote_broker.qmf_broker, exchange_args=exchange_args, queue_args=queue_args, binding_args=binding_args)
class A_ShortTests(QmfTestBase010):
-
+
def test_route_defaultExch(self):
self._do_test(self._get_name())
-
+
def test_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True)
-
-
+
+
class A_LongTests(QmfTestBase010):
-
+
def test_route_amqDirectExch(self):
self._do_test(self._get_name(), exch_name="amq.direct")
-
+
def test_queueRoute_amqDirectExch(self):
self._do_test(self._get_name(), exch_name="amq.direct", queue_route_type_flag=True)
-
-
+
+
def test_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange")
-
+
def test_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True)
-
-
+
+
def test_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout")
-
+
def test_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True)
-
-
+
+
def test_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#")
-
+
def test_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True)
-
-
+
+
class B_ShortTransactionTests(QmfTestBase010):
-
+
def test_txEnq01_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
class B_LongTransactionTests(QmfTestBase010):
-
+
def test_txEnq10_route_defaultExch(self):
self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_defaultExch(self):
self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
@@ -730,1171 +708,270 @@ class B_LongTransactionTests(QmfTestBase010):
def test_txEnq01_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-class C_ShortClusterTests(QmfTestBase010):
-
- def test_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True)
-
- def test_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class C_LongClusterTests(QmfTestBase010):
-
- def test_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True)
-
- def test_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_ShortClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class D_LongClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_defaultExch(self):
- self._do_test(self._get_name(), enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_defaultExch(self):
- self._do_test(self._get_name(), queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class E_ShortPersistenceTests(QmfTestBase010):
-
+class E_ShortPersistenceTests(QmfTestBase010):
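+    """Persistence tests over the default exchange: durable queues, with and without durable messages, via exchange and queue routes."""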
+
def test_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True)
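+        # queue_route_type_flag=True presumably selects a queue route (cf.
+        # "qpid-route queue add") instead of the default exchange route.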
-
+
def test_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-class E_LongPersistenceTests(QmfTestBase010):
-
+class E_LongPersistenceTests(QmfTestBase010):
+
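+    """Persistence tests as above, run against the named direct, fanout and topic test exchanges."""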
def test_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True)
-
+
def test_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True)
-
+
def test_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True)
-
+
def test_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True)
-
+
class F_ShortPersistenceTransactionTests(QmfTestBase010):
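+    """Transactional persistence tests over the default exchange, using single-message enqueue and dequeue transactions."""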
-
+
def test_txEnq01_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
class F_LongPersistenceTransactionTests(QmfTestBase010):
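+    """Transactional persistence tests using larger enqueue transactions (enq_txn_size=10, msg_count=103) and the direct, fanout and topic exchanges."""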
-
+
def test_txEnq10_route_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
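+        # msg_count=103 is not a multiple of enq_txn_size=10, so the final
+        # enqueue transaction presumably exercises a short (partial) batch.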
-
+
def test_txEnq10_route_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_defaultExch(self):
self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_defaultExch(self):
self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
-
+
+
def test_txEnq01_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq10_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_directExch(self):
self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq10_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_fanoutExch(self):
self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
def test_txEnq01_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
-
+
def test_txEnq01_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1)
def test_txEnq10_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq10_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103)
-
+
def test_txEnq01_txDeq01_route_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_route_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-
+
def test_txEnq01_txDeq01_queueRoute_durMsg_durQueue_topicExch(self):
self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1)
-class G_ShortPersistenceClusterTests(QmfTestBase010):
-
- def test_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class G_LongPersistenceClusterTests(QmfTestBase010):
-
-
-
- def test_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True)
-
- def test_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_ShortPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-class H_LongPersistenceClusterTransactionTests(QmfTestBase010):
-
- def test_txEnq10_locCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_defaultExch(self):
- self._do_test(self._get_name(), queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_defaultExch(self):
- self._do_test(self._get_name(), msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
-
-
-
- def test_txEnq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_directExch(self):
- self._do_test(self._get_name(), exch_name="testDirectExchange", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_fanoutExch(self):
- self._do_test(self._get_name(), exch_name="testFanoutExchange", exch_type="fanout", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
-
- def test_txEnq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq10_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq10_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq10_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=10, msg_count = 103, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_route_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
- def test_txEnq01_txDeq01_locCluster_remCluster_queueRoute_durMsg_durQueue_topicExch(self):
- self._do_test(self._get_name(), exch_name="testTopicExchange", exch_type="topic", topic_key=self._get_name()+".#", msg_durable_flag=True, queue_durable_flag=True, queue_route_type_flag=True, enq_txn_size=1, deq_txn_size=1, local_cluster_flag=True, remote_cluster_flag=True)
-
diff --git a/cpp/src/tests/ha_store_tests.py b/cpp/src/tests/ha_store_tests.py
new file mode 100755
index 0000000000..d1eaca1b87
--- /dev/null
+++ b/cpp/src/tests/ha_store_tests.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+This module contains tests for HA functionality that requires a store.
+It is not included as part of "make check" since it will not function
+without a store. Currently it can be run from a build of the message
+store.
+"""
+
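+# How to run (sketch, mirroring the __main__ block at the end of this file):
+# point QPID_HA_EXEC at a built qpid-ha executable and invoke this module
+# directly; it re-executes itself under qpid-python-test, e.g.
+#
+#     QPID_HA_EXEC=$BUILD/bin/qpid-ha python ha_store_tests.py
+#
+# (the $BUILD path is an assumption; any location of qpid-ha works).
+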
+import os, signal, sys, time, imp, re, subprocess, glob, random, logging, shutil, math, unittest
+import traceback
+from qpid.messaging import Message, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED, Empty
+from qpid.datatypes import uuid4
+from brokertest import *
+from ha_test import *
+from threading import Thread, Lock, Condition
+from logging import getLogger, WARN, ERROR, DEBUG, INFO
+from qpidtoollibs import BrokerAgent
+from uuid import UUID
+
+
+class StoreTests(BrokerTest):
+ """Test for HA with persistence."""
+
+ def test_store_recovery(self):
+ """Verify basic store and recover functionality"""
+ cluster = HaCluster(self, 2)
+ sn = cluster[0].connect().session()
+ s = sn.sender("qq;{create:always,node:{durable:true}}")
+ sk = sn.sender("xx/k;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:xx,key:k,queue:qq}]}}")
+ s.send(Message("foo", durable=True))
+ s.send(Message("bar", durable=True))
+ sk.send(Message("baz", durable=True))
+ r = cluster[0].connect().session().receiver("qq")
+ self.assertEqual(r.fetch().content, "foo")
+ r.session.acknowledge()
+ # FIXME aconway 2012-09-21: sending this message is an ugly hack to flush
+ # the dequeue operation on qq.
+ s.send(Message("flush", durable=True))
+
+ def verify(broker, x_count):
+ sn = broker.connect().session()
+ assert_browse(sn, "qq", [ "bar", "baz", "flush" ]+ (x_count)*["x"])
+ sn.sender("xx/k").send(Message("x", durable=True))
+ assert_browse(sn, "qq", [ "bar", "baz", "flush" ]+ (x_count+1)*["x"])
+
+ verify(cluster[0], 0)
+ cluster.bounce(0, promote_next=False)
+ cluster[0].promote()
+ cluster[0].wait_status("active")
+ verify(cluster[0], 1)
+ cluster.kill(0, promote_next=False)
+ cluster[1].promote()
+ cluster[1].wait_status("active")
+ verify(cluster[1], 2)
+ cluster.bounce(1, promote_next=False)
+ cluster[1].promote()
+ cluster[1].wait_status("active")
+ verify(cluster[1], 3)
+
+ def test_catchup_store(self):
+ """Verify that a backup erases queue data from store recovery before
+ doing catch-up from the primary."""
+ cluster = HaCluster(self, 2)
+ sn = cluster[0].connect().session()
+ s1 = sn.sender("q1;{create:always,node:{durable:true}}")
+ for m in ["foo","bar"]: s1.send(Message(m, durable=True))
+ s2 = sn.sender("q2;{create:always,node:{durable:true}}")
+ sk2 = sn.sender("ex/k2;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:ex,key:k2,queue:q2}]}}")
+ sk2.send(Message("hello", durable=True))
+ # Wait for backup to catch up.
+ cluster[1].assert_browse_backup("q1", ["foo","bar"])
+ cluster[1].assert_browse_backup("q2", ["hello"])
+
+ # Make changes that the backup doesn't see
+ cluster.kill(1, promote_next=False)
+ time.sleep(1) # FIXME aconway 2012-09-25:
+ r1 = cluster[0].connect().session().receiver("q1")
+ for m in ["foo", "bar"]: self.assertEqual(r1.fetch().content, m)
+ r1.session.acknowledge()
+ for m in ["x","y","z"]: s1.send(Message(m, durable=True))
+ # Use old connection to unbind
+ us = cluster[0].connect_old().session(str(uuid4()))
+ us.exchange_unbind(exchange="ex", binding_key="k2", queue="q2")
+ us.exchange_bind(exchange="ex", binding_key="k1", queue="q1")
+ # Restart both brokers from store to get inconsistent sequence numbering.
+ cluster.bounce(0, promote_next=False)
+ cluster[0].promote()
+ cluster[0].wait_status("active")
+ cluster.restart(1)
+ cluster[1].wait_status("ready")
+
+ # Verify state
+ cluster[0].assert_browse("q1", ["x","y","z"])
+ cluster[1].assert_browse_backup("q1", ["x","y","z"])
+ sn = cluster[0].connect().session() # FIXME aconway 2012-09-25: should fail over!
+ sn.sender("ex/k1").send("boo")
+ cluster[0].assert_browse_backup("q1", ["x","y","z", "boo"])
+ cluster[1].assert_browse_backup("q1", ["x","y","z", "boo"])
+ sn.sender("ex/k2").send("hoo") # q2 was unbound so this should be dropped.
+ sn.sender("q2").send("end") # mark the end of the queue for assert_browse
+ cluster[0].assert_browse("q2", ["hello", "end"])
+ cluster[1].assert_browse_backup("q2", ["hello", "end"])
+
+if __name__ == "__main__":
+ shutil.rmtree("brokertest.tmp", True)
+ qpid_ha = os.getenv("QPID_HA_EXEC")
+ if qpid_ha and os.path.exists(qpid_ha):
+ os.execvp("qpid-python-test",
+ ["qpid-python-test", "-m", "ha_store_tests"] + sys.argv[1:])
+ else:
+ print "Skipping ha_store_tests, %s not available"%(qpid_ha)
diff --git a/cpp/src/tests/ha_test.py b/cpp/src/tests/ha_test.py
new file mode 100755
index 0000000000..cdf9d3dd17
--- /dev/null
+++ b/cpp/src/tests/ha_test.py
@@ -0,0 +1,304 @@
+#!/usr/bin/env python
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import os, signal, sys, time, imp, re, subprocess, glob, random, logging, shutil, math, unittest
+import traceback
+from qpid.messaging import Message, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED, Empty
+from qpid.datatypes import uuid4
+from brokertest import *
+from threading import Thread, Lock, Condition
+from logging import getLogger, WARN, ERROR, DEBUG, INFO
+from qpidtoollibs import BrokerAgent
+from uuid import UUID
+
+log = getLogger(__name__)
+
+class LogLevel:
+ """
+ Temporarily change the log settings on the root logger.
+ Used to suppress expected WARN messages from the python client.
+ """
+ def __init__(self, level):
+ self.save_level = getLogger().getEffectiveLevel()
+ getLogger().setLevel(level)
+
+ def restore(self):
+ getLogger().setLevel(self.save_level)
+
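+# Minimal usage sketch for LogLevel (the wrapped call is hypothetical):
+# pair it with try/finally so the saved level is always restored, as the
+# tests in ha_tests.py do.
+#
+#     l = LogLevel(ERROR)
+#     try:
+#         connect_expecting_failover_warnings()
+#     finally:
+#         l.restore()
+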
+class QmfAgent(object):
+ """Access to a QMF broker agent."""
+ def __init__(self, address, **kwargs):
+ self._connection = Connection.establish(
+ address, client_properties={"qpid.ha-admin":1}, **kwargs)
+ self._agent = BrokerAgent(self._connection)
+
+ def __getattr__(self, name):
+ a = getattr(self._agent, name)
+ return a
+
+class Credentials(object):
+ """SASL credentials: username, password, and mechanism"""
+ def __init__(self, username, password, mechanism):
+ (self.username, self.password, self.mechanism) = (username, password, mechanism)
+
+ def __str__(self): return "Credentials%s"%(self.tuple(),)
+
+ def tuple(self): return (self.username, self.password, self.mechanism)
+
+ def add_user(self, url): return "%s/%s@%s"%(self.username, self.password, url)
+
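+# For example (illustrative values), Credentials("guest", "guest", "PLAIN")
+# .add_user("localhost:5672") returns "guest/guest@localhost:5672", the
+# user/password@host:port form accepted by the messaging client.
+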
+class HaBroker(Broker):
+ """Start a broker with HA enabled
+ @param client_credentials: (user, password, mechanism) for admin clients started by the HaBroker.
+ """
+ def __init__(self, test, args=[], brokers_url=None, ha_cluster=True, ha_replicate="all",
+ client_credentials=None, **kwargs):
+ assert BrokerTest.ha_lib, "Cannot locate HA plug-in"
+ args = copy(args)
+ args += ["--load-module", BrokerTest.ha_lib,
+ "--log-enable=debug+:ha::",
+ # FIXME aconway 2012-02-13: workaround slow link failover.
+ "--link-maintenance-interval=0.1",
+ "--ha-cluster=%s"%ha_cluster]
+ if ha_replicate is not None:
+ args += [ "--ha-replicate=%s"%ha_replicate ]
+ if brokers_url: args += [ "--ha-brokers-url", brokers_url ]
+ Broker.__init__(self, test, args, **kwargs)
+ self.qpid_ha_path=os.path.join(os.getenv("PYTHON_COMMANDS"), "qpid-ha")
+ assert os.path.exists(self.qpid_ha_path)
+ self.qpid_config_path=os.path.join(os.getenv("PYTHON_COMMANDS"), "qpid-config")
+ assert os.path.exists(self.qpid_config_path)
+ self.qpid_ha_script=import_script(self.qpid_ha_path)
+ self._agent = None
+ self.client_credentials = client_credentials
+
+ def __str__(self): return Broker.__str__(self)
+
+ def qpid_ha(self, args):
+ cred = self.client_credentials
+ url = self.host_port()
+ if cred:
+ url = cred.add_user(url)
+ args = args + ["--sasl-mechanism", cred.mechanism]
+ self.qpid_ha_script.main_except(["", "-b", url]+args)
+
+ def promote(self):
+ self.ready(); self.qpid_ha(["promote"])
+ def set_public_url(self, url): self.qpid_ha(["set", "--public-url", url])
+ def set_brokers_url(self, url): self.qpid_ha(["set", "--brokers-url", url])
+ def replicate(self, from_broker, queue): self.qpid_ha(["replicate", from_broker, queue])
+
+ def agent(self):
+ if not self._agent:
+ cred = self.client_credentials
+ if cred:
+ self._agent = QmfAgent(cred.add_user(self.host_port()), sasl_mechanisms=cred.mechanism)
+ else:
+ self._agent = QmfAgent(self.host_port())
+ return self._agent
+
+ def qmf(self):
+ hb = self.agent().getHaBroker()
+ hb.update()
+ return hb
+
+ def ha_status(self): return self.qmf().status
+
+ def wait_status(self, status):
+ def try_get_status():
+ self._status = "<unknown>"
+ # Ignore ConnectionError, the broker may not be up yet.
+ try:
+ self._status = self.ha_status()
+ return self._status == status
+ except ConnectionError: return False
+ assert retry(try_get_status, timeout=20), "%s expected=%r, actual=%r"%(
+ self, status, self._status)
+
+ def wait_queue(self, queue, timeout=1):
+ """ Wait for queue to be visible via QMF"""
+ agent = self.agent()
+ assert retry(lambda: agent.getQueue(queue) is not None, timeout=timeout)
+
+ def wait_no_queue(self, queue, timeout=1):
+ """ Wait for queue to be invisible via QMF"""
+ agent = self.agent()
+ assert retry(lambda: agent.getQueue(queue) is None, timeout=timeout)
+
+ # FIXME aconway 2012-05-01: do direct python call to qpid-config code.
+ def qpid_config(self, args):
+ assert subprocess.call(
+ [self.qpid_config_path, "--broker", self.host_port()]+args,
+ stdout=1, stderr=subprocess.STDOUT
+ ) == 0
+
+ def config_replicate(self, from_broker, queue):
+ self.qpid_config(["add", "queue", "--start-replica", from_broker, queue])
+
+ def config_declare(self, queue, replication):
+ self.qpid_config(["add", "queue", queue, "--replicate", replication])
+
+ def connect_admin(self, **kwargs):
+ cred = self.client_credentials
+ if cred:
+ return Broker.connect(
+ self, client_properties={"qpid.ha-admin":1},
+ username=cred.username, password=cred.password, sasl_mechanisms=cred.mechanism,
+ **kwargs)
+ else:
+ return Broker.connect(self, client_properties={"qpid.ha-admin":1}, **kwargs)
+
+ def wait_address(self, address):
+ """Wait for address to become valid on the broker."""
+ bs = self.connect_admin().session()
+ try: wait_address(bs, address)
+ finally: bs.connection.close()
+
+ def wait_backup(self, address): self.wait_address(address)
+
+ def assert_browse(self, queue, expected, **kwargs):
+ """Verify queue contents by browsing."""
+ bs = self.connect().session()
+ try:
+ wait_address(bs, queue)
+ assert_browse_retry(bs, queue, expected, **kwargs)
+ finally: bs.connection.close()
+
+ def assert_browse_backup(self, queue, expected, **kwargs):
+ """Combines wait_backup and assert_browse_retry."""
+ bs = self.connect_admin().session()
+ try:
+ wait_address(bs, queue)
+ assert_browse_retry(bs, queue, expected, **kwargs)
+ finally: bs.connection.close()
+
+ def assert_connect_fail(self):
+ try:
+ self.connect()
+ self.test.fail("Expected ConnectionError")
+ except ConnectionError: pass
+
+ def try_connect(self):
+ try: return self.connect()
+ except ConnectionError: return None
+
+ def ready(self):
+ return Broker.ready(self, client_properties={"qpid.ha-admin":1})
+
+ def kill(self):
+ self._agent = None
+ return Broker.kill(self)
+
+class HaCluster(object):
+ _cluster_count = 0
+
+ def __init__(self, test, n, promote=True, wait=True, **kwargs):
+ """Start a cluster of n brokers.
+
+ @test: The test being run
+ @n: start n brokers
+ @promote: promote self[0] to primary
+ @wait: wait for primary active and backups ready. Ignored if promote=False
+ """
+ self.test = test
+ self.kwargs = kwargs
+ self._brokers = []
+ self.id = HaCluster._cluster_count
+ self.broker_id = 0
+ HaCluster._cluster_count += 1
+ for i in xrange(n): self.start(False)
+ self.update_urls()
+ if promote:
+ self[0].promote()
+ if wait:
+ self[0].wait_status("active")
+ for b in self[1:]: b.wait_status("ready")
+
+
+ def next_name(self):
+ name="cluster%s-%s"%(self.id, self.broker_id)
+ self.broker_id += 1
+ return name
+
+ def start(self, update_urls=True, args=[]):
+ """Start a new broker in the cluster"""
+ b = HaBroker(self.test, name=self.next_name(), **self.kwargs)
+ b.ready()
+ self._brokers.append(b)
+ if update_urls: self.update_urls()
+ return b
+
+ def update_urls(self):
+ self.url = ",".join([b.host_port() for b in self])
+ if len(self) > 1: # No failover addresses on a single-broker cluster.
+ for b in self:
+ b.set_brokers_url(self.url)
+ b.set_public_url(self.url)
+
+ def connect(self, i):
+ """Connect with reconnect_urls"""
+ return self[i].connect(reconnect=True, reconnect_urls=self.url.split(","))
+
+ def kill(self, i, promote_next=True):
+ """Kill broker i, promote broker i+1"""
+ self[i].kill()
+ if promote_next: self[(i+1) % len(self)].promote()
+
+ def restart(self, i):
+ """Start a broker with the same port, name and data directory. It will get
+ a separate log file: foo.n.log"""
+ b = self._brokers[i]
+ self._brokers[i] = HaBroker(
+ self.test, name=b.name, port=b.port(), brokers_url=self.url,
+ **self.kwargs)
+ self._brokers[i].ready()
+
+ def bounce(self, i, promote_next=True):
+ """Stop and restart a broker in a cluster."""
+ if len(self) == 1:
+ self.kill(i, promote_next=False)
+ self.restart(i)
+ self[i].ready()
+ if promote_next: self[i].promote()
+ else:
+ self.kill(i, promote_next)
+ self.restart(i)
+
+ # Behave like a list of brokers.
+ def __len__(self): return len(self._brokers)
+ def __getitem__(self,index): return self._brokers[index]
+ def __iter__(self): return self._brokers.__iter__()
+
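+# Illustrative lifecycle sketch for HaCluster (broker count, queue name and
+# message are arbitrary):
+#
+#     cluster = HaCluster(test, 3)    # start 3 brokers, promote cluster[0]
+#     c = cluster.connect(0)          # connection armed with failover URLs
+#     cluster.kill(0)                 # kill primary, auto-promote cluster[1]
+#     cluster[1].assert_browse("q", ["m1"])  # verify on the new primary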
+
+def wait_address(session, address):
+ """Wait for an address to become valid."""
+ def check():
+ try: session.sender(address); return True
+ except NotFound: return False
+ assert retry(check), "Timed out waiting for address %s"%(address)
+
+def valid_address(session, address):
+ """Test if an address is valid"""
+ try:
+ session.receiver(address)
+ return True
+ except NotFound: return False
+
+
diff --git a/cpp/src/tests/ha_tests.py b/cpp/src/tests/ha_tests.py
index 310ef844bd..77399bf2e2 100755
--- a/cpp/src/tests/ha_tests.py
+++ b/cpp/src/tests/ha_tests.py
@@ -18,235 +18,32 @@
# under the License.
#
-import os, signal, sys, time, imp, re, subprocess, glob, random, logging, shutil, math, unittest
+import os, signal, sys, time, imp, re, subprocess, glob, random, logging, shutil, math, unittest, random
import traceback
-from qpid.messaging import Message, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED
+from qpid.messaging import Message, SessionError, NotFound, ConnectionError, ReceiverError, Connection, Timeout, Disposition, REJECTED, Empty
from qpid.datatypes import uuid4
from brokertest import *
+from ha_test import *
from threading import Thread, Lock, Condition
from logging import getLogger, WARN, ERROR, DEBUG, INFO
-from qpidtoollibs import BrokerAgent
+from qpidtoollibs import BrokerAgent, EventHelper
from uuid import UUID
log = getLogger(__name__)
-class QmfAgent(object):
- """Access to a QMF broker agent."""
- def __init__(self, address, **kwargs):
- self._connection = Connection.establish(
- address, client_properties={"qpid.ha-admin":1}, **kwargs)
- self._agent = BrokerAgent(self._connection)
- assert self._agent.getHaBroker(), "HA module not loaded in broker at: %s"%(address)
+def grep(filename, regexp):
+ for line in open(filename).readlines():
+ if (regexp.search(line)): return True
+ return False
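+
+# e.g. grep(broker.get_log(), re.compile("] error")) - matches the severity
+# marker qpidd writes after the log category, as in "... [Broker] error ..."
+# (exact log-line format assumed).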
- def __getattr__(self, name):
- a = getattr(self._agent, name)
- return a
+class HaBrokerTest(BrokerTest):
+ """Base class for HA broker tests"""
+ def assert_log_no_errors(self, broker):
+ log = broker.get_log()
+ if grep(log, re.compile("] error|] critical")):
+ self.fail("Errors in log file %s"%(log))
-class Credentials(object):
- """SASL credentials: username, password, and mechanism"""
- def __init__(self, username, password, mechanism):
- (self.username, self.password, self.mechanism) = (username, password, mechanism)
-
- def __str__(self): return "Credentials%s"%(self.tuple(),)
-
- def tuple(self): return (self.username, self.password, self.mechanism)
-
- def add_user(self, url): return "%s/%s@%s"%(self.username, self.password, url)
-
-class HaBroker(Broker):
- """Start a broker with HA enabled
- @param client_cred: (user, password, mechanism) for admin clients started by the HaBroker.
- """
- def __init__(self, test, args=[], brokers_url=None, ha_cluster=True, ha_replicate="all",
- client_credentials=None, **kwargs):
- assert BrokerTest.ha_lib, "Cannot locate HA plug-in"
- args = copy(args)
- args += ["--load-module", BrokerTest.ha_lib,
- "--log-enable=debug+:ha::",
- # FIXME aconway 2012-02-13: workaround slow link failover.
- "--link-maintenace-interval=0.1",
- "--ha-cluster=%s"%ha_cluster]
- if ha_replicate is not None:
- args += [ "--ha-replicate=%s"%ha_replicate ]
- if brokers_url: args += [ "--ha-brokers-url", brokers_url ]
- Broker.__init__(self, test, args, **kwargs)
- self.qpid_ha_path=os.path.join(os.getenv("PYTHON_COMMANDS"), "qpid-ha")
- assert os.path.exists(self.qpid_ha_path)
- self.qpid_config_path=os.path.join(os.getenv("PYTHON_COMMANDS"), "qpid-config")
- assert os.path.exists(self.qpid_config_path)
- getLogger().setLevel(ERROR) # Hide expected WARNING log messages from failover.
- self.qpid_ha_script=import_script(self.qpid_ha_path)
- self._agent = None
- self.client_credentials = client_credentials
-
- def __str__(self): return Broker.__str__(self)
-
- def qpid_ha(self, args):
- cred = self.client_credentials
- url = self.host_port()
- if cred:
- url =cred.add_user(url)
- args = args + ["--sasl-mechanism", cred.mechanism]
- self.qpid_ha_script.main_except(["", "-b", url]+args)
-
- def promote(self): self.qpid_ha(["promote"])
- def set_client_url(self, url): self.qpid_ha(["set", "--public-url", url])
- def set_brokers_url(self, url): self.qpid_ha(["set", "--brokers-url", url])
- def replicate(self, from_broker, queue): self.qpid_ha(["replicate", from_broker, queue])
-
- def agent(self):
- if not self._agent:
- cred = self.client_credentials
- if cred:
- self._agent = QmfAgent(cred.add_user(self.host_port()), sasl_mechanisms=cred.mechanism)
- else:
- self._agent = QmfAgent(self.host_port())
- return self._agent
-
- def ha_status(self):
- hb = self.agent().getHaBroker()
- hb.update()
- return hb.status
-
- def wait_status(self, status):
- def try_get_status():
- # Ignore ConnectionError, the broker may not be up yet.
- try:
- self._status = self.ha_status()
- return self._status == status;
- except ConnectionError: return False
- assert retry(try_get_status, timeout=20), "%s %r != %r"%(self, self._status, status)
-
- # FIXME aconway 2012-05-01: do direct python call to qpid-config code.
- def qpid_config(self, args):
- assert subprocess.call(
- [self.qpid_config_path, "--broker", self.host_port()]+args) == 0
-
- def config_replicate(self, from_broker, queue):
- self.qpid_config(["add", "queue", "--start-replica", from_broker, queue])
-
- def config_declare(self, queue, replication):
- self.qpid_config(["add", "queue", queue, "--replicate", replication])
-
- def connect_admin(self, **kwargs):
- cred = self.client_credentials
- if cred:
- return Broker.connect(
- self, client_properties={"qpid.ha-admin":1},
- username=cred.username, password=cred.password, sasl_mechanisms=cred.mechanism,
- **kwargs)
- else:
- return Broker.connect(self, client_properties={"qpid.ha-admin":1}, **kwargs)
-
- def wait_backup(self, address):
- """Wait for address to become valid on a backup broker."""
- bs = self.connect_admin().session()
- try: wait_address(bs, address)
- finally: bs.connection.close()
-
- def assert_browse(self, queue, expected, **kwargs):
- """Verify queue contents by browsing."""
- bs = self.connect().session()
- try:
- wait_address(bs, queue)
- assert_browse_retry(bs, queue, expected, **kwargs)
- finally: bs.connection.close()
-
- def assert_browse_backup(self, queue, expected, **kwargs):
- """Combines wait_backup and assert_browse_retry."""
- bs = self.connect_admin().session()
- try:
- wait_address(bs, queue)
- assert_browse_retry(bs, queue, expected, **kwargs)
- finally: bs.connection.close()
-
- def assert_connect_fail(self):
- try:
- self.connect()
- self.test.fail("Expected ConnectionError")
- except ConnectionError: pass
-
- def try_connect(self):
- try: return self.connect()
- except ConnectionError: return None
-
-class HaCluster(object):
- _cluster_count = 0
-
- def __init__(self, test, n, promote=True, **kwargs):
- """Start a cluster of n brokers"""
- self.test = test
- self.kwargs = kwargs
- self._brokers = []
- self.id = HaCluster._cluster_count
- self.broker_id = 0
- HaCluster._cluster_count += 1
- for i in xrange(n): self.start(False)
- self.update_urls()
- self[0].promote()
-
- def next_name(self):
- name="cluster%s-%s"%(self.id, self.broker_id)
- self.broker_id += 1
- return name
-
- def start(self, update_urls=True, args=[]):
- """Start a new broker in the cluster"""
- b = HaBroker(self.test, name=self.next_name(), **self.kwargs)
- self._brokers.append(b)
- if update_urls: self.update_urls()
- return b
-
- def update_urls(self):
- self.url = ",".join([b.host_port() for b in self])
- if len(self) > 1: # No failover addresses on a 1 cluster.
- for b in self: b.set_brokers_url(self.url)
-
- def connect(self, i):
- """Connect with reconnect_urls"""
- return self[i].connect(reconnect=True, reconnect_urls=self.url.split(","))
-
- def kill(self, i, promote_next=True):
- """Kill broker i, promote broker i+1"""
- self[i].expect = EXPECT_EXIT_FAIL
- self[i].kill()
- if promote_next: self[(i+1) % len(self)].promote()
-
- def restart(self, i):
- """Start a broker with the same port, name and data directory. It will get
- a separate log file: foo.n.log"""
- b = self._brokers[i]
- self._brokers[i] = HaBroker(
- self.test, name=b.name, port=b.port(), brokers_url=self.url,
- **self.kwargs)
-
- def bounce(self, i, promote_next=True):
- """Stop and restart a broker in a cluster."""
- self.kill(i, promote_next)
- self.restart(i)
-
- # Behave like a list of brokers.
- def __len__(self): return len(self._brokers)
- def __getitem__(self,index): return self._brokers[index]
- def __iter__(self): return self._brokers.__iter__()
-
-def wait_address(session, address):
- """Wait for an address to become valid."""
- def check():
- try:
- session.sender(address)
- return True
- except NotFound: return False
- assert retry(check), "Timed out waiting for address %s"%(address)
-
-def valid_address(session, address):
- """Test if an address is valid"""
- try:
- session.receiver(address)
- return True
- except NotFound: return False
-
-class ReplicationTests(BrokerTest):
+class ReplicationTests(HaBrokerTest):
"""Correctness tests for HA replication."""
def test_replication(self):
@@ -256,8 +53,9 @@ class ReplicationTests(BrokerTest):
def queue(name, replicate):
return "%s;{create:always,node:{x-declare:{arguments:{'qpid.replicate':%s}}}}"%(name, replicate)
- def exchange(name, replicate, bindq):
- return"%s;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':%s}, type:'fanout'},x-bindings:[{exchange:'%s',queue:'%s'}]}}"%(name, replicate, name, bindq)
+ def exchange(name, replicate, bindq, key):
+ return "%s/%s;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':%s}, type:'topic'},x-bindings:[{exchange:'%s',queue:'%s',key:'%s'}]}}"%(name, key, replicate, name, bindq, key)
+
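+ # For example (illustrative values), exchange("e1", "all", "q1", "key1")
+ # yields the address "e1/key1;{create:always,node:{type:topic,
+ # x-declare:{arguments:{'qpid.replicate':all}, type:'topic'},
+ # x-bindings:[{exchange:'e1',queue:'q1',key:'key1'}]}}": messages are sent
+ # with subject "key1" and the exchange is bound to q1 on that key.
+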
def setup(p, prefix, primary):
"""Create config, send messages on the primary p"""
s = p.sender(queue(prefix+"q1", "all"))
@@ -267,16 +65,21 @@ class ReplicationTests(BrokerTest):
p.acknowledge()
p.sender(queue(prefix+"q2", "configuration")).send(Message("2"))
p.sender(queue(prefix+"q3", "none")).send(Message("3"))
- p.sender(exchange(prefix+"e1", "all", prefix+"q1")).send(Message("4"))
- p.sender(exchange(prefix+"e2", "all", prefix+"q2")).send(Message("5"))
+ p.sender(exchange(prefix+"e1", "all", prefix+"q1", "key1")).send(Message("4"))
+ p.sender(exchange(prefix+"e2", "configuration", prefix+"q2", "key2")).send(Message("5"))
# Test unbind
p.sender(queue(prefix+"q4", "all")).send(Message("6"))
- s3 = p.sender(exchange(prefix+"e4", "all", prefix+"q4"))
+ s3 = p.sender(exchange(prefix+"e4", "all", prefix+"q4", "key4"))
s3.send(Message("7"))
# Use old connection to unbind
us = primary.connect_old().session(str(uuid4()))
- us.exchange_unbind(exchange=prefix+"e4", binding_key="", queue=prefix+"q4")
+ us.exchange_unbind(exchange=prefix+"e4", binding_key="key4", queue=prefix+"q4")
p.sender(prefix+"e4").send(Message("drop1")) # Should be dropped
+ # Test replication of deletes
+ p.sender(queue(prefix+"dq", "all"))
+ p.sender(exchange(prefix+"de", "all", prefix+"dq", ""))
+ p.sender(prefix+"dq;{delete:always}").close()
+ p.sender(prefix+"de;{delete:always}").close()
# Need a marker so we can wait till sync is done.
p.sender(queue(prefix+"x", "configuration"))
@@ -292,50 +95,61 @@ class ReplicationTests(BrokerTest):
self.assert_browse_retry(b, prefix+"q2", []) # configuration only
assert not valid_address(b, prefix+"q3")
- b.sender(prefix+"e1").send(Message(prefix+"e1")) # Verify binds with replicate=all
+
+ # Verify exchange with replicate=all
+ b.sender(prefix+"e1/key1").send(Message(prefix+"e1"))
self.assert_browse_retry(b, prefix+"q1", ["1", "4", prefix+"e1"])
- b.sender(prefix+"e2").send(Message(prefix+"e2")) # Verify binds with replicate=configuration
+
+ # Verify exchange with replicate=configuration
+ b.sender(prefix+"e2/key2").send(Message(prefix+"e2"))
self.assert_browse_retry(b, prefix+"q2", [prefix+"e2"])
- b.sender(prefix+"e4").send(Message("drop2")) # Verify unbind.
+ b.sender(prefix+"e4/key4").send(Message("drop2")) # Verify unbind.
self.assert_browse_retry(b, prefix+"q4", ["6","7"])
- primary = HaBroker(self, name="primary")
- primary.promote()
- p = primary.connect().session()
+ # Verify deletes
+ assert not valid_address(b, prefix+"dq")
+ assert not valid_address(b, prefix+"de")
- # Create config, send messages before starting the backup, to test catch-up replication.
- setup(p, "1", primary)
- backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
- # Create config, send messages after starting the backup, to test steady-state replication.
- setup(p, "2", primary)
-
- # Verify the data on the backup
- b = backup.connect_admin().session()
- verify(b, "1", p)
- verify(b, "2", p)
- # Test a series of messages, enqueue all then dequeue all.
- s = p.sender(queue("foo","all"))
- wait_address(b, "foo")
- msgs = [str(i) for i in range(10)]
- for m in msgs: s.send(Message(m))
- self.assert_browse_retry(p, "foo", msgs)
- self.assert_browse_retry(b, "foo", msgs)
- r = p.receiver("foo")
- for m in msgs: self.assertEqual(m, r.fetch(timeout=0).content)
- p.acknowledge()
- self.assert_browse_retry(p, "foo", [])
- self.assert_browse_retry(b, "foo", [])
-
- # Another series, this time verify each dequeue individually.
- for m in msgs: s.send(Message(m))
- self.assert_browse_retry(p, "foo", msgs)
- self.assert_browse_retry(b, "foo", msgs)
- for i in range(len(msgs)):
- self.assertEqual(msgs[i], r.fetch(timeout=0).content)
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
+ try:
+ primary = HaBroker(self, name="primary")
+ primary.promote()
+ p = primary.connect().session()
+
+ # Create config, send messages before starting the backup, to test catch-up replication.
+ setup(p, "1", primary)
+ backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
+ # Create config, send messages after starting the backup, to test steady-state replication.
+ setup(p, "2", primary)
+
+ # Verify the data on the backup
+ b = backup.connect_admin().session()
+ verify(b, "1", p)
+ verify(b, "2", p)
+ # Test a series of messages, enqueue all then dequeue all.
+ s = p.sender(queue("foo","all"))
+ wait_address(b, "foo")
+ msgs = [str(i) for i in range(10)]
+ for m in msgs: s.send(Message(m))
+ self.assert_browse_retry(p, "foo", msgs)
+ self.assert_browse_retry(b, "foo", msgs)
+ r = p.receiver("foo")
+ for m in msgs: self.assertEqual(m, r.fetch(timeout=0).content)
p.acknowledge()
- self.assert_browse_retry(p, "foo", msgs[i+1:])
- self.assert_browse_retry(b, "foo", msgs[i+1:])
+ self.assert_browse_retry(p, "foo", [])
+ self.assert_browse_retry(b, "foo", [])
+
+ # Another series, this time verify each dequeue individually.
+ for m in msgs: s.send(Message(m))
+ self.assert_browse_retry(p, "foo", msgs)
+ self.assert_browse_retry(b, "foo", msgs)
+ for i in range(len(msgs)):
+ self.assertEqual(msgs[i], r.fetch(timeout=0).content)
+ p.acknowledge()
+ self.assert_browse_retry(p, "foo", msgs[i+1:])
+ self.assert_browse_retry(b, "foo", msgs[i+1:])
+ finally: l.restore()
def test_sync(self):
primary = HaBroker(self, name="primary")
@@ -361,53 +175,59 @@ class ReplicationTests(BrokerTest):
def test_send_receive(self):
"""Verify sequence numbers of messages sent by qpid-send"""
- brokers = HaCluster(self, 3)
- sender = self.popen(
- ["qpid-send",
- "--broker", brokers[0].host_port(),
- "--address", "q;{create:always}",
- "--messages=1000",
- "--content-string=x"
- ])
- receiver = self.popen(
- ["qpid-receive",
- "--broker", brokers[0].host_port(),
- "--address", "q;{create:always}",
- "--messages=990",
- "--timeout=10"
- ])
- self.assertEqual(sender.wait(), 0)
- self.assertEqual(receiver.wait(), 0)
- expect = [long(i) for i in range(991, 1001)]
- sn = lambda m: m.properties["sn"]
- brokers[1].assert_browse_backup("q", expect, transform=sn)
- brokers[2].assert_browse_backup("q", expect, transform=sn)
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
+ try:
+ brokers = HaCluster(self, 3)
+ sender = self.popen(
+ ["qpid-send",
+ "--broker", brokers[0].host_port(),
+ "--address", "q;{create:always}",
+ "--messages=1000",
+ "--content-string=x"
+ ])
+ receiver = self.popen(
+ ["qpid-receive",
+ "--broker", brokers[0].host_port(),
+ "--address", "q;{create:always}",
+ "--messages=990",
+ "--timeout=10"
+ ])
+ self.assertEqual(sender.wait(), 0)
+ self.assertEqual(receiver.wait(), 0)
+ expect = [long(i) for i in range(991, 1001)]
+ sn = lambda m: m.properties["sn"]
+ brokers[1].assert_browse_backup("q", expect, transform=sn)
+ brokers[2].assert_browse_backup("q", expect, transform=sn)
+ finally: l.restore()
def test_failover_python(self):
"""Verify that backups rejects connections and that fail-over works in python client"""
- primary = HaBroker(self, name="primary", expect=EXPECT_EXIT_FAIL)
- primary.promote()
- backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
- # Check that backup rejects normal connections
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
try:
- backup.connect().session()
- self.fail("Expected connection to backup to fail")
- except ConnectionError: pass
- # Check that admin connections are allowed to backup.
- backup.connect_admin().close()
-
- # Test discovery: should connect to primary after reject by backup
- c = backup.connect(reconnect_urls=[primary.host_port(), backup.host_port()], reconnect=True)
- s = c.session()
- sender = s.sender("q;{create:always}")
- backup.wait_backup("q")
- sender.send("foo")
- primary.kill()
- assert retry(lambda: not is_running(primary.pid))
- backup.promote()
- sender.send("bar")
- self.assert_browse_retry(s, "q", ["foo", "bar"])
- c.close()
+ primary = HaBroker(self, name="primary", expect=EXPECT_EXIT_FAIL)
+ primary.promote()
+ backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
+ # Check that backup rejects normal connections
+ try:
+ backup.connect().session()
+ self.fail("Expected connection to backup to fail")
+ except ConnectionError: pass
+ # Check that admin connections are allowed to backup.
+ backup.connect_admin().close()
+
+ # Test discovery: should connect to primary after reject by backup
+ c = backup.connect(reconnect_urls=[primary.host_port(), backup.host_port()], reconnect=True)
+ s = c.session()
+ sender = s.sender("q;{create:always}")
+ backup.wait_backup("q")
+ sender.send("foo")
+ primary.kill()
+ assert retry(lambda: not is_running(primary.pid))
+ backup.promote()
+ sender.send("bar")
+ self.assert_browse_retry(s, "q", ["foo", "bar"])
+ c.close()
+ finally: l.restore()
def test_failover_cpp(self):
"""Verify that failover works in the C++ client."""
@@ -456,51 +276,61 @@ class ReplicationTests(BrokerTest):
def test_standalone_queue_replica(self):
"""Test replication of individual queues outside of cluster mode"""
- getLogger().setLevel(ERROR) # Hide expected WARNING log messages from failover.
- primary = HaBroker(self, name="primary", ha_cluster=False)
- pc = primary.connect()
- ps = pc.session().sender("q;{create:always}")
- pr = pc.session().receiver("q;{create:always}")
- backup = HaBroker(self, name="backup", ha_cluster=False)
- br = backup.connect().session().receiver("q;{create:always}")
-
- # Set up replication with qpid-ha
- backup.replicate(primary.host_port(), "q")
- ps.send("a")
- backup.assert_browse_backup("q", ["a"])
- ps.send("b")
- backup.assert_browse_backup("q", ["a", "b"])
- self.assertEqual("a", pr.fetch().content)
- pr.session.acknowledge()
- backup.assert_browse_backup("q", ["b"])
-
- # Set up replication with qpid-config
- ps2 = pc.session().sender("q2;{create:always}")
- backup.config_replicate(primary.host_port(), "q2");
- ps2.send("x")
- backup.assert_browse_backup("q2", ["x"])
-
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
+ try:
+ primary = HaBroker(self, name="primary", ha_cluster=False,
+ args=["--ha-queue-replication=yes"]);
+ pc = primary.connect()
+ ps = pc.session().sender("q;{create:always}")
+ pr = pc.session().receiver("q;{create:always}")
+ backup = HaBroker(self, name="backup", ha_cluster=False,
+ args=["--ha-queue-replication=yes"])
+ br = backup.connect().session().receiver("q;{create:always}")
+
+ # Set up replication with qpid-ha
+ backup.replicate(primary.host_port(), "q")
+ ps.send("a")
+ backup.assert_browse_backup("q", ["a"])
+ ps.send("b")
+ backup.assert_browse_backup("q", ["a", "b"])
+ self.assertEqual("a", pr.fetch().content)
+ pr.session.acknowledge()
+ backup.assert_browse_backup("q", ["b"])
+
+ # Set up replication with qpid-config
+ ps2 = pc.session().sender("q2;{create:always}")
+ backup.config_replicate(primary.host_port(), "q2");
+ ps2.send("x")
+ backup.assert_browse_backup("q2", ["x"])
+ finally: l.restore()
def test_queue_replica_failover(self):
- """Test individual queue replication from a cluster to a standalone backup broker, verify it fails over."""
- cluster = HaCluster(self, 2)
- primary = cluster[0]
- pc = cluster.connect(0)
- ps = pc.session().sender("q;{create:always}")
- pr = pc.session().receiver("q;{create:always}")
- backup = HaBroker(self, name="backup", ha_cluster=False)
- br = backup.connect().session().receiver("q;{create:always}")
- backup.replicate(cluster.url, "q")
- ps.send("a")
- backup.assert_browse_backup("q", ["a"])
- cluster.bounce(0)
- backup.assert_browse_backup("q", ["a"])
- ps.send("b")
- backup.assert_browse_backup("q", ["a", "b"])
- cluster.bounce(1)
- self.assertEqual("a", pr.fetch().content)
- pr.session.acknowledge()
- backup.assert_browse_backup("q", ["b"])
+ """Test individual queue replication from a cluster to a standalone
+ backup broker, verify it fails over."""
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
+ try:
+ cluster = HaCluster(self, 2)
+ primary = cluster[0]
+ pc = cluster.connect(0)
+ ps = pc.session().sender("q;{create:always}")
+ pr = pc.session().receiver("q;{create:always}")
+ backup = HaBroker(self, name="backup", ha_cluster=False,
+ args=["--ha-queue-replication=yes"])
+ br = backup.connect().session().receiver("q;{create:always}")
+ backup.replicate(cluster.url, "q")
+ ps.send("a")
+ backup.assert_browse_backup("q", ["a"])
+ cluster.bounce(0)
+ backup.assert_browse_backup("q", ["a"])
+ ps.send("b")
+ backup.assert_browse_backup("q", ["a", "b"])
+ cluster.bounce(1)
+ self.assertEqual("a", pr.fetch().content)
+ pr.session.acknowledge()
+ backup.assert_browse_backup("q", ["b"])
+ pc.close()
+ br.close()
+ finally: l.restore()
def test_lvq(self):
"""Verify that we replicate to an LVQ correctly"""
@@ -634,8 +464,10 @@ class ReplicationTests(BrokerTest):
def test_replicate_default(self):
"""Make sure we don't replicate if ha-replicate is unspecified or none"""
cluster1 = HaCluster(self, 2, ha_replicate=None)
+ cluster1[1].wait_status("ready")
c1 = cluster1[0].connect().session().sender("q;{create:always}")
cluster2 = HaCluster(self, 2, ha_replicate="none")
+ cluster2[1].wait_status("ready")
cluster2[0].connect().session().sender("q;{create:always}")
time.sleep(.1) # Give replication a chance.
try:
@@ -647,6 +479,23 @@ class ReplicationTests(BrokerTest):
self.fail("Excpected no-such-queue exception")
except NotFound: pass
+ def test_replicate_binding(self):
+ """Verify that binding replication can be disabled"""
+ primary = HaBroker(self, name="primary", expect=EXPECT_EXIT_FAIL)
+ primary.promote()
+ backup = HaBroker(self, name="backup", brokers_url=primary.host_port())
+ ps = primary.connect().session()
+ ps.sender("ex;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':all}, type:'fanout'}}}")
+ ps.sender("q;{create:always,node:{type:queue,x-declare:{arguments:{'qpid.replicate':all}},x-bindings:[{exchange:'ex',queue:'q',key:'',arguments:{'qpid.replicate':none}}]}}")
+ backup.wait_backup("q")
+
+ primary.kill()
+ assert retry(lambda: not is_running(primary.pid)) # Wait for primary to die
+ backup.promote()
+ bs = backup.connect_admin().session()
+ bs.sender("ex").send(Message("msg"))
+ self.assert_browse_retry(bs, "q", [])
+
def test_invalid_replication(self):
"""Verify that we reject an attempt to declare a queue with invalid replication value."""
cluster = HaCluster(self, 1, ha_replicate="all")
@@ -672,20 +521,26 @@ class ReplicationTests(BrokerTest):
def test_auto_delete_exclusive(self):
"""Verify that we ignore auto-delete, exclusive, non-auto-delete-timeout queues"""
- cluster = HaCluster(self,2)
- s = cluster[0].connect().session()
- s.receiver("exad;{create:always,node:{x-declare:{exclusive:True,auto-delete:True}}}")
- s.receiver("ex;{create:always,node:{x-declare:{exclusive:True}}}")
- s.receiver("ad;{create:always,node:{x-declare:{auto-delete:True}}}")
- s.receiver("time;{create:always,node:{x-declare:{exclusive:True,auto-delete:True,arguments:{'qpid.auto_delete_timeout':1}}}}")
- s.receiver("q;{create:always}")
+ cluster = HaCluster(self, 2)
+ s0 = cluster[0].connect().session()
+ s0.receiver("exad;{create:always,node:{x-declare:{exclusive:True,auto-delete:True}}}")
+ s0.receiver("ex;{create:always,node:{x-declare:{exclusive:True}}}")
+ ad = s0.receiver("ad;{create:always,node:{x-declare:{auto-delete:True}}}")
+ s0.receiver("time;{create:always,node:{x-declare:{exclusive:True,auto-delete:True,arguments:{'qpid.auto_delete_timeout':1}}}}")
+ s0.receiver("q;{create:always}")
- s = cluster[1].connect_admin().session()
+ s1 = cluster[1].connect_admin().session()
cluster[1].wait_backup("q")
- assert not valid_address(s, "exad")
- assert valid_address(s, "ex")
- assert valid_address(s, "ad")
- assert valid_address(s, "time")
+ assert not valid_address(s1, "exad")
+ assert valid_address(s1, "ex")
+ assert valid_address(s1, "ad")
+ assert valid_address(s1, "time")
+
+ # Verify that auto-delete queues are not kept alive by
+ # replicating subscriptions
+ ad.close()
+ s0.sync()
+ assert not valid_address(s0, "ad")
def test_broker_info(self):
"""Check that broker information is correctly published via management"""
@@ -763,18 +618,18 @@ acl deny all all
s.sender("altex;{create:always,node:{type:topic,x-declare:{type:'fanout'}}}")
# altq queue bound to altex, collect re-routed messages.
s.sender("altq;{create:always,node:{x-bindings:[{exchange:'altex',queue:altq}]}}")
- # 0ex exchange with alternate-exchange altex and no queues bound
- s.sender("0ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'altex'}}}")
+ # ex exchange with alternate-exchange altex and no queues bound
+ s.sender("ex;{create:always,node:{type:topic, x-declare:{type:'direct', alternate-exchange:'altex'}}}")
# create queue q with alternate-exchange altex
s.sender("q;{create:always,node:{type:queue, x-declare:{alternate-exchange:'altex'}}}")
# create a bunch of exchanges to ensure we don't clean up prematurely if the
# response comes in multiple fragments.
- for i in xrange(200): s.sender("00ex%s;{create:always,node:{type:topic}}"%i)
+ for i in xrange(200): s.sender("ex.%s;{create:always,node:{type:topic}}"%i)
def verify(broker):
s = broker.connect().session()
# Verify unmatched message goes to ex's alternate.
- s.sender("0ex").send("foo")
+ s.sender("ex").send("foo")
altq = s.receiver("altq")
self.assertEqual("foo", altq.fetch(timeout=0).content)
s.acknowledge()
@@ -786,20 +641,265 @@ acl deny all all
self.assertEqual("bar", altq.fetch(timeout=0).content)
s.acknowledge()
+ def ss(n): return cluster[n].connect().session()
+
# Sanity check: alternate exchanges on original broker
verify(cluster[0])
+ # Altex is in use as an alternate exchange.
+ self.assertRaises(SessionError,
+ lambda:ss(0).sender("altex;{delete:always}").close())
# Check backup that was connected during setup.
- cluster[1].wait_backup("0ex")
+ cluster[1].wait_status("ready")
+ cluster[1].wait_backup("ex")
cluster[1].wait_backup("q")
cluster.bounce(0)
verify(cluster[1])
+
# Check a newly started backup.
cluster.start()
- cluster[2].wait_backup("0ex")
+ cluster[2].wait_status("ready")
+ cluster[2].wait_backup("ex")
cluster[2].wait_backup("q")
cluster.bounce(1)
verify(cluster[2])
+ # Check that alt-exchange in-use count is replicated
+        s = cluster[2].connect().session()
+
+ self.assertRaises(SessionError,
+ lambda:ss(2).sender("altex;{delete:always}").close())
+ s.sender("q;{delete:always}").close()
+ self.assertRaises(SessionError,
+ lambda:ss(2).sender("altex;{delete:always}").close())
+ s.sender("ex;{delete:always}").close()
+ s.sender("altex;{delete:always}").close()
+
+ def test_priority_reroute(self):
+ """Regression test for QPID-4262, rerouting messages from a priority queue
+ to itself causes a crash"""
+ cluster = HaCluster(self, 2)
+ primary = cluster[0]
+ session = primary.connect().session()
+ s = session.sender("pq; {create:always, node:{x-declare:{arguments:{'qpid.priorities':10}},x-bindings:[{exchange:'amq.fanout',queue:pq}]}}")
+ for m in xrange(100): s.send(Message(str(m), priority=m%10))
+ pq = QmfAgent(primary.host_port()).getQueue("pq")
+ pq.reroute(request=0, useAltExchange=False, exchange="amq.fanout")
+ # Verify that consuming is in priority order
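+        # (messages are str(m) with priority m%10, so priority 9 drains as
+        # "9","19",...,"99", then priority 8 as "8","18",...,"98", and so on)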
+ expect = [str(10*i+p) for p in xrange(9,-1,-1) for i in xrange(0,10) ]
+ actual = [m.content for m in primary.get_messages("pq", 100)]
+ self.assertEqual(expect, actual)
+
+ def test_delete_missing_response(self):
+ """Check that a backup correctly deletes leftover queues and exchanges that are
+        missing from the initial response set."""
+        # This test is a bit contrived: we set up the situation on backup brokers
+ # and then promote one.
+ cluster = HaCluster(self, 2, promote=False)
+
+ # cluster[0] Will be the primary
+ s = cluster[0].connect_admin().session()
+ s.sender("q1;{create:always}")
+ s.sender("e1;{create:always, node:{type:topic}}")
+
+ # cluster[1] will be the backup, has extra queues/exchanges
+ xdecl = "x-declare:{arguments:{'qpid.replicate':'all'}}"
+ node = "node:{%s}"%(xdecl)
+ s = cluster[1].connect_admin().session()
+ s.sender("q1;{create:always, %s}"%(node))
+ s.sender("q2;{create:always, %s}"%(node))
+ s.sender("e1;{create:always, node:{type:topic, %s}}"%(xdecl))
+ s.sender("e2;{create:always, node:{type:topic, %s}}"%(xdecl))
+ for a in ["q1", "q2", "e1", "e2"]: cluster[1].wait_backup(a)
+
+ cluster[0].promote()
+ # Verify the backup deletes the surplus queue and exchange
+ cluster[1].wait_status("ready")
+ s = cluster[1].connect_admin().session()
+ self.assertRaises(NotFound, s.receiver, ("q2"));
+ self.assertRaises(NotFound, s.receiver, ("e2"));
+
+
+ def test_delete_qpid_4285(self):
+ """Regression test for QPID-4285: on deleting a queue it gets stuck in a
+ partially deleted state and causes replication errors."""
+ cluster = HaCluster(self,2)
+ s = cluster[0].connect().session()
+ s.receiver("q;{create:always}")
+ cluster[1].wait_backup("q")
+ cluster.kill(0) # Make the backup take over.
+ s = cluster[1].connect().session()
+ s.receiver("q;{delete:always}").close() # Delete q on new primary
+ try:
+ s.receiver("q")
+ self.fail("Expected NotFound exception") # Should not be avaliable
+ except NotFound: pass
+ assert not cluster[1].agent().getQueue("q") # Should not be in QMF
+
+ def alt_setup(self, session, suffix):
+ # Create exchange to use as alternate and a queue bound to it.
+ # altex exchange: acts as alternate exchange
+ session.sender("altex%s;{create:always,node:{type:topic,x-declare:{type:'fanout'}}}"%(suffix))
+ # altq queue bound to altex, collect re-routed messages.
+ session.sender("altq%s;{create:always,node:{x-bindings:[{exchange:'altex%s',queue:altq%s}]}}"%(suffix,suffix,suffix))
+
+ def test_auto_delete_close(self):
+ """Verify auto-delete queues are deleted on backup if auto-deleted
+ on primary"""
+ cluster=HaCluster(self, 2)
+ p = cluster[0].connect().session()
+ self.alt_setup(p, "1")
+ r = p.receiver("adq1;{create:always,node:{x-declare:{auto-delete:True,alternate-exchange:'altex1'}}}", capacity=1)
+ s = p.sender("adq1")
+ for m in ["aa","bb","cc"]: s.send(m)
+ p.sender("adq2;{create:always,node:{x-declare:{auto-delete:True}}}")
+ cluster[1].wait_queue("adq1")
+ cluster[1].wait_queue("adq2")
+ r.close() # trigger auto-delete of adq1
+ cluster[1].wait_no_queue("adq1")
+ cluster[1].assert_browse_backup("altq1", ["aa","bb","cc"])
+ cluster[1].wait_queue("adq2")
+
+ def test_auto_delete_crash(self):
+ """Verify auto-delete queues are deleted on backup if the primary crashes"""
+ cluster=HaCluster(self, 2)
+ p = cluster[0].connect().session()
+ self.alt_setup(p,"1")
+
+ # adq1 is subscribed so will be auto-deleted.
+ r = p.receiver("adq1;{create:always,node:{x-declare:{auto-delete:True,alternate-exchange:'altex1'}}}", capacity=1)
+ s = p.sender("adq1")
+ for m in ["aa","bb","cc"]: s.send(m)
+ # adq2 is subscribed after cluster[2] starts.
+ p.sender("adq2;{create:always,node:{x-declare:{auto-delete:True}}}")
+ # adq3 is never subscribed.
+ p.sender("adq3;{create:always,node:{x-declare:{auto-delete:True}}}")
+
+ cluster.start()
+ cluster[2].wait_status("ready")
+
+ p.receiver("adq2") # Subscribed after cluster[2] joined
+
+ for q in ["adq1","adq2","adq3","altq1"]: cluster[1].wait_queue(q)
+ for q in ["adq1","adq2","adq3","altq1"]: cluster[2].wait_queue(q)
+ cluster[0].kill()
+
+ cluster[1].wait_no_queue("adq1")
+ cluster[1].wait_no_queue("adq2")
+ cluster[1].wait_queue("adq3")
+
+ cluster[2].wait_no_queue("adq1")
+ cluster[2].wait_no_queue("adq2")
+ cluster[2].wait_queue("adq3")
+
+ cluster[1].assert_browse_backup("altq1", ["aa","bb","cc"])
+ cluster[2].assert_browse_backup("altq1", ["aa","bb","cc"])
+
+ def test_auto_delete_timeout(self):
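+        """Verify that auto-delete queues with a qpid.auto_delete_timeout
+        survive failover and are deleted only after the timeout expires,
+        including the special case of timeout 0."""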
+ cluster = HaCluster(self, 2)
+ # Test timeout
+ r1 = cluster[0].connect().session().receiver("q1;{create:always,node:{x-declare:{auto-delete:True,arguments:{'qpid.auto_delete_timeout':1}}}}")
+ # Test special case of timeout = 0
+ r0 = cluster[0].connect().session().receiver("q0;{create:always,node:{x-declare:{auto-delete:True,arguments:{'qpid.auto_delete_timeout':0}}}}")
+ cluster[1].wait_queue("q0")
+ cluster[1].wait_queue("q1")
+ cluster[0].kill()
+ cluster[1].wait_queue("q1") # Not timed out yet
+ cluster[1].wait_no_queue("q1", timeout=5) # Wait for timeout
+ cluster[1].wait_no_queue("q0", timeout=5) # Wait for timeout
+
+ def test_alt_exchange_dup(self):
+ """QPID-4349: if a queue has an alterante exchange and is deleted the
+ messages appear twice on the alternate, they are rerouted once by the
+ primary and again by the backup."""
+ cluster = HaCluster(self,2)
+
+ # Set up q with alternate exchange altex bound to altq.
+ s = cluster[0].connect().session()
+ s.sender("altex;{create:always,node:{type:topic,x-declare:{type:'fanout'}}}")
+ s.sender("altq;{create:always,node:{x-bindings:[{exchange:'altex',queue:altq}]}}")
+ snd = s.sender("q;{create:always,node:{x-declare:{alternate-exchange:'altex'}}}")
+ messages = [ str(n) for n in xrange(10) ]
+ for m in messages: snd.send(m)
+ cluster[1].assert_browse_backup("q", messages)
+ s.sender("q;{delete:always}").close()
+ cluster[1].assert_browse_backup("altq", messages)
+
+ def test_expired(self):
+ """Regression test for QPID-4379: HA does not properly handle expired messages"""
+ # Race between messages expiring and HA replicating consumer.
+ cluster = HaCluster(self, 2)
+ s = cluster[0].connect().session().sender("q;{create:always}", capacity=2)
+ def send_ttl_messages():
+ for i in xrange(100): s.send(Message(str(i), ttl=0.001), timeout=1)
+ send_ttl_messages()
+ cluster.start()
+ send_ttl_messages()
+
+ def test_stale_response(self):
+ """Check for race condition where a stale response is processed after an
+ event for the same queue/exchange """
+ cluster = HaCluster(self, 2)
+ s = cluster[0].connect().session()
+ s.sender("keep;{create:always}") # Leave this queue in place.
+ for i in xrange(1000):
+ s.sender("deleteme%s;{create:always,delete:always}"%(i)).close()
+ # It is possible for the backup to attempt to subscribe after the queue
+ # is deleted. This is not an error, but is logged as an error on the primary.
+ # The backup does not log this as an error so we only check the backup log for errors.
+ self.assert_log_no_errors(cluster[1])
+
+ def test_missed_recreate(self):
+ """If a queue or exchange is destroyed and one with the same name re-created
+ while a backup is disconnected, the backup should also delete/recreate
+ the object when it re-connects"""
+ cluster = HaCluster(self, 3)
+ sn = cluster[0].connect().session()
+ # Create a queue with messages
+ s = sn.sender("qq;{create:always}")
+ msgs = [str(i) for i in xrange(3)]
+ for m in msgs: s.send(m)
+ cluster[1].assert_browse_backup("qq", msgs)
+ cluster[2].assert_browse_backup("qq", msgs)
+ # Set up an exchange with a binding.
+ sn.sender("xx;{create:always,node:{type:topic}}")
+ sn.sender("xxq;{create:always,node:{x-bindings:[{exchange:'xx',queue:'xxq',key:xxq}]}}")
+ cluster[1].wait_address("xx")
+ self.assertEqual(cluster[1].agent().getExchange("xx").values["bindingCount"], 1)
+ cluster[2].wait_address("xx")
+ self.assertEqual(cluster[2].agent().getExchange("xx").values["bindingCount"], 1)
+
+ # Simulate the race by re-creating the objects before promoting the new primary
+ cluster.kill(0, False)
+ xdecl = "x-declare:{arguments:{'qpid.replicate':'all'}}"
+ node = "node:{%s}"%(xdecl)
+ sn = cluster[1].connect_admin().session()
+ sn.sender("qq;{delete:always}").close()
+ s = sn.sender("qq;{create:always, %s}"%(node))
+ s.send("foo")
+ sn.sender("xx;{delete:always}").close()
+ sn.sender("xx;{create:always,node:{type:topic,%s}}"%(xdecl))
+ cluster[1].promote()
+ cluster[1].wait_status("active")
+ # Verify we are not still using the old objects on cluster[2]
+ cluster[2].assert_browse_backup("qq", ["foo"])
+ cluster[2].wait_address("xx")
+ self.assertEqual(cluster[2].agent().getExchange("xx").values["bindingCount"], 0)
+
+ def test_redeclare_exchange(self):
+ """Ensure that re-declaring an exchange is an HA no-op"""
+ cluster = HaCluster(self, 2)
+ ps = cluster[0].connect().session()
+ ps.sender("ex1;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':all}, type:'fanout'}}}")
+ ps.sender("ex2;{create:always,node:{type:topic,x-declare:{arguments:{'qpid.replicate':all}, type:'fanout', alternate-exchange:'ex1'}}}")
+ cluster[1].wait_backup("ex1")
+ cluster[1].wait_backup("ex2")
+
+ # Use old API to re-declare the exchange
+ old_conn = cluster[0].connect_old()
+ old_sess = old_conn.session(str(qpid.datatypes.uuid4()))
+ old_sess.exchange_declare(exchange='ex1', type='fanout')
+ cluster[1].wait_backup("ex1")
+
def fairshare(msgs, limit, levels):
"""
Generator to return prioritised messages in expected order for a given fairshare limit
@@ -812,7 +912,7 @@ def fairshare(msgs, limit, levels):
msgs = postponed
count = 0
last_priority = None
- postponed = []
+ postponed = [ ]
msg = msgs.pop(0)
if last_priority and priority_level(msg.priority, levels) == last_priority:
count += 1
@@ -834,7 +934,7 @@ def priority_level(value, levels):
offset = 5-math.ceil(levels/2.0)
return min(max(value - offset, 0), levels-1)
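+# Worked example: with levels=4, offset = 5-ceil(2.0) = 3, so priorities 0-3
+# map to level 0, 4->1, 5->2, and 6-9 clamp to the top level 3.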
-class LongTests(BrokerTest):
+class LongTests(HaBrokerTest):
"""Tests that can run for a long time if -DDURATION=<minutes> is set"""
def duration(self):
@@ -860,7 +960,7 @@ class LongTests(BrokerTest):
"""Wait for receiver r to pass n"""
def check():
r.check() # Verify no exceptions
- return r.received > n
+ return r.received > n + 100
assert retry(check), "Stalled %s at %s"%(r.queue, n)
for r in receivers: wait_passed(r, 0)
@@ -868,87 +968,176 @@ class LongTests(BrokerTest):
# Kill and restart brokers in a cycle:
endtime = time.time() + self.duration()
i = 0
+ primary = 0
try:
while time.time() < endtime or i < 3: # At least 3 iterations
+                # Precondition: all 3 brokers are running and
+                # primary is the index of the promoted primary.
for s in senders: s.sender.assert_running()
for r in receivers: r.receiver.assert_running()
- checkpoint = [ r.received for r in receivers ]
- # Don't kill primary till it is active and the next
- # backup is ready, otherwise we can lose messages.
- brokers[i%3].wait_status("active")
- brokers[(i+1)%3].wait_status("ready")
- brokers.bounce(i%3)
+ checkpoint = [ r.received+100 for r in receivers ]
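+                # Checkpoints are set 100 ahead so wait_passed proves real
+                # progress past each kill/restart, not a single message trickling through.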
+ dead = None
+ victim = random.randint(0,2)
+ if victim == primary:
+ # Don't kill primary till it is active and the next
+ # backup is ready, otherwise we can lose messages.
+ brokers[victim].wait_status("active")
+ next = (victim+1)%3
+ brokers[next].wait_status("ready")
+ brokers.bounce(victim) # Next one is promoted
+ primary = next
+ else:
+ brokers.kill(victim, False)
+ dead = victim
+
+ # At this point the primary is running with 1 or 2 backups
+ # Make sure we are not stalled
+ map(wait_passed, receivers, checkpoint)
+ # Run another checkpoint to ensure things work in this configuration
+ checkpoint = [ r.received+100 for r in receivers ]
+ map(wait_passed, receivers, checkpoint)
+
+ if dead is not None:
+ brokers.restart(dead) # Restart backup
+ brokers[dead].ready()
+ dead = None
i += 1
- map(wait_passed, receivers, checkpoint) # Wait for all receivers
except:
traceback.print_exc()
raise
finally:
for s in senders: s.stop()
for r in receivers: r.stop()
- dead = []
+ unexpected_dead = []
for i in xrange(3):
- if not brokers[i].is_running(): dead.append(i)
- brokers.kill(i, False)
- if dead: raise Exception("Brokers not running: %s"%dead)
+ if not brokers[i].is_running() and i != dead:
+ unexpected_dead.append(i)
+ if brokers[i].is_running(): brokers.kill(i, False)
+ if unexpected_dead:
+ raise Exception("Brokers not running: %s"%unexpected_dead)
+
+ def test_qmf_order(self):
+ """QPID 4402: HA QMF events can be out of order.
+ This test mimics the test described in the JIRA. Two threads repeatedly
+ declare the same auto-delete queue and close their connection.
+ """
+ broker = Broker(self)
+ class Receiver(Thread):
+ def __init__(self, qname):
+ Thread.__init__(self)
+ self.qname = qname
+ self.stopped = False
+
+ def run(self):
+ while not self.stopped:
+ self.connection = broker.connect()
+ try:
+ self.connection.session().receiver(
+ self.qname+";{create:always,node:{x-declare:{auto-delete:True}}}")
+ except NotFound: pass # Can occur occasionally, not an error.
+ try: self.connection.close()
+ except: pass
+
+ class QmfObject(object):
+ """Track existance of an object and validate QMF events"""
+ def __init__(self, type_name, name_field, name):
+ self.type_name, self.name_field, self.name = type_name, name_field, name
+ self.exists = False
+
+ def qmf_event(self, event):
+ content = event.content[0]
+ event_type = content['_schema_id']['_class_name']
+ values = content['_values']
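+                # QMF v2 events arrive as a list of maps: the schema class name
+                # is under '_schema_id' and the object's fields (e.g. qName,
+                # disp) are under '_values'.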
+ if event_type == self.type_name+"Declare" and values[self.name_field] == self.name:
+ disp = values['disp']
+ log.debug("Event %s: disp=%s exists=%s"%(
+ event_type, values['disp'], self.exists))
+ if self.exists: assert values['disp'] == 'existing'
+ else: assert values['disp'] == 'created'
+ self.exists = True
+ elif event_type == self.type_name+"Delete" and values[self.name_field] == self.name:
+ log.debug("Event %s: exists=%s"%(event_type, self.exists))
+ assert self.exists
+ self.exists = False
+
+ # Verify order of QMF events.
+ helper = EventHelper()
+ r = broker.connect().session().receiver(helper.eventAddress())
+ threads = [Receiver("qq"), Receiver("qq")]
+ for t in threads: t.start()
+ queue = QmfObject("queue", "qName", "qq")
+ finish = time.time() + self.duration()
+ try:
+ while time.time() < finish:
+ queue.qmf_event(r.fetch())
+ finally:
+ for t in threads: t.stopped = True; t.join()
-class RecoveryTests(BrokerTest):
+class RecoveryTests(HaBrokerTest):
"""Tests for recovery after a failure."""
def test_queue_hold(self):
"""Verify that the broker holds queues without sufficient backup,
i.e. does not complete messages sent to those queues."""
- # We don't want backups to time out for this test, set long timeout.
- cluster = HaCluster(self, 4, args=["--ha-backup-timeout=100000"]);
- # Wait for the primary to be ready
- cluster[0].wait_status("active")
- # Create a queue before the failure.
- s1 = cluster.connect(0).session().sender("q1;{create:always}")
- for b in cluster: b.wait_backup("q1")
- for i in xrange(100): s1.send(str(i))
- # Kill primary and 2 backups
- for i in [0,1,2]: cluster.kill(i, False)
- cluster[3].promote() # New primary, backups will be 1 and 2
- cluster[3].wait_status("recovering")
-
- def assertSyncTimeout(s):
- try:
- s.sync(timeout=.01)
- self.fail("Expected Timeout exception")
- except Timeout: pass
-
- # Create a queue after the failure
- s2 = cluster.connect(3).session().sender("q2;{create:always}")
-
- # Verify that messages sent are not completed
- for i in xrange(100,200): s1.send(str(i), sync=False); s2.send(str(i), sync=False)
- assertSyncTimeout(s1)
- self.assertEqual(s1.unsettled(), 100)
- assertSyncTimeout(s2)
- self.assertEqual(s2.unsettled(), 100)
-
- # Verify we can receive even if sending is on hold:
- cluster[3].assert_browse("q1", [str(i) for i in range(100)+range(100,200)])
-
- # Restart backups, verify queues are released only when both backups are up
- cluster.restart(1)
- assertSyncTimeout(s1)
- self.assertEqual(s1.unsettled(), 100)
- assertSyncTimeout(s2)
- self.assertEqual(s2.unsettled(), 100)
- self.assertEqual(cluster[3].ha_status(), "recovering")
- cluster.restart(2)
-
- # Verify everything is up to date and active
- def settled(sender): sender.sync(); return sender.unsettled() == 0;
- assert retry(lambda: settled(s1)), "Unsetttled=%s"%(s1.unsettled())
- assert retry(lambda: settled(s2)), "Unsetttled=%s"%(s2.unsettled())
- cluster[1].assert_browse_backup("q1", [str(i) for i in range(100)+range(100,200)])
- cluster[1].assert_browse_backup("q2", [str(i) for i in range(100,200)])
- cluster[3].wait_status("active"),
- s1.session.connection.close()
- s2.session.connection.close()
+ l = LogLevel(ERROR) # Hide expected WARNING log messages from failover.
+ try:
+ # We don't want backups to time out for this test, set long timeout.
+            cluster = HaCluster(self, 4, args=["--ha-backup-timeout=120"])
+ # Wait for the primary to be ready
+ cluster[0].wait_status("active")
+ for b in cluster[1:4]: b.wait_status("ready")
+ # Create a queue before the failure.
+ s1 = cluster.connect(0).session().sender("q1;{create:always}")
+ for b in cluster: b.wait_backup("q1")
+ for i in xrange(100): s1.send(str(i))
+
+ # Kill primary and 2 backups
+ cluster[3].wait_status("ready")
+ for i in [0,1,2]: cluster.kill(i, False)
+ cluster[3].promote() # New primary, backups will be 1 and 2
+ cluster[3].wait_status("recovering")
+
+ def assertSyncTimeout(s):
+ try:
+ s.sync(timeout=.01)
+ self.fail("Expected Timeout exception")
+ except Timeout: pass
+
+ # Create a queue after the failure
+ s2 = cluster.connect(3).session().sender("q2;{create:always}")
+
+ # Verify that messages sent are not completed
+ for i in xrange(100,200):
+                s1.send(str(i), sync=False)
+ s2.send(str(i), sync=False)
+ assertSyncTimeout(s1)
+ self.assertEqual(s1.unsettled(), 100)
+ assertSyncTimeout(s2)
+ self.assertEqual(s2.unsettled(), 100)
+
+ # Verify we can receive even if sending is on hold:
+ cluster[3].assert_browse("q1", [str(i) for i in range(200)])
+
+ # Restart backups, verify queues are released only when both backups are up
+ cluster.restart(1)
+ assertSyncTimeout(s1)
+ self.assertEqual(s1.unsettled(), 100)
+ assertSyncTimeout(s2)
+ self.assertEqual(s2.unsettled(), 100)
+ cluster.restart(2)
+
+ # Verify everything is up to date and active
+            def settled(sender): sender.sync(timeout=1); return sender.unsettled() == 0
+            assert retry(lambda: settled(s1)), "Unsettled=%s"%(s1.unsettled())
+            assert retry(lambda: settled(s2)), "Unsettled=%s"%(s2.unsettled())
+            cluster[1].assert_browse_backup("q1", [str(i) for i in range(200)])
+ cluster[1].assert_browse_backup("q2", [str(i) for i in range(100,200)])
+ cluster[3].wait_status("active"),
+ s1.session.connection.close()
+ s2.session.connection.close()
+ finally: l.restore()
def test_expected_backup_timeout(self):
"""Verify that we time-out expected backups and release held queues
@@ -972,6 +1161,52 @@ class RecoveryTests(BrokerTest):
s.sync(timeout=1) # And released after the timeout.
self.assertEqual(cluster[2].ha_status(), "active")
+ def test_join_ready_cluster(self):
+ """If we join a cluster where the primary is dead, the new primary is
+ not yet promoted and there are ready backups then we should refuse
+ promotion so that one of the ready backups can be chosen."""
+ # FIXME aconway 2012-10-05: smaller timeout
+ cluster = HaCluster(self, 2, args=["--link-heartbeat-interval", 1])
+ cluster[0].wait_status("active")
+ cluster[1].wait_status("ready")
+ cluster.bounce(0, promote_next=False)
+ self.assertRaises(Exception, cluster[0].promote)
+ os.kill(cluster[1].pid, signal.SIGSTOP) # Test for timeout if unresponsive.
+ cluster.bounce(0, promote_next=False)
+ cluster[0].promote()
+
+
+class ConfigurationTests(HaBrokerTest):
+ """Tests for configuration settings."""
+
+ def test_client_broker_url(self):
+ """Check that setting of broker and public URLs obeys correct defaulting
+ and precedence"""
+
+ def check(broker, brokers, public):
+ qmf = broker.qmf()
+ self.assertEqual(brokers, qmf.brokersUrl)
+ self.assertEqual(public, qmf.publicUrl)
+
+ def start(brokers, public, known=None):
+ args=[]
+ if brokers: args.append("--ha-brokers-url="+brokers)
+ if public: args.append("--ha-public-url="+public)
+ if known: args.append("--known-hosts-url="+known)
+ return HaBroker(self, args=args)
+
+        # Both set explicitly, no defaulting
+ b = start("foo:123", "bar:456")
+ check(b, "amqp:tcp:foo:123", "amqp:tcp:bar:456")
+ b.set_brokers_url("foo:999")
+ check(b, "amqp:tcp:foo:999", "amqp:tcp:bar:456")
+ b.set_public_url("bar:999")
+ check(b, "amqp:tcp:foo:999", "amqp:tcp:bar:999")
+
+ # Allow "none" to mean "not set"
+ b = start("none", "none")
+ check(b, "", "")
+
if __name__ == "__main__":
shutil.rmtree("brokertest.tmp", True)
qpid_ha = os.getenv("QPID_HA_EXEC")
@@ -979,5 +1214,5 @@ if __name__ == "__main__":
os.execvp("qpid-python-test",
["qpid-python-test", "-m", "ha_tests"] + sys.argv[1:])
else:
- print "Skipping ha_tests, qpid_ha not available"
+ print "Skipping ha_tests, %s not available"%(qpid_ha)
diff --git a/cpp/src/tests/ipv6_test b/cpp/src/tests/ipv6_test
index 9d1cb2acdd..f47e721513 100755
--- a/cpp/src/tests/ipv6_test
+++ b/cpp/src/tests/ipv6_test
@@ -122,43 +122,3 @@ else
rm rdata-in rdata-out
fi
-# Cluster smoke test follows
-test -z $CLUSTER_LIB && exit 0 # Exit if cluster not supported.
-
-## Test failover in a cluster using IPv6 only
-. cpg_check.sh
-cpg_enabled || exit 0
-
-pick_port() {
- # We need a fixed port to set --cluster-url. Use qpidd to pick a free port.
- # Note this method is racy
- PICK=$($QPIDD_EXEC -dp0)
- $QPIDD_EXEC -qp $PICK
- echo $PICK
-}
-
-ssl_cluster_broker() { # $1 = port
- $QPIDD_EXEC $COMMON_OPTS --load-module $CLUSTER_LIB --cluster-name ipv6_test.$HOSTNAME.$$ --cluster-url amqp:[$TEST_HOSTNAME]:$1 --port $1
- # Wait for broker to be ready
- ./qpid-ping -b $TEST_HOSTNAME -qp $1 || { echo "Cannot connect to broker on $1"; exit 1; }
- echo "Running IPv6 cluster broker on port $1"
-}
-
-PORT1=`pick_port`; ssl_cluster_broker $PORT1
-PORT2=`pick_port`; ssl_cluster_broker $PORT2
-
-# Pipe receive output to uniq to remove duplicates
-./qpid-receive --connection-options "{reconnect:true, reconnect-timeout:5}" --failover-updates -b amqp:[$TEST_HOSTNAME]:$PORT1 -a "foo;{create:always}" -f | uniq > ssl_test_receive.tmp &
-
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=one -a "foo;{create:always}"
-
-$QPIDD_EXEC -qp $PORT1 # Kill broker 1 receiver should fail-over.
-./qpid-send -b amqp:[$TEST_HOSTNAME]:$PORT2 --content-string=two -a "foo;{create:always}" --send-eos 1
-wait # Wait for qpid-receive
-{ echo one; echo two; } > ssl_test_receive.cmp
-diff ssl_test_receive.tmp ssl_test_receive.cmp || { echo "Failover failed"; exit 1; }
-
-$QPIDD_EXEC -qp $PORT2
-
-rm -f ssl_test_receive.*
-
diff --git a/cpp/src/tests/legacystore/.valgrind.supp b/cpp/src/tests/legacystore/.valgrind.supp
new file mode 100644
index 0000000000..5c1c5377bf
--- /dev/null
+++ b/cpp/src/tests/legacystore/.valgrind.supp
@@ -0,0 +1,35 @@
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag
+ fun:_ZNSsC1EPKcRKSaIcE
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs4_Rep8_M_cloneERKSaIcEm
+ fun:_ZNSs7reserveEm
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSs9_M_mutateEmmm
+ fun:_ZNSs15_M_replace_safeEmmPKcm
+}
+
+{
+ <insert_a_suppression_name_here>
+ Memcheck:Leak
+ fun:_Znwm
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE
+ fun:_ZNSsC1IPcEET_S1_RKSaIcE
+}
+
diff --git a/cpp/src/tests/legacystore/.valgrindrc b/cpp/src/tests/legacystore/.valgrindrc
new file mode 100644
index 0000000000..4aba7661de
--- /dev/null
+++ b/cpp/src/tests/legacystore/.valgrindrc
@@ -0,0 +1,7 @@
+--gen-suppressions=all
+--leak-check=full
+--demangle=yes
+--suppressions=.valgrind.supp
+--num-callers=25
+--trace-children=yes
+
diff --git a/cpp/src/tests/legacystore/CMakeLists.txt b/cpp/src/tests/legacystore/CMakeLists.txt
new file mode 100644
index 0000000000..6cfaa7ec17
--- /dev/null
+++ b/cpp/src/tests/legacystore/CMakeLists.txt
@@ -0,0 +1,117 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if(BUILD_LEGACYSTORE)
+
+message(STATUS "Building legacystore tests")
+
+# Enable dashboard reporting.
+include (CTest)
+
+# Make sure that everything gets built before the tests
+# Need to create a var with all the necessary top level targets
+
+# If we're linking Boost for DLLs, turn that on for the unit test too.
+if (QPID_LINK_BOOST_DYNAMIC)
+ add_definitions(-DBOOST_TEST_DYN_LINK)
+endif (QPID_LINK_BOOST_DYNAMIC)
+
+include_directories( ${CMAKE_CURRENT_SOURCE_DIR} )
+
+include (FindPythonInterp)
+
+# # Inherit environment from parent script
+# set (abs_srcdir ${CMAKE_CURRENT_SOURCE_DIR})
+# set (abs_builddir ${CMAKE_CURRENT_BINARY_DIR})
+# set (abs_top_srcdir ${CMAKE_SOURCE_DIR})
+# set (abs_top_builddir ${CMAKE_BINARY_DIR})
+# set (builddir_lib_suffix "")
+
+# If valgrind is selected in the configuration step, set up the path to it
+# for CTest.
+if (ENABLE_VALGRIND)
+ set (MEMORYCHECK_COMMAND ${VALGRIND})
+ set (MEMORYCHECK_COMMAND_OPTIONS "--gen-suppressions=all
+--leak-check=full
+--demangle=yes
+--suppressions=${CMAKE_CURRENT_SOURCE_DIR}/.valgrind.supp
+--num-callers=25
+--log-file=ctest_valgrind.vglog")
+endif (ENABLE_VALGRIND)
+
+# Like this to work with cmake 2.4 on Unix
+set (qpid_test_boost_libs
+ ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY} ${Boost_SYSTEM_LIBRARY})
+
+#
+# Unit test program
+#
+# Unit tests are normally built as a single program to reduce valgrind
+# overhead when running the tests. If you want to build a subset of the
+# tests, run ccmake and set unit_tests_to_build to the set you want.
+# HACK ALERT - here the unit tests are built individually instead, to
+# resolve a conflict with running multiple brokers that connect to
+# 0.0.0.0:5672 and stomp on each other's store directory.
+
+#
+# define_selftest
+# macro to accept the name of a single source file and to create a
+# unit test executable that runs the source.
+#
+MACRO (define_selftest theSourceFile)
+add_executable (legacystore_${theSourceFile}
+ unit_test
+ ${theSourceFile}
+ ${platform_test_additions})
+target_link_libraries (legacystore_${theSourceFile}
+ ${qpid_test_boost_libs}
+ qpidmessaging qpidbroker qmfconsole legacystore)
+get_property(ls_include TARGET legacystore_${theSourceFile} PROPERTY INCLUDE_DIRECTORIES)
+list(APPEND ls_include ${abs_top_srcdir}/src/qpid/legacystore)
+list(APPEND ls_include ${abs_top_srcdir}/src/tests)
+set_target_properties (legacystore_${theSourceFile} PROPERTIES
+ INCLUDE_DIRECTORIES "${ls_include}"
+ COMPILE_DEFINITIONS _IN_QPID_BROKER)
+remember_location(legacystore_${theSourceFile})
+set(test_wrap ${shell} ${CMAKE_CURRENT_SOURCE_DIR}/run_test${test_script_suffix})
+
+add_test (legacystore_${theSourceFile} ${test_wrap} ${legacystore_${theSourceFile}_LOCATION})
+ENDMACRO (define_selftest)
+
+# add_definitions(-H)
+
+define_selftest (SimpleTest)
+define_selftest (OrderingTest)
+define_selftest (TransactionalTest)
+define_selftest (TwoPhaseCommitTest)
+
+#
+# Other test programs
+#
+
+# This should ideally be done as part of the test run, but I don't know a way
+# to get these arguments and the working directory set like Makefile.am does,
+# and have that run during the test pass.
+if (PYTHON_EXECUTABLE)
+ set (python_bld ${CMAKE_CURRENT_BINARY_DIR}/python)
+  execute_process(COMMAND ${PYTHON_EXECUTABLE} setup.py install --prefix=${python_bld} --install-lib=${python_bld} --install-scripts=${python_bld}/commands
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/../python)
+endif (PYTHON_EXECUTABLE)
+
+endif (BUILD_LEGACYSTORE)
diff --git a/cpp/src/tests/legacystore/MessageUtils.h b/cpp/src/tests/legacystore/MessageUtils.h
new file mode 100644
index 0000000000..6552357c72
--- /dev/null
+++ b/cpp/src/tests/legacystore/MessageUtils.h
@@ -0,0 +1,105 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include <qpid/broker/Message.h>
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/amqp_0_10/MessageTransfer.h>
+#include <qpid/framing/AMQFrame.h>
+#include <qpid/framing/all_method_bodies.h>
+#include <qpid/framing/Uuid.h>
+
+using namespace qpid::broker;
+using namespace qpid::framing;
+
+struct MessageUtils
+{
+ static Message createMessage(const std::string& exchange, const std::string& routingKey,
+ const Uuid& messageId=Uuid(), const bool durable = false,
+ const uint64_t contentSize = 0, const std::string& correlationId = std::string())
+ {
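+        // Assemble an AMQP 0-10 transfer by hand: a transfer method frame,
+        // then a header frame carrying the message and delivery properties.
+        // Content frames are appended separately via addContent().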
+ boost::intrusive_ptr<qpid::broker::amqp_0_10::MessageTransfer> msg(new qpid::broker::amqp_0_10::MessageTransfer());
+
+ AMQFrame method(( MessageTransferBody(ProtocolVersion(), exchange, 0, 0)));
+ AMQFrame header((AMQHeaderBody()));
+
+ msg->getFrames().append(method);
+ msg->getFrames().append(header);
+ MessageProperties* props = msg->getFrames().getHeaders()->get<MessageProperties>(true);
+ props->setContentLength(contentSize);
+ props->setMessageId(messageId);
+ props->setCorrelationId(correlationId);
+ msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setRoutingKey(routingKey);
+ if (durable)
+ msg->getFrames().getHeaders()->get<DeliveryProperties>(true)->setDeliveryMode(PERSISTENT);
+ return Message(msg, msg);
+ }
+
+ static void addContent(Message msg, const std::string& data)
+ {
+ AMQFrame content((AMQContentBody(data)));
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).getFrames().append(content);
+ }
+
+ struct MessageRetriever : public Consumer
+ {
+ MessageRetriever(Queue& q) : Consumer("test", CONSUMER), queue(q) {};
+
+ bool deliver(const QueueCursor& c, const Message& m)
+ {
+ message = m;
+ cursor = c;
+ return true;
+ };
+ void notify() {}
+ void cancel() {}
+ void acknowledged(const DeliveryRecord&) {}
+ OwnershipToken* getSession() { return 0; }
+
+ const Queue& queue;
+ Message message;
+ QueueCursor cursor;
+ };
+
+ static Message get(Queue& queue, QueueCursor* cursor = 0)
+ {
+ boost::shared_ptr<MessageRetriever> consumer(new MessageRetriever(queue));
+        if (!queue.dispatch(consumer)) throw qpid::Exception("No message found!");
+ if (cursor) *cursor = consumer->cursor;
+ return consumer->message;
+ }
+
+ static Uuid getMessageId(const Message& message)
+ {
+ return qpid::broker::amqp_0_10::MessageTransfer::get(message).getProperties<MessageProperties>()->getMessageId();
+ }
+
+ static std::string getCorrelationId(const Message& message)
+ {
+ return qpid::broker::amqp_0_10::MessageTransfer::get(message).getProperties<MessageProperties>()->getCorrelationId();
+ }
+
+ static void deliver(Message& msg, FrameHandler& h, uint16_t framesize)
+ {
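+        // Write the message to the handler as wire frames: the header first,
+        // then the content split into frames of at most framesize bytes.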
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).sendHeader(h, framesize, false, 0, 0, qpid::types::Variant::Map());
+ qpid::broker::amqp_0_10::MessageTransfer::get(msg).sendContent(h, framesize);
+ }
+
+};
diff --git a/cpp/src/tests/legacystore/OrderingTest.cpp b/cpp/src/tests/legacystore/OrderingTest.cpp
new file mode 100644
index 0000000000..92a09f0c60
--- /dev/null
+++ b/cpp/src/tests/legacystore/OrderingTest.cpp
@@ -0,0 +1,168 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/RecoveryManagerImpl.h>
+#include <qpid/framing/AMQHeaderBody.h>
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace mrg::msgstore;
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+QPID_AUTO_TEST_SUITE(OrderingTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+const std::string test_filename("OrderingTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const std::string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/OrderingTest");
+
+// === Helper fns ===
+
+const std::string name("OrderingQueue");
+std::auto_ptr<MessageStoreImpl> store;
+QueueRegistry queues;
+Queue::shared_ptr queue;
+std::queue<Uuid> ids;
+
+class TestConsumer : public Consumer
+{
+ public:
+
+ TestConsumer(Queue::shared_ptr q, std::queue<Uuid>& i) : Consumer("test", CONSUMER), queue(q), ids(i) {};
+
+ bool deliver(const QueueCursor& cursor, const Message& message)
+ {
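+        // Dequeue each message as it is delivered and check that its id
+        // matches the next expected id, i.e. FIFO order is preserved.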
+ queue->dequeue(0, cursor);
+ BOOST_CHECK_EQUAL(ids.front(), MessageUtils::getMessageId(message));
+ ids.pop();
+ return true;
+ };
+ void notify() {}
+ void cancel() {}
+ void acknowledged(const DeliveryRecord&) {}
+ OwnershipToken* getSession() { return 0; }
+ private:
+ Queue::shared_ptr queue;
+ std::queue<Uuid>& ids;
+};
+boost::shared_ptr<TestConsumer> consumer;
+
+void setup()
+{
+ store = std::auto_ptr<MessageStoreImpl>(new MessageStoreImpl(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ queue = Queue::shared_ptr(new Queue(name, 0, store.get(), 0));
+ queue->create();
+ consumer = boost::shared_ptr<TestConsumer>(new TestConsumer(queue, ids));
+}
+
+void push()
+{
+ Uuid messageId(true);
+ ids.push(messageId);
+
+ Message msg = MessageUtils::createMessage("exchange", "routing_key", messageId, true, 0);
+
+ queue->deliver(msg);
+}
+
+bool pop()
+{
+ return queue->dispatch(consumer);
+}
+
+void restart()
+{
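+    // Simulate a broker restart: drop the in-memory queue and store, then
+    // re-open the store and run recovery to rebuild the queue from disk.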
+ queue.reset();
+ store.reset();
+
+ store = std::auto_ptr<MessageStoreImpl>(new MessageStoreImpl(&br));
+ store->init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (store.get());
+ RecoveryManagerImpl recoveryMgr(queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store->recover(recoveryMgr);
+
+ queue = queues.find(name);
+ consumer = boost::shared_ptr<TestConsumer>(new TestConsumer(queue, ids));
+}
+
+void check()
+{
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) ids.size(), queue->getMessageCount());
+    while (pop()); // keep popping till all messages are dequeued
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queue->getMessageCount());
+ BOOST_CHECK_EQUAL((size_t) 0, ids.size());
+}
+
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(Basic)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ std::cout << test_filename << ".Basic: " << std::flush;
+ setup();
+ //push on 10 messages
+ for (int i = 0; i < 10; i++) push();
+ restart();
+ check();
+ std::cout << "ok" << std::endl;
+}
+
+QPID_AUTO_TEST_CASE(Cycle)
+{
+ std::cout << test_filename << ".Cycle: " << std::flush;
+ setup();
+ //push on 10 messages:
+ for (int i = 0; i < 10; i++) push();
+ //pop 5:
+ for (int i = 0; i < 5; i++) pop();
+ //push on another 5:
+ for (int i = 0; i < 5; i++) push();
+ restart();
+ check();
+ std::cout << "ok" << std::endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/cpp/src/tests/legacystore/SimpleTest.cpp b/cpp/src/tests/legacystore/SimpleTest.cpp
new file mode 100644
index 0000000000..a49333d876
--- /dev/null
+++ b/cpp/src/tests/legacystore/SimpleTest.cpp
@@ -0,0 +1,497 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "tests/legacystore/MessageUtils.h"
+#include "qpid/legacystore/StoreException.h"
+#include "qpid/broker/DirectExchange.h"
+#include <qpid/broker/Queue.h>
+#include <qpid/broker/QueueSettings.h>
+#include <qpid/broker/RecoveryManagerImpl.h>
+#include <qpid/framing/AMQHeaderBody.h>
+#include <qpid/framing/FieldTable.h>
+#include <qpid/framing/FieldValue.h>
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+
+using boost::intrusive_ptr;
+using boost::static_pointer_cast;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace mrg::msgstore;
+using namespace std;
+
+QPID_AUTO_TEST_SUITE(SimpleTest)
+
+const string test_filename("SimpleTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/SimpleTest");
+
+// === Helper fns ===
+
+struct DummyHandler : OutputHandler
+{
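+    // Captures frames that would have been sent on the wire so tests can
+    // inspect the recovered header and content bodies.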
+ std::vector<AMQFrame> frames;
+
+ virtual void send(AMQFrame& frame){
+ frames.push_back(frame);
+ }
+};
+
+void recover(MessageStoreImpl& store, QueueRegistry& queues, ExchangeRegistry& exchanges, LinkRegistry& links)
+{
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (&store);
+ RecoveryManagerImpl recovery(queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store.recover(recovery);
+}
+
+void recover(MessageStoreImpl& store, ExchangeRegistry& exchanges)
+{
+ QueueRegistry queues;
+ LinkRegistry links;
+ recover(store, queues, exchanges, links);
+}
+
+void recover(MessageStoreImpl& store, QueueRegistry& queues)
+{
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ recover(store, queues, exchanges, links);
+}
+
+void bindAndUnbind(const string& exchangeName, const string& queueName,
+ const string& key, const FieldTable& args)
+{
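+    // Three store lifetimes: create and bind, recover and verify by
+    // unbinding, then recover again to confirm the binding is gone and
+    // the exchange args survived.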
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Exchange::shared_ptr exchange(new DirectExchange(exchangeName, true, args));
+ Queue::shared_ptr queue(new Queue(queueName, 0, &store, 0));
+ store.create(*exchange, qpid::framing::FieldTable());
+ store.create(*queue, qpid::framing::FieldTable());
+ BOOST_REQUIRE(exchange->bind(queue, key, &args));
+ store.bind(*exchange, *queue, key, args);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ Queue::shared_ptr queue = queues.find(queueName);
+ // check exchange args are still set
+ for (FieldTable::ValueMap::const_iterator i = args.begin(); i!=args.end(); i++) {
+ BOOST_CHECK(exchange->getArgs().get((*i).first)->getData() == (*i).second->getData());
+ }
+ //check it is bound by unbinding
+ BOOST_REQUIRE(exchange->unbind(queue, key, &args));
+ store.unbind(*exchange, *queue, key, args);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ Queue::shared_ptr queue = queues.find(queueName);
+ // check exchange args are still set
+ for (FieldTable::ValueMap::const_iterator i = args.begin(); i!=args.end(); i++) {
+ BOOST_CHECK(exchange->getArgs().get((*i).first)->getData() == (*i).second->getData());
+ }
+ //make sure it is no longer bound
+ BOOST_REQUIRE(!exchange->unbind(queue, key, &args));
+ }
+}
+
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(CreateDelete)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".CreateDelete: " << flush;
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ string name("CreateDeleteQueue");
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+// TODO - check dir exists
+ BOOST_REQUIRE(queue.getPersistenceId());
+ store.destroy(queue);
+// TODO - check dir is deleted
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(EmptyRecover)
+{
+ cout << test_filename << ".EmptyRecover: " << flush;
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ //nothing to assert, just testing it doesn't blow up
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueCreate)
+{
+ cout << test_filename << ".QueueCreate: " << flush;
+
+ uint64_t id(0);
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+ BOOST_REQUIRE(queue.getPersistenceId());
+ id = queue.getPersistenceId();
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue.get());
+ BOOST_CHECK_EQUAL(id, queue->getPersistenceId());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueCreateWithSettings)
+{
+ cout << test_filename << ".QueueCreateWithSettings: " << flush;
+
+ FieldTable arguments;
+ arguments.setInt("qpid.max_count", 202);
+ arguments.setInt("qpid.max_size", 1003);
+ QueueSettings settings;
+ settings.populate(arguments, settings.storeSettings);
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, settings, &store, 0);
+ queue.create();
+ BOOST_REQUIRE(queue.getPersistenceId());
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getCount(), 202);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getSize(), 1003);
+ BOOST_CHECK_EQUAL(settings.maxDepth.getCount(), queue->getSettings().maxDepth.getCount());
+ BOOST_CHECK_EQUAL(settings.maxDepth.getSize(), queue->getSettings().maxDepth.getSize());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(QueueDestroy)
+{
+ cout << test_filename << ".QueueDestroy: " << flush;
+
+ string name("MyDurableQueue");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue queue(name, 0, &store, 0);
+ store.create(queue, qpid::framing::FieldTable());
+ store.destroy(queue);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ BOOST_REQUIRE(!registry.find(name));
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Enqueue)
+{
+ cout << test_filename << ".Enqueue: " << flush;
+
+ //TODO: this is largely copy & paste'd from MessageTest in
+ //qpid tree. ideally need some helper routines for reducing
+ //this to a simpler less duplicated form
+
+ string name("MyDurableQueue");
+ string exchange("MyExchange");
+ string routingKey("MyRoutingKey");
+ Uuid messageId(true);
+ string data1("abcdefg");
+ string data2("hijklmn");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue::shared_ptr queue(new Queue(name, 0, &store, 0));
+ queue->create();
+
+ Message msg = MessageUtils::createMessage(exchange, routingKey, messageId, true, 14);
+ MessageUtils::addContent(msg, data1);
+ MessageUtils::addContent(msg, data2);
+
+ msg.addAnnotation("abc", "xyz");
+
+ queue->deliver(msg);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) 1, queue->getMessageCount());
+ Message msg = MessageUtils::get(*queue);
+
+ BOOST_CHECK_EQUAL(routingKey, msg.getRoutingKey());
+ BOOST_CHECK_EQUAL(messageId, MessageUtils::getMessageId(msg));
+ BOOST_CHECK_EQUAL(std::string("xyz"), msg.getAnnotation("abc"));
+ BOOST_CHECK_EQUAL((u_int64_t) 14, msg.getContentSize());
+
+ DummyHandler handler;
+ MessageUtils::deliver(msg, handler, 100);
+ BOOST_CHECK_EQUAL((size_t) 2, handler.frames.size());
+ AMQContentBody* contentBody(dynamic_cast<AMQContentBody*>(handler.frames[1].getBody()));
+ BOOST_REQUIRE(contentBody);
+ BOOST_CHECK_EQUAL(data1.size() + data2.size(), contentBody->getData().size());
+ BOOST_CHECK_EQUAL(data1 + data2, contentBody->getData());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Dequeue)
+{
+ cout << test_filename << ".Dequeue: " << flush;
+
+ //TODO: reduce the duplication in these tests
+ string name("MyDurableQueue");
+ {
+ string exchange("MyExchange");
+ string routingKey("MyRoutingKey");
+ Uuid messageId(true);
+ string data("abcdefg");
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Queue::shared_ptr queue(new Queue(name, 0, &store, 0));
+ queue->create();
+
+ Message msg = MessageUtils::createMessage(exchange, routingKey, messageId, true, 7);
+ MessageUtils::addContent(msg, data);
+
+ queue->deliver(msg);
+
+ QueueCursor cursor;
+ MessageUtils::get(*queue, &cursor);
+ queue->dequeue(0, cursor);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ QueueRegistry registry;
+ registry.setStore (&store);
+ recover(store, registry);
+ Queue::shared_ptr queue = registry.find(name);
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queue->getMessageCount());
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeCreateAndDestroy)
+{
+ cout << test_filename << ".ExchangeCreateAndDestroy: " << flush;
+
+ uint64_t id(0);
+ string name("MyDurableExchange");
+ string type("direct");
+ FieldTable args;
+ args.setString("a", "A");
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ ExchangeRegistry registry;
+ Exchange::shared_ptr exchange = registry.declare(name, type, true, args).first;
+ store.create(*exchange, qpid::framing::FieldTable());
+ id = exchange->getPersistenceId();
+ BOOST_REQUIRE(id);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry registry;
+
+ recover(store, registry);
+
+ Exchange::shared_ptr exchange = registry.get(name);
+ BOOST_CHECK_EQUAL(id, exchange->getPersistenceId());
+ BOOST_CHECK_EQUAL(type, exchange->getType());
+ BOOST_REQUIRE(exchange->isDurable());
+ BOOST_CHECK_EQUAL(*args.get("a"), *exchange->getArgs().get("a"));
+ store.destroy(*exchange);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry registry;
+
+ recover(store, registry);
+
+ try {
+ Exchange::shared_ptr exchange = registry.get(name);
+ BOOST_FAIL("Expected exchange not to be found");
+ } catch (const SessionException& e) {
+ BOOST_CHECK_EQUAL((framing::ReplyCode) 404, e.code);
+ }
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeBindAndUnbind)
+{
+ cout << test_filename << ".ExchangeBindAndUnbind: " << flush;
+
+ bindAndUnbind("MyDurableExchange", "MyDurableQueue", "my-routing-key", FieldTable());
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeBindAndUnbindWithArgs)
+{
+ cout << test_filename << ".ExchangeBindAndUnbindWithArgs: " << flush;
+
+ FieldTable args;
+ args.setString("a", "A");
+ args.setString("b", "B");
+ bindAndUnbind("MyDurableExchange", "MyDurableQueue", "my-routing-key", args);
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(ExchangeImplicitUnbind)
+{
+ cout << test_filename << ".ExchangeImplicitUnbind: " << flush;
+
+ string exchangeName("MyDurableExchange");
+ string queueName1("MyDurableQueue1");
+ string queueName2("MyDurableQueue2");
+ string key("my-routing-key");
+ FieldTable args;
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1, true); // truncate store
+ Exchange::shared_ptr exchange(new DirectExchange(exchangeName, true, args));
+ Queue::shared_ptr queue1(new Queue(queueName1, 0, &store, 0));
+ Queue::shared_ptr queue2(new Queue(queueName2, 0, &store, 0));
+ store.create(*exchange, qpid::framing::FieldTable());
+ store.create(*queue1, qpid::framing::FieldTable());
+ store.create(*queue2, qpid::framing::FieldTable());
+ store.bind(*exchange, *queue1, key, args);
+ store.bind(*exchange, *queue2, key, args);
+ //delete queue1:
+ store.destroy(*queue1);
+ }//db will be closed
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ //ensure recovery works ok:
+ recover(store, queues, exchanges, links);
+
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ BOOST_REQUIRE(!queues.find(queueName1).get());
+ BOOST_REQUIRE(queues.find(queueName2).get());
+
+ //delete exchange:
+ store.destroy(*exchange);
+ }
+ {
+ MessageStoreImpl store(&br);
+ store.init(test_dir, 4, 1);
+ ExchangeRegistry exchanges;
+ QueueRegistry queues;
+ LinkRegistry links;
+
+ //ensure recovery works ok:
+ recover(store, queues, exchanges, links);
+
+ try {
+ Exchange::shared_ptr exchange = exchanges.get(exchangeName);
+ BOOST_FAIL("Expected exchange not to be found");
+ } catch (const SessionException& e) {
+ BOOST_CHECK_EQUAL((framing::ReplyCode) 404, e.code);
+ }
+ Queue::shared_ptr queue = queues.find(queueName2);
+ store.destroy(*queue);
+ }
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/cpp/src/tests/legacystore/TestFramework.cpp b/cpp/src/tests/legacystore/TestFramework.cpp
new file mode 100644
index 0000000000..2f7faf7682
--- /dev/null
+++ b/cpp/src/tests/legacystore/TestFramework.cpp
@@ -0,0 +1,30 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Defines broker to be used by tests
+
+#include "unit_test.h"
+#include "TestFramework.h"
+#include "qpid/broker/Broker.h"
+
+#include <iostream>
+
+//BOOST_GLOBAL_FIXTURE( testBroker )
diff --git a/cpp/src/tests/legacystore/TestFramework.h b/cpp/src/tests/legacystore/TestFramework.h
new file mode 100644
index 0000000000..f3066db602
--- /dev/null
+++ b/cpp/src/tests/legacystore/TestFramework.h
@@ -0,0 +1,37 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+// Defines broker to be used by tests
+
+#include "unit_test.h"
+
+#include <qpid/broker/Broker.h>
+
+namespace {
+ // test broker
+ qpid::broker::Broker::Options opts;
+ qpid::broker::Broker br(opts);
+/*
+ struct testBroker {
+ testBroker() {}
+ ~testBroker() {}
+ };*/
+}
diff --git a/cpp/src/tests/legacystore/TransactionalTest.cpp b/cpp/src/tests/legacystore/TransactionalTest.cpp
new file mode 100644
index 0000000000..2d3f6f922c
--- /dev/null
+++ b/cpp/src/tests/legacystore/TransactionalTest.cpp
@@ -0,0 +1,351 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include "qpid/legacystore/StoreException.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/broker/RecoveryManagerImpl.h"
+#include "qpid/framing/AMQHeaderBody.h"
+#include "qpid/log/Statement.h"
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace mrg::msgstore;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace std;
+
+namespace {
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+}
+
+QPID_AUTO_TEST_SUITE(TransactionalTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+const string test_filename("TransactionalTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+const string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TransactionalTest");
+
+// Test txn context with a special setCompleteFailure() method which prevents the entire "txn complete" process from happening
+class TestTxnCtxt : public TxnCtxt
+{
+ public:
+ TestTxnCtxt(IdSequence* _loggedtx) : TxnCtxt(_loggedtx) {}
+ void setCompleteFailure(const unsigned num_queues_rem) {
+ // Remove queue members from impactedQueues until num_queues_rem remain,
+ // simulating a multi-queue txn completion failure.
+ while (impactedQueues.size() > num_queues_rem) impactedQueues.erase(impactedQueues.begin());
+ }
+ void resetPreparedXidStorePtr() { preparedXidStorePtr = 0; }
+};
+
+// Test store with a special TMSbegin() which returns a TestTxnCtxt.
+// begin(), commit(), and abort() all hide functions in MessageStoreImpl. To avoid compiler
+// warnings/errors, these are renamed with a 'TMS' prefix.
+class TestMessageStore: public MessageStoreImpl
+{
+ public:
+ TestMessageStore(qpid::broker::Broker* br, const char* envpath = 0) : MessageStoreImpl(br, envpath) {}
+ std::auto_ptr<qpid::broker::TransactionContext> TMSbegin() {
+ checkInit();
+ // pass sequence number for c/a
+ return auto_ptr<TransactionContext>(new TestTxnCtxt(&messageIdSequence));
+ }
+ void TMScommit(TransactionContext& ctxt, const bool complete_prepared_list) {
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ localPrepare(dynamic_cast<TxnCtxt*>(txn));
+ if (!complete_prepared_list) dynamic_cast<TestTxnCtxt*>(txn)->resetPreparedXidStorePtr();
+ }
+ completed(*dynamic_cast<TxnCtxt*>(txn), true);
+ }
+ void TMSabort(TransactionContext& ctxt, const bool complete_prepared_list)
+ {
+ checkInit();
+ TxnCtxt* txn(check(&ctxt));
+ if (!txn->isTPC()) {
+ localPrepare(dynamic_cast<TxnCtxt*>(txn));
+ if (!complete_prepared_list) dynamic_cast<TestTxnCtxt*>(txn)->resetPreparedXidStorePtr();
+ }
+ completed(*dynamic_cast<TxnCtxt*>(txn), false);
+ }
+};
+
+// === Helper fns ===
+
+const string nameA("queueA");
+const string nameB("queueB");
+//const Uuid messageId(true);
+std::auto_ptr<MessageStoreImpl> store;
+std::auto_ptr<QueueRegistry> queues;
+Queue::shared_ptr queueA;
+Queue::shared_ptr queueB;
+
+template <class T>
+void setup()
+{
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ //create two queues:
+ queueA = Queue::shared_ptr(new Queue(nameA, 0, store.get(), 0));
+ queueA->create();
+ queueB = Queue::shared_ptr(new Queue(nameB, 0, store.get(), 0));
+ queueB->create();
+}
+
+template <class T>
+void restart()
+{
+ queueA.reset();
+ queueB.reset();
+ queues.reset();
+ store.reset();
+
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1);
+ queues = std::auto_ptr<QueueRegistry>(new QueueRegistry);
+ ExchangeRegistry exchanges;
+ LinkRegistry links;
+ sys::Timer t;
+ DtxManager mgr(t);
+ mgr.setStore (store.get());
+ RecoveryManagerImpl recovery(*queues, exchanges, links, mgr, br.getProtocolRegistry());
+ store->recover(recovery);
+
+ queueA = queues->find(nameA);
+ queueB = queues->find(nameB);
+}
+
+Message createMessage(const string& id, const string& exchange="exchange", const string& key="routing_key")
+{
+ return MessageUtils::createMessage(exchange, key, Uuid(), true, 0, id);
+}
+
+void checkMsg(Queue::shared_ptr& queue, u_int32_t size, const string& msgid = "<none>")
+{
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(size, queue->getMessageCount());
+ if (size > 0) {
+ Message msg = MessageUtils::get(*queue);
+ BOOST_REQUIRE(msg);
+ BOOST_CHECK_EQUAL(msgid, MessageUtils::getCorrelationId(msg));
+ }
+}
+
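+// Moves a message from queueA to queueB inside a single local transaction,
+// restarts the store, and checks that after recovery the message survives on
+// exactly one queue: queueB if the txn was committed, queueA if it was aborted.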
+void swap(bool commit)
+{
+ setup<MessageStoreImpl>();
+
+ //create message and enqueue it onto first queue:
+ Message msgA = createMessage("Message", "exchange", "routing_key");
+ queueA->deliver(msgA);
+
+ QueueCursor cursorB;
+ Message msgB = MessageUtils::get(*queueA, &cursorB);
+ BOOST_REQUIRE(msgB);
+ //move the message from one queue to the other as a transaction
+ std::auto_ptr<TransactionContext> txn = store->begin();
+ TxBuffer tx;
+ queueB->deliver(msgB, &tx);//note: need to enqueue it first to avoid message being deleted
+
+ queueA->dequeue(txn.get(), cursorB);
+ tx.prepare(txn.get());
+ if (commit) {
+ store->commit(*txn);
+ } else {
+ store->abort(*txn);
+ }
+
+ restart<MessageStoreImpl>();
+
+ // Check outcome
+ BOOST_REQUIRE(queueA);
+ BOOST_REQUIRE(queueB);
+
+ Queue::shared_ptr x;//the queue from which the message was swapped
+ Queue::shared_ptr y;//the queue on which the message is expected to be
+
+ if (commit) {
+ x = queueA;
+ y = queueB;
+ } else {
+ x = queueB;
+ y = queueA;
+ }
+
+ checkMsg(x, 0);
+ checkMsg(y, 1, "Message");
+ checkMsg(y, 0);
+}
+
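+// Enqueues two messages onto both queues in one txn, then deliberately cripples
+// completion: only num_queues_rem impacted queues keep their completion records,
+// complete_prepared_list controls completion of the prepared-list entry, and
+// commit selects commit vs. abort. Recovery after restart must still yield a
+// consistent outcome.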
+void testMultiQueueTxn(const unsigned num_queues_rem, const bool complete_prepared_list, const bool commit)
+{
+ setup<TestMessageStore>();
+ TestMessageStore* tmsp = static_cast<TestMessageStore*>(store.get());
+ std::auto_ptr<TransactionContext> txn(tmsp->TMSbegin());
+ TxBuffer tx;
+
+ //create two messages and enqueue them onto both queues:
+ Message msgA = createMessage("MessageA", "exchange", "routing_key");
+ queueA->deliver(msgA, &tx);
+ queueB->deliver(msgA, &tx);
+ Message msgB = createMessage("MessageB", "exchange", "routing_key");
+ queueA->deliver(msgB, &tx);
+ queueB->deliver(msgB, &tx);
+
+ tx.prepare(txn.get());
+ static_cast<TestTxnCtxt*>(txn.get())->setCompleteFailure(num_queues_rem);
+ if (commit)
+ tmsp->TMScommit(*txn, complete_prepared_list);
+ else
+ tmsp->TMSabort(*txn, complete_prepared_list);
+ restart<TestMessageStore>();
+
+ // Check outcome
+ if (commit)
+ {
+ checkMsg(queueA, 2, "MessageA");
+ checkMsg(queueB, 2, "MessageA");
+ checkMsg(queueA, 1, "MessageB");
+ checkMsg(queueB, 1, "MessageB");
+ }
+ checkMsg(queueA, 0);
+ checkMsg(queueB, 0);
+}
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(Commit)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".Commit: " << flush;
+ swap(true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(Abort)
+{
+ cout << test_filename << ".Abort: " << flush;
+ swap(false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueCommit)
+{
+ cout << test_filename << ".MultiQueueCommit: " << flush;
+ testMultiQueueTxn(2, true, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAbort)
+{
+ cout << test_filename << ".MultiQueueAbort: " << flush;
+ testMultiQueueTxn(2, true, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueCommitRecover: " << flush;
+ testMultiQueueTxn(0, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueAbortRecover: " << flush;
+ testMultiQueueTxn(0, false, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueCommitRecover: " << flush;
+ testMultiQueueTxn(1, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueAbortRecover: " << flush;
+ testMultiQueueTxn(1, false, false);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueCommitRecover: " << flush;
+ testMultiQueueTxn(2, false, true);
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueAbortRecover: " << flush;
+ testMultiQueueTxn(2, false, false);
+ cout << "ok" << endl;
+}
+
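+// Dequeueing a record that is already part of an open txn must fail with
+// JERR_MAP_LOCKED rather than succeed or corrupt the journal.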
+QPID_AUTO_TEST_CASE(LockedRecordTest)
+{
+ cout << test_filename << ".LockedRecordTest: " << flush;
+
+ setup<MessageStoreImpl>();
+ queueA->deliver(createMessage("Message", "exchange", "routingKey"));
+ std::auto_ptr<TransactionContext> txn = store->begin();
+
+ QueueCursor cursor;
+ Message msg = MessageUtils::get(*queueA, &cursor);
+ queueA->dequeue(txn.get(), cursor);
+
+ try {
+ store->dequeue(0, msg.getPersistentContext(), *queueA);
+ BOOST_ERROR("Did not throw JERR_MAP_LOCKED exception as expected.");
+ }
+ catch (const mrg::msgstore::StoreException& e) {
+ if (std::strstr(e.what(), "JERR_MAP_LOCKED") == 0)
+ BOOST_ERROR("Unexpected StoreException: " << e.what());
+ }
+ catch (const std::exception& e) {
+ BOOST_ERROR("Unexpected exception: " << e.what());
+ }
+ store->commit(*txn);
+ checkMsg(queueA, 0);
+
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp b/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp
new file mode 100644
index 0000000000..92e49df9e3
--- /dev/null
+++ b/cpp/src/tests/legacystore/TwoPhaseCommitTest.cpp
@@ -0,0 +1,675 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+#include "unit_test.h"
+
+#include "qpid/legacystore/MessageStoreImpl.h"
+#include <iostream>
+#include "MessageUtils.h"
+#include "qpid/broker/Queue.h"
+#include "qpid/broker/RecoveryManagerImpl.h"
+#include "qpid/framing/AMQHeaderBody.h"
+#include "qpid/log/Statement.h"
+#include "qpid/legacystore/TxnCtxt.h"
+#include "qpid/log/Logger.h"
+#include "qpid/sys/Timer.h"
+
+using namespace mrg::msgstore;
+using namespace qpid;
+using namespace qpid::broker;
+using namespace qpid::framing;
+using namespace std;
+
+
+qpid::broker::Broker::Options opts;
+qpid::broker::Broker br(opts);
+
+
+QPID_AUTO_TEST_SUITE(TwoPhaseCommitTest)
+
+#define SET_LOG_LEVEL(level) \
+ qpid::log::Options opts(""); \
+ opts.selectors.clear(); \
+ opts.selectors.push_back(level); \
+ qpid::log::Logger::instance().configure(opts);
+
+
+const string test_filename("TwoPhaseCommitTest");
+const char* tdp = getenv("TMP_DATA_DIR");
+string test_dir(tdp && strlen(tdp) > 0 ? tdp : "/tmp/TwoPhaseCommitTest");
+
+// === Test class ===
+
+class TwoPhaseCommitTest
+{
+
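+ // Each Strategy supplies a workload for a 2PC test: init() sets up pre-txn
+ // state, run() performs the transactional work, and check() verifies the
+ // post-recovery state for a committed or aborted outcome.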
+ class Strategy
+ {
+ public:
+ virtual void init() = 0;
+ virtual void run(TPCTransactionContext* txn) = 0;
+ virtual void check(bool committed) = 0;
+ virtual ~Strategy(){}
+ };
+
+ class Swap : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ const string messageId;
+ Message msg;
+ public:
+ Swap(TwoPhaseCommitTest* const test_, const string& messageId_): test(test_), messageId(messageId_) {}
+ void init(){ msg = test->deliver(messageId, test->queueA); }
+ void run(TPCTransactionContext* txn) { test->swap(txn, test->queueA, test->queueB); }
+ void check(bool committed) { test->swapCheck(committed, messageId, test->queueA, test->queueB); }
+ };
+
+ class Enqueue : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ Message msg3;
+ public:
+ Enqueue(TwoPhaseCommitTest* const test_): test(test_) {}
+ void init() {}
+ void run(TPCTransactionContext* txn) {
+ msg1 = test->enqueue(txn, "Enqueue1", test->queueA);
+ msg2 = test->enqueue(txn, "Enqueue2", test->queueA);
+ msg3 = test->enqueue(txn, "Enqueue3", test->queueA);
+ }
+ void check(bool committed) {
+ if (committed) {
+ test->checkMsg(test->queueA, 3, "Enqueue1");
+ test->checkMsg(test->queueA, 2, "Enqueue2");
+ test->checkMsg(test->queueA, 1, "Enqueue3");
+ }
+ test->checkMsg(test->queueA, 0);
+ }
+ };
+
+ class Dequeue : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ Message msg3;
+ public:
+ Dequeue(TwoPhaseCommitTest* const test_): test(test_) {}
+ void init() {
+ msg1 = test->deliver("Dequeue1", test->queueA);
+ msg2 = test->deliver("Dequeue2", test->queueA);
+ msg3 = test->deliver("Dequeue3", test->queueA);
+ }
+ void run(TPCTransactionContext* txn) {
+ test->dequeue(txn, test->queueA);
+ test->dequeue(txn, test->queueA);
+ test->dequeue(txn, test->queueA);
+ }
+ void check(bool committed) {
+ if (!committed) {
+ test->checkMsg(test->queueA, 3, "Dequeue1");
+ test->checkMsg(test->queueA, 2, "Dequeue2");
+ test->checkMsg(test->queueA, 1, "Dequeue3");
+ }
+ test->checkMsg(test->queueA, 0);
+ }
+ };
+
+ class MultiQueueTxn : public Strategy
+ {
+ TwoPhaseCommitTest* const test;
+ Message msg1;
+ Message msg2;
+ std::set<Queue::shared_ptr> queueset;
+ public:
+ MultiQueueTxn(TwoPhaseCommitTest* const test_): test(test_) {}
+ virtual void init() {}
+ virtual void run(TPCTransactionContext* txn) {
+ queueset.insert(test->queueA);
+ queueset.insert(test->queueB);
+ msg1 = test->enqueue(txn, "Message1", queueset);
+ msg2 = test->enqueue(txn, "Message2", queueset);
+ queueset.clear();
+ }
+ virtual void check(bool committed) {
+ TestMessageStore* sptr = static_cast<TestMessageStore*>(test->store.get());
+ if (committed)
+ {
+ test->checkMsg(test->queueA, 2, "Message1");
+ test->checkMsg(test->queueB, 2, "Message1");
+ test->checkMsg(test->queueA, 1, "Message2");
+ test->checkMsg(test->queueB, 1, "Message2");
+ }
+ test->checkMsg(test->queueA, 0);
+ test->checkMsg(test->queueB, 0);
+ // Check there are no remaining open txns in store
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingTxns(*(test->queueA)));
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingTxns(*(test->queueB)));
+ BOOST_CHECK_EQUAL(u_int32_t(0), sptr->getRemainingPreparedListTxns());
+ }
+ };
+
+ // Test txn context with a special setCompleteFailure() method which prevents the entire "txn complete" process from happening
+ class TestTPCTxnCtxt : public TPCTxnCtxt
+ {
+ public:
+ TestTPCTxnCtxt(const std::string& _xid, IdSequence* _loggedtx) : TPCTxnCtxt(_xid, _loggedtx) {}
+ void setCompleteFailure(const unsigned num_queues_rem, const bool complete_prepared_list) {
+ // Remove queue members from impactedQueues until num_queues_rem remain,
+ // simulating a multi-queue txn completion failure.
+ while (impactedQueues.size() > num_queues_rem) impactedQueues.erase(impactedQueues.begin());
+ // If prepared list is not to be committed, set pointer to 0
+ if (!complete_prepared_list) preparedXidStorePtr = 0;
+ }
+ };
+
+ // Test store with a special TMSbegin() which returns a TestTPCTxnCtxt, and methods to check for
+ // remaining open transactions.
+ class TestMessageStore: public MessageStoreImpl
+ {
+ public:
+ TestMessageStore(qpid::broker::Broker* br, const char* envpath = 0) : MessageStoreImpl(br, envpath) {}
+ std::auto_ptr<qpid::broker::TPCTransactionContext> TMSbegin(const std::string& xid) {
+ checkInit();
+ IdSequence* jtx = &messageIdSequence;
+ // pass sequence number for c/a
+ return auto_ptr<TPCTransactionContext>(new TestTPCTxnCtxt(xid, jtx));
+ }
+ u_int32_t getRemainingTxns(const PersistableQueue& queue) {
+ return static_cast<JournalImpl*>(queue.getExternalQueueStore())->get_open_txn_cnt();
+ }
+ u_int32_t getRemainingPreparedListTxns() {
+ return tplStorePtr->get_open_txn_cnt();
+ }
+ };
+
+ const string nameA;
+ const string nameB;
+ std::auto_ptr<MessageStoreImpl> store;
+ std::auto_ptr<DtxManager> dtxmgr;
+ std::auto_ptr<QueueRegistry> queues;
+ std::auto_ptr<LinkRegistry> links;
+ Queue::shared_ptr queueA;
+ Queue::shared_ptr queueB;
+ Message msg1;
+ Message msg2;
+ Message msg4;
+ std::auto_ptr<TxBuffer> tx;
+
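+ // Prepares a swap under a 2PC xid and restarts the broker; while the txn is
+ // in doubt the message must be invisible on both queues. The txn is then
+ // committed or rolled back via the DtxManager and the outcome is verified to
+ // survive a further restart.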
+ void recoverPrepared(bool commit)
+ {
+ setup<MessageStoreImpl>();
+
+ Swap swap(this, "RecoverPrepared");
+ swap.init();
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ swap.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+
+ store->prepare(*txn);
+ restart<MessageStoreImpl>();
+
+ //check that the message is not available from either queue
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queueA->getMessageCount());
+ BOOST_CHECK_EQUAL((u_int32_t) 0, queueB->getMessageCount());
+
+ //commit/abort the txn - through the dtx manager, not directly on the store
+ if (commit) {
+ dtxmgr->commit("my-xid", false);
+ } else {
+ dtxmgr->rollback("my-xid");
+ }
+
+ swap.check(commit);
+ restart<MessageStoreImpl>();
+ swap.check(commit);
+ }
+
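+ // As in TransactionalTest, simulates a partial failure while completing a
+ // multi-queue 2PC txn: only num_queues_rem impacted queues keep their
+ // completion records, and complete_prepared_list controls completion of the
+ // prepared-list entry. Recovery is then checked for a consistent outcome.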
+ void testMultiQueueTxn(const unsigned num_queues_rem, const bool complete_prepared_list, const bool commit)
+ {
+ setup<TestMessageStore>();
+ MultiQueueTxn mqtTest(this);
+ mqtTest.init();
+ std::auto_ptr<TPCTransactionContext> txn(static_cast<TestMessageStore*>(store.get())->begin("my-xid"));
+ mqtTest.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ store->prepare(*txn);
+
+ // As the commits and aborts should happen through DtxManager, and it is too complex to
+ // pass all these test params through, we bypass DtxManager and use the store directly.
+ // This will prevent the queues from seeing committed txns, however; the success or
+ // failure of completion is instead verified by check() via the remaining open txn counts.
+ static_cast<TestTPCTxnCtxt*>(txn.get())->setCompleteFailure(num_queues_rem, complete_prepared_list);
+ if (commit)
+ store->commit(*txn);
+ else
+ store->abort(*txn);
+ restart<TestMessageStore>();
+ mqtTest.check(commit);
+ }
+
+ void commit(Strategy& strategy)
+ {
+ setup<MessageStoreImpl>();
+ strategy.init();
+
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ strategy.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ store->prepare(*txn);
+ store->commit(*txn);
+ restart<MessageStoreImpl>();
+ strategy.check(true);
+ }
+
+ void abort(Strategy& strategy, bool prepare)
+ {
+ setup<MessageStoreImpl>();
+ strategy.init();
+
+ std::auto_ptr<TPCTransactionContext> txn(store->begin("my-xid"));
+ strategy.run(txn.get());
+ if (tx.get()) {
+ tx->prepare(txn.get());
+ tx.reset();
+ }
+ if (prepare) store->prepare(*txn);
+ store->abort(*txn);
+ restart<MessageStoreImpl>();
+ strategy.check(false);
+ }
+
+ void swap(TPCTransactionContext* txn, Queue::shared_ptr& from, Queue::shared_ptr& to)
+ {
+ QueueCursor c;
+ Message msg1 = MessageUtils::get(*from, &c);//just dequeues in memory
+ //move the message from one queue to the other as part of a
+ //distributed transaction
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ to->deliver(msg1, tx.get());//note: need to enqueue it first to avoid message being deleted
+ from->dequeue(txn, c);
+ }
+
+ void dequeue(TPCTransactionContext* txn, Queue::shared_ptr& queue)
+ {
+ QueueCursor c;
+ Message msg2 = MessageUtils::get(*queue, &c);//just dequeues in memory
+ queue->dequeue(txn, c);
+ }
+
+ Message enqueue(TPCTransactionContext* /*txn*/, const string& msgid, Queue::shared_ptr& queue)
+ {
+ Message msg = createMessage(msgid);
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ queue->deliver(msg, tx.get());
+ return msg;
+ }
+
+ Message enqueue(TPCTransactionContext* /*txn*/, const string& msgid, std::set<Queue::shared_ptr>& queueset)
+ {
+ if (!tx.get()) tx = std::auto_ptr<TxBuffer>(new TxBuffer);
+ Message msg = createMessage(msgid);
+ for (std::set<Queue::shared_ptr>::iterator i = queueset.begin(); i != queueset.end(); i++) {
+ (*i)->deliver(msg, tx.get());
+ }
+ return msg;
+ }
+
+ Message deliver(const string& msgid, Queue::shared_ptr& queue)
+ {
+ Message m = createMessage(msgid);
+ queue->deliver(m);
+ return m;
+ }
+
+ template <class T>
+ void setup()
+ {
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1, true); // truncate store
+
+ //create two queues:
+ queueA = Queue::shared_ptr(new Queue(nameA, 0, store.get(), 0));
+ queueA->create();
+ queueB = Queue::shared_ptr(new Queue(nameB, 0, store.get(), 0));
+ queueB->create();
+ }
+
+ Message createMessage(const string& id, const string& exchange="exchange", const string& key="routing_key")
+ {
+ Message msg = MessageUtils::createMessage(exchange, key, Uuid(), true, 0, id);
+ return msg;
+ }
+
+ template <class T>
+ void restart()
+ {
+ queueA.reset();
+ queueB.reset();
+ store.reset();
+ queues.reset();
+ links.reset();
+
+ store = std::auto_ptr<T>(new T(&br));
+ store->init(test_dir, 4, 1);
+ sys::Timer t;
+ ExchangeRegistry exchanges;
+ queues = std::auto_ptr<QueueRegistry>(new QueueRegistry);
+ links = std::auto_ptr<LinkRegistry>(new LinkRegistry);
+ dtxmgr = std::auto_ptr<DtxManager>(new DtxManager(t));
+ dtxmgr->setStore (store.get());
+ RecoveryManagerImpl recovery(*queues, exchanges, *links, *dtxmgr, br.getProtocolRegistry());
+ store->recover(recovery);
+
+ queueA = queues->find(nameA);
+ queueB = queues->find(nameB);
+ }
+
+ void checkMsg(Queue::shared_ptr& queue, u_int32_t size, const string& msgid = "<none>")
+ {
+ BOOST_REQUIRE(queue);
+ BOOST_CHECK_EQUAL(size, queue->getMessageCount());
+ if (size > 0) {
+ Message msg = MessageUtils::get(*queue);
+ BOOST_REQUIRE(msg);
+ BOOST_CHECK_EQUAL(msgid, MessageUtils::getCorrelationId(msg));
+ }
+ }
+
+ void swapCheck(bool swapped, const string& msgid, Queue::shared_ptr& from, Queue::shared_ptr& to)
+ {
+ BOOST_REQUIRE(from);
+ BOOST_REQUIRE(to);
+
+ Queue::shared_ptr x; //the queue from which the message was swapped
+ Queue::shared_ptr y; //the queue on which the message is expected to be
+
+ if (swapped) {
+ x = from;
+ y = to;
+ } else {
+ x = to;
+ y = from;
+ }
+
+ checkMsg(x, 0);
+ checkMsg(y, 1, msgid);
+ checkMsg(y, 0);
+ }
+
+public:
+ TwoPhaseCommitTest() : nameA("queueA"), nameB("queueB") {}
+
+ void testCommitEnqueue()
+ {
+ Enqueue enqueue(this);
+ commit(enqueue);
+ }
+
+ void testCommitDequeue()
+ {
+ Dequeue dequeue(this);
+ commit(dequeue);
+ }
+
+ void testCommitSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ commit(swap);
+ }
+
+ void testPrepareAndAbortEnqueue()
+ {
+ Enqueue enqueue(this);
+ abort(enqueue, true);
+ }
+
+ void testPrepareAndAbortDequeue()
+ {
+ Dequeue dequeue(this);
+ abort(dequeue, true);
+ }
+
+ void testPrepareAndAbortSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ abort(swap, true);
+ }
+
+ void testAbortNoPrepareEnqueue()
+ {
+ Enqueue enqueue(this);
+ abort(enqueue, false);
+ }
+
+ void testAbortNoPrepareDequeue()
+ {
+ Dequeue dequeue(this);
+ abort(dequeue, false);
+ }
+
+ void testAbortNoPrepareSwap()
+ {
+ Swap swap(this, "SwapMessageId");
+ abort(swap, false);
+ }
+
+ void testRecoverPreparedThenCommitted()
+ {
+ recoverPrepared(true);
+ }
+
+ void testRecoverPreparedThenAborted()
+ {
+ recoverPrepared(false);
+ }
+
+ void testMultiQueueCommit()
+ {
+ testMultiQueueTxn(2, true, true);
+ }
+
+ void testMultiQueueAbort()
+ {
+ testMultiQueueTxn(2, true, false);
+ }
+
+ void testMultiQueueNoQueueCommitRecover()
+ {
+ testMultiQueueTxn(0, false, true);
+ }
+
+ void testMultiQueueNoQueueAbortRecover()
+ {
+ testMultiQueueTxn(0, false, false);
+ }
+
+ void testMultiQueueSomeQueueCommitRecover()
+ {
+ testMultiQueueTxn(1, false, true);
+ }
+
+ void testMultiQueueSomeQueueAbortRecover()
+ {
+ testMultiQueueTxn(1, false, false);
+ }
+
+ void testMultiQueueAllQueueCommitRecover()
+ {
+ testMultiQueueTxn(2, false, true);
+ }
+
+ void testMultiQueueAllQueueAbortRecover()
+ {
+ testMultiQueueTxn(2, false, false);
+ }
+};
+
+TwoPhaseCommitTest tpct;
+
+// === Test suite ===
+
+QPID_AUTO_TEST_CASE(CommitEnqueue)
+{
+ SET_LOG_LEVEL("error+"); // This only needs to be set once.
+
+ cout << test_filename << ".CommitEnqueue: " << flush;
+ tpct.testCommitEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(CommitDequeue)
+{
+ cout << test_filename << ".CommitDequeue: " << flush;
+ tpct.testCommitDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(CommitSwap)
+{
+ cout << test_filename << ".CommitSwap: " << flush;
+ tpct.testCommitSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortEnqueue)
+{
+ cout << test_filename << ".PrepareAndAbortEnqueue: " << flush;
+ tpct.testPrepareAndAbortEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortDequeue)
+{
+ cout << test_filename << ".PrepareAndAbortDequeue: " << flush;
+ tpct.testPrepareAndAbortDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(PrepareAndAbortSwap)
+{
+ cout << test_filename << ".PrepareAndAbortSwap: " << flush;
+ tpct.testPrepareAndAbortSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareEnqueue)
+{
+ cout << test_filename << ".AbortNoPrepareEnqueue: " << flush;
+ tpct.testAbortNoPrepareEnqueue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareDequeue)
+{
+ cout << test_filename << ".AbortNoPrepareDequeue: " << flush;
+ tpct.testAbortNoPrepareDequeue();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(AbortNoPrepareSwap)
+{
+ cout << test_filename << ".AbortNoPrepareSwap: " << flush;
+ tpct.testAbortNoPrepareSwap();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(RecoverPreparedThenCommitted)
+{
+ cout << test_filename << ".RecoverPreparedThenCommitted: " << flush;
+ tpct.testRecoverPreparedThenCommitted();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(RecoverPreparedThenAborted)
+{
+ cout << test_filename << ".RecoverPreparedThenAborted: " << flush;
+ tpct.testRecoverPreparedThenAborted();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueCommit)
+{
+ cout << test_filename << ".MultiQueueCommit: " << flush;
+ tpct.testMultiQueueCommit();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAbort)
+{
+ cout << test_filename << ".MultiQueueAbort: " << flush;
+ tpct.testMultiQueueAbort();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueCommitRecover: " << flush;
+ tpct.testMultiQueueNoQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueNoQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueNoQueueAbortRecover: " << flush;
+ tpct.testMultiQueueNoQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueCommitRecover: " << flush;
+ tpct.testMultiQueueSomeQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueSomeQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueSomeQueueAbortRecover: " << flush;
+ tpct.testMultiQueueSomeQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueCommitRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueCommitRecover: " << flush;
+ tpct.testMultiQueueAllQueueCommitRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_CASE(MultiQueueAllQueueAbortRecover)
+{
+ cout << test_filename << ".MultiQueueAllQueueAbortRecover: " << flush;
+ tpct.testMultiQueueAllQueueAbortRecover();
+ cout << "ok" << endl;
+}
+
+QPID_AUTO_TEST_SUITE_END()
diff --git a/cpp/src/tests/stop_cluster b/cpp/src/tests/legacystore/clean.sh
index 02436c60b7..efb19586fa 100755..100644
--- a/cpp/src/tests/stop_cluster
+++ b/cpp/src/tests/legacystore/clean.sh
@@ -8,9 +8,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,15 +19,14 @@
# under the License.
#
-# Stop brokers on ports listed in cluster.ports
+# This script cleans up any previous database and journal files, and should
+# be run prior to the store system tests, as these are prone to crashing or
+# hanging under some circumstances if the database is old or inconsistent.
-PORTS=`cat cluster.ports`
-for PORT in $PORTS ; do
- $QPIDD_EXEC --no-module-dir -qp $PORT || ERROR="$ERROR $PORT"
-done
-rm -f cluster.ports qpidd.port
-
-if [ -n "$ERROR" ]; then
- echo "Errors stopping brokers on ports: $ERROR"
- exit 1
+if [ -d ${TMP_DATA_DIR} ]; then
+ rm -rf ${TMP_DATA_DIR}
+fi
+if [ -d ${TMP_PYTHON_TEST_DIR} ]; then
+ rm -rf ${TMP_PYTHON_TEST_DIR}
fi
+rm -f ${abs_srcdir}/*.vglog*
diff --git a/cpp/src/tests/legacystore/persistence.py b/cpp/src/tests/legacystore/persistence.py
new file mode 100644
index 0000000000..c4ab712f14
--- /dev/null
+++ b/cpp/src/tests/legacystore/persistence.py
@@ -0,0 +1,574 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import sys, re, traceback, socket
+from getopt import getopt, GetoptError
+
+from qpid.connection import Connection
+from qpid.util import connect
+from qpid.datatypes import Message, RangedSet
+from qpid.queue import Empty
+from qpid.session import SessionException
+from qpid.testlib import TestBase010
+from time import sleep
+
+class PersistenceTest(TestBase010):
+
+ XA_RBROLLBACK = 1
+ XA_RBTIMEOUT = 2
+ XA_OK = 0
+
+ def createMessage(self, **kwargs):
+ session = self.session
+ dp = {}
+ dp['delivery_mode'] = 2
+ mp = {}
+ for k, v in kwargs.iteritems():
+ if k in ['routing_key', 'delivery_mode']: dp[k] = v
+ if k in ['message_id', 'correlation_id', 'application_headers']: mp[k] = v
+ args = []
+ args.append(session.delivery_properties(**dp))
+ if len(mp):
+ args.append(session.message_properties(**mp))
+ if kwargs.has_key('body'): args.append(kwargs['body'])
+ return Message(*args)
+
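+ # Each phaseN method is run as a separate invocation of this script, with a
+ # broker restart expected between phases so that recovery of durable state is
+ # exercised.
+ # Phase 1: declare two durable queues bound to amq.direct and publish one
+ # persistent message to each.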
+ def phase1(self):
+ session = self.session
+
+ session.queue_declare(queue="queue-a", durable=True)
+ session.queue_declare(queue="queue-b", durable=True)
+ session.exchange_bind(queue="queue-a", exchange="amq.direct", binding_key="a")
+ session.exchange_bind(queue="queue-b", exchange="amq.direct", binding_key="b")
+
+ session.message_transfer(destination="amq.direct",
+ message=self.createMessage(routing_key="a", correlation_id="Msg0001", body="A_Message1"))
+ session.message_transfer(destination="amq.direct",
+ message=self.createMessage(routing_key="b", correlation_id="Msg0002", body="B_Message1"))
+
+# session.queue_declare(queue="lvq-test", durable=True, arguments={"qpid.last_value_queue":True})
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B1"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A1"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"B"}, body="B3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C1"))
+
+
+
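+ # Phase 2: verify the queues, bindings and messages survived recovery, then
+ # declare queue-c, bind all three queues to amq.topic and publish a message
+ # routed to each.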
+ def phase2(self):
+ session = self.session
+
+ #check queues exists
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+
+ #check they are still bound to amq.direct correctly
+ responses = []
+ responses.append(session.exchange_bound(queue="queue-a", exchange="amq.direct", binding_key="a"))
+ responses.append(session.exchange_bound(queue="queue-b", exchange="amq.direct", binding_key="b"))
+ for r in responses:
+ self.assert_(not r.exchange_not_found)
+ self.assert_(not r.queue_not_found)
+ self.assert_(not r.key_not_matched)
+
+
+ #check expected messages are there
+ self.assertMessageOnQueue("queue-a", "Msg0001", "A_Message1")
+ self.assertMessageOnQueue("queue-b", "Msg0002", "B_Message1")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+
+ session.queue_declare(queue="queue-c", durable=True)
+
+ #send a message to a topic such that it reaches all queues
+ session.exchange_bind(queue="queue-a", exchange="amq.topic", binding_key="abc")
+ session.exchange_bind(queue="queue-b", exchange="amq.topic", binding_key="abc")
+ session.exchange_bind(queue="queue-c", exchange="amq.topic", binding_key="abc")
+
+ session.message_transfer(destination="amq.topic",
+ message=self.createMessage(routing_key="abc", correlation_id="Msg0003", body="AB_Message2"))
+
+# #check LVQ exists and has expected messages:
+# session.queue_declare(queue="lvq-test", durable=True, passive=True)
+# session.message_subscribe(destination="lvq", queue="lvq-test")
+# lvq = session.incoming("lvq")
+# lvq.start()
+# accepted = RangedSet()
+# for m in ["A2", "B3", "C1"]:
+# msg = lvq.get(timeout=1)
+# self.assertEquals(m, msg.body)
+# accepted.add(msg.id)
+# try:
+# extra = lvq.get(timeout=1)
+# self.fail("lvq-test not empty, contains: " + extra.body)
+# except Empty: None
+# #publish some more messages while subscriber is active (no replacement):
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C2"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A3"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"A"}, body="A4"))
+# session.message_transfer(message=self.createMessage(routing_key="lvq-test", application_headers={"qpid.LVQ_key":"C"}, body="C4"))
+# #check that accepting replaced messages is safe
+# session.message_accept(accepted)
+
+
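+ # Phase 3: verify the topic message was recovered on all three queues; commit
+ # a txn publishing three messages to queue-a; delete queue-c; then consume
+ # those messages and publish to queue-b inside work that is rolled back.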
+ def phase3(self):
+ session = self.session
+
+# #lvq recovery validation
+# session.queue_declare(queue="lvq-test", durable=True, passive=True)
+# session.message_subscribe(destination="lvq", queue="lvq-test")
+# lvq = session.incoming("lvq")
+# lvq.start()
+# accepted = RangedSet()
+# lvq.start()
+# for m in ["C4", "A4"]:
+# msg = lvq.get(timeout=1)
+# self.assertEquals(m, msg.body)
+# accepted.add(msg.id)
+# session.message_accept(accepted)
+# try:
+# extra = lvq.get(timeout=1)
+# self.fail("lvq-test not empty, contains: " + extra.body)
+# except Empty: None
+# session.message_cancel(destination="lvq")
+# session.queue_delete(queue="lvq-test")
+
+
+ #check queues exists
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+ session.queue_declare(queue="queue-c", durable=True, passive=True)
+
+ session.tx_select()
+ #check expected messages are there
+ self.assertMessageOnQueue("queue-a", "Msg0003", "AB_Message2")
+ self.assertMessageOnQueue("queue-b", "Msg0003", "AB_Message2")
+ self.assertMessageOnQueue("queue-c", "Msg0003", "AB_Message2")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+ self.assertEmptyQueue("queue-c")
+
+ #note: default bindings must be restored for this to work
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0004", body="A_Message3"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0005", body="A_Message4"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a", correlation_id="Msg0006", body="A_Message5"))
+
+ session.tx_commit()
+
+
+ #delete a queue
+ session.queue_delete(queue="queue-c")
+
+ session.message_subscribe(destination="ctag", queue="queue-a", accept_mode=0)
+ session.message_flow(destination="ctag", unit=0, value=0xFFFFFFFF)
+ session.message_flow(destination="ctag", unit=1, value=0xFFFFFFFF)
+ included = session.incoming("ctag")
+ msg1 = included.get(timeout=1)
+ self.assertExpectedContent(msg1, "Msg0004", "A_Message3")
+ msg2 = included.get(timeout=1)
+ self.assertExpectedContent(msg2, "Msg0005", "A_Message4")
+ msg3 = included.get(timeout=1)
+ self.assertExpectedContent(msg3, "Msg0006", "A_Message5")
+ self.ack(msg1, msg2, msg3)
+
+ session.message_transfer(destination="amq.direct", message=self.createMessage(
+ routing_key="queue-b", correlation_id="Msg0007", body="B_Message3"))
+
+ session.tx_rollback()
+
+
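+ # Phase 4: recovery must undo the rolled-back work of phase 3: the three
+ # messages remain on queue-a, queue-b is empty and queue-c no longer exists.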
+ def phase4(self):
+ session = self.session
+
+ #check queues exists
+ session.queue_declare(queue="queue-a", durable=True, passive=True)
+ session.queue_declare(queue="queue-b", durable=True, passive=True)
+
+ self.assertMessageOnQueue("queue-a", "Msg0004", "A_Message3")
+ self.assertMessageOnQueue("queue-a", "Msg0005", "A_Message4")
+ self.assertMessageOnQueue("queue-a", "Msg0006", "A_Message5")
+
+ self.assertEmptyQueue("queue-a")
+ self.assertEmptyQueue("queue-b")
+
+ #check this queue doesn't exist
+ try:
+ session.queue_declare(queue="queue-c", durable=True, passive=True)
+ raise Exception("Expected queue-c to have been deleted")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
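+ # Phase 5: start four dtx branches, each swapping one message between a queue
+ # pair; commit one branch (one-phase), roll back another, and leave two
+ # prepared (in doubt) for recovery in phase 6.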
+ def phase5(self):
+
+ session = self.session
+ queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
+
+ for q in queues:
+ session.queue_declare(queue=q, durable=True)
+ session.queue_purge(queue=q)
+
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-a1", correlation_id="MsgA", body="MessageA"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-b1", correlation_id="MsgB", body="MessageB"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-c1", correlation_id="MsgC", body="MessageC"))
+ session.message_transfer(message=self.createMessage(
+ routing_key="queue-d1", correlation_id="MsgD", body="MessageD"))
+
+ session.dtx_select()
+ txa = self.xid('a')
+ txb = self.xid('b')
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ self.txswap("queue-a1", "queue-a2", txa)
+ self.txswap("queue-b1", "queue-b2", txb)
+ self.txswap("queue-c1", "queue-c2", txc)
+ self.txswap("queue-d1", "queue-d2", txd)
+
+ #no queue should have any messages accessible
+ for q in queues:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=txa, one_phase=True).status)
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txb).status)
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txc).status)
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=txd).status)
+
+ #further checks
+ not_empty = ["queue-a2", "queue-b1"]
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+
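+ # Phase 6: dtx_recover must report exactly the two xids prepared in phase 5;
+ # completing them (commit one, roll back the other) must leave the expected
+ # queues non-empty.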
+ def phase6(self):
+ session = self.session
+
+ #check prepared transaction are reported correctly by recover
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ xids = session.dtx_recover().in_doubt
+ ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
+
+ if txc.global_id not in ids:
+ self.fail("Recovered xids not as expected. missing: %s" % (txc))
+ if txd.global_id not in ids:
+ self.fail("Recovered xids not as expected. missing: %s" % (txd))
+ self.assertEqual(2, len(xids))
+
+
+ queues = ["queue-a1", "queue-a2", "queue-b1", "queue-b2", "queue-c1", "queue-c2", "queue-d1", "queue-d2"]
+
+ #re-check that only the expected queues hold a message
+ not_empty = ["queue-a2", "queue-b1"]
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count, "Bad count for %s" % (q))
+
+ #complete the prepared transactions
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=txc).status)
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=txd).status)
+ not_empty.append("queue-c2")
+ not_empty.append("queue-d1")
+
+ for q in queues:
+ if q in not_empty:
+ self.assertEqual(1, session.queue_query(queue=q).message_count)
+ else:
+ self.assertEqual(0, session.queue_query(queue=q).message_count)
+
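+ # Phase 7: the completed xids must no longer be in doubt; then exercise
+ # deletion of durable auto-delete queues after publish and ack, implicit
+ # binding removal on queue delete, and set up a series of bindings whose
+ # selective unbind is checked in phase 8.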
+ def phase7(self):
+ session = self.session
+ session.synchronous = False
+
+ # check xids from phase 6 are gone
+ txc = self.xid('c')
+ txd = self.xid('d')
+
+ xids = session.dtx_recover().in_doubt
+ ids = [x.global_id for x in xids] #TODO: come up with nicer way to test these
+
+ if txc.global_id in ids:
+ self.fail("Xid still present : %s" % (txc))
+ if txd.global_id in ids:
+ self.fail("Xid still present : %s" % (txc))
+ self.assertEqual(0, len(xids))
+
+ #test deletion of queue after publish
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send message
+ for i in range(1, 10):
+ session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
+
+ session.synchronous = True
+ #explicitly delete queue
+ session.queue_delete(queue = "q")
+
+ #test acking of message from auto-deleted queue
+ #create queue
+ session.queue_declare(queue = "q", auto_delete=True, durable=True)
+
+ #send message
+ session.message_transfer(message=self.createMessage(routing_key = "q", body = "my-message"))
+
+ #create consumer
+ session.message_subscribe(queue = "q", destination = "a", accept_mode=0, acquire_mode=0)
+ session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "a")
+ session.message_flow(unit = 0, value = 10, destination = "a")
+ queue = session.incoming("a")
+
+ #consume the message, cancel subscription (triggering auto-delete), then ack it
+ msg = queue.get(timeout = 5)
+ session.message_cancel(destination = "a")
+ self.ack(msg)
+
+ #test implicit deletion of bindings when queue is deleted
+ session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
+ session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
+ session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
+ session.queue_delete(queue = "durable-subscriber-queue")
+
+ #test unbind:
+ #create a series of bindings to a queue
+ session.queue_declare(queue = "binding-test-queue", durable=True)
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="abc")
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
+ session.exchange_bind(exchange="amq.direct", queue="binding-test-queue", binding_key="xyz")
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="a", arguments={"x-match":"all", "p":"a"})
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="b", arguments={"x-match":"all", "p":"b"})
+ session.exchange_bind(exchange="amq.match", queue="binding-test-queue", binding_key="c", arguments={"x-match":"all", "p":"c"})
+ #then restart broker...
+
+
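+ # Phase 8: publish via each of the recovered bindings, unbind some of them,
+ # publish again, and check that only messages routed through bindings that
+ # existed at send time arrived.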
+ def phase8(self):
+ session = self.session
+
+ #continue testing unbind:
+ #send messages to the queue via each of the bindings
+ for k in ["abc", "pqr", "xyz"]:
+ data = "first %s" % (k)
+ session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ data = "first %s" % (a["p"])
+ session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
+ #unbind some bindings (using final 0-10 semantics)
+ session.exchange_unbind(exchange="amq.direct", queue="binding-test-queue", binding_key="pqr")
+ session.exchange_unbind(exchange="amq.match", queue="binding-test-queue", binding_key="b")
+ #send messages again
+ for k in ["abc", "pqr", "xyz"]:
+ data = "second %s" % (k)
+ session.message_transfer(destination= "amq.direct", message=self.createMessage(routing_key=k, body=data))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ data = "second %s" % (a["p"])
+ session.message_transfer(destination="amq.match", message=self.createMessage(application_headers=a, body=data))
+
+ #check that only the correct messages are received
+ expected = []
+ for k in ["abc", "pqr", "xyz"]:
+ expected.append("first %s" % (k))
+ for a in [{"p":"a"}, {"p":"b"}, {"p":"c"}]:
+ expected.append("first %s" % (a["p"]))
+ for k in ["abc", "xyz"]:
+ expected.append("second %s" % (k))
+ for a in [{"p":"a"}, {"p":"c"}]:
+ expected.append("second %s" % (a["p"]))
+
+ session.message_subscribe(queue = "binding-test-queue", destination = "binding-test")
+ session.message_flow(unit = 1, value = 0xFFFFFFFF, destination = "binding-test")
+ session.message_flow(unit = 0, value = 10, destination = "binding-test")
+ queue = session.incoming("binding-test")
+
+ while len(expected):
+ msg = queue.get(timeout=1)
+ if msg.body not in expected:
+ self.fail("Missing message: %s" % msg.body)
+ expected.remove(msg.body)
+ try:
+ msg = queue.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+
+
+ session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
+ session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
+ session.message_transfer(destination= "amq.topic", message=self.createMessage(routing_key = "xyz", body = "my-message"))
+ session.queue_delete(queue = "durable-subscriber-queue")
+
+
+ def xid(self, txid, branchqual = ''):
+ return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
+
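+ # Consume one message from src and republish it to dest, entirely within the
+ # dtx branch tx (bracketed by dtx_start/dtx_end); the transfer remains in
+ # doubt until the branch is committed or rolled back.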
+ def txswap(self, src, dest, tx):
+ self.assertEqual(self.XA_OK, self.session.dtx_start(xid=tx).status)
+ self.session.message_subscribe(destination="temp-swap", queue=src, accept_mode=0)
+ self.session.message_flow(destination="temp-swap", unit=0, value=1)
+ self.session.message_flow(destination="temp-swap", unit=1, value=0xFFFFFFFF)
+ msg = self.session.incoming("temp-swap").get(timeout=1)
+ self.session.message_cancel(destination="temp-swap")
+ self.session.message_transfer(message=self.createMessage(routing_key=dest, correlation_id=self.getProperty(msg, 'correlation_id'),
+ body=msg.body))
+ self.ack(msg)
+ self.assertEqual(self.XA_OK, self.session.dtx_end(xid=tx).status)
+
+ def assertEmptyQueue(self, name):
+ self.assertEqual(0, self.session.queue_query(queue=name).message_count)
+
+ def assertConnectionException(self, expectedCode, message):
+ self.assertEqual("connection", message.method.klass.name)
+ self.assertEqual("close", message.method.name)
+ self.assertEqual(expectedCode, message.reply_code)
+
+ def assertExpectedMethod(self, reply, klass, method):
+ self.assertEqual(klass, reply.method.klass.name)
+ self.assertEqual(method, reply.method.name)
+
+ def assertExpectedContent(self, msg, id, body):
+ self.assertEqual(id, self.getProperty(msg, 'correlation_id'))
+ self.assertEqual(body, msg.body)
+ return msg
+
+ def getProperty(self, msg, name):
+ for h in msg.headers:
+ if hasattr(h, name): return getattr(h, name)
+ return None
+
+ def ack(self, *msgs):
+ session = self.session
+ set = RangedSet()
+ for m in msgs:
+ set.add(m.id)
+ #TODO: tidy up completion
+ session.receiver._completed.add(m.id)
+ session.message_accept(set)
+ session.channel.session_completed(session.receiver._completed)
+
+ def assertExpectedGetResult(self, id, body):
+ return self.assertExpectedContent(self.session.incoming("incoming-gets").get(timeout=1), id, body)
+
+ def assertEqual(self, expected, actual, msg=''):
+ if expected != actual: raise Exception("%s expected: %s actual: %s" % (msg, expected, actual))
+
+ def assertMessageOnQueue(self, queue, id, body):
+ self.session.message_subscribe(destination="incoming-gets", queue=queue, accept_mode=0)
+ self.session.message_flow(destination="incoming-gets", unit=0, value=1)
+ self.session.message_flow(destination="incoming-gets", unit=1, value=0xFFFFFFFF)
+ msg = self.session.incoming("incoming-gets").get(timeout=1)
+ self.assertExpectedContent(msg, id, body)
+ self.ack(msg)
+ self.session.message_cancel(destination="incoming-gets")
+
+
+ def __init__(self):
+ TestBase010.__init__(self, "run")
+ self.setBroker("localhost")
+ self.errata = []
+
+ def connect(self):
+ """ Connects to the broker """
+ self.conn = Connection(connect(self.host, self.port))
+ self.conn.start(timeout=10)
+ self.session = self.conn.session("test-session", timeout=10)
+
+ def run(self, args=sys.argv[1:]):
+ try:
+ opts, extra = getopt(args, "r:s:e:b:p:h", ["retry=", "spec=", "errata=", "broker=", "phase=", "help"])
+ except GetoptError, e:
+ self._die(str(e))
+ phase = 0
+ retry = 0
+ for opt, value in opts:
+ if opt in ("-h", "--help"): self._die()
+ if opt in ("-s", "--spec"): self.spec = value
+ if opt in ("-e", "--errata"): self.errata.append(value)
+ if opt in ("-b", "--broker"): self.setBroker(value)
+ if opt in ("-p", "--phase"): phase = int(value)
+ if opt in ("-r", "--retry"): retry = int(value)
+
+ if not phase: self._die("please specify the phase to run")
+ phase = "phase%d" % phase
+ self.connect()
+
+ try:
+ getattr(self, phase)()
+ print phase, "succeeded"
+ res = True
+ except Exception, e:
+ print phase, "failed: ", e
+ traceback.print_exc()
+ res = False
+
+
+ if not self.session.error(): self.session.close(timeout=10)
+ self.conn.close(timeout=10)
+
+ # Crude fix to wait for thread in client to exit after return from session_close()
+ # Reduces occurrences of "Unhandled exception in thread" messages after each test
+ import time
+ time.sleep(1)
+
+ return res
+
+
+ def setBroker(self, broker):
+ rex = re.compile(r"""
+ # [ <user> [ / <password> ] @] <host> [ :<port> ]
+ ^ (?: ([^/]*) (?: / ([^@]*) )? @)? ([^:]+) (?: :([0-9]+))?$""", re.X)
+ match = rex.match(broker)
+ if not match: self._die("'%s' is not a valid broker" % (broker))
+ self.user, self.password, self.host, self.port = match.groups()
+ self.port = int(default(self.port, 5672))
+ self.user = default(self.user, "guest")
+ self.password = default(self.password, "guest")
+
+ def _die(self, message = None):
+ if message: print message
+ print """
+Options:
+ -h/--help : this message
+ -s/--spec <spec.xml> : file containing amqp XML spec
+ -p/--phase : test phase to run
+ -b/--broker [<user>[/<password>]@]<host>[:<port>] : broker to connect to
+ """
+ sys.exit(1)
+
+def default(value, default):
+ if (value == None): return default
+ else: return value
+
+if __name__ == "__main__":
+ test = PersistenceTest()
+ if not test.run(): sys.exit(1)
diff --git a/cpp/src/tests/federated_cluster_test_with_node_failure b/cpp/src/tests/legacystore/run_long_python_tests
index e9ae4b5914..e43b2236ec 100755..100644
--- a/cpp/src/tests/federated_cluster_test_with_node_failure
+++ b/cpp/src/tests/legacystore/run_long_python_tests
@@ -1,5 +1,4 @@
#!/bin/bash
-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -8,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,5 +18,4 @@
# under the License.
#
-srcdir=`dirname $0`
-TEST_NODE_FAILURE=1 $srcdir/federated_cluster_test
+./run_python_tests LONG_TEST
diff --git a/cpp/src/tests/legacystore/run_python_tests b/cpp/src/tests/legacystore/run_python_tests
new file mode 100644
index 0000000000..d9dec16963
--- /dev/null
+++ b/cpp/src/tests/legacystore/run_python_tests
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+if test -z "${QPID_DIR}" ; then
+ cat <<EOF
+
+ =========== WARNING: PYTHON TESTS DISABLED ==============
+
+ QPID_DIR not set.
+
+ ===========================================================
+
+EOF
+ exit
+fi
+
+. `dirname $0`/tests_env.sh
+
+MODULENAME=python_tests
+
+echo "Running Python tests in module ${MODULENAME}..."
+
+case x$1 in
+ xSHORT_TEST)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.ExchangeQueueTests.* *.flow_to_disk.SimpleMaxSizeCountTest.test_browse_recover *.flow_to_disk.SimpleMaxSizeCountTest.test_durable_browse_recover *.flow_to_disk.MultiDurableQueueDurableMsgBrowseRecoverTxPTxCTest.test_mixed_limit_2" ;;
+ xLONG_TEST)
+ DEFAULT_PYTHON_TESTS= ;;
+ x)
+ DEFAULT_PYTHON_TESTS="*.client_persistence.* *.flow_to_disk.SimpleMaxSizeCountTest.* *.flow_to_disk.MultiDurableQueue*.test_mixed_limit_1 *.flow_to_disk.MultiQueue*.test_mixed_limit_1 *.resize.SimpleTest.* *.federation.*" ;;
+ *)
+ DEFAULT_PYTHON_TESTS=$1
+esac
+
+PYTHON_TESTS=${PYTHON_TESTS:-${DEFAULT_PYTHON_TESTS}}
+
+OUTDIR=${MODULENAME}.tmp
+rm -rf $OUTDIR
+
+# To debug a test, add the following options to the end of the following line:
+# -v DEBUG -c qpid.messaging.io.ops [*.testName]
+${PYTHON_DIR}/qpid-python-test -m ${MODULENAME} -I ${FAILING_PYTHON_TESTS} ${PYTHON_TESTS} -DOUTDIR=$OUTDIR #-v DEBUG
+RETCODE=$?
+
+if test x${RETCODE} != x0; then
+ exit 1;
+fi
+exit 0
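
The case statement above maps a mode argument to a default set of test globs: SHORT_TEST picks a handful of smoke tests, LONG_TEST clears the selection so qpid-python-test falls back to its full default set, no argument picks the standard set, and any other argument is treated as an explicit glob. A rough Python sketch of that dispatch (globs abbreviated, names hypothetical):

    # Mode -> default test globs, mirroring the case statement above (abbreviated).
    DEFAULT_TESTS = {
        "SHORT_TEST": "*.client_persistence.ExchangeQueueTests.*",
        "LONG_TEST": "",   # empty selection: qpid-python-test runs everything
        "": "*.client_persistence.* *.resize.SimpleTest.* *.federation.*",
    }

    def select_tests(mode=""):
        # Any unrecognized argument is passed through as an explicit glob.
        return DEFAULT_TESTS.get(mode, mode)

    print(select_tests("SHORT_TEST"))
    print(select_tests("cluster_tests.LongTests.*"))  # passed through unchanged
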
diff --git a/cpp/src/tests/run_long_cluster_tests b/cpp/src/tests/legacystore/run_short_python_tests
index 5dce0be585..523924fdba 100755..100644
--- a/cpp/src/tests/run_long_cluster_tests
+++ b/cpp/src/tests/legacystore/run_short_python_tests
@@ -1,5 +1,4 @@
#!/bin/bash
-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -8,9 +7,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,6 +18,4 @@
# under the License.
#
-srcdir=`dirname $0`
-$srcdir/run_cluster_tests 'cluster_tests.LongTests.*' -DDURATION=4
-
+./run_python_tests SHORT_TEST
diff --git a/cpp/src/tests/legacystore/run_test b/cpp/src/tests/legacystore/run_test
new file mode 100644
index 0000000000..1d5c2ae407
--- /dev/null
+++ b/cpp/src/tests/legacystore/run_test
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Set up environment and run a test executable or script.
+#
+# Output nothing if the test passes; show the output if it fails and
+# leave it in <test>.log for examination.
+#
+# If qpidd.port exists, run the test with QPID_PORT=`cat qpidd.port`.
+#
+# If $VALGRIND is set, run under valgrind. If there are valgrind
+# errors, show the valgrind output and also leave it in <test>.vglog
+# for examination.
+#
+
+source `dirname $0`/vg_check
+
+# Export variables from makefile.
+export VALGRIND srcdir
+
+# Export QPID_PORT if qpidd.port exists.
+test -f qpidd.port && export QPID_PORT=`cat qpidd.port`
+
+# Avoid silly libtool error messages if these are not defined
+test -z "$LC_ALL" && export LC_ALL=
+test -z "$LC_CTYPE" && export LC_CTYPE=
+test -z "$LC_COLLATE" && export LC_COLLATE=
+test -z "$LC_MESSAGES" && export LC_MESSAGES=
+
+VG_LOG="$1.vglog"
+rm -f $VG_LOG*
+
+if grep -l "^# Generated by .*libtool" "$1" >/dev/null 2>&1; then
+ # This is a libtool "executable". Valgrind it if VALGRIND specified.
+ test -n "$VALGRIND" && VALGRIND="$VALGRIND --log-file=$VG_LOG --"
+ # Hide output unless there's an error.
+ libtool --mode=execute $VALGRIND "$@" 2>&1 || ERROR=$?
+ test -n "$VALGRIND" && vg_check $VG_LOG*
+else
+    # This is a non-libtool shell script; just execute it.
+ export VALGRIND srcdir
+ exec "$@"
+fi
+
+if test -z "$ERROR"; then
+ # Clean up logs if there was no error.
+ rm -f $VG_LOG*
+ exit 0
+else
+ exit $ERROR
+fi
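
The contract the script's header describes is pass-is-silent: capture everything, print it only on failure, and leave the log behind for inspection. A rough Python equivalent of that behavior (file naming hypothetical):

    import os
    import subprocess

    def run_test(argv, log="test.log"):
        # Capture all output; show it and keep the log only on failure.
        with open(log, "w") as out:
            rc = subprocess.call(argv, stdout=out, stderr=subprocess.STDOUT)
        if rc == 0:
            os.remove(log)                 # pass: stay silent, clean up
        else:
            with open(log) as f:
                print(f.read())            # fail: show output, leave the log
        return rc
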
diff --git a/cpp/src/tests/run_cluster_test b/cpp/src/tests/legacystore/start_broker
index 11df3d63a3..30e4659030 100755..100644
--- a/cpp/src/tests/run_cluster_test
+++ b/cpp/src/tests/legacystore/start_broker
@@ -8,9 +8,9 @@
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
-#
+#
# http://www.apache.org/licenses/LICENSE-2.0
-#
+#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
@@ -19,9 +19,7 @@
# under the License.
#
-
-# Run the tests
-srcdir=`dirname $0`
-source cpg_check.sh
-cpg_enabled || exit 0
-with_ais_group $srcdir/run_test ./cluster_test
+QPIDD=$QPID_BLD/src/qpidd
+rm -f qpidd.vglog* qpidd.log
+test -n "$VALGRIND" && VALGRIND="$VALGRIND --log-file=qpidd.vglog --suppressions=$QPID_DIR/cpp/src/tests/.valgrind.supp --"
+exec libtool --mode=execute $VALGRIND $QPIDD --daemon --port=0 --log-enable error+ --log-to-file qpidd.log "$@" > qpidd.port
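
When started with --daemon and --port=0, qpidd picks a free port and prints it on stdout, which is what ends up in qpidd.port above. A rough Python sketch of the same launch-and-record step (argument set abbreviated):

    import subprocess

    def start_broker(qpidd="qpidd", extra_args=()):
        # --port=0 makes the daemon pick a free port and print it on stdout.
        out = subprocess.check_output(
            [qpidd, "--daemon", "--port=0",
             "--log-enable", "error+", "--log-to-file", "qpidd.log"]
            + list(extra_args))
        port = int(out.decode().strip())
        with open("qpidd.port", "w") as f:
            f.write("%d\n" % port)   # same contract the other scripts rely on
        return port
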
diff --git a/cpp/src/tests/cpg_check.sh.in b/cpp/src/tests/legacystore/stop_broker
index ed97776218..dcefff376f 100755..100644
--- a/cpp/src/tests/cpg_check.sh.in
+++ b/cpp/src/tests/legacystore/stop_broker
@@ -1,4 +1,5 @@
#!/bin/bash
+
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -18,21 +19,28 @@
# under the License.
#
-QPID_USE_CPG=${QPID_USE_CPG:-@USE_CPG@}
+# Stop the broker, check for errors.
+#
+if test -f qpidd.port; then
+ export QPID_PORT=`cat qpidd.port`
+ QPIDD=$QPID_BLD/src/qpidd
+ rm -f qpidd.port
+
+ $QPIDD --quit || ERROR=$?
-# Check if CPG is enabled
-cpg_enabled() {
- test x$QPID_USE_CPG = xyes || return 1 # disabled
- ps -u root | grep 'aisexec\|corosync' >/dev/null || {
- echo WARNING: Skip cluster tests, aisexec or corosync daemon is not running.
- return 1; # A warning, not a failure.
+ # Check qpidd.log.
+ grep -a 'warning\|error\|critical' qpidd.log && {
+ echo "WARNING: Suspicious broker log entries in qpidd.log, above."
}
- return 0
-}
-# Execute command with the ais group set if user is a member.
-with_ais_group() {
- if id -nG | grep '\<ais\>' >/dev/null; then sg ais -c "$*"
- else "$@"
+ # Check valgrind log.
+ if test -n "$VALGRIND"; then
+    source `dirname $0`/vg_check
+ vg_check qpidd.vglog*
fi
-}
+
+ exit $ERROR
+else
+ echo "No qpidd.port file found - cannot stop broker."
+ exit 1;
+fi
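
The post-shutdown check above is just a scan of qpidd.log for suspicious severities. A minimal sketch of the same scan (function name hypothetical):

    import re

    def broker_log_suspicious(path="qpidd.log"):
        # Mirror the grep in stop_broker: flag warning/error/critical entries.
        pattern = re.compile(r"warning|error|critical")
        with open(path) as f:
            hits = [line.rstrip() for line in f if pattern.search(line)]
        for line in hits:
            print(line)
        return bool(hits)   # truthy == suspicious entries found
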
diff --git a/cpp/src/tests/legacystore/system_test.sh b/cpp/src/tests/legacystore/system_test.sh
new file mode 100644
index 0000000000..4cccc5ac8d
--- /dev/null
+++ b/cpp/src/tests/legacystore/system_test.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+error() { echo $*; exit 1; }
+
+# Make sure $QPID_DIR contains what we need.
+if ! test -d "$QPID_DIR" ; then
+ echo "WARNING: QPID_DIR is not set skipping system tests."
+ exit
+fi
+STORE_LIB=../lib/.libs/msgstore.so
+
+xml_spec=$QPID_DIR/specs/amqp.0-10-qpid-errata.xml
+test -f $xml_spec || error "$xml_spec not found: invalid \$QPID_DIR ?"
+export PYTHONPATH=$QPID_DIR/python:$QPID_DIR/extras/qmf/src/py:$QPID_DIR/tools/src/py
+
+echo "Using directory $TMP_DATA_DIR"
+
+fail=0
+
+# Run the tests with a given set of flags
+BROKER_OPTS="--no-module-dir --load-module=$STORE_LIB --data-dir=$TMP_DATA_DIR --auth=no --wcache-page-size 16"
+run_tests() {
+ for p in `seq 1 8`; do
+ $abs_srcdir/start_broker "$@" ${BROKER_OPTS} || { echo "FAIL broker start"; return 1; }
+ python "$abs_srcdir/persistence.py" -s "$xml_spec" -b localhost:`cat qpidd.port` -p $p -r 3 || fail=1;
+ $abs_srcdir/stop_broker
+ done
+}
+
+run_tests || fail=1
+
+exit $fail
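
Each persistence phase gets a freshly started broker, so a failure in one phase cannot corrupt the next. A rough Python outline of the same loop (helper names hypothetical, reusing the start_broker sketch shown earlier):

    import subprocess

    def run_phases(spec, phases=range(1, 9), retries=3):
        # One fresh broker per phase, mirroring run_tests() above.
        fail = False
        for p in phases:
            port = start_broker()   # sketch shown earlier; path hypothetical
            rc = subprocess.call(["python", "persistence.py", "-s", spec,
                                  "-b", "localhost:%d" % port,
                                  "-p", str(p), "-r", str(retries)])
            fail = fail or rc != 0
            subprocess.call(["./stop_broker"])
        return 1 if fail else 0
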
diff --git a/cpp/src/tests/legacystore/tests_env.sh b/cpp/src/tests/legacystore/tests_env.sh
new file mode 100644
index 0000000000..30d255b87c
--- /dev/null
+++ b/cpp/src/tests/legacystore/tests_env.sh
@@ -0,0 +1,260 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# --- Function definitions ---
+
+
+func_check_required_env ()
+#-------------------------
+# Check that EITHER:
+# QPID_DIR is set (for running against svn QPID)
+# OR
+# QPID_PREFIX is set (for running against installed QPID)
+# Params: None
+# Returns: 0 if env vars ok, 1 otherwise
+{
+ if test -z "${QPID_DIR}" -a -z "${QPID_PREFIX}"; then
+ # Try to find qpidd in the normal installed location
+ if test -x /usr/sbin/qpidd; then
+ QPID_PREFIX=/usr
+ else
+ echo "ERROR: Could not find installed Qpid"
+ echo "Either of the following must be set in the environment for this script to run:"
+ echo " QPID_DIR for running against a Qpid svn build"
+ echo " QPID_PREFIX for running against an installed Qpid"
+ return 1
+ fi
+ fi
+ return 0
+}
+
+
+func_check_qpid_python ()
+#------------------------
+# Check that Qpid python environment is ok
+# Params: None
+# Returns: 0 if Python environment is ok; 1 otherwise
+{
+ if ! python -c "import qpid" ; then
+ cat <<EOF
+
+ =========== WARNING: PYTHON TESTS DISABLED ==============
+
+ Unable to load python qpid module - skipping python tests.
+
+ PYTHONPATH=${PYTHONPATH}
+
+ ===========================================================
+
+EOF
+ return 1
+ fi
+ return 0
+}
+
+
+func_set_env ()
+#--------------
+# Set up the environment based on the value of ${QPID_DIR}: if ${QPID_DIR} exists, assume an svn checkout,
+# otherwise set up for an installed or prefix test.
+# Params: None
+# Returns: Nothing
+{
+ if test "${QPID_DIR}" -a -d "${QPID_DIR}" ; then
+ # QPID_DIR is defined for source tree builds by the --with-qpid-checkout configure option.
+ # QPID_BLD is defined as the build directory, either $QPID_DIR/cpp or separately specified with
+ # the --with-qpid-build option for VPATH builds.
+
+ # Check QPID_BLD is also set
+ if test -z ${QPID_BLD}; then
+ QPID_BLD="${QPID_DIR}/cpp"
+ fi
+
+ # Paths and dirs
+ #if test -z ${abs_srcdir}; then
+ # abs_srcdir=`pwd`
+ #fi
+ source $QPID_BLD/src/tests/test_env.sh
+ # Override these two settings from test_env.sh:
+ export RECEIVER_EXEC=$QPID_TEST_EXEC_DIR/qpid-receive
+ export SENDER_EXEC=$QPID_TEST_EXEC_DIR/qpid-send
+
+ echo "abs_srcdir=$abs_srcdir"
+ export STORE_LIB="`pwd`/../lib/.libs/msgstore.so"
+ export STORE_ENABLE=1
+ export CLUSTER_LIB="${QPID_BLD}/src/.libs/cluster.so"
+
+ PYTHON_DIR="${QPID_DIR}/python"
+ export PYTHONPATH="${PYTHONPATH}":"${PYTHON_DIR}":"${QPID_DIR}/extras/qmf/src/py":"${QPID_DIR}/tools/src/py":"${QPID_DIR}/cpp/src/tests":"${abs_srcdir}"
+
+ # Libraries
+
+ # Executables
+ export QPIDD_EXEC="${QPID_BLD}/src/qpidd"
+
+ # Test data
+
+ else
+ # Set up the environment based on value of ${QPID_PREFIX} for testing against an installed qpid
+ # Alternatively, make sure ${QPID_BIN_DIR}, ${QPID_SBIN_DIR}, ${QPID_LIB_DIR} and ${QPID_LIBEXEC_DIR} are set for
+ # the installed location.
+ if test "${QPID_PREFIX}" -a -d "${QPID_PREFIX}" ; then
+ QPID_BIN_DIR=${QPID_PREFIX}/bin
+ QPID_SBIN_DIR=${QPID_PREFIX}/sbin
+ QPID_LIB_DIR=${QPID_PREFIX}/lib
+ QPID_LIBEXEC_DIR=${QPID_PREFIX}/libexec
+ fi
+
+ # These four env vars must be set prior to calling this script
+ func_checkpaths QPID_BIN_DIR QPID_SBIN_DIR QPID_LIB_DIR QPID_LIBEXEC_DIR
+
+ # Paths and dirs
+ export PYTHON_DIR="${QPID_BIN_DIR}"
+ export PYTHONPATH="${PYTHONPATH}":"${QPID_LIB_DIR}/python":"${QPID_LIBEXEC_DIR}/qpid/tests":"${QPID_LIB_DIR}/python2.4"
+
+
+ # Libraries
+
+ # Executables
+ export QPIDD_EXEC="${QPID_SBIN_DIR}/qpidd"
+
+ # Test Data
+
+ fi
+}
+
+
+func_mk_data_dir ()
+#------------------
+# Create a data dir at ${TMP_DATA_DIR} if not present, clear it otherwise.
+# Set TMP_DATA_DIR if it is not set.
+# Params: None
+# Returns: Nothing
+{
+ if test -z "${TMP_DATA_DIR}"; then
+ TMP_DATA_DIR=/tmp/python_tests
+ echo "TMP_DATA_DIR not set; using ${TMP_DATA_DIR}"
+ fi
+
+ # Delete old test dirs if they exist
+ if test -d "${TMP_DATA_DIR}" ; then
+    rm -rf "${TMP_DATA_DIR}"/*
+ fi
+ mkdir -p "${TMP_DATA_DIR}"
+ export TMP_DATA_DIR
+}
+
+
+func_checkvar ()
+#---------------
+# Check that an environment var is set (i.e. has non-zero length)
+# Params: $1 - env var to be checked
+# Returns: 0 = env var is set (i.e. non-zero length)
+#          1 = env var is not set
+{
+ local loc_VAR=$1
+    if test -z "${!loc_VAR}"; then
+ echo "WARNING: environment variable ${loc_VAR} not set."
+ return 1
+ fi
+ return 0
+}
+
+
+func_checkpaths ()
+#-----------------
+# Check that a list of path vars (each may contain a ':'-separated sub-list) is set and valid (i.e. each path exists as a dir)
+# Params: $@ - List of path env vars to be checked
+# Returns: Nothing
+{
+ local loc_PATHS=$@
+ for path in ${loc_PATHS}; do
+ func_checkvar ${path}
+ if test $? == 0; then
+ local temp_IFS=${IFS}
+ IFS=":"
+ local pl=${!path}
+ for p in ${pl[@]}; do
+ if test ! -d ${p}; then
+ echo "WARNING: Directory ${p} in var ${path} not found."
+ fi
+ done
+ IFS=${temp_IFS}
+ fi
+ done
+}
+
+
+func_checklibs ()
+#----------------
+# Check that a list of libs is set and valid (i.e. each lib exists as an executable file)
+# Params: $@ - List of lib values to be checked
+# Returns: Nothing
+{
+ local loc_LIBS=$@
+ for lib in ${loc_LIBS[@]}; do
+ func_checkvar ${lib}
+ if test $? == 0; then
+ if test ! -x ${!lib}; then
+ echo "WARNING: Library ${lib}=${!lib} not found."
+ fi
+ fi
+ done
+}
+
+
+func_checkexecs ()
+#-----------------
+# Check that a list of executables is set and valid (i.e. each exec exists as an executable file)
+# Params: $@ - List of exec values to be checked
+# Returns: Nothing
+{
+ local loc_EXECS=$@
+ for exec in ${loc_EXECS[@]}; do
+ func_checkvar ${exec}
+ if test $? == 0; then
+ if test ! -x ${!exec}; then
+ echo "WARNING: Executable ${exec}=${!exec} not found or is not executable."
+ fi
+ fi
+ done
+}
+
+
+#--- Start of script ---
+
+func_check_required_env || exit 1 # Cannot run, exit with error
+
+srcdir=`dirname $0`
+if test -z ${abs_srcdir}; then
+ abs_srcdir=${srcdir}
+fi
+
+func_set_env
+func_check_qpid_python || exit 0 # A warning, not a failure.
+func_mk_data_dir
+
+# Check expected environment vars are set
+func_checkpaths PYTHON_DIR PYTHONPATH TMP_DATA_DIR
+func_checklibs STORE_LIB CLUSTER_LIB
+func_checkexecs QPIDD_EXEC QPID_CONFIG_EXEC QPID_ROUTE_EXEC SENDER_EXEC RECEIVER_EXEC
+
+FAILING_PYTHON_TESTS="${abs_srcdir}/failing_python_tests.txt"
+
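
func_checkpaths walks each ':'-separated path variable and warns about missing directories without failing the run. A compact Python analogue (names hypothetical):

    import os

    def check_paths(**env_vars):
        # Analogue of func_checkpaths: warn on unset vars and missing dirs.
        for name, value in env_vars.items():
            if not value:
                print("WARNING: environment variable %s not set." % name)
                continue
            for p in value.split(":"):
                if p and not os.path.isdir(p):
                    print("WARNING: Directory %s in var %s not found." % (p, name))

    check_paths(PYTHON_DIR=os.environ.get("PYTHON_DIR", ""),
                PYTHONPATH=os.environ.get("PYTHONPATH", ""))
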
diff --git a/cpp/src/tests/legacystore/unit_test.cpp b/cpp/src/tests/legacystore/unit_test.cpp
new file mode 100644
index 0000000000..add80a6f91
--- /dev/null
+++ b/cpp/src/tests/legacystore/unit_test.cpp
@@ -0,0 +1,28 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+
+// Defines test_main function to link with actual unit test code.
+#define BOOST_AUTO_TEST_MAIN // Boost 1.33
+#define BOOST_TEST_MAIN
+
+#include "unit_test.h"
+
diff --git a/cpp/src/tests/legacystore/unit_test.h b/cpp/src/tests/legacystore/unit_test.h
new file mode 100644
index 0000000000..16b6ae2ffb
--- /dev/null
+++ b/cpp/src/tests/legacystore/unit_test.h
@@ -0,0 +1,69 @@
+#ifndef QPID_TEST_UNIT_TEST_H_
+#define QPID_TEST_UNIT_TEST_H_
+
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ *
+ */
+
+
+// Workaround so we can build against Boost 1.32, 1.33 and 1.34.
+// Remove when we no longer need to support 1.32 or 1.33.
+
+#include <boost/version.hpp>
+
+#if (BOOST_VERSION < 103400) // v.1.33 and earlier
+# include <boost/test/auto_unit_test.hpp>
+#else // v.1.34 and later
+# include <boost/test/unit_test.hpp>
+#endif
+
+// Keep the test function for compilation but do not register it.
+// TODO aconway 2008-04-23: better workaround for expected failures.
+// The following causes the test testUpdateTxState not to run at all.
+# define QPID_AUTO_TEST_CASE_EXPECTED_FAILURES(test_name,n) \
+ namespace { struct test_name { void test_method(); }; } \
+ void test_name::test_method()
+// The following runs the test testUpdateTxState, but it fails.
+/*#define QPID_AUTO_TEST_CASE_EXPECTED_FAILURES(test_name,n) \
+ namespace { struct test_name { void test_method(); }; } \
+    BOOST_AUTO_TEST_CASE(test_name)*/
+
+#if (BOOST_VERSION < 103300) // v.1.32 and earlier
+
+# define QPID_AUTO_TEST_SUITE(name)
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_UNIT_TEST(name)
+# define QPID_AUTO_TEST_SUITE_END()
+
+#elif (BOOST_VERSION < 103400) // v.1.33
+
+// Note the trailing ';'
+# define QPID_AUTO_TEST_SUITE(name) BOOST_AUTO_TEST_SUITE(name);
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_TEST_CASE(name)
+# define QPID_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END();
+
+#else // v.1.34 and later
+
+# define QPID_AUTO_TEST_SUITE(name) BOOST_AUTO_TEST_SUITE(name)
+# define QPID_AUTO_TEST_CASE(name) BOOST_AUTO_TEST_CASE(name)
+# define QPID_AUTO_TEST_SUITE_END() BOOST_AUTO_TEST_SUITE_END()
+
+#endif
+
+#endif /*!QPID_TEST_UNIT_TEST_H_*/
diff --git a/cpp/src/tests/long_cluster_tests.py b/cpp/src/tests/long_cluster_tests.py
deleted file mode 100755
index f77837f0c4..0000000000
--- a/cpp/src/tests/long_cluster_tests.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-import os, signal, sys, unittest
-from testlib import TestBaseCluster
-
-class LongClusterTests(TestBaseCluster):
- """Long/Soak cluster tests with async store ability"""
-
-
- def test_LongCluster_01_DummyTest(self):
- """Dummy test - a placeholder for the first of the long/soak python cluster tests"""
- pass
-
-# Start the test here
-
-if __name__ == '__main__':
- if os.getenv("STORE_LIB") != None:
- print "NOTE: Store enabled for the following tests:"
- if not unittest.main(): sys.exit(1)
-
diff --git a/cpp/src/tests/qpid-cluster-benchmark b/cpp/src/tests/qpid-cluster-benchmark
index 610beacebd..3e6b805692 100755
--- a/cpp/src/tests/qpid-cluster-benchmark
+++ b/cpp/src/tests/qpid-cluster-benchmark
@@ -55,12 +55,10 @@ done
shift $(($OPTIND-1))
CONNECTION_OPTIONS="--connection-options {tcp-nodelay:$TCP_NODELAY,reconnect:$RECONNECT,heartbeat:$HEARTBEAT}"
-CREATE_OPTIONS="node:{x-declare:{arguments:{'qpid.replicate':all}}}"
BROKER=$(echo $BROKERS | sed s/,.*//)
run_test() { echo $*; shift; "$@"; echo; echo; echo; }
OPTS="$OPTS $REPEAT $BROKERS --summarize $QUEUES $SENDERS $RECEIVERS $MESSAGES $CLIENT_HOSTS $SAVE_RECEIVED $CONNECTION_OPTIONS $NO_DELETE"
-OPTS="$OPTS --create-option $CREATE_OPTIONS"
run_test "Benchmark:" qpid-cpp-benchmark $OPTS "$@"
diff --git a/cpp/src/tests/qpid-cpp-benchmark b/cpp/src/tests/qpid-cpp-benchmark
index d5ad5191ca..9ac6a10883 100755
--- a/cpp/src/tests/qpid-cpp-benchmark
+++ b/cpp/src/tests/qpid-cpp-benchmark
@@ -18,7 +18,7 @@
# under the License.
#
-import optparse, time, qpid.messaging, re
+import optparse, time, qpid.messaging, re, os
from threading import Thread
from subprocess import Popen, PIPE, STDOUT
@@ -77,6 +77,10 @@ op.add_option("--no-delete", default=False, action="store_true",
help="Don't delete the test queues.")
op.add_option("--fill-drain", default=False, action="store_true",
help="First fill the queues, then drain them")
+op.add_option("--qpid-send-path", default="", type="str", metavar="PATH",
+ help="path to qpid-send binary")
+op.add_option("--qpid-receive-path", default="", type="str", metavar="PATH",
+ help="path to qpid-receive binary")
single_quote_re = re.compile("'")
def posix_quote(string):
@@ -115,7 +119,7 @@ def start_receive(queue, index, opts, ready_queue, broker, host):
messages = msg_total/opts.receivers;
if (index < msg_total%opts.receivers): messages += 1
if (messages == 0): return None
- command = ["qpid-receive",
+ command = [os.path.join(opts.qpid_receive_path, "qpid-receive"),
"-b", broker,
"-a", address,
"-m", str(messages),
@@ -138,7 +142,7 @@ def start_receive(queue, index, opts, ready_queue, broker, host):
def start_send(queue, opts, broker, host):
address="%s;{%s}"%(queue,",".join(opts.send_option + ["create:always"]))
- command = ["qpid-send",
+ command = [os.path.join(opts.qpid_send_path, "qpid-send"),
"-b", broker,
"-a", address,
"--messages", str(opts.messages),
diff --git a/cpp/src/tests/qpid-receive.cpp b/cpp/src/tests/qpid-receive.cpp
index 7a02b871db..e623d052e8 100644
--- a/cpp/src/tests/qpid-receive.cpp
+++ b/cpp/src/tests/qpid-receive.cpp
@@ -222,7 +222,7 @@ int main(int argc, char ** argv)
if (msg.getCorrelationId().size()) std::cout << "CorrelationId: " << msg.getCorrelationId() << std::endl;
if (msg.getUserId().size()) std::cout << "UserId: " << msg.getUserId() << std::endl;
if (msg.getTtl().getMilliseconds()) std::cout << "TTL: " << msg.getTtl().getMilliseconds() << std::endl;
- if (msg.getPriority()) std::cout << "Priority: " << msg.getPriority() << std::endl;
+ if (msg.getPriority()) std::cout << "Priority: " << ((uint) msg.getPriority()) << std::endl;
if (msg.getDurable()) std::cout << "Durable: true" << std::endl;
if (msg.getRedelivered()) std::cout << "Redelivered: true" << std::endl;
std::cout << "Properties: " << msg.getProperties() << std::endl;
diff --git a/cpp/src/tests/qpid-send.cpp b/cpp/src/tests/qpid-send.cpp
index b1c4f2be38..c3bba31e3b 100644
--- a/cpp/src/tests/qpid-send.cpp
+++ b/cpp/src/tests/qpid-send.cpp
@@ -200,7 +200,7 @@ struct Options : public qpid::Options
std::string name;
std::string value;
if (nameval(property, name, value)) {
- message.getProperties()[name] = value;
+ message.getProperties()[name].parse(value);
} else {
message.getProperties()[name] = Variant();
}
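
Switching from plain assignment to parse() appears to let qpid-send infer a type for each --property value instead of always sending a string, so a value like 5 goes out as a number. A rough Python analogue of that kind of inference (not the actual Variant logic):

    def parse_value(s):
        # Try richer types first, fall back to string (rough analogue only).
        if s.lower() in ("true", "false"):
            return s.lower() == "true"
        for cast in (int, float):
            try:
                return cast(s)
            except ValueError:
                pass
        return s

    print(parse_value("5"), parse_value("2.5"), parse_value("hello"))
    # -> 5 2.5 hello
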
diff --git a/cpp/src/tests/qpid-test-cluster b/cpp/src/tests/qpid-test-cluster
deleted file mode 100755
index 40ad452a0d..0000000000
--- a/cpp/src/tests/qpid-test-cluster
+++ /dev/null
@@ -1,109 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-usage() {
- echo "Usage: `basename $0` [options] start|stop|restart|check [hosts]
-Start/stop/restart a cluster on specified hosts or on \$HOSTS via ssh.
-
-Options:
- -l USER Run qpidd and copy files as USER.
- -e SCRIPT Source SCRIPT for environment settings. Copies SCRIPT to each host.
- Default is $DEFAULT_ENV.
- -c CONFIG Use CONFIG as qpidd config file. Copies CONFIG to each host.
- Default is $DEFAULT_CONF
- -d Delete data-dir and log file before starting broker.
-"
- exit 1
-}
-
-DEFAULT_CONF=~/qpid-test-qpidd.conf
-DEFAULT_ENV=~/qpid-test-env.sh
-
-test -f $DEFAULT_CONF && CONF_FILE=$DEFAULT_CONF
-test -f $DEFAULT_ENV && ENV_FILE=$DEFAULT_ENV
-
-while getopts "l:e:c:d" opt; do
- case $opt in
- l) SSHOPTS="-l$OPTARG $SSHOPTS" ; RSYNC_USER="$OPTARG@" ;;
- e) ENV_FILE=$OPTARG ;;
- c) CONF_FILE=$OPTARG ;;
- d) DO_DELETE=1 ;;
- *) usage;;
- esac
-done
-shift `expr $OPTIND - 1`
-test "$*" || usage
-CMD=$1; shift
-HOSTS=${*:-$HOSTS}
-
-conf_value() { test -f "$CONF_FILE" && awk -F= "/^$1=/ {print \$2}" $CONF_FILE; }
-
-if test -n "$CONF_FILE"; then
- test -f "$CONF_FILE" || { echo Config file not found: $CONF_FILE; exit 1; }
- RSYNCFILES="$RSYNCFILES $CONF_FILE"
- QPIDD_ARGS="$QPIDD_ARGS --config $CONF_FILE"
- CONF_PORT=`conf_value port`
- CONF_DATA_DIR=`conf_value data-dir`
- CONF_LOG_FILE=`conf_value log-to-file`
-fi
-
-if test -n "$ENV_FILE"; then
- test -f "$ENV_FILE" || { echo Environment file not found: $ENV_FILE; exit 1; }
- RSYNCFILES="$RSYNCFILES $ENV_FILE"
- SOURCE_ENV="source $ENV_FILE ; "
-fi
-
-test -n "$RSYNCFILES" && rsynchosts $RSYNCFILES # Copy conf/env files to all hosts
-
-do_start() {
- for h in $HOSTS; do
- COMMAND="qpidd -d $QPIDD_ARGS"
- id -nG | grep '\<ais\>' >/dev/null && COMMAND="sg ais -c '$COMMAND'"
- if test "$DO_DELETE"; then COMMAND="rm -rf $CONF_DATA_DIR $CONF_LOG_FILE; $COMMAND"; fi
- ssh $h "$SOURCE_ENV $COMMAND" || { echo "Failed to start on $h"; exit 1; }
- done
-}
-
-do_stop() {
- for h in $HOSTS; do
- ssh $h "$SOURCE_ENV qpidd -q --no-module-dir --no-data-dir $QPIDD_ARGS"
- done
-}
-
-do_status() {
- for h in $HOSTS; do
- if ssh $h "$SOURCE_ENV qpidd -c --no-module-dir --no-data-dir $QPIDD_ARGS > /dev/null"; then
- echo "$h ok"
- else
- echo "$h not running"
- STATUS=1
- fi
- done
-}
-
-case $CMD in
- start) do_start ;;
- stop) do_stop ;;
- restart) do_stop ; do_start ;;
- status) do_status ;;
- *) usage;;
-esac
-
-exit $STATUS
diff --git a/cpp/src/tests/reliable_replication_test b/cpp/src/tests/reliable_replication_test
deleted file mode 100755
index c660f751e5..0000000000
--- a/cpp/src/tests/reliable_replication_test
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Test reliability of the replication feature in the face of link
-# failures:
-
-source ./test_env.sh
-
-trap stop_brokers EXIT
-
-stop_brokers() {
- if [[ $BROKER_A ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_A
- unset BROKER_A
- fi
- if [[ $BROKER_B ]] ; then
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_B
- unset BROKER_B
- fi
-}
-
-setup() {
- rm -f replication-source.log replication-dest.log
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATING_LISTENER_LIB --replication-queue replication --create-replication-queue true --log-enable trace+ --log-to-file replication-source.log --log-to-stderr 0 > qpidd-repl.port
- BROKER_A=`cat qpidd-repl.port`
-
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATION_EXCHANGE_LIB --log-enable info+ --log-to-file replication-dest.log --log-to-stderr 0 > qpidd-repl.port
- BROKER_B=`cat qpidd-repl.port`
-
- echo "Testing replication from port $BROKER_A to port $BROKER_B"
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add exchange replication replication
- $PYTHON_COMMANDS/qpid-route --ack 500 queue add "localhost:$BROKER_B" "localhost:$BROKER_A" replication replication
-
- #create test queue (only replicate enqueues for this test):
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-a --generate-queue-events 1
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-a
-}
-
-send() {
- ./sender --port $BROKER_A --routing-key queue-a --send-eos 1 < replicated.expected
-}
-
-receive() {
- rm -f replicated.actual
- ./receiver --port $BROKER_B --queue queue-a > replicated.actual
-}
-
-bounce_link() {
- $PYTHON_COMMANDS/qpid-route link del "localhost:$BROKER_B" "localhost:$BROKER_A"
- $PYTHON_COMMANDS/qpid-route --ack 500 queue add "localhost:$BROKER_B" "localhost:$BROKER_A" replication replication
-}
-
-if test -d ${PYTHON_DIR} && test -e $REPLICATING_LISTENER_LIB && test -e $REPLICATION_EXCHANGE_LIB ; then
- setup
- for i in `seq 1 100000`; do echo Message $i; done > replicated.expected
- send &
- receive &
- for i in `seq 1 3`; do sleep 1; bounce_link; done;
- wait
- #check that received list is identical to sent list
- diff replicated.actual replicated.expected || exit 1
- rm -f replication.actual replication.expected replication-source.log replication-dest.log qpidd-repl.port
- true
-fi
-
diff --git a/cpp/src/tests/replication_test b/cpp/src/tests/replication_test
deleted file mode 100755
index f8b2136396..0000000000
--- a/cpp/src/tests/replication_test
+++ /dev/null
@@ -1,182 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Run a test of the replication feature
-
-source ./test_env.sh
-
-trap stop_brokers INT TERM QUIT
-
-stop_brokers() {
- if [ x$BROKER_A != x ]; then
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_A
- unset BROKER_A
- fi
- if [ x$BROKER_B != x ]; then
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_B
- unset BROKER_B
- fi
-}
-
-if test -d ${PYTHON_DIR} && test -f "$REPLICATING_LISTENER_LIB" && test -f "$REPLICATION_EXCHANGE_LIB"; then
- rm -f queue-*.repl replication-*.log #cleanup from any earlier runs
-
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATING_LISTENER_LIB --replication-queue replication --create-replication-queue true --log-enable info+ --log-to-file replication-source.log --log-to-stderr 0 > qpidd.port
- BROKER_A=`cat qpidd.port`
-
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATION_EXCHANGE_LIB --log-enable info+ --log-to-file replication-dest.log --log-to-stderr 0 > qpidd.port
- BROKER_B=`cat qpidd.port`
- echo "Running replication test between localhost:$BROKER_A and localhost:$BROKER_B"
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add exchange replication replication
- $PYTHON_COMMANDS/qpid-route --ack 5 queue add "localhost:$BROKER_B" "localhost:$BROKER_A" replication replication
-
- #create test queues
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-a --generate-queue-events 2
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-b --generate-queue-events 2
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-c --generate-queue-events 1
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-d --generate-queue-events 2
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-e --generate-queue-events 1
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-a
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-b
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-c
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-e
- #queue-d deliberately not declared on DR; this error case should be handled
-
- #publish and consume from test queues on broker A:
- i=1
- while [ $i -le 10 ]; do
- echo Message $i for A >> queue-a-input.repl
- i=`expr $i + 1`
- done
- i=1
- while [ $i -le 20 ]; do
- echo Message $i for B >> queue-b-input.repl
- i=`expr $i + 1`
- done
- i=1
- while [ $i -le 15 ]; do
- echo Message $i for C >> queue-c-input.repl
- i=`expr $i + 1`
- done
-
- ./sender --port $BROKER_A --routing-key queue-a --send-eos 1 < queue-a-input.repl
- ./sender --port $BROKER_A --routing-key queue-b --send-eos 1 < queue-b-input.repl
- ./sender --port $BROKER_A --routing-key queue-c --send-eos 1 < queue-c-input.repl
- echo dummy | ./sender --port $BROKER_A --routing-key queue-d --send-eos 1
-
- ./receiver --port $BROKER_A --queue queue-a --messages 5 > /dev/null
- ./receiver --port $BROKER_A --queue queue-b --messages 10 > /dev/null
- ./receiver --port $BROKER_A --queue queue-c --messages 10 > /dev/null
- ./receiver --port $BROKER_A --queue queue-d > /dev/null
-
-
- # What we are doing is putting a message on the end of repliaction queue & waiting for it on remote side
- # making sure all the messages have been flushed from the replication queue.
- echo dummy | ./sender --port $BROKER_A --routing-key queue-e --send-eos 1
- ./receiver --port $BROKER_B --queue queue-e --messages 1 > /dev/null
-
- #shutdown broker A then check that broker Bs versions of the queues are as expected
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_A
- unset BROKER_A
-
- #validate replicated queues:
- ./receiver --port $BROKER_B --queue queue-a > queue-a-backup.repl
- ./receiver --port $BROKER_B --queue queue-b > queue-b-backup.repl
- ./receiver --port $BROKER_B --queue queue-c > queue-c-backup.repl
-
-
- tail -5 queue-a-input.repl > queue-a-expected.repl
- tail -10 queue-b-input.repl > queue-b-expected.repl
- diff queue-a-backup.repl queue-a-expected.repl || FAIL=1
- diff queue-b-backup.repl queue-b-expected.repl || FAIL=1
- diff queue-c-backup.repl queue-c-input.repl || FAIL=1
-
- grep 'queue-d does not exist' replication-dest.log > /dev/null || echo "WARNING: Expected error to be logged!"
-
- stop_brokers
-
- # now check offsets working (enqueue based on position being set, not queue abs position)
-
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATING_LISTENER_LIB --replication-queue replication --create-replication-queue true --log-enable info+ --log-to-file replication-source.log --log-to-stderr 0 > qpidd.port
- BROKER_A=`cat qpidd.port`
-
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATION_EXCHANGE_LIB --log-enable info+ --log-to-file replication-dest.log --log-to-stderr 0 > qpidd.port
- BROKER_B=`cat qpidd.port`
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add exchange replication replication
- $PYTHON_COMMANDS/qpid-route --ack 5 queue add "localhost:$BROKER_B" "localhost:$BROKER_A" replication replication
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-e --generate-queue-events 2
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-e
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_A" add queue queue-d --generate-queue-events 1
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-d
-
- i=1
- while [ $i -le 10 ]; do
- echo Message $i for A >> queue-e-input.repl
- i=`expr $i + 1`
- done
-
- ./sender --port $BROKER_A --routing-key queue-e --send-eos 1 < queue-e-input.repl
- ./receiver --port $BROKER_A --queue queue-e --messages 10 > /dev/null
-
- # What we are doing is putting a message on the end of repliaction queue & waiting for it on remote side
- # making sure all the messages have been flushed from the replication queue.
- echo dummy | ./sender --port $BROKER_A --routing-key queue-d --send-eos 1
- ./receiver --port $BROKER_B --queue queue-d --messages 1 > /dev/null
-
- # now check offsets working
- $QPIDD_EXEC --no-module-dir -q --port $BROKER_B
- unset BROKER_B
- $QPIDD_EXEC --daemon --port 0 --no-data-dir --no-module-dir --auth no --load-module $REPLICATION_EXCHANGE_LIB --log-enable info+ --log-to-file replication-dest.log --log-to-stderr 0 > qpidd.port
- BROKER_B=`cat qpidd.port`
-
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add queue queue-e
- $PYTHON_COMMANDS/qpid-config -b "localhost:$BROKER_B" add exchange replication replication
- $PYTHON_COMMANDS/qpid-route --ack 5 queue add "localhost:$BROKER_B" "localhost:$BROKER_A" replication replication
- # now send another 15
- i=11
- while [ $i -le 15 ]; do
- echo Message $i for A >> queue-e1-input.repl
- i=`expr $i + 1`
- done
- ./sender --port $BROKER_A --routing-key queue-e --send-eos 1 < queue-e1-input.repl
-
- ./receiver --port $BROKER_B --queue queue-e > queue-e-backup.repl
- diff queue-e-backup.repl queue-e1-input.repl || FAIL=1
-
- stop_brokers
-
- if [ x$FAIL != x ]; then
- echo replication test failed: expectations not met!
- exit 1
- else
- echo queue state replicated as expected
- rm -f queue-*.repl replication-*.log
- fi
-
-else
- echo "Skipping replication test, plugins not built or python utils not located"
-fi
-
diff --git a/cpp/src/tests/restart_cluster b/cpp/src/tests/restart_cluster
deleted file mode 100755
index 5b48e619f6..0000000000
--- a/cpp/src/tests/restart_cluster
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
-#!/bin/bash
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
-#!/bin/bash
-# Re-start a cluster on the local host.
-
-srcdir=`dirname $0`
-$srcdir/stop_cluster
-exec $srcdir/start_cluster "$@"
diff --git a/cpp/src/tests/run_acl_tests b/cpp/src/tests/run_acl_tests
index 25241ad75e..ebe4cf8bdb 100755
--- a/cpp/src/tests/run_acl_tests
+++ b/cpp/src/tests/run_acl_tests
@@ -24,22 +24,26 @@ source ./test_env.sh
DATA_DIR=`pwd`/data_dir
DATA_DIRI=`pwd`/data_diri
DATA_DIRU=`pwd`/data_diru
+DATA_DIRQ=`pwd`/data_dirq
trap stop_brokers INT TERM QUIT
start_brokers() {
- ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIR --load-module $ACL_LIB --acl-file policy.acl --auth no --log-to-file local.log > qpidd.port
+ ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIR --load-module $ACL_LIB --acl-file policy.acl --auth no --log-to-file local.log > qpidd.port
LOCAL_PORT=`cat qpidd.port`
- ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRI --load-module $ACL_LIB --acl-file policy.acl --auth no --max-connections-per-ip 2 --log-to-file locali.log > qpiddi.port
+ ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRI --load-module $ACL_LIB --acl-file policy.acl --auth no --connection-limit-per-ip 2 --log-to-file locali.log > qpiddi.port
LOCAL_PORTI=`cat qpiddi.port`
- ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRU --load-module $ACL_LIB --acl-file policy.acl --auth no --max-connections-per-user 2 --log-to-file localu.log > qpiddu.port
+ ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRU --load-module $ACL_LIB --acl-file policy.acl --auth no --connection-limit-per-user 2 --log-to-file localu.log > qpiddu.port
LOCAL_PORTU=`cat qpiddu.port`
+ ../qpidd --daemon --port 0 --no-module-dir --data-dir $DATA_DIRQ --load-module $ACL_LIB --acl-file policy.acl --auth no --max-queues-per-user 2 --log-to-file localq.log > qpiddq.port
+ LOCAL_PORTQ=`cat qpiddq.port`
}
stop_brokers() {
$QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORT
$QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTI
$QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTU
+ $QPIDD_EXEC --no-module-dir -q --port $LOCAL_PORTQ
}
test_loading_acl_from_absolute_path(){
@@ -59,20 +63,24 @@ if test -d ${PYTHON_DIR} ; then
rm -rf $DATA_DIR
rm -rf $DATA_DIRI
rm -rf $DATA_DIRU
+ rm -rf $DATA_DIRQ
mkdir -p $DATA_DIR
mkdir -p $DATA_DIRI
mkdir -p $DATA_DIRU
+ mkdir -p $DATA_DIRQ
cp $srcdir/policy.acl $DATA_DIR
cp $srcdir/policy.acl $DATA_DIRI
cp $srcdir/policy.acl $DATA_DIRU
+ cp $srcdir/policy.acl $DATA_DIRQ
start_brokers
- echo "Running acl tests using brokers on ports $LOCAL_PORT, $LOCAL_PORTI, and $LOCAL_PORTU"
- $QPID_PYTHON_TEST -b localhost:$LOCAL_PORT -m acl -Dport-i=$LOCAL_PORTI -Dport-u=$LOCAL_PORTU || EXITCODE=1
+ echo "Running acl tests using brokers on ports $LOCAL_PORT, $LOCAL_PORTI, $LOCAL_PORTU, and $LOCAL_PORTQ"
+ $QPID_PYTHON_TEST -b localhost:$LOCAL_PORT -m acl -Dport-i=$LOCAL_PORTI -Dport-u=$LOCAL_PORTU -Dport-q=$LOCAL_PORTQ || EXITCODE=1
stop_brokers || EXITCODE=1
test_loading_acl_from_absolute_path || EXITCODE=1
rm -rf $DATA_DIR
rm -rf $DATA_DIRI
rm -rf $DATA_DIRU
+ rm -rf $DATA_DIRQ
exit $EXITCODE
fi
diff --git a/cpp/src/tests/run_cluster_authentication_soak b/cpp/src/tests/run_cluster_authentication_soak
deleted file mode 100755
index 24befa28ba..0000000000
--- a/cpp/src/tests/run_cluster_authentication_soak
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-source ./test_env.sh
-source sasl_test_setup.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ./cluster_authentication_soak 500
-
diff --git a/cpp/src/tests/run_cluster_authentication_test b/cpp/src/tests/run_cluster_authentication_test
deleted file mode 100755
index 844807a857..0000000000
--- a/cpp/src/tests/run_cluster_authentication_test
+++ /dev/null
@@ -1,27 +0,0 @@
-#! /bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-
-source ./test_env.sh
-source sasl_test_setup.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ./cluster_authentication_soak
-
diff --git a/cpp/src/tests/run_cluster_tests b/cpp/src/tests/run_cluster_tests
deleted file mode 100755
index a5cea5ff6e..0000000000
--- a/cpp/src/tests/run_cluster_tests
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-
-test -x $QPID_PYTHON_TEST || { echo Skipping test, $QPID_PYTHON_TEST not found; exit 0; }
-
-# Delete old cluster test data
-OUTDIR=${OUTDIR:-brokertest.tmp}
-rm -rf $OUTDIR
-mkdir -p $OUTDIR
-
-# Ignore tests requiring a store by default.
-CLUSTER_TESTS_IGNORE=${CLUSTER_TESTS_IGNORE:--i cluster_tests.StoreTests.* -I $srcdir/cluster_tests.fail}
-CLUSTER_TESTS=${CLUSTER_TESTS:-$*}
-
-with_ais_group $QPID_PYTHON_TEST -DOUTDIR=$OUTDIR -m cluster_tests $CLUSTER_TESTS_IGNORE $CLUSTER_TESTS || exit 1
-rm -rf $OUTDIR
diff --git a/cpp/src/tests/run_failover_soak b/cpp/src/tests/run_failover_soak
deleted file mode 100755
index 2c56bf7d6b..0000000000
--- a/cpp/src/tests/run_failover_soak
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-host=127.0.0.1
-
-unset QPID_NO_MODULE_DIR # failover_soak uses --module-dir, dont want clash
-MODULES=${MODULES:-$moduledir}
-MESSAGES=${MESSAGES:-500000}
-REPORT_FREQUENCY=${REPORT_FREQUENCY:-20000}
-VERBOSITY=${VERBOSITY:-0}
-DURABILITY=${DURABILITY:-0}
-N_QUEUES=${N_QUEUES:-1}
-N_BROKERS=${N_BROKERS:-4}
-
-rm -f soak-*.log
-exec ./failover_soak $MODULES ./declare_queues ./replaying_sender ./resuming_receiver $MESSAGES $REPORT_FREQUENCY $VERBOSITY $DURABILITY $N_QUEUES $N_BROKERS
-
diff --git a/cpp/src/tests/run_federation_sys_tests b/cpp/src/tests/run_federation_sys_tests
index 76da176914..d9a9649c37 100755
--- a/cpp/src/tests/run_federation_sys_tests
+++ b/cpp/src/tests/run_federation_sys_tests
@@ -25,10 +25,6 @@ source ./test_env.sh
MODULENAME=federation_sys
-# Test for clustering
-source cpg_check.sh
-if cpg_enabled; then CLUSTERING_ENABLED=1; fi
-
# Test for long test
if [[ "$1" == "LONG_TEST" ]]; then
USE_LONG_TEST=1
@@ -42,11 +38,7 @@ if [ -z ${USE_LONG_TEST} ]; then
SKIPTESTS="-i federation_sys.A_Long* -i federation_sys.B_Long* ${SKIPTESTS}"
fi
echo "WARNING: Tests using persistence will be ignored."
-if [ -z ${CLUSTERING_ENABLED} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*"
-elif [ -z ${USE_LONG_TEST} ]; then
- SKIPTESTS="${SKIPTESTS} -i federation_sys.C_Long* -i federation_sys.D_Long*"
-fi
+SKIPTESTS="${SKIPTESTS} -i federation_sys.C_* -i federation_sys.D_*"
start_brokers() {
start_broker() {
@@ -56,35 +48,21 @@ start_brokers() {
}
start_broker "" LOCAL_PORT
start_broker "" REMOTE_PORT
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-1" CLUSTER_C1_2
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_1
- start_broker "--load-module ${CLUSTER_LIB} --cluster-name test-cluster-2" CLUSTER_C2_2
- fi
rm qpidd.port
}
stop_brokers() {
${QPIDD_EXEC} -q --port ${LOCAL_PORT}
${QPIDD_EXEC} -q --port ${REMOTE_PORT}
- if [ -n "${CLUSTERING_ENABLED}" ]; then
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C1_1}
- ${QPID_CLUSTER_EXEC} --all-stop --force localhost:${CLUSTER_C2_1}
- fi
}
if test -d ${PYTHON_DIR} ; then
start_brokers
- if [ -z ${CLUSTERING_ENABLED} ]; then
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)"
- else
- echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT}, local cluster nodes ${CLUSTER_C1_1} ${CLUSTER_C1_2}, remote cluster nodes ${CLUSTER_C2_1} ${CLUSTER_C2_2}"
- fi
+ echo "Running federation tests using brokers on local port ${LOCAL_PORT}, remote port ${REMOTE_PORT} (NOTE: clustering is DISABLED)"
if [ -z ${USE_LONG_TEST} ]; then
echo "NOTE: To run a full set of federation system tests, use \"make check-long\". To test with persistence, run the store version of this script."
fi
- ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} -Dlocal-cluster-ports="${CLUSTER_C1_1} ${CLUSTER_C1_2}" -Dremote-cluster-ports="${CLUSTER_C2_1} ${CLUSTER_C2_2}" $@
+ ${QPID_PYTHON_TEST} -m ${MODULENAME} ${SKIPTESTS} -b localhost:${REMOTE_PORT} -Dlocal-port=${LOCAL_PORT} -Dremote-port=${REMOTE_PORT} $@
RETCODE=$?
stop_brokers
if test x${RETCODE} != x0; then
diff --git a/cpp/src/tests/sasl_fed_ex b/cpp/src/tests/sasl_fed_ex
index cc5b310067..4ea61c5a2a 100755
--- a/cpp/src/tests/sasl_fed_ex
+++ b/cpp/src/tests/sasl_fed_ex
@@ -34,17 +34,11 @@ then
echo
# These are the four different ways of creating links ( or routes+links )
# that the qpid-route command provides.
- echo "Usage: ${script_name} dynamic|link|queue|route [cluster]"
+ echo "Usage: ${script_name} dynamic|link|queue|route"
echo
exit 1
fi
-# Has the user told us to do clustering ? -----------
-clustering_flag=
-if [ $# -eq "2" ] && [ "$2" == "cluster" ]; then
- clustering_flag=true
-fi
-
qpid_route_method=$1
# Debugging print. --------------------------
@@ -128,15 +122,7 @@ DST_TCP_PORT=5807
SRC_TCP_PORT_2=5802
DST_TCP_PORT_2=5803
-CLUSTER_NAME_SUFFIX=`hostname | tr '.' ' ' | awk '{print $1}'`
-CLUSTER_1_NAME=sasl_fed_ex_cluster_1_${CLUSTER_NAME_SUFFIX}
-CLUSTER_2_NAME=sasl_fed_ex_cluster_2_${CLUSTER_NAME_SUFFIX}
-
-print "CLUSTER_1_NAME == ${CLUSTER_1_NAME}"
-print "CLUSTER_2_NAME == ${CLUSTER_2_NAME}"
-
SSL_LIB=${moduledir}/ssl.so
-CLUSTER_LIB=${moduledir}/cluster.so
export QPID_SSL_CERT_NAME=${TEST_HOSTNAME}
@@ -183,80 +169,26 @@ COMMON_BROKER_OPTIONS=" \
function start_brokers {
- if [ $1 ]; then
- # clustered ----------------------------------------
- print "Starting SRC cluster"
-
- print " src broker 1"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
-
- broker_ports[0]=${SRC_TCP_PORT}
-
- print " src broker 2"
- $QPIDD_EXEC \
- --port=${SRC_TCP_PORT_2} \
- --ssl-port ${SRC_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_1_NAME} \
- --log-to-file $tmp_root/qpidd_src_2.log 2> /dev/null
-
- broker_ports[1]=${SRC_TCP_PORT_2}
-
-
- print "Starting DST cluster"
-
- print " dst broker 1"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
-
- broker_ports[2]=${DST_TCP_PORT}
-
- print " dst broker 2"
- $QPIDD_EXEC \
- --port=${DST_TCP_PORT_2} \
- --ssl-port ${DST_SSL_PORT_2} \
- ${COMMON_BROKER_OPTIONS} \
- --load-module ${CLUSTER_LIB} \
- --cluster-name ${CLUSTER_2_NAME} \
- --log-to-file $tmp_root/qpidd_dst_2.log 2> /dev/null
-
- broker_ports[3]=${DST_TCP_PORT_2}
-
- else
# vanilla brokers --------------------------------
print "Starting SRC broker"
$QPIDD_EXEC \
- --port=${SRC_TCP_PORT} \
- --ssl-port ${SRC_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
+ --port=${SRC_TCP_PORT} \
+ --ssl-port ${SRC_SSL_PORT} \
+ ${COMMON_BROKER_OPTIONS} \
+ --log-to-file $tmp_root/qpidd_src.log 2> /dev/null
broker_ports[0]=${SRC_TCP_PORT}
print "Starting DST broker"
$QPIDD_EXEC \
- --port=${DST_TCP_PORT} \
- --ssl-port ${DST_SSL_PORT} \
- ${COMMON_BROKER_OPTIONS} \
- --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
+ --port=${DST_TCP_PORT} \
+ --ssl-port ${DST_SSL_PORT} \
+ ${COMMON_BROKER_OPTIONS} \
+ --log-to-file $tmp_root/qpidd_dst.log 2> /dev/null
broker_ports[1]=${DST_TCP_PORT}
- fi
}
-
function halt_brokers {
n_brokers=${#broker_ports[@]}
print "Halting ${n_brokers} brokers."
@@ -270,7 +202,7 @@ function halt_brokers {
}
-start_brokers $clustering_flag
+start_brokers
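
start_brokers (above) records each broker's TCP port in the broker_ports array, and halt_brokers later walks that array to shut everything down. A minimal standalone Python 3 sketch of the same bookkeeping, reusing flags that already appear in this script (the qpidd path is an assumption):

    import subprocess

    broker_ports = []

    def start_broker(port, ssl_port, log_file):
        # Launch one daemonized broker and remember its port for later shutdown.
        subprocess.check_call(["qpidd", "--daemon", "--port", str(port),
                               "--ssl-port", str(ssl_port),
                               "--log-to-file", log_file])
        broker_ports.append(port)

    def halt_brokers():
        # Ask each recorded broker to quit, mirroring halt_brokers above.
        for port in broker_ports:
            subprocess.check_call(["qpidd", "--quit", "--port", str(port)])
        del broker_ports[:]
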
# I am not randomizing these names, because this test creates its own brokers.
@@ -329,9 +261,7 @@ fi
# to avoid false negatives.
sleep 5
-# This should work the same whether or not we are running a clustered test.
-# In the case of clustered tests, the status is not printed by qpid_route.
-# So in either case, I will look only at the transport field, which should be "ssl".
+# Look only at the transport field, which should be "ssl".
print "check the link"
link_status=$($QPID_ROUTE_EXEC link list localhost:${DST_TCP_PORT} | tail -1 | awk '{print $3}')
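
The tail|awk pipeline above keeps only the third (transport) field of the last line of qpid-route link list output. For clarity, a small Python 3 equivalent, assuming the same whitespace-separated output format:

    import subprocess

    def link_transport(port):
        # Return the transport field of the newest link -- "ssl" is expected here.
        out = subprocess.check_output(
            ["qpid-route", "link", "list", "localhost:%d" % port], text=True)
        return out.strip().splitlines()[-1].split()[2]
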
diff --git a/cpp/src/tests/sasl_fed_ex_dynamic_cluster b/cpp/src/tests/sasl_fed_ex_dynamic_cluster
deleted file mode 100755
index fd6b72a4f2..0000000000
--- a/cpp/src/tests/sasl_fed_ex_dynamic_cluster
+++ /dev/null
@@ -1,30 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-
-with_ais_group ${srcdir}/sasl_fed_ex dynamic cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_link_cluster b/cpp/src/tests/sasl_fed_ex_link_cluster
deleted file mode 100755
index 34b2aa4a5f..0000000000
--- a/cpp/src/tests/sasl_fed_ex_link_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex link cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_queue_cluster b/cpp/src/tests/sasl_fed_ex_queue_cluster
deleted file mode 100755
index 14f36f6fc4..0000000000
--- a/cpp/src/tests/sasl_fed_ex_queue_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex queue cluster
-
-
diff --git a/cpp/src/tests/sasl_fed_ex_route_cluster b/cpp/src/tests/sasl_fed_ex_route_cluster
deleted file mode 100755
index 756476056e..0000000000
--- a/cpp/src/tests/sasl_fed_ex_route_cluster
+++ /dev/null
@@ -1,29 +0,0 @@
-#! /bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-with_ais_group ${srcdir}/sasl_fed_ex route cluster
-
-
diff --git a/cpp/src/tests/ssl_test b/cpp/src/tests/ssl_test
index 19a316a483..89aaf44af0 100755
--- a/cpp/src/tests/ssl_test
+++ b/cpp/src/tests/ssl_test
@@ -193,29 +193,3 @@ echo "Running SSL/TCP mux test on random port $PORT"
./qpid-perftest --count ${COUNT} --port ${PORT} -P tcp -b $TEST_HOSTNAME --summary || error "TCP connection failed!"
stop_brokers
-
-test -z $CLUSTER_LIB && exit 0 # Exit if cluster not supported.
-
-## Test failover in a cluster using SSL only
-source cpg_check.sh
-cpg_enabled || exit 0
-
-PORT1=`pick_port`; ssl_cluster_broker $PORT1
-echo "Running SSL cluster broker on port $PORT1"
-
-PORT2=`pick_port`; ssl_cluster_broker $PORT2
-echo "Running SSL cluster broker on port $PORT2"
-
-# Pipe receive output to uniq to remove duplicates
-./qpid-receive --connection-options "{reconnect:true, reconnect-timeout:5}" --failover-updates -b amqp:ssl:$TEST_HOSTNAME:$PORT1 -a "foo;{create:always}" -f | uniq > ssl_test_receive.tmp &
-./qpid-send -b amqp:ssl:$TEST_HOSTNAME:$PORT2 --content-string=one -a "foo;{create:always}"
-
-stop_broker 0 # Kill broker 1 - receiver should fail-over.
-echo "Killed SSL cluster broker on port $PORT1"
-
-./qpid-send -b amqp:ssl:$TEST_HOSTNAME:$PORT2 --content-string=two -a "foo;{create:always}" --send-eos 1
-wait # Wait for qpid-receive
-{ echo one; echo two; } > ssl_test_receive.cmp
-diff ssl_test_receive.tmp ssl_test_receive.cmp || { echo "Failover failed"; exit 1; }
-rm -f ssl_test_receive.*
-
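
The block removed above encoded a common failover check: a reconnecting receiver is started, one message is sent, the receiver's broker is killed, a second message is sent, and the received output (deduplicated with uniq) must equal the expected sequence. A distilled Python 3 sketch of just that verification step, with invented message lists:

    def verify_failover(received, expected=("one", "two")):
        # A reconnect may redeliver a message; collapse adjacent duplicates
        # (the uniq step above), then require the exact expected sequence.
        deduped = [m for i, m in enumerate(received)
                   if i == 0 or m != received[i - 1]]
        return deduped == list(expected)

    assert verify_failover(["one", "one", "two"])  # duplicate across failover is OK
    assert not verify_failover(["one"])            # a lost message fails the test
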
diff --git a/cpp/src/tests/start_cluster b/cpp/src/tests/start_cluster
deleted file mode 100755
index 78fd104d9c..0000000000
--- a/cpp/src/tests/start_cluster
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Start a cluster of brokers on the local host and put the list of ports for cluster members in cluster.ports
-#
-
-# Execute command with the ais group set.
-source ./test_env.sh
-source cpg_check.sh
-cpg_enabled || exit 0
-
-rm -f cluster*.log cluster.ports qpidd.port
-
-SIZE=${1:-3}; shift
-CLUSTER=$HOSTNAME.$$
-OPTS="-d --no-module-dir --load-module $CLUSTER_LIB --cluster-name=$CLUSTER --auth=no --log-enable notice+ --log-enable debug+:cluster $@"
-
-for (( i=0; i<SIZE; ++i )); do
- DDIR=`mktemp -d /tmp/start_cluster.XXXXXXXXXX`
- PORT=`with_ais_group ../qpidd -p0 --log-to-file=cluster$i.log $OPTS --data-dir=$DDIR` || exit 1
- echo $PORT >> cluster.ports
-done
-
-head -n 1 cluster.ports > qpidd.port # First member's port for tests.
-
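
The deleted start_cluster script starts SIZE brokers on ephemeral ports (-p0), records the reported ports in cluster.ports, and copies the first member's port to qpidd.port. A Python 3 sketch of the same record-keeping, reusing the script's own qpidd flags (the qpidd path is an assumption):

    import subprocess, tempfile

    def start_cluster(size=3):
        # Start `size` daemonized brokers on ephemeral ports and record the
        # actual port each one reports, as the deleted shell script did.
        ports = []
        for i in range(size):
            data_dir = tempfile.mkdtemp(prefix="start_cluster.")
            out = subprocess.check_output(
                ["../qpidd", "-d", "-p0", "--no-module-dir", "--auth=no",
                 "--log-to-file=cluster%d.log" % i, "--data-dir=%s" % data_dir],
                text=True)
            ports.append(int(out))
        with open("cluster.ports", "w") as f:
            f.writelines("%d\n" % p for p in ports)
        with open("qpidd.port", "w") as f:
            f.write("%d\n" % ports[0])  # first member's port, for the tests
        return ports
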
diff --git a/cpp/src/tests/start_cluster_hosts b/cpp/src/tests/start_cluster_hosts
deleted file mode 100755
index 778b4248da..0000000000
--- a/cpp/src/tests/start_cluster_hosts
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/bin/sh
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Start a cluster of brokers on local host, put the list of host port addresses
-# in cluster.ports
-#
-# Arguments: [-k] [-p port] HOST [HOST...]
-# -p port to start brokers on; may be 0. Actual ports are recorded in cluster.ports.
-# -k kill any qpidd processes owned by this user before starting.
-#
-# Start a broker on each named host. Name a host twice to start multiple brokers.
-#
-# You must be able to ssh to each host and be in group ais.
-# $QPIDD must be executable on each host.
-# Logs go to syslog on each host, with a unique prefix per broker.
-#
-
-QPIDD=${QPIDD:-$PWD/../qpidd}
-LIBQPIDCLUSTER=${LIBQPIDCLUSTER:-$PWD/../.libs/cluster.so}
-NAME=$USER # User name is default cluster name.
-RESTART=NO
-
-while getopts "kp:n:q:l:r" ARG ; do
- case $ARG in
- k) KILL=yes ;;
- p) PORT="$OPTARG" ;;
- n) NAME=$OPTARG ;;
- q) QPIDD=$OPTARG ;;
- l) LIBQPIDCLUSTER=$OPTARG ;;
- r) RESTART=yes ;;
- *) echo "Error parsing options: $ARG"; exit 1 ;;
- esac
-done
-shift `expr $OPTIND - 1`
-test -n "$PORT" && PORTOPT="-p $PORT"
-test "$KILL" = yes && KILL="$QPIDD --no-module-dir -q $PORTOPT ;"
-CLUSTER=${*:-$CLUSTER} # Use args or env
-test -z "$CLUSTER" && { echo Must specify at least one host; exit 1; }
-
-
-OPTS="-d $PORTOPT --load-module $LIBQPIDCLUSTER --cluster-name=$NAME --no-data-dir --auth=no --log-to-syslog --log-enable=info+"
-
-num=0
-for h in $CLUSTER; do
- num=`expr $num + 1` # Give a unique log prefix to each node.
- cmd="$KILL $QPIDD $OPTS --log-prefix $num.$h"
- out=`echo "$cmd" | ssh $h newgrp ais` || { echo == $h error: $out ; exit 1; }
- if [ "$PORT" = 0 ] ; then p=$out; else p=$PORT; fi
- echo "$h $p"
-done
-
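
The loop above ships the broker command to each host over ssh (via newgrp ais) and echoes one host/port pair per broker. A simplified Python 3 sketch of that fan-out, with the group handling dropped and the broker command line treated as an assumption:

    import subprocess

    def start_on_hosts(hosts, broker_cmd):
        # Run broker_cmd on every host via ssh, giving each broker a unique
        # log prefix and collecting "host port" pairs as the script echoes them.
        results = []
        for num, host in enumerate(hosts, start=1):
            cmd = "%s --log-prefix %d.%s" % (broker_cmd, num, host)
            out = subprocess.check_output(["ssh", host, cmd], text=True).strip()
            results.append((host, out))
            print(host, out)
        return results
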
diff --git a/cpp/src/tests/storePerftools/asyncPerf/PerfTest.cpp b/cpp/src/tests/storePerftools/asyncPerf/PerfTest.cpp
index 85dd7fa715..7d97ded8b9 100644
--- a/cpp/src/tests/storePerftools/asyncPerf/PerfTest.cpp
+++ b/cpp/src/tests/storePerftools/asyncPerf/PerfTest.cpp
@@ -157,7 +157,7 @@ PerfTest::destroyQueues() {
int
runPerfTest(int argc, char** argv) {
// Load async store module
- qpid::tryShlib ("asyncStore.so", false);
+ qpid::tryShlib ("asyncStore.so");
qpid::CommonOptions co;
qpid::asyncStore::AsyncStoreOptions aso;
diff --git a/cpp/src/tests/test_env.ps1.in b/cpp/src/tests/test_env.ps1.in
index 60ba4305a5..5fa3a0ac31 100644
--- a/cpp/src/tests/test_env.ps1.in
+++ b/cpp/src/tests/test_env.ps1.in
@@ -62,8 +62,6 @@ $env:TEST_STORE_LIB="$testmoduledir\test_store.so"
#exportmodule() { test -f $moduledir/$2 && eval "export $1=$moduledir/$2"; }
#exportmodule ACL_LIB acl.so
#exportmodule CLUSTER_LIB cluster.so
-#exportmodule REPLICATING_LISTENER_LIB replicating_listener.so
-#exportmodule REPLICATION_EXCHANGE_LIB replication_exchange.so
#exportmodule SSLCONNECTOR_LIB sslconnector.so
#exportmodule SSL_LIB ssl.so
#exportmodule WATCHDOG_LIB watchdog.so
diff --git a/cpp/src/tests/test_env.sh.in b/cpp/src/tests/test_env.sh.in
index cee36843ae..76e88283ed 100644
--- a/cpp/src/tests/test_env.sh.in
+++ b/cpp/src/tests/test_env.sh.in
@@ -43,7 +43,6 @@ export PYTHON_COMMANDS=$QPID_TOOLS/src/py
export PYTHONPATH=$srcdir:$PYTHON_DIR:$PYTHON_COMMANDS:$QPID_TESTS_PY:$QMF_LIB:$PYTHONPATH
export QPID_CONFIG_EXEC=$PYTHON_COMMANDS/qpid-config
export QPID_ROUTE_EXEC=$PYTHON_COMMANDS/qpid-route
-export QPID_CLUSTER_EXEC=$PYTHON_COMMANDS/qpid-cluster
export QPID_HA_EXEC=$PYTHON_COMMANDS/qpid-ha
# Executables
@@ -63,10 +62,7 @@ export TEST_STORE_LIB=$testmoduledir/test_store.so
exportmodule() { test -f $moduledir/$2 && eval "export $1=$moduledir/$2"; }
exportmodule ACL_LIB acl.so
-exportmodule CLUSTER_LIB cluster.so
exportmodule HA_LIB ha.so
-exportmodule REPLICATING_LISTENER_LIB replicating_listener.so
-exportmodule REPLICATION_EXCHANGE_LIB replication_exchange.so
exportmodule SSLCONNECTOR_LIB sslconnector.so
exportmodule SSL_LIB ssl.so
exportmodule WATCHDOG_LIB watchdog.so
diff --git a/cpp/src/tests/test_store.cpp b/cpp/src/tests/test_store.cpp
index e6bc55c033..46b7ea1196 100644
--- a/cpp/src/tests/test_store.cpp
+++ b/cpp/src/tests/test_store.cpp
@@ -37,17 +37,13 @@
//#include "qpid/broker/amqp_0_10/MessageTransfer.h"
//#include "qpid/framing/AMQFrame.h"
//#include "qpid/log/Statement.h"
+//#include "qpid/sys/Thread.h"
//#include "qpid/Plugin.h"
//#include "qpid/Options.h"
//#include <boost/cast.hpp>
//#include <boost/lexical_cast.hpp>
//#include <memory>
//#include <fstream>
-//
-//using namespace qpid;
-//using namespace broker;
-//using namespace std;
-//using namespace qpid::sys;
namespace qpid {
namespace tests {
diff --git a/cpp/src/tests/testagent.mk b/cpp/src/tests/testagent.mk
index 25cf43d71e..0492f3e3bb 100644
--- a/cpp/src/tests/testagent.mk
+++ b/cpp/src/tests/testagent.mk
@@ -46,6 +46,6 @@ testagent-testagent.$(OBJEXT): $(TESTAGENT_GEN_SRC)
qpidexectest_PROGRAMS+=testagent
testagent_CXXFLAGS=$(CXXFLAGS) -Itestagent_gen
testagent_SOURCES=testagent.cpp $(TESTAGENT_GEN_SRC)
-testagent_LDADD=$(top_builddir)/src/libqmf.la
+testagent_LDADD=$(top_builddir)/src/libqmf.la $(top_builddir)/src/libqpidcommon.la $(top_builddir)/src/libqpidtypes.la $(top_builddir)/src/libqpidclient.la
EXTRA_DIST+=testagent.xml
diff --git a/cpp/src/tests/testlib.py b/cpp/src/tests/testlib.py
deleted file mode 100644
index 71ad59e5c1..0000000000
--- a/cpp/src/tests/testlib.py
+++ /dev/null
@@ -1,766 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# Support library for qpid python tests.
-#
-
-import os, re, signal, subprocess, time, unittest
-
-class TestBase(unittest.TestCase):
- """
- Base class for qpid tests. Provides broker start/stop/kill methods
- """
-
- """
- The following environment vars control if and how the test is run, and determine where many of the helper
- executables/libs are to be found.
- """
- _storeLib = os.getenv("STORE_LIB")
- _storeEnable = _storeLib != None # Must be True for durability to be enabled during the test
- _qpiddExec = os.getenv("QPIDD_EXEC", "/usr/sbin/qpidd")
- _tempStoreDir = os.path.abspath(os.getenv("TMP_DATA_DIR", "/tmp/qpid"))
-
- """Global message counter ensures unique messages"""
- _msgCnt = 0
-
- # --- Helper functions for parameter handling ---
-
- def _paramBool(self, key, val, keyOnly = False):
- if val == None:
- return ""
- if keyOnly:
- if val:
- return " --%s" % key
- else:
- return ""
- else:
- if val:
- return " --%s yes" % key
- else:
- return " --%s no" % key
-
- # --- Helper functions for message creation ---
-
- def _makeMessage(self, msgSize):
- msg = "Message-%04d" % self._msgCnt
- self._msgCnt = self._msgCnt + 1
- msgLen = len(msg)
- if msgSize > msgLen:
- for i in range(msgLen, msgSize):
- if i == msgLen:
- msg += "-"
- else:
- msg += chr(ord('a') + (i % 26))
- return msg
-
- def _makeMessageList(self, numMsgs, msgSize):
- if msgSize == None:
- msgSize = 12
- msgs = ""
- for m in range(0, numMsgs):
- msgs += "%s\n" % self._makeMessage(msgSize)
- return msgs
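
For clarity, the padding scheme _makeMessage implements, as a standalone Python sketch (same logic, outside the class):

    def make_message(count, size):
        # "Message-NNNN" padded first with "-", then with letters chosen by
        # absolute index (chr(ord('a') + i % 26)), exactly as _makeMessage does.
        msg = "Message-%04d" % count
        base = len(msg)
        for i in range(base, size):
            msg += "-" if i == base else chr(ord("a") + i % 26)
        return msg

    print(make_message(7, 20))  # -> Message-0007-nopqrst
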
-
- # --- Starting and stopping a broker ---
-
- def startBroker(self, qpiddArgs, logFile = None):
- """Start a single broker daemon, returns tuple (pid, port)"""
- if self._qpiddExec == None:
- raise Exception("Environment variable QPIDD is not set")
- cmd = "%s --daemon --port=0 %s" % (self._qpiddExec, qpiddArgs)
- portStr = os.popen(cmd).read()
- if len(portStr) == 0:
- err = "Broker daemon startup failed."
- if logFile != None:
- err += " See log file %s" % logFile
- raise Exception(err)
- port = int(portStr)
- pidStr = os.popen("%s -p %d -c" % (self._qpiddExec, port)).read()
- try:
- pid = int(pidStr)
- except:
- raise Exception("Unable to get pid: \"%s -p %d -c\" returned %s" % (self._qpiddExec, port, pidStr))
- #print "started broker: pid=%d, port=%d args: %s" % (pid, port, qpiddArgs)
- return (pid, port)
-
- def killBroker(self, nodeTuple, ignoreFailures = False):
- """Kill a broker using kill -9"""
- try:
- os.kill(nodeTuple[self.PID], signal.SIGKILL)
- try:
- os.waitpid(nodeTuple[self.PID], 0)
- except:
- pass
- #print "killed broker: port=%d pid=%d" % (nodeTuple[self.PORT], nodeTuple[self.PID])
- except:
- if ignoreFailures:
- print "WARNING: killBroker (port=%d pid=%d) failed - ignoring." % (nodeTuple[self.PORT], nodeTuple[self.PID])
- else:
- raise
-
- def stopBroker(self, nodeTuple, ignoreFailures = False):
- """Stop a broker using qpidd -q"""
- try:
- ret = os.spawnl(os.P_WAIT, self._qpiddExec, self._qpiddExec, "--port=%d" % nodeTuple[self.PORT], "--quit", "--no-module-dir")
- if ret != 0:
- raise Exception("stopBroker(): port=%d: qpidd -q returned %d" % (nodeTuple[self.PORT], ret))
- try:
- os.waitpid(nodeTuple[self.PID], 0)
- except:
- pass
- #print "stopped broker: port=%d pid=%d" % (nodeTuple[self.PORT], nodeTuple[self.PID])
- except:
- if ignoreFailures:
- print "WARNING: stopBroker (port=%d pid=%d) failed - ignoring." % (nodeTuple[self.PORT], nodeTuple[self.PID])
- else:
- raise
-
-
-
-class TestBaseCluster(TestBase):
- """
- Base class for cluster tests. Provides methods for starting and stopping clusters and cluster nodes.
- """
-
- """
- The following environment vars control if and how the test is run, and determine where many of the helper
- executables/libs are to be found.
- """
- _clusterLib = os.getenv("CLUSTER_LIB")
- _clusterTestEnable = _clusterLib != None # Must be True for these cluster tests to run
- _xmlLib = os.getenv("XML_LIB")
- _xmlEnable = _xmlLib != None
- _qpidConfigExec = os.getenv("QPID_CONFIG_EXEC", "/usr/bin/qpid-config")
- _qpidRouteExec = os.getenv("QPID_ROUTE_EXEC", "/usr/bin/qpid-route")
- _receiverExec = os.getenv("RECEIVER_EXEC", "/usr/libexec/qpid/test/receiver")
- _senderExec = os.getenv("SENDER_EXEC", "/usr/libexec/qpid/test/sender")
-
-
- """
- _clusterDict is a dictionary of clusters:
- key = cluster name (string)
- val = dictionary of node numbers:
- key = integer node number
- val = tuple containing (pid, port)
- For example, two clusters "TestCluster0" and "TestCluster1" containing several nodes would look as follows:
- {"TestCluster0": {0: (pid0-0, port0-0), 1: (pid0-1, port0-1), ...}, "TestCluster1": {0: (pid1-0, port1-0), 1: (pid1-1, port1-1), ...}}
- where pidm-n and portm-n are the int pid and port for TestCluster m node n respectively.
- """
- _clusterDict = {}
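
Concretely, a single two-node cluster registered as "TestCluster0" would be stored as follows (pids and ports are illustrative):

    _clusterDict = {
        "TestCluster0": {0: (12001, 5672),   # node 0: (pid, port)
                         1: (12002, 5673)},  # node 1: (pid, port)
    }
    port = _clusterDict["TestCluster0"][1][1]  # 5673; PORT is index 1 in the tuple
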
-
- """Index for (pid, port) tuple"""
- PID = 0
- PORT = 1
-
- def run(self, res):
- """ Skip cluster testing if env var RUN_CLUSTER_TESTS is not defined."""
- if not self._clusterTestEnable:
- return
- unittest.TestCase.run(self, res)
-
- # --- Private helper / convenience functions ---
-
- def _checkPids(self, clusterName = None):
- for pid, port in self.getTupleList():
- try:
- os.kill(pid, 0)
- except:
- raise Exception("_checkPids(): Broker with pid %d expected but does not exist! (crashed?)" % pid)
-
-
- # --- Starting cluster node(s) ---
-
- def createClusterNode(self, nodeNumber, clusterName):
- """Create a node and add it to the named cluster"""
- if self._tempStoreDir == None:
- raise Exception("Environment variable TMP_DATA_DIR is not set")
- if self._clusterLib == None:
- raise Exception("Environment variable LIBCLUSTER is not set")
- name = "%s-%d" % (clusterName, nodeNumber)
- dataDir = os.path.join(self._tempStoreDir, "cluster", name)
- logFile = "%s.log" % dataDir
- args = "--no-module-dir --load-module=%s --data-dir=%s --cluster-name=%s --auth=no --log-enable=notice+ --log-to-file=%s" % \
- (self._clusterLib, dataDir, clusterName, logFile)
- if self._storeEnable:
- if self._storeLib == None:
- raise Exception("Environment variable LIBSTORE is not set")
- args += " --load-module %s" % self._storeLib
- self._clusterDict[clusterName][nodeNumber] = self.startBroker(args, logFile)
-
- def createCluster(self, clusterName, numberNodes = 0):
- """Create a cluster containing an initial number of nodes"""
- self._clusterDict[clusterName] = {}
- for n in range(0, numberNodes):
- self.createClusterNode(n, clusterName)
-
- def waitForNodes(self, clusterName):
- """Wait for all nodes to become active (ie finish cluster sync)"""
- # TODO - connect to each known node in cluster
- # Until this is done, wait a bit (hack)
- time.sleep(1)
-
- # --- Cluster and node status ---
-
- def getTupleList(self, clusterName = None):
- """Get list of (pid, port) tuples of all known cluster brokers"""
- tList = []
- for c, l in self._clusterDict.iteritems():
- if clusterName == None or c == clusterName:
- for t in l.itervalues():
- tList.append(t)
- return tList
-
- def getNumBrokers(self):
- """Get total number of brokers in all known clusters"""
- return len(self.getTupleList())
-
- def checkNumBrokers(self, expected = None, checkPids = True):
- """Check that the total number of brokers in all known clusters is the expected value"""
- if expected != None and self.getNumBrokers() != expected:
- raise Exception("Unexpected number of brokers: expected %d, found %d" % (expected, self.getNumBrokers()))
- if checkPids:
- self._checkPids()
-
- def getClusterTupleList(self, clusterName):
- """Get list of (pid, port) tuples of all nodes in named cluster"""
- if clusterName in self._clusterDict:
- return self._clusterDict[clusterName].values()
- return []
-
- def getNumClusterBrokers(self, clusterName):
- """Get total number of brokers in named cluster"""
- return len(self.getClusterTupleList(clusterName))
-
- def getNodeTuple(self, nodeNumber, clusterName):
- """Get the (pid, port) tuple for the given cluster node"""
- return self._clusterDict[clusterName][nodeNumber]
-
- def checkNumClusterBrokers(self, clusterName, expected = None, checkPids = True, waitForNodes = True):
- """Check that the total number of brokers in the named cluster is the expected value"""
- if expected != None and self.getNumClusterBrokers(clusterName) != expected:
- raise Exception("Unexpected number of brokers in cluster %s: expected %d, found %d" % \
- (clusterName, expected, self.getNumClusterBrokers(clusterName)))
- if checkPids:
- self._checkPids(clusterName)
- if waitForNodes:
- self.waitForNodes(clusterName)
-
- def clusterExists(self, clusterName):
- """ Return True if clusterName exists, False otherwise"""
- return clusterName in self._clusterDict.keys()
-
- def clusterNodeExists(self, clusterName, nodeNumber):
- """ Return True if nodeNumber in clusterName exists, False otherwise"""
- if clusterName in self._clusterDict.keys():
- return nodeNumber in self._clusterDict[clusterName]
- return False
-
- def createCheckCluster(self, clusterName, size):
- """Create a cluster using the given name and size, then check the number of brokers"""
- self.createCluster(clusterName, size)
- self.checkNumClusterBrokers(clusterName, size)
-
- # --- Kill cluster nodes using signal 9 ---
-
- def killNode(self, nodeNumber, clusterName, updateDict = True, ignoreFailures = False):
- """Kill the given node in the named cluster using kill -9"""
- self.killBroker(self.getNodeTuple(nodeNumber, clusterName), ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName][nodeNumber])
-
- def killCluster(self, clusterName, updateDict = True, ignoreFailures = False):
- """Kill all nodes in the named cluster"""
- for n in self._clusterDict[clusterName].iterkeys():
- self.killNode(n, clusterName, False, ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName])
-
- def killClusterCheck(self, clusterName):
- """Kill the named cluster and check that the name is removed from the cluster dictionary"""
- self.killCluster(clusterName)
- if self.clusterExists(clusterName):
- raise Exception("Unable to kill cluster %s; %d nodes still exist" % \
- (clusterName, self.getNumClusterBrokers(clusterName)))
-
- def killAllClusters(self, ignoreFailures = False):
- """Kill all known clusters"""
- for n in self._clusterDict.iterkeys():
- self.killCluster(n, False, ignoreFailures)
- self._clusterDict.clear()
-
- def killAllClustersCheck(self, ignoreFailures = False):
- """Kill all known clusters and check that the cluster dictionary is empty"""
- self.killAllClusters(ignoreFailures)
- self.checkNumBrokers(0)
-
- # --- Stop cluster nodes using qpidd -q ---
-
- def stopNode(self, nodeNumber, clusterName, updateDict = True, ignoreFailures = False):
- """Stop the given node in the named cluster using qpidd -q"""
- self.stopBroker(self.getNodeTuple(nodeNumber, clusterName), ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName][nodeNumber])
-
- def stopAllClusters(self, ignoreFailures = False):
- """Stop all known clusters"""
- for n in self._clusterDict.iterkeys():
- self.stopCluster(n, False, ignoreFailures)
- self._clusterDict.clear()
-
-
- def stopCluster(self, clusterName, updateDict = True, ignoreFailures = False):
- """Stop all nodes in the named cluster"""
- for n in self._clusterDict[clusterName].iterkeys():
- self.stopNode(n, clusterName, False, ignoreFailures)
- if updateDict:
- del(self._clusterDict[clusterName])
-
- def stopCheckCluster(self, clusterName, ignoreFailures = False):
- """Stop the named cluster and check that the name is removed from the cluster dictionary"""
- self.stopCluster(clusterName, True, ignoreFailures)
- if self.clusterExists(clusterName):
- raise Exception("Unable to kill cluster %s; %d nodes still exist" % (clusterName, self.getNumClusterBrokers(clusterName)))
-
- def stopAllCheck(self, ignoreFailures = False):
- """Kill all known clusters and check that the cluster dictionary is empty"""
- self.stopAllClusters()
- self.checkNumBrokers(0)
-
- # --- qpid-config functions ---
-
- def _qpidConfig(self, nodeNumber, clusterName, action):
- """Configure some aspect of a qpid broker using the qpid_config executable"""
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- #print "%s -b localhost:%d %s" % (self._qpidConfigExec, port, action)
- ret = os.spawnl(os.P_WAIT, self._qpidConfigExec, self._qpidConfigExec, "-b", "localhost:%d" % port, *action.split())
- if ret != 0:
- raise Exception("_qpidConfig(): cluster=\"%s\" nodeNumber=%d port=%d action=\"%s\" returned %d" % \
- (clusterName, nodeNumber, port, action, ret))
-
- def addExchange(self, nodeNumber, clusterName, exchangeType, exchangeName, durable = False, sequence = False, \
- ive = False):
- """Add a named exchange."""
- action = "add exchange %s %s" % (exchangeType, exchangeName)
- action += self._paramBool("durable", durable, True)
- action += self._paramBool("sequence", sequence, True)
- action += self._paramBool("ive", ive, True)
- self._qpidConfig(nodeNumber, clusterName, action)
-
- def deleteExchange(self, nodeNumber, clusterName, exchangeName):
- """Delete a named exchange"""
- self._qpidConfig(nodeNumber, clusterName, "del exchange %s" % exchangeName)
-
- def addQueue(self, nodeNumber, clusterName, queueName, configArgs = None):
- """Add a queue using qpid-config."""
- action = "add queue %s" % queueName
- if self._storeEnable:
- action += " --durable"
- if configArgs != None:
- action += " %s" % configArgs
- self._qpidConfig(nodeNumber, clusterName, action)
-
- def delQueue(self, nodeNumber, clusterName, queueName):
- """Delete a named queue using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "del queue %s" % queueName)
-
- def bind(self, nodeNumber, clusterName, exchangeName, queueName, key):
- """Create an exchange-queue binding using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "bind %s %s %s" % (exchangeName, queueName, key))
-
- def unbind(self, nodeNumber, clusterName, exchangeName, queueName, key):
- """Remove an exchange-queue binding using qpid-config."""
- self._qpidConfig(nodeNumber, clusterName, "unbind %s %s %s" % (exchangeName, queueName, key))
-
- # --- qpid-route functions (federation) ---
-
- def brokerDict(self, nodeNumber, clusterName, host = "localhost", user = None, password = None):
- """Returns a dictionary containing the broker info to be passed to route functions"""
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- return {"cluster": clusterName, "node":nodeNumber, "port":port, "host":host, "user":user, "password":password}
-
- def _brokerStr(self, brokerDict):
- """Set up a broker string in the format [user/password@]host:port"""
- str = ""
- if brokerDict["user"] !=None and brokerDict["password"] != None:
- str = "%s@%s" % (brokerDict["user"], brokerDict["password"])
- str += "%s:%d" % (brokerDict["host"], brokerDict["port"])
- return str
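
An illustrative rendering of the [user/password@]host:port format, as a standalone sketch (values invented):

    def broker_str(d):
        # Same formatting as _brokerStr above, outside the class.
        s = ""
        if d["user"] is not None and d["password"] is not None:
            s = "%s/%s@" % (d["user"], d["password"])
        return s + "%s:%d" % (d["host"], d["port"])

    print(broker_str({"user": "guest", "password": "guest",
                      "host": "localhost", "port": 5672}))  # guest/guest@localhost:5672
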
-
- def _qpidRoute(self, action):
- """Set up a route using qpid-route"""
- #print "%s %s" % (self._qpidRouteExec, action)
- ret = os.spawnl(os.P_WAIT, self._qpidRouteExec, self._qpidRouteExec, *action.split())
- if ret != 0:
- raise Exception("_qpidRoute(): action=\"%s\" returned %d" % (action, ret))
-
- def routeDynamicAdd(self, destBrokerDict, srcBrokerDict, exchangeName):
- self._qpidRoute("dynamic add %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName))
-
- def routeDynamicDelete(self, destBrokerDict, srcBrokerDict, exchangeName):
- self._qpidRoute("dynamic del %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName))
-
- def routeAdd(self, destBrokerDict, srcBrokerDict, exchangeName, routingKey):
- self._qpidRoute("route add %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, routingKey))
-
- def routeDelete(self, destBrokerDict, srcBrokerDict, exchangeName, routingKey):
- self._qpidRoute("route del %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, routingKey))
-
- def routeQueueAdd(self, destBrokerDict, srcBrokerDict, exchangeName, queueName):
- self._qpidRoute("queue add %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, queueName))
-
- def routeQueueDelete(self, destBrokerDict, srcBrokerDict, exchangeName, queueName):
- self._qpidRoute("queue del %s %s %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict), exchangeName, queueName))
-
- def routeLinkAdd(self, destBrokerDict, srcBrokerDict):
- self._qpidRoute("link add %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict)))
-
- def routeLinkDelete(self, destBrokerDict, srcBrokerDict):
- self._qpidRoute("link del %s %s" % (self._brokerStr(destBrokerDict), self._brokerStr(srcBrokerDict)))
-
- # --- Message send and receive functions ---
-
- def _receiver(self, action):
- if self._receiverExec == None:
- raise Exception("Environment variable RECEIVER is not set")
- cmd = "%s %s" % (self._receiverExec, action)
- #print cmd
- return subprocess.Popen(cmd.split(), stdout = subprocess.PIPE)
-
- def _sender(self, action):
- if self._senderExec == None:
- raise Exception("Environment variable SENDER is not set")
- cmd = "%s %s" % (self._senderExec, action)
- #print cmd
- return subprocess.Popen(cmd.split(), stdin = subprocess.PIPE)
-
- def createReceiver(self, nodeNumber, clusterName, queueName, numMsgs = None, receiverArgs = None):
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- action = "--port %d --queue %s" % (port, queueName)
- if numMsgs != None:
- action += " --messages %d" % numMsgs
- if receiverArgs != None:
- action += " %s" % receiverArgs
- return self._receiver(action)
-
- def createSender(self, nodeNumber, clusterName, exchangeName, routingKey, senderArgs = None):
- port = self.getNodeTuple(nodeNumber, clusterName)[self.PORT]
- action = "--port %d --exchange %s" % (port, exchangeName)
- if routingKey != None and len(routingKey) > 0:
- action += " --routing-key %s" % routingKey
- if self._storeEnable:
- action += " --durable yes"
- if senderArgs != None:
- action += " %s" % senderArgs
- return self._sender(action)
-
- def createBindDirectExchangeQueue(self, nodeNumber, clusterName, exchangeName, queueName):
- self.addExchange(nodeNumber, clusterName, "direct", exchangeName)
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, queueName)
-
- def createBindTopicExchangeQueues(self, nodeNumber, clusterName, exchangeName, queueNameKeyList):
- self.addExchange(nodeNumber, clusterName, "topic", exchangeName)
- for queueName, key in queueNameKeyList.iteritems():
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, key)
-
- def createBindFanoutExchangeQueues(self, nodeNumber, clusterName, exchangeName, queueNameList):
- self.addExchange(nodeNumber, clusterName, "fanout", exchangeName)
- for queueName in queueNameList:
- self.addQueue(nodeNumber, clusterName, queueName)
- self.bind(nodeNumber, clusterName, exchangeName, queueName, "")
-
- def sendMsgs(self, nodeNumber, clusterName, exchangeName, routingKey, numMsgs, msgSize = None, wait = True):
- msgs = self._makeMessageList(numMsgs, msgSize)
- sender = self.createSender(nodeNumber, clusterName, exchangeName, routingKey)
- sender.stdin.write(msgs)
- sender.stdin.close()
- if wait:
- sender.wait()
- return msgs
-
- def receiveMsgs(self, nodeNumber, clusterName, queueName, numMsgs, wait = True):
- receiver = self.createReceiver(nodeNumber, clusterName, queueName, numMsgs)
- cnt = 0
- msgs = ""
- while cnt < numMsgs:
- rx = receiver.stdout.readline()
- if rx == "" and receiver.poll() != None: break
- msgs += rx
- cnt = cnt + 1
- if wait:
- receiver.wait()
- return msgs
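
receiveMsgs drains up to numMsgs lines from the receiver's stdout, stopping early if the child exits. The same read loop as a standalone Python 3 sketch (the seq command is just a stand-in child process):

    import subprocess

    def read_lines(cmd, max_lines):
        # Read up to max_lines lines from a child process, breaking out early
        # once it has exited and produced no further output.
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, text=True)
        lines = []
        while len(lines) < max_lines:
            line = proc.stdout.readline()
            if line == "" and proc.poll() is not None:
                break
            lines.append(line.rstrip("\n"))
        proc.wait()
        return lines

    print(read_lines(["seq", "3"], 2))  # ['1', '2']
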
-
-
- # --- Exchange-specific helper inner classes ---
-
- class TestHelper:
- """
- This is a "virtual" superclass for test helpers, and is not useful on its own, but the
- per-exchange subclasses are designed to keep track of the messages sent to and received
- from queues which have bindings to that exchange type.
- """
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
-
- """Dictionary of queues and lists of messages sent to them."""
- self._txMsgs = {}
- """Dictionary of queues and lists of messages received from them."""
- self._rxMsgs = {}
- """List of node numbers currently in the cluster"""
- self._nodes = []
- """List of node numbers which have been killed and can therefore be recovered"""
- self._deadNodes = []
- """Last node to be used"""
- self._lastNode = None
-
- self._testBaseCluster = testBaseCluster
- self._clusterName = clusterName
- self._exchangeName = exchangeName
- self._queueNameList = queueNameList
- self._addQueues(queueNameList)
- self._testBaseCluster.createCheckCluster(clusterName, numNodes)
- self._nodes.extend(range(0, numNodes))
-
- def _addQueues(self, queueNameList):
- for qn in queueNameList:
- if not qn in self._txMsgs:
- self._txMsgs[qn] = []
- if not qn in self._rxMsgs:
- self._rxMsgs[qn] = []
-
- def _bindQueue(self, queueName, bindingKey, nodeNumber = None):
- """Bind a queue to an exchange using a binding key."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # first available node
- self._testBaseCluster.addQueue(nodeNumber, self._clusterName, queueName)
- self._testBaseCluster.bind(nodeNumber, self._clusterName, self._exchangeName, queueName, bindingKey)
-
- def _highestNodeNumber(self):
- """Find the highest node number used so far between the current nodes and those stopped/killed."""
- highestNode = self._nodes[-1]
- if len(self._deadNodes) == 0:
- return highestNode
- highestDeadNode = self._deadNodes[-1]
- if highestNode > highestDeadNode:
- return highestNode
- return highestDeadNode
-
- def killCluster(self):
- """Kill all nodes in the cluster"""
- self._testBaseCluster.killCluster(self._clusterName)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, 0)
- self._deadNodes.extend(self._nodes)
- self._deadNodes.sort()
- del self._nodes[:]
-
- def restoreCluster(self, lastNode = None, restoreNodes = True):
- """Restore a previously killed cluster"""
- self._testBaseCluster.createCluster(self._clusterName)
- if restoreNodes:
- numNodes = len(self._deadNodes)
- self.restoreNodes(lastNode)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, numNodes)
-
- def addNodes(self, numberOfNodes = 1):
- """Add a fixed number of nodes to the cluster."""
- nodeStart = self._highestNodeNumber() + 1
- for i in range(0, numberOfNodes):
- nodeNumber = nodeStart + i
- self._testBaseCluster.createClusterNode(nodeNumber, self._clusterName)
- self._nodes.append(nodeNumber)
- self._testBaseCluster.checkNumClusterBrokers(self._clusterName, len(self._nodes))
- self._testBaseCluster.waitForNodes(self._clusterName)
-
- def restoreNode(self, nodeNumber):
- """Restore a cluster node that has been previously killed"""
- if nodeNumber not in self._deadNodes:
- raise Exception("restoreNode(): Node number %d not in dead node list %s" % (nodeNumber, self._deadNodes))
- self._testBaseCluster.createClusterNode(nodeNumber, self._clusterName)
- self._deadNodes.remove(nodeNumber)
- self._nodes.append(nodeNumber)
- self._nodes.sort()
-
- def restoreNodes(self, lastNode = None):
- """Restore all known cluster nodes that have been previously killed starting with a known last-used node"""
- if len(self._nodes) == 0: # restore last-used node first
- if lastNode == None:
- lastNode = self._lastNode
- self.restoreNode(lastNode)
- while len(self._deadNodes) > 0:
- self.restoreNode(self._deadNodes[0])
- self._testBaseCluster.waitForNodes(self._clusterName)
-
- def killNode(self, nodeNumber):
- """Kill a cluster node (if it is in the _nodes list)."""
- if nodeNumber not in self._nodes:
- raise Exception("killNode(): Node number %d not in node list %s" % (nodeNumber, self._nodes))
- self._testBaseCluster.killNode(nodeNumber, self._clusterName)
- self._nodes.remove(nodeNumber)
- self._deadNodes.append(nodeNumber)
- self._deadNodes.sort()
-
- def sendMsgs(self, routingKey, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- """Send a fixed number of messages using the given routing key."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # Use first available node
- msgs = self._testBaseCluster._makeMessageList(numMsgs, msgSize)
- sender = self._testBaseCluster.createSender(nodeNumber, self._clusterName, self._exchangeName, routingKey)
- sender.stdin.write(msgs)
- sender.stdin.close()
- if wait:
- sender.wait()
- self._lastNode = nodeNumber
- return msgs.split()
-
- # TODO - this interface is messy: a single numMsgs is given along with a list
- # of queues, so numMsgs is assumed to apply to each queue. A mechanism is
- # needed to specify a different numMsgs per queue.
- def receiveMsgs(self, numMsgs, nodeNumber = None, queueNameList = None, wait = True):
- """Receive a fixed number of messages from a named queue. If numMsgs == None, get all remaining messages."""
- if nodeNumber == None:
- nodeNumber = self._nodes[0] # Use first available node
- if queueNameList == None:
- queueNameList = self._txMsgs.iterkeys()
- for qn in queueNameList:
- nm = numMsgs
- if nm == None:
- nm = len(self._txMsgs[qn]) - len(self._rxMsgs[qn]) # get all remaining messages
- if nm > 0:
- while nm > 0:
- receiver = self._testBaseCluster.createReceiver(nodeNumber, self._clusterName, qn, nm)
- cnt = 0
- while cnt < nm:
- rx = receiver.stdout.readline().strip()
- if rx == "":
- if receiver.poll() != None: break
- elif rx not in self._rxMsgs[qn]:
- self._rxMsgs[qn].append(rx)
- cnt = cnt + 1
- nm = nm - cnt
- if wait:
- receiver.wait()
- self._rxMsgs[qn].sort()
- self._lastNode = nodeNumber
-
- def receiveRemainingMsgs(self, nodeNumber = None, queueNameList = None, wait = True):
- """Receive all remaining messages on named queue."""
- self.receiveMsgs(None, nodeNumber, queueNameList, wait)
-
- def checkMsgs(self):
- """Return True if all expected messages have been received (ie the transmit and receive list are identical)."""
- txMsgTot = 0
- rxMsgTot = 0
- for qn, txMsgList in self._txMsgs.iteritems():
- rxMsgList = self._rxMsgs[qn]
- txMsgTot = txMsgTot + len(txMsgList)
- rxMsgTot = rxMsgTot + len(rxMsgList)
- if len(txMsgList) != len(rxMsgList):
- return False
- for i, m in enumerate(txMsgList):
- if m != rxMsgList[i]:
- return False
- if txMsgTot == 0 and rxMsgTot == 0:
- print "WARNING: No messages were either sent or received"
- return True
-
- def finalizeTest(self):
- """Recover all the remaining messages on all queues, then check that all expected messages were received."""
- self.receiveRemainingMsgs()
- self._testBaseCluster.stopAllCheck()
- if not self.checkMsgs():
- self.printMsgs()
- self._testBaseCluster.fail("Send - receive message mismatch")
-
- def printMsgs(self, txMsgs = True, rxMsgs = True):
- """Print all messages transmitted and received."""
- for qn, txMsgList in self._txMsgs.iteritems():
- print "Queue: %s" % qn
- if txMsgs:
- print " txMsgList = %s" % txMsgList
- if rxMsgs:
- rxMsgList = self._rxMsgs[qn]
- print " rxMsgList = %s" % rxMsgList
-
-
- class DirectExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList)
- self._testBaseCluster.addExchange(0, clusterName, "direct", exchangeName)
- for qn in queueNameList:
- self._bindQueue(qn, qn)
-
- def addQueues(self, queueNameList):
- self._addQueues(queueNameList)
- for qn in queueNameList:
- self._bindQueue(qn, qn)
-
- def sendMsgs(self, numMsgs, nodeNumber = None, queueNameList = None, msgSize = None, wait = True):
- if queueNameList == None:
- queueNameList = self._txMsgs.iterkeys()
- for qn in queueNameList:
- self._txMsgs[qn].extend(TestBaseCluster.TestHelper.sendMsgs(self, qn, numMsgs, nodeNumber, msgSize, wait))
-
-
- class TopicExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameKeyList):
- self._queueNameKeyList = queueNameKeyList
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameKeyList.iterkeys())
- self._testBaseCluster.addExchange(0, clusterName, "topic", exchangeName)
- for qn, bk in queueNameKeyList.iteritems():
- self._bindQueue(qn, bk)
-
- def addQueues(self, queueNameKeyList):
- self._addQueues(queueNameKeyList.iterkeys())
- for qn, bk in queueNameKeyList.iteritems():
- self._bindQueue(qn, bk)
-
- def _prepareRegex(self, bk):
- # This regex conversion is not very complete - there are other chars that should be escaped too
- return "^%s$" % bk.replace(".", r"\.").replace("*", r"[^.]*").replace("#", ".*")
-
- def sendMsgs(self, routingKey, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- msgList = TestBaseCluster.TestHelper.sendMsgs(self, routingKey, numMsgs, nodeNumber, msgSize, wait)
- for qn, bk in self._queueNameKeyList.iteritems():
- if re.match(self._prepareRegex(bk), routingKey):
- self._txMsgs[qn].extend(msgList)
-
-
- class FanoutExchangeTestHelper(TestHelper):
-
- def __init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList):
- TestBaseCluster.TestHelper.__init__(self, testBaseCluster, clusterName, numNodes, exchangeName, queueNameList)
- self._testBaseCluster.addExchange(0, clusterName, "fanout", exchangeName)
- for qn in queueNameList:
- self._bindQueue(qn, "")
-
- def addQueues(self, queueNameList):
- self._addQueues(queueNameList)
- for qn in queueNameList:
- self._bindQueue(qn, "")
-
- def sendMsgs(self, numMsgs, nodeNumber = None, msgSize = None, wait = True):
- msgList = TestBaseCluster.TestHelper.sendMsgs(self, "", numMsgs, nodeNumber, msgSize, wait)
- for ml in self._txMsgs.itervalues():
- ml.extend(msgList)
-
diff --git a/cpp/src/tests/verify_cluster_objects b/cpp/src/tests/verify_cluster_objects
deleted file mode 100755
index 94661cf6b9..0000000000
--- a/cpp/src/tests/verify_cluster_objects
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env python
-
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-# Verify management objects are consistent in a cluster.
-# Arguments: url of one broker in the cluster.
-
-import qmf.console, sys, re
-
-class Session(qmf.console.Session):
- """A qmf.console.Session that caches useful values"""
-
- def __init__(self):
- qmf.console.Session.__init__(self)
- self.classes = None
-
- def all_classes(self):
- if self.classes is None:
- self.classes = [c for p in self.getPackages() for c in self.getClasses(p)]
- return self.classes
-
-class Broker:
- def __init__(self, url, qmf):
- self.url = url
- self.qmf = qmf
- self.broker = self.qmf.addBroker(url)
- self.broker._waitForStable()
- self.objects = None
- self.ignore_list = [ re.compile("org.apache.qpid.broker:system:") ]
-
- def get_objects(self):
- def ignore(name):
- for m in self.ignore_list:
- if m.match(name): return True
- if self.objects is None:
- obj_list = []
- ignored=0
- for c in self.qmf.all_classes():
- for o in self.qmf.getObjects(_key=c, _broker=self.broker):
- name=o.getObjectId().getObject()
- if not ignore(name): obj_list.append(name)
- else: ignored += 1
- self.objects = set(obj_list)
- if (len(obj_list) != len(self.objects)):
- raise Exception("Duplicates in object list for %s"%(self.url))
- print "%d objects on %s, ignored %d."%(len(self.objects), self.url, ignored)
- return self.objects
-
- def compare(self,other):
- def compare1(x,y):
- diff = x.get_objects() - y.get_objects()
- if diff:
- print "ERROR: found on %s but not %s"%(x, y)
- for o in diff: print " %s"%(o)
- return False
- return True
-
- so = compare1(self, other)
- os = compare1(other, self)
- return so and os
-
- def __str__(self): return self.url
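
compare() runs the set difference both ways because either broker may hold objects the other lacks. A minimal illustration with invented object names:

    a = {"q1", "q2", "org.apache.qpid.broker:exchange:amq.topic"}
    b = {"q1", "org.apache.qpid.broker:exchange:amq.topic"}
    only_a, only_b = a - b, b - a
    print(sorted(only_a), sorted(only_b))  # ['q2'] [] -> clusters differ
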
-
- def get_cluster(self):
- """Given one Broker, return list of all brokers in its cluster"""
- clusters = self.qmf.getObjects(_class="cluster")
- if not clusters: raise Exception("%s is not a cluster member" % self.url)
- def first_address(url):
- """Python doesn't understand the brokers URL syntax. Extract a simple addres"""
- return re.compile("amqp:tcp:([^,]*)").match(url).group(1)
- return [Broker(first_address(url), self.qmf)
- for url in clusters[0].members.split(";")]
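
first_address extracts one host:port from a multi-member URL with the regex shown; for example (URL invented):

    import re

    def first_address(url):
        # Take the first "amqp:tcp:host:port" entry of a comma-separated URL.
        return re.compile("amqp:tcp:([^,]*)").match(url).group(1)

    print(first_address("amqp:tcp:10.0.0.1:5672,tcp:10.0.0.2:5672"))  # 10.0.0.1:5672
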
-
- def __del__(self): self.qmf.delBroker(self.broker)
-
-def main(argv=None):
- if argv is None: argv = sys.argv
- qmf = Session()
- brokers = Broker(argv[1], qmf).get_cluster()
- print "%d members in cluster."%(len(brokers))
- base = brokers.pop(0)
- try:
- for b in brokers:
- if not base.compare(b): return 1
- print "No differences."
- return 0
- finally:
- del base
- del brokers
-
-if __name__ == "__main__": sys.exit(main())