author     Alan Conway <aconway@apache.org>    2013-06-21 15:02:08 +0000
committer  Alan Conway <aconway@apache.org>    2013-06-21 15:02:08 +0000
commit     8bdb080ef1f4afb1727dc3fc5f2666bdfd982107 (patch)
tree       db7893cba9090fe6a3078d61bef5aa68fc2adfdc /qpid/cpp/src/tests
parent     08a1ae9c049d1faf75608e9a2ef43024653e304b (diff)
download   qpid-python-8bdb080ef1f4afb1727dc3fc5f2666bdfd982107.tar.gz
QPID-4944: HA Sporadic failure in ha_tests: test_failover_send_receive and test_expected_backup_timeout
Very sporadic failures, so the fix is difficult to verify.
- Simplified Membership: centralized status changes and made them atomic.
- Fixed a test bug in test_expected_backup_timeout: it did not wait on the final status check (a race).
- Removed out-of-date status info from log prefixes: Guard, ReplicatingSubscription.

git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1495466 13f79535-47bb-0310-9956-ffa450edef68
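The broker-side part of the fix routes every status change through one centralized, locked update. A minimal Python sketch of the idea (the real Membership class is C++ inside the broker; the names set_status()/status() below are illustrative assumptions, not the actual API):

    import threading

    class Membership(object):
        """Holds the broker's HA status; all changes go through set_status()."""
        def __init__(self, initial="joining"):
            self._lock = threading.Lock()
            self._status = initial
            self._listeners = []              # callbacks notified on each change

        def set_status(self, new_status):
            """Single, atomic entry point for status changes."""
            with self._lock:
                old = self._status
                self._status = new_status
                listeners = list(self._listeners)
            for cb in listeners:              # notify outside the lock
                cb(old, new_status)

        def status(self):
            with self._lock:
                return self._status

Because readers and writers share one lock, a status observed by a test (or by another broker) is always a state the broker actually passed through, which makes the sporadic races easier to reason about.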
Diffstat (limited to 'qpid/cpp/src/tests')
-rwxr-xr-x  qpid/cpp/src/tests/ha_test.py   2
-rwxr-xr-x  qpid/cpp/src/tests/ha_tests.py  9
2 files changed, 4 insertions(+), 7 deletions(-)
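The ha_tests.py hunks below replace one-shot status assertions with waits. For context, here is a hedged sketch of the wait-until-status pattern they rely on; wait_status() and ha_status() are real helpers in the test harness (they appear in the diff), but this polling implementation is only an assumed illustration, not the actual ha_test.py code:

    import time

    def wait_status(broker, expected, timeout=5.0, interval=0.1):
        """Poll broker.ha_status() until it equals 'expected' or the timeout expires."""
        deadline = time.time() + timeout
        status = broker.ha_status()           # query the broker's current HA status
        while status != expected:
            if time.time() >= deadline:
                raise AssertionError("timed out waiting for status %r, last saw %r"
                                     % (expected, status))
            time.sleep(interval)
            status = broker.ha_status()

    # Racy (old):   self.assertEqual(cluster[2].ha_status(), "recovering")
    # Robust (new): cluster[2].wait_status("recovering")

A single ha_status() read can run before the broker finishes an asynchronous transition, which is exactly the race the commit message describes; polling until the expected status appears (or failing after a timeout) removes it.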
diff --git a/qpid/cpp/src/tests/ha_test.py b/qpid/cpp/src/tests/ha_test.py
index f3c1d3a957..f2fc50054f 100755
--- a/qpid/cpp/src/tests/ha_test.py
+++ b/qpid/cpp/src/tests/ha_test.py
@@ -107,7 +107,7 @@ class HaBroker(Broker):
ha_port = ha_port or HaPort(test)
args = copy(args)
args += ["--load-module", BrokerTest.ha_lib,
- "--log-enable=trace+:ha::", # FIXME aconway 2013-06-14: debug+
+ "--log-enable=debug+:ha::",
# Non-standard settings for faster tests.
"--link-maintenance-interval=0.1",
# Heartbeat and negotiate time are needed so that a broker wont
diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py
index 60e3444c45..368ac02506 100755
--- a/qpid/cpp/src/tests/ha_tests.py
+++ b/qpid/cpp/src/tests/ha_tests.py
@@ -1121,13 +1121,10 @@ class RecoveryTests(HaBrokerTest):
but can still rejoin.
"""
cluster = HaCluster(self, 3, args=["--ha-backup-timeout=0.5"]);
- cluster[0].wait_status("active") # Primary ready
- for b in cluster[1:3]: b.wait_status("ready") # Backups ready
for i in [0,1]: cluster.kill(i, False)
- cluster[2].promote() # New primary, expected backup will 1
- cluster[2].wait_status("recovering")
+ cluster[2].promote() # New primary, expected backup will be 1
# Should not go active till the expected backup connects or times out.
- self.assertEqual(cluster[2].ha_status(), "recovering")
+ cluster[2].wait_status("recovering")
# Messages should be held till expected backup times out
s = cluster[2].connect().session().sender("q;{create:always}")
s.send("foo", sync=False)
@@ -1135,7 +1132,7 @@ class RecoveryTests(HaBrokerTest):
try: s.sync(timeout=.01); self.fail("Expected Timeout exception")
except Timeout: pass
s.sync(timeout=1) # And released after the timeout.
- self.assertEqual(cluster[2].ha_status(), "active")
+ cluster[2].wait_status("active")
def test_join_ready_cluster(self):
"""If we join a cluster where the primary is dead, the new primary is