summaryrefslogtreecommitdiff
path: root/qpid/cpp/src/tests
diff options
context:
space:
mode:
authorAlan Conway <aconway@apache.org>2012-10-02 18:41:41 +0000
committerAlan Conway <aconway@apache.org>2012-10-02 18:41:41 +0000
commitb45b6daf6121e3f069de938439e1e37ff0cf7cc1 (patch)
tree90e8934452fa96d1b473dc73fdfca36f0d155490 /qpid/cpp/src/tests
parent91dfa1fd49a50a3dae9d12ae5f6b08c711428683 (diff)
downloadqpid-python-b45b6daf6121e3f069de938439e1e37ff0cf7cc1.tar.gz
QPID-4285: HA backups continuously disconnect / re-sync after attempting to replicate a deleted queue. (Based on patch by Jason Dillama)
This does not directly tackle the origin of the problem but extends Jason's patch since it addresses something we had to fix anyway: "leaking" queues and exchanges. It does 2 things. 1. Enables hideDeletedError on all subscription objects used by HA. This suppresses the troublesome exception with a harmless no-op. 2. Deletes queues/exchanges missing from responses (based on Jason's patch). Fixes the "leak" of queues and exchanges possible when an object replicated to a backup is deleted from the new primary before the backup connects. git-svn-id: https://svn.apache.org/repos/asf/qpid/trunk@1393089 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'qpid/cpp/src/tests')
-rwxr-xr-xqpid/cpp/src/tests/ha_test.py2
-rwxr-xr-xqpid/cpp/src/tests/ha_tests.py24
2 files changed, 26 insertions, 0 deletions
diff --git a/qpid/cpp/src/tests/ha_test.py b/qpid/cpp/src/tests/ha_test.py
index 2f9d9a1211..79db67e3c8 100755
--- a/qpid/cpp/src/tests/ha_test.py
+++ b/qpid/cpp/src/tests/ha_test.py
@@ -209,6 +209,7 @@ class HaCluster(object):
def start(self, update_urls=True, args=[]):
"""Start a new broker in the cluster"""
b = HaBroker(self.test, name=self.next_name(), **self.kwargs)
+ b.ready()
self._brokers.append(b)
if update_urls: self.update_urls()
return b
@@ -235,6 +236,7 @@ class HaCluster(object):
self._brokers[i] = HaBroker(
self.test, name=b.name, port=b.port(), brokers_url=self.url,
**self.kwargs)
+ self._brokers[i].ready()
def bounce(self, i, promote_next=True):
"""Stop and restart a broker in a cluster."""
diff --git a/qpid/cpp/src/tests/ha_tests.py b/qpid/cpp/src/tests/ha_tests.py
index 3c43c6a914..86f33d8030 100755
--- a/qpid/cpp/src/tests/ha_tests.py
+++ b/qpid/cpp/src/tests/ha_tests.py
@@ -624,6 +624,30 @@ acl deny all all
actual = [m.content for m in primary.get_messages("pq", 100)]
self.assertEqual(expect, actual)
+ def test_delete_missing_response(self):
+ """Check that a backup correctly deletes leftover queues and exchanges that are
+ missing from the initial response set."""
+ cluster = HaCluster(self,2)
+ s = cluster[0].connect().session()
+ s.sender("q1;{create:always}")
+ s.sender("q2;{create:always}")
+ s.sender("e1;{create:always, node:{type:topic}}")
+ s.sender("e2;{create:always, node:{type:topic}}")
+ cluster.bounce(0, promote_next=False)
+ # Fake a primary that has deleted some queues and exchanges.
+ s = cluster[0].connect_admin().session()
+ s.sender("q2;{create:always}")
+ s.sender("e2;{create:always, node:{type:topic}}")
+ s.sender("x;{create:always}") # A new queue so we can wait for the update.
+ cluster[0].promote()
+ # Verify the backup has deleted the missing queues and exchanges
+ cluster[1].wait_status("ready")
+ s = cluster[1].connect_admin().session()
+ cluster[1].wait_backup("x");
+ self.assertRaises(NotFound, s.receiver, ("q1"));
+ self.assertRaises(NotFound, s.receiver, ("e1"));
+
+
def fairshare(msgs, limit, levels):
"""
Generator to return prioritised messages in expected order for a given fairshare limit