summaryrefslogtreecommitdiff
path: root/qpid/tests/src/py/qpid_tests
diff options
context:
space:
mode:
Diffstat (limited to 'qpid/tests/src/py/qpid_tests')
-rw-r--r--qpid/tests/src/py/qpid_tests/__init__.py22
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/__init__.py39
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/alternate_exchange.py351
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/broker.py93
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py790
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/example.py95
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/exchange.py558
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/extensions.py87
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py122
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/management.py726
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/message.py1108
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/msg_groups.py1195
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/new_api.py358
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/persistence.py68
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/priority.py252
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/qmf_events.py83
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/query.py247
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/queue.py436
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/stats.py519
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/threshold.py212
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_10/tx.py265
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/__init__.py22
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/basic.py441
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/broker.py120
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/example.py94
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/exchange.py349
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/queue.py255
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py66
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_8/tx.py209
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_9/__init__.py22
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_9/echo.py159
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_9/messageheader.py61
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_9/query.py224
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_0_9/queue.py148
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/__init__.py26
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/general.py71
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/legacy_exchanges.py96
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/selector.py73
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/translation.py87
-rw-r--r--qpid/tests/src/py/qpid_tests/broker_1_0/tx.py264
-rwxr-xr-xqpid/tests/src/py/qpid_tests/client/client-api-example-tests.py338
-rw-r--r--qpid/tests/src/py/qpid_tests/client/log4j.conf25
42 files changed, 10776 insertions, 0 deletions
diff --git a/qpid/tests/src/py/qpid_tests/__init__.py b/qpid/tests/src/py/qpid_tests/__init__.py
new file mode 100644
index 0000000000..7b522f59af
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/__init__.py
@@ -0,0 +1,22 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import broker_0_10, broker_0_9, broker_0_8
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/__init__.py b/qpid/tests/src/py/qpid_tests/broker_0_10/__init__.py
new file mode 100644
index 0000000000..312dc22645
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/__init__.py
@@ -0,0 +1,39 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from alternate_exchange import *
+from broker import *
+from dtx import *
+from example import *
+from exchange import *
+from management import *
+from message import *
+from query import *
+from queue import *
+from tx import *
+from lvq import *
+from priority import *
+from threshold import *
+from extensions import *
+from msg_groups import *
+from new_api import *
+from stats import *
+from qmf_events import *
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/alternate_exchange.py b/qpid/tests/src/py/qpid_tests/broker_0_10/alternate_exchange.py
new file mode 100644
index 0000000000..2e2d5de13a
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/alternate_exchange.py
@@ -0,0 +1,351 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import traceback
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+from qpid.session import SessionException
+
+class AlternateExchangeTests(TestBase010):
+ """
+ Tests for the new mechanism for message returns introduced in 0-10
+ and available in 0-9 for preview
+ """
+
+ def test_unroutable(self):
+ """
+ Test that unroutable messages are delivered to the alternate-exchange if specified
+ """
+ session = self.session
+ #create an exchange with an alternate defined
+ session.exchange_declare(exchange="secondary", type="fanout")
+ session.exchange_declare(exchange="primary", type="direct", alternate_exchange="secondary")
+
+ #declare, bind (to the alternate exchange) and consume from a queue for 'returned' messages
+ session.queue_declare(queue="returns", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="returns", exchange="secondary")
+ session.message_subscribe(destination="a", queue="returns")
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ returned = session.incoming("a")
+
+ #declare, bind (to the primary exchange) and consume from a queue for 'processed' messages
+ session.queue_declare(queue="processed", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="processed", exchange="primary", binding_key="my-key")
+ session.message_subscribe(destination="b", queue="processed")
+ session.message_flow(destination="b", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="b", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ processed = session.incoming("b")
+
+ #publish to the primary exchange
+ #...one message that makes it to the 'processed' queue:
+ dp=self.session.delivery_properties(routing_key="my-key")
+ session.message_transfer(destination="primary", message=Message(dp, "Good"))
+ #...and one that does not:
+ dp=self.session.delivery_properties(routing_key="unused-key")
+ session.message_transfer(destination="primary", message=Message(dp, "Bad"))
+
+ #delete the exchanges
+ session.exchange_delete(exchange="primary")
+ session.exchange_delete(exchange="secondary")
+
+ #verify behaviour
+ self.assertEqual("Good", processed.get(timeout=1).body)
+ self.assertEqual("Bad", returned.get(timeout=1).body)
+ self.assertEmpty(processed)
+ self.assertEmpty(returned)
+
+ def test_queue_delete(self):
+ """
+ Test that messages in a queue being deleted are delivered to the alternate-exchange if specified
+ """
+ session = self.session
+ #set up a 'dead letter queue':
+ dlq = self.setup_dlq()
+
+ #create a queue using the dlq as its alternate exchange:
+ session.queue_declare(queue="delete-me", alternate_exchange="dlq")
+ #send it some messages:
+ dp=self.session.delivery_properties(routing_key="delete-me")
+ session.message_transfer(message=Message(dp, "One"))
+ session.message_transfer(message=Message(dp, "Two"))
+ session.message_transfer(message=Message(dp, "Three"))
+ #delete it:
+ session.queue_delete(queue="delete-me")
+ #delete the dlq exchange:
+ session.exchange_delete(exchange="dlq")
+
+ #check the messages were delivered to the dlq:
+ self.assertEqual("One", dlq.get(timeout=1).body)
+ self.assertEqual("Two", dlq.get(timeout=1).body)
+ self.assertEqual("Three", dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+
+ def test_delete_while_used_by_queue(self):
+ """
+ Ensure an exchange still in use as an alternate-exchange for a
+ queue can't be deleted
+ """
+ session = self.session
+ session.exchange_declare(exchange="alternate", type="fanout")
+
+ session2 = self.conn.session("alternate", 2)
+ session2.queue_declare(queue="q", alternate_exchange="alternate")
+ try:
+ session2.exchange_delete(exchange="alternate")
+ self.fail("Expected deletion of in-use alternate-exchange to fail")
+ except SessionException, e:
+ session = self.session
+ session.queue_delete(queue="q")
+ session.exchange_delete(exchange="alternate")
+ self.assertEquals(530, e.args[0].error_code)
+
+
+ def test_delete_while_used_by_exchange(self):
+ """
+ Ensure an exchange still in use as an alternate-exchange for
+ another exchange can't be deleted
+ """
+ session = self.session
+ session.exchange_declare(exchange="alternate", type="fanout")
+
+ session = self.conn.session("alternate", 2)
+ session.exchange_declare(exchange="e", type="fanout", alternate_exchange="alternate")
+ try:
+ session.exchange_delete(exchange="alternate")
+ self.fail("Expected deletion of in-use alternate-exchange to fail")
+ except SessionException, e:
+ session = self.session
+ session.exchange_delete(exchange="e")
+ session.exchange_delete(exchange="alternate")
+ self.assertEquals(530, e.args[0].error_code)
+
+
+ def test_modify_existing_exchange_alternate(self):
+ """
+ Ensure that attempting to modify an exchange to change
+ the alternate throws an exception
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="alt2", type="direct")
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt1")
+ try:
+ # attempt to change the alternate on an already existing exchange
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt2")
+ self.fail("Expected changing an alternate on an existing exchange to fail")
+ except SessionException, e:
+ self.assertEquals(530, e.args[0].error_code)
+ session = self.conn.session("alternate", 2)
+ session.exchange_delete(exchange="onealternate")
+ session.exchange_delete(exchange="alt2")
+ session.exchange_delete(exchange="alt1")
+
+
+ def test_add_alternate_to_exchange(self):
+ """
+ Ensure that attempting to modify an exchange by adding
+ an alternate throws an exception
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="noalternate", type="fanout")
+ try:
+ # attempt to add an alternate on an already existing exchange
+ session.exchange_declare(exchange="noalternate", type="fanout", alternate_exchange="alt1")
+ self.fail("Expected adding an alternate on an existing exchange to fail")
+ except SessionException, e:
+ self.assertEquals(530, e.args[0].error_code)
+ session = self.conn.session("alternate", 2)
+ session.exchange_delete(exchange="noalternate")
+ session.exchange_delete(exchange="alt1")
+
+
+ def test_del_alternate_to_exchange(self):
+ """
+ Ensure that attempting to modify an exchange by declaring
+ it again without an alternate does nothing
+ """
+ session = self.session
+ session.exchange_declare(exchange="alt1", type="direct")
+ session.exchange_declare(exchange="onealternate", type="fanout", alternate_exchange="alt1")
+ # attempt to re-declare without an alternate - silently ignore
+ session.exchange_declare(exchange="onealternate", type="fanout" )
+ session.exchange_delete(exchange="onealternate")
+ session.exchange_delete(exchange="alt1")
+
+ def test_queue_autodelete(self):
+ """
+ Test that messages in a queue being auto-deleted are delivered
+ to the alternate-exchange if specified, including messages
+ that are acquired but not accepted
+ """
+ session = self.session
+ #set up a 'dead letter queue':
+ session.exchange_declare(exchange="dlq", type="fanout")
+ session.queue_declare(queue="deleted", exclusive=True, auto_delete=True)
+ session.exchange_bind(exchange="dlq", queue="deleted")
+ session.message_subscribe(destination="dlq", queue="deleted")
+ session.message_flow(destination="dlq", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="dlq", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ dlq = session.incoming("dlq")
+
+ #on a separate session, create an auto-deleted queue using the
+ #dlq as its alternate exchange (handling of auto-delete is
+ #different for exclusive and non-exclusive queues, so test
+ #both modes):
+ for mode in [True, False]:
+ session2 = self.conn.session("another-session")
+ session2.queue_declare(queue="my-queue", alternate_exchange="dlq", exclusive=mode, auto_delete=True)
+ #send it some messages:
+ dp=session2.delivery_properties(routing_key="my-queue")
+ session2.message_transfer(message=Message(dp, "One"))
+ session2.message_transfer(message=Message(dp, "Two"))
+ session2.message_transfer(message=Message(dp, "Three"))
+ session2.message_subscribe(destination="incoming", queue="my-queue")
+ session2.message_flow(destination="incoming", unit=session.credit_unit.message, value=1)
+ session2.message_flow(destination="incoming", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ self.assertEqual("One", session2.incoming("incoming").get(timeout=1).body)
+ session2.close()
+
+ #check the messages were delivered to the dlq:
+ self.assertEqual("One", dlq.get(timeout=1).body)
+ self.assertEqual("Two", dlq.get(timeout=1).body)
+ self.assertEqual("Three", dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+
+ def test_queue_delete_loop(self):
+ """
+ Test that if a queue is bound to its own alternate exchange,
+ then on deletion there is no infinite looping
+ """
+ session = self.session
+ dlq = self.setup_dlq()
+
+ #create a queue using the dlq as its alternate exchange:
+ session.queue_declare(queue="delete-me", alternate_exchange="dlq")
+ #bind that queue to the dlq as well:
+ session.exchange_bind(exchange="dlq", queue="delete-me")
+ #send it some messages:
+ dp=self.session.delivery_properties(routing_key="delete-me")
+ for m in ["One", "Two", "Three"]:
+ session.message_transfer(message=Message(dp, m))
+ #delete it:
+ session.queue_delete(queue="delete-me")
+ #cleanup:
+ session.exchange_delete(exchange="dlq")
+
+ #check the messages were delivered to the dlq:
+ for m in ["One", "Two", "Three"]:
+ self.assertEqual(m, dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+
+ def test_queue_delete_no_match(self):
+ """
+ Test that on queue deletion, if the queue's own alternate
+ exchange cannot find a match for the message, the
+ alternate-exchange of that exchange will be tried. Note:
+ though the spec rules out going to the alternate-exchange's
+ alternate exchange when sending to an exchange, it does not
+ cover this case.
+ """
+ session = self.session
+ dlq = self.setup_dlq()
+
+ #set up an 'intermediary' exchange
+ session.exchange_declare(exchange="my-exchange", type="direct", alternate_exchange="dlq")
+
+ #create a queue using the intermediary as its alternate exchange:
+ session.queue_declare(queue="delete-me", alternate_exchange="my-exchange")
+ #bind that queue to the dlq as well:
+ session.exchange_bind(exchange="dlq", queue="delete-me")
+ #send it some messages:
+ dp=self.session.delivery_properties(routing_key="delete-me")
+ for m in ["One", "Two", "Three"]:
+ session.message_transfer(message=Message(dp, m))
+
+ #delete it:
+ session.queue_delete(queue="delete-me")
+ #cleanup:
+ session.exchange_delete(exchange="my-exchange")
+ session.exchange_delete(exchange="dlq")
+
+ #check the messages were delivered to the dlq:
+ for m in ["One", "Two", "Three"]:
+ self.assertEqual(m, dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+
+ def test_reject_no_match(self):
+ """
+ Test that on rejecting a message, if the queue's own alternate
+ exchange cannot find a match for the message, the
+ alternate-exchange of that exchange will be tried. Note:
+ though the spec rules out going to the alternate-exchange's
+ alternate exchange when sending to an exchange, it does not
+ cover this case.
+ """
+ session = self.session
+ dlq = self.setup_dlq()
+
+ #set up an 'intermediary' exchange
+ session.exchange_declare(exchange="my-exchange", type="direct", alternate_exchange="dlq")
+
+ #create a queue using the intermediary as its alternate exchange:
+ session.queue_declare(queue="delivery-queue", alternate_exchange="my-exchange", auto_delete=True)
+ #send it some messages:
+ dp=self.session.delivery_properties(routing_key="delivery-queue")
+ for m in ["One", "Two", "Three"]:
+ session.message_transfer(message=Message(dp, m))
+
+ #get and reject those messages:
+ session.message_subscribe(destination="a", queue="delivery-queue")
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ incoming = session.incoming("a")
+ for m in ["One", "Two", "Three"]:
+ msg = incoming.get(timeout=1)
+ self.assertEqual(m, msg.body)
+ session.message_reject(RangedSet(msg.id))
+ session.message_cancel(destination="a")
+
+ #check the messages were delivered to the dlq:
+ for m in ["One", "Two", "Three"]:
+ self.assertEqual(m, dlq.get(timeout=1).body)
+ self.assertEmpty(dlq)
+ #cleanup:
+ session.exchange_delete(exchange="my-exchange")
+ session.exchange_delete(exchange="dlq")
+
+ def setup_dlq(self):
+ session = self.session
+ #set up 'dead-letter' handling:
+ session.exchange_declare(exchange="dlq", type="fanout")
+ session.queue_declare(queue="deleted", exclusive=True, auto_delete=True)
+ session.exchange_bind(exchange="dlq", queue="deleted")
+ session.message_subscribe(destination="dlq", queue="deleted")
+ session.message_flow(destination="dlq", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="dlq", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ dlq = session.incoming("dlq")
+ return dlq
+
+ def assertEmpty(self, queue):
+ try:
+ msg = queue.get(timeout=1)
+ self.fail("Queue not empty: " + str(msg))
+ except Empty: None
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/broker.py b/qpid/tests/src/py/qpid_tests/broker_0_10/broker.py
new file mode 100644
index 0000000000..81d723e322
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/broker.py
@@ -0,0 +1,93 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message, RangedSet
+
+class BrokerTests(TestBase010):
+ """Tests for basic Broker functionality"""
+
+ def test_ack_and_no_ack(self):
+ """
+ First, this test tries to receive a message with a no-ack
+ consumer. Second, this test tries to explicitly receive and
+ acknowledge a message with an acknowledging consumer.
+ """
+ session = self.session
+ session.queue_declare(queue = "myqueue", exclusive=True, auto_delete=True)
+
+ # No ack consumer
+ ctag = "tag1"
+ session.message_subscribe(queue = "myqueue", destination = ctag)
+ session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ body = "test no-ack"
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="myqueue"), body))
+ msg = session.incoming(ctag).get(timeout = 5)
+ self.assert_(msg.body == body)
+
+ # Acknowledging consumer
+ session.queue_declare(queue = "otherqueue", exclusive=True, auto_delete=True)
+ ctag = "tag2"
+ session.message_subscribe(queue = "otherqueue", destination = ctag, accept_mode = 1)
+ session.message_flow(destination=ctag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=ctag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ body = "test ack"
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="otherqueue"), body))
+ msg = session.incoming(ctag).get(timeout = 5)
+ session.message_accept(RangedSet(msg.id))
+ self.assert_(msg.body == body)
+
+ def test_simple_delivery_immediate(self):
+ """
+ Test simple message delivery where consume is issued before publish
+ """
+ session = self.session
+ session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="test-queue", exchange="amq.fanout")
+ consumer_tag = "tag1"
+ session.message_subscribe(queue="test-queue", destination=consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
+ queue = session.incoming(consumer_tag)
+
+ body = "Immediate Delivery"
+ session.message_transfer("amq.fanout", None, None, Message(body))
+ msg = queue.get(timeout=5)
+ self.assert_(msg.body == body)
+
+ def test_simple_delivery_queued(self):
+ """
+ Test basic message delivery where publish is issued before consume
+ (i.e. requires queueing of the message)
+ """
+ session = self.session
+ session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="test-queue", exchange="amq.fanout")
+ body = "Queued Delivery"
+ session.message_transfer("amq.fanout", None, None, Message(body))
+
+ consumer_tag = "tag1"
+ session.message_subscribe(queue="test-queue", destination=consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
+ queue = session.incoming(consumer_tag)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.body == body)
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py b/qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py
new file mode 100644
index 0000000000..a9619bcdb8
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/dtx.py
@@ -0,0 +1,790 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.session import SessionException
+from qpid.testlib import TestBase010
+from qpid.compat import set
+from struct import pack, unpack
+from time import sleep
+
+class DtxTests(TestBase010):
+ """
+ Tests for the amqp dtx related classes.
+
+ Tests of the form test_simple_xxx test the basic transactional
+ behaviour. The approach here is to 'swap' a message from one queue
+ to another by consuming and re-publishing in the same
+ transaction. That transaction is then completed in different ways
+ and the appropriate result verified.
+
+ The other tests enforce more specific rules and behaviour on a
+ per-method or per-field basis.
+ """
+
+ XA_RBROLLBACK = 1
+ XA_RBTIMEOUT = 2
+ XA_OK = 0
+ tx_counter = 0
+
+ def reset_channel(self):
+ self.session.close()
+ self.session = self.conn.session("dtx-session", 1)
+
+ def test_simple_commit(self):
+ """
+ Test basic one-phase commit behaviour.
+ """
+ guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+ session = self.session
+ tx = self.xid("my-xid")
+ self.txswap(tx, "commit")
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #commit
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=tx, one_phase=True).status)
+
+ #should close and reopen session to ensure no unacked messages are held
+ self.reset_channel()
+
+ #check result
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(1, "queue-b")
+ self.assertMessageId("commit", "queue-b")
+
+ def test_simple_prepare_commit(self):
+ """
+ Test basic two-phase commit behaviour.
+ """
+ guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+ session = self.session
+ tx = self.xid("my-xid")
+ self.txswap(tx, "prepare-commit")
+
+ #prepare
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=tx).status)
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #commit
+ self.assertEqual(self.XA_OK, session.dtx_commit(xid=tx, one_phase=False).status)
+
+ self.reset_channel()
+
+ #check result
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(1, "queue-b")
+ self.assertMessageId("prepare-commit", "queue-b")
+
+
+ def test_simple_rollback(self):
+ """
+ Test basic rollback behaviour.
+ """
+ guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+ session = self.session
+ tx = self.xid("my-xid")
+ self.txswap(tx, "rollback")
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #rollback
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
+ self.reset_channel()
+
+ #check result
+ self.assertMessageCount(1, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+ self.assertMessageId("rollback", "queue-a")
+
+ def test_simple_prepare_rollback(self):
+ """
+ Test basic rollback behaviour after the transaction has been prepared.
+ """
+ guard = self.keepQueuesAlive(["queue-a", "queue-b"])
+ session = self.session
+ tx = self.xid("my-xid")
+ self.txswap(tx, "prepare-rollback")
+
+ #prepare
+ self.assertEqual(self.XA_OK, session.dtx_prepare(xid=tx).status)
+
+ #neither queue should have any messages accessible
+ self.assertMessageCount(0, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+
+ #rollback
+ self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
+ self.reset_channel()
+
+ #check result
+ self.assertMessageCount(1, "queue-a")
+ self.assertMessageCount(0, "queue-b")
+ self.assertMessageId("prepare-rollback", "queue-a")
+
+ def test_select_required(self):
+ """
+ check that an error is flagged if select is not issued before
+ start or end
+ """
+ session = self.session
+ tx = self.xid("dummy")
+ try:
+ session.dtx_start(xid=tx)
+
+ #if we get here we have failed, but need to do some cleanup:
+ session.dtx_end(xid=tx)
+ session.dtx_rollback(xid=tx)
+ self.fail("Session not selected for use with dtx, expected exception!")
+ except SessionException, e:
+ self.assertEquals(503, e.args[0].error_code)
+
+ def test_start_already_known(self):
+ """
+ Verify that an attempt to start an association with a
+ transaction that is already known is not allowed (unless the
+ join flag is set).
+ """
+ #create two sessions on different connection & select them for use with dtx:
+ session1 = self.session
+ session1.dtx_select()
+
+ other = self.connect()
+ session2 = other.session("other", 0)
+ session2.dtx_select()
+
+ #create a xid
+ tx = self.xid("dummy")
+ #start work on one session under that xid:
+ session1.dtx_start(xid=tx)
+ #then start on the other without the join set
+ failed = False
+ try:
+ session2.dtx_start(xid=tx)
+ except SessionException, e:
+ failed = True
+ error = e
+
+ #cleanup:
+ if not failed:
+ session2.dtx_end(xid=tx)
+ other.close()
+ session1.dtx_end(xid=tx)
+ session1.dtx_rollback(xid=tx)
+
+ #verification:
+ if failed: self.assertEquals(530, error.args[0].error_code)
+ else: self.fail("Xid already known, expected exception!")
+
+ def test_forget_xid_on_completion(self):
+ """
+ Verify that a xid is 'forgotten' - and can therefore be used
+ again - once it is completed.
+ """
+ #do some transactional work & complete the transaction
+ self.test_simple_commit()
+ # session has been reset, so reselect for use with dtx
+ self.session.dtx_select()
+
+ #start association for the same xid as the previously completed txn
+ tx = self.xid("my-xid")
+ self.session.dtx_start(xid=tx)
+ self.session.dtx_end(xid=tx)
+ self.session.dtx_rollback(xid=tx)
+
+ def test_start_join_and_resume(self):
+ """
+ Ensure the correct error is signalled when both the join and
+ resume flags are set on starting an association between a
+ session and a transaction.
+ """
+ session = self.session
+ session.dtx_select()
+ tx = self.xid("dummy")
+ try:
+ session.dtx_start(xid=tx, join=True, resume=True)
+ #failed, but need some cleanup:
+ session.dtx_end(xid=tx)
+ session.dtx_rollback(xid=tx)
+ self.fail("Join and resume both set, expected exception!")
+ except SessionException, e:
+ self.assertEquals(503, e.args[0].error_code)
+
    def test_start_join(self):
        """
        Verify 'join' behaviour, where a session is associated with a
        transaction that is already associated with another session.
        """
        #keep the auto-delete queues alive across session churn:
        guard = self.keepQueuesAlive(["one", "two"])
        #create two sessions & select them for use with dtx:
        session1 = self.session
        session1.dtx_select()

        session2 = self.conn.session("second", 2)
        session2.dtx_select()

        #setup
        session1.queue_declare(queue="one", auto_delete=True)
        session1.queue_declare(queue="two", auto_delete=True)
        session1.message_transfer(self.createMessage(session1, "one", "a", "DtxMessage"))
        session1.message_transfer(self.createMessage(session1, "two", "b", "DtxMessage"))

        #create a xid
        tx = self.xid("dummy")
        #start work on one session under that xid:
        session1.dtx_start(xid=tx)
        #then start on the other with the join flag set
        session2.dtx_start(xid=tx, join=True)

        #do work through each session
        self.swap(session1, "one", "two")#swap 'a' from 'one' to 'two'
        self.swap(session2, "two", "one")#swap 'b' from 'two' to 'one'

        #mark end on both sessions
        session1.dtx_end(xid=tx)
        session2.dtx_end(xid=tx)

        #commit and check: work done on both sessions must be atomic
        #under the single joined xid
        session1.dtx_commit(xid=tx, one_phase=True)
        self.assertMessageCount(1, "one")
        self.assertMessageCount(1, "two")
        self.assertMessageId("a", "two")
        self.assertMessageId("b", "one")
+
+
    def test_suspend_resume(self):
        """
        Test suspension and resumption of an association
        """
        session = self.session
        session.dtx_select()

        #setup
        session.queue_declare(queue="one", exclusive=True, auto_delete=True)
        session.queue_declare(queue="two", exclusive=True, auto_delete=True)
        session.message_transfer(self.createMessage(session, "one", "a", "DtxMessage"))
        session.message_transfer(self.createMessage(session, "two", "b", "DtxMessage"))

        tx = self.xid("dummy")

        session.dtx_start(xid=tx)
        self.swap(session, "one", "two")#swap 'a' from 'one' to 'two'
        #suspend (rather than terminate) the association:
        session.dtx_end(xid=tx, suspend=True)

        #resume the suspended association and do more work under it:
        session.dtx_start(xid=tx, resume=True)
        self.swap(session, "two", "one")#swap 'b' from 'two' to 'one'
        session.dtx_end(xid=tx)

        #commit and check: work before and after suspension is atomic
        session.dtx_commit(xid=tx, one_phase=True)
        self.assertMessageCount(1, "one")
        self.assertMessageCount(1, "two")
        self.assertMessageId("a", "two")
        self.assertMessageId("b", "one")
+
    def test_suspend_start_end_resume(self):
        """
        Test suspension and resumption of an association with work
        done on another transaction when the first transaction is
        suspended
        """
        # NOTE(review): despite the docstring, no second transaction is
        # started while the first is suspended - the body is currently
        # identical to test_suspend_resume. Confirm the intended
        # start/end of another xid during suspension.
        session = self.session
        session.dtx_select()

        #setup
        session.queue_declare(queue="one", exclusive=True, auto_delete=True)
        session.queue_declare(queue="two", exclusive=True, auto_delete=True)
        session.message_transfer(self.createMessage(session, "one", "a", "DtxMessage"))
        session.message_transfer(self.createMessage(session, "two", "b", "DtxMessage"))

        tx = self.xid("dummy")

        session.dtx_start(xid=tx)
        self.swap(session, "one", "two")#swap 'a' from 'one' to 'two'
        session.dtx_end(xid=tx, suspend=True)

        session.dtx_start(xid=tx, resume=True)
        self.swap(session, "two", "one")#swap 'b' from 'two' to 'one'
        session.dtx_end(xid=tx)

        #commit and check
        session.dtx_commit(xid=tx, one_phase=True)
        self.assertMessageCount(1, "one")
        self.assertMessageCount(1, "two")
        self.assertMessageId("a", "two")
        self.assertMessageId("b", "one")
+
    def test_end_suspend_and_fail(self):
        """
        Verify that the correct error (command-invalid, 503) is
        signalled if the suspend and fail flags are both set when
        disassociating a transaction from the session
        """
        session = self.session
        session.dtx_select()
        tx = self.xid("suspend_and_fail")
        session.dtx_start(xid=tx)
        try:
            session.dtx_end(xid=tx, suspend=True, fail=True)
            self.fail("Suspend and fail both set, expected exception!")
        except SessionException, e:
            self.assertEquals(503, e.args[0].error_code)

        #cleanup: the failed session is unusable, so roll the
        #transaction back from a fresh connection
        other = self.connect()
        session = other.session("cleanup", 1)
        session.dtx_rollback(xid=tx)
        session.close()
        other.close()
+
+
    def test_end_unknown_xid(self):
        """
        Verifies that the correct exception (not-found, 409/illegal
        state as signalled by the broker) is thrown when an attempt is
        made to end the association for a xid not previously
        associated with the session
        """
        session = self.session
        session.dtx_select()
        tx = self.xid("unknown-xid")
        try:
            session.dtx_end(xid=tx)
            self.fail("Attempted to end association with unknown xid, expected exception!")
        except SessionException, e:
            self.assertEquals(409, e.args[0].error_code)
+
    def test_end(self):
        """
        Verify that the association is terminated by end and subsequent
        operations are non-transactional
        """
        guard = self.keepQueuesAlive(["tx-queue"])
        session = self.conn.session("alternate", 1)

        #publish a message under a transaction
        session.dtx_select()
        tx = self.xid("dummy")
        session.dtx_start(xid=tx)
        session.message_transfer(self.createMessage(session, "tx-queue", "one", "DtxMessage"))
        session.dtx_end(xid=tx)

        #now that association with txn is ended, publish another message
        session.message_transfer(self.createMessage(session, "tx-queue", "two", "DtxMessage"))

        #check the second message is available, but not the first
        #(the first stays invisible until the txn commits)
        self.assertMessageCount(1, "tx-queue")
        self.subscribe(session, queue="tx-queue", destination="results")
        msg = session.incoming("results").get(timeout=1)
        self.assertEqual("two", self.getMessageProperty(msg, 'correlation_id'))
        session.message_cancel(destination="results")
        #ack the message then close the session
        session.message_accept(RangedSet(msg.id))
        session.close()

        session = self.session
        #commit the transaction and check that the first message (and
        #only the first message) is then delivered
        session.dtx_commit(xid=tx, one_phase=True)
        self.assertMessageCount(1, "tx-queue")
        self.assertMessageId("one", "tx-queue")
+
    def test_invalid_commit_one_phase_true(self):
        """
        Test that a commit with one_phase = True is rejected if the
        transaction in question has already been prepared.
        """
        other = self.connect()
        tester = other.session("tester", 1)
        tester.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
        tester.dtx_select()
        tx = self.xid("dummy")
        tester.dtx_start(xid=tx)
        tester.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
        tester.dtx_end(xid=tx)
        tester.dtx_prepare(xid=tx)
        failed = False
        try:
            #one_phase=True is only valid for an unprepared transaction:
            tester.dtx_commit(xid=tx, one_phase=True)
        except SessionException, e:
            failed = True
            error = e

        if failed:
            #tester session is dead; roll back via the main session
            self.session.dtx_rollback(xid=tx)
            self.assertEquals(409, error.args[0].error_code)
        else:
            tester.close()
            other.close()
            self.fail("Invalid use of one_phase=True, expected exception!")
+
    def test_invalid_commit_one_phase_false(self):
        """
        Test that a commit with one_phase = False is rejected if the
        transaction in question has not yet been prepared.
        """
        other = self.connect()
        tester = other.session("tester", 1)
        tester.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
        tester.dtx_select()
        tx = self.xid("dummy")
        tester.dtx_start(xid=tx)
        tester.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
        tester.dtx_end(xid=tx)
        failed = False
        try:
            #two-phase commit requires a prior prepare:
            tester.dtx_commit(xid=tx, one_phase=False)
        except SessionException, e:
            failed = True
            error = e

        if failed:
            #tester session is dead; roll back via the main session
            self.session.dtx_rollback(xid=tx)
            self.assertEquals(409, error.args[0].error_code)
        else:
            tester.close()
            other.close()
            self.fail("Invalid use of one_phase=False, expected exception!")
+
    def test_invalid_commit_not_ended(self):
        """
        Test that a commit fails if the xid is still associated with a session.
        """
        other = self.connect()
        tester = other.session("tester", 1)
        self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
        self.session.dtx_select()
        tx = self.xid("dummy")
        self.session.dtx_start(xid=tx)
        self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))

        failed = False
        try:
            #commit from another session while the xid is still
            #associated with self.session:
            tester.dtx_commit(xid=tx, one_phase=False)
        except SessionException, e:
            failed = True
            error = e

        if failed:
            self.session.dtx_end(xid=tx)
            self.session.dtx_rollback(xid=tx)
            self.assertEquals(409, error.args[0].error_code)
        else:
            tester.close()
            other.close()
            self.fail("Commit should fail as xid is still associated!")
+
    def test_invalid_rollback_not_ended(self):
        """
        Test that a rollback fails if the xid is still associated with a session.
        """
        other = self.connect()
        tester = other.session("tester", 1)
        self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
        self.session.dtx_select()
        tx = self.xid("dummy")
        self.session.dtx_start(xid=tx)
        self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))

        failed = False
        try:
            #rollback from another session while the xid is still
            #associated with self.session:
            tester.dtx_rollback(xid=tx)
        except SessionException, e:
            failed = True
            error = e

        if failed:
            self.session.dtx_end(xid=tx)
            self.session.dtx_rollback(xid=tx)
            self.assertEquals(409, error.args[0].error_code)
        else:
            tester.close()
            other.close()
            self.fail("Rollback should fail as xid is still associated!")
+
+
+ def test_invalid_prepare_not_ended(self):
+ """
+ Test that a prepare fails if the xid is still associated with a session.
+ """
+ other = self.connect()
+ tester = other.session("tester", 1)
+ self.session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
+ self.session.dtx_select()
+ tx = self.xid("dummy")
+ self.session.dtx_start(xid=tx)
+ self.session.message_transfer(self.createMessage(tester, "dummy", "dummy", "whatever"))
+
+ failed = False
+ try:
+ tester.dtx_prepare(xid=tx)
+ except SessionException, e:
+ failed = True
+ error = e
+
+ if failed:
+ self.session.dtx_end(xid=tx)
+ self.session.dtx_rollback(xid=tx)
+ self.assertEquals(409, error.args[0].error_code)
+ else:
+ tester.close()
+ other.close()
+ self.fail("Rollback should fail as xid is still associated!")
+
    def test_implicit_end(self):
        """
        Test that an association is implicitly ended when the session
        is closed (whether by exception or explicit client request)
        and the transaction in question is marked as rollback only.
        """
        session1 = self.session
        session2 = self.conn.session("other", 2)

        #setup:
        session2.queue_declare(queue="dummy", exclusive=True, auto_delete=True)
        session2.message_transfer(self.createMessage(session2, "dummy", "a", "whatever"))
        tx = self.xid("dummy")

        session2.dtx_select()
        session2.dtx_start(xid=tx)
        session2.message_subscribe(queue="dummy", destination="dummy")
        #credit for exactly one message, unlimited bytes:
        session2.message_flow(destination="dummy", unit=session2.credit_unit.message, value=1)
        session2.message_flow(destination="dummy", unit=session2.credit_unit.byte, value=0xFFFFFFFFL)
        msg = session2.incoming("dummy").get(timeout=1)
        session2.message_accept(RangedSet(msg.id))
        session2.message_cancel(destination="dummy")
        session2.message_transfer(self.createMessage(session2, "dummy", "b", "whatever"))
        #close the session with the association still active:
        session2.close()

        #the transaction must now be marked rollback-only:
        self.assertEqual(self.XA_RBROLLBACK, session1.dtx_prepare(xid=tx).status)
        session1.dtx_rollback(xid=tx)
+
    def test_get_timeout(self):
        """
        Check that get-timeout returns the correct value, (and that a
        transaction with a timeout can complete normally)
        """
        session = self.session
        tx = self.xid("dummy")

        session.dtx_select()
        session.dtx_start(xid=tx)
        # below test checks for default value of dtx-default-timeout broker option
        self.assertEqual(60, session.dtx_get_timeout(xid=tx).timeout)
        session.dtx_set_timeout(xid=tx, timeout=200)
        self.assertEqual(200, session.dtx_get_timeout(xid=tx).timeout)
        self.assertEqual(self.XA_OK, session.dtx_end(xid=tx).status)
        self.assertEqual(self.XA_OK, session.dtx_rollback(xid=tx).status)
+
    def test_set_timeout(self):
        """
        Test the timeout of a transaction results in the expected
        behaviour
        """

        guard = self.keepQueuesAlive(["queue-a", "queue-b"])
        #open new session to allow self.session to be used in checking the queue
        session = self.conn.session("worker", 1)
        #setup:
        tx = self.xid("dummy")
        session.queue_declare(queue="queue-a", auto_delete=True)
        session.queue_declare(queue="queue-b", auto_delete=True)
        session.message_transfer(self.createMessage(session, "queue-a", "timeout", "DtxMessage"))

        session.dtx_select()
        session.dtx_start(xid=tx)
        self.swap(session, "queue-a", "queue-b")
        session.dtx_set_timeout(xid=tx, timeout=2)
        #sleep past the 2s timeout so the broker expires the txn:
        sleep(3)
        #check that the work has been rolled back already
        self.assertMessageCount(1, "queue-a")
        self.assertMessageCount(0, "queue-b")
        self.assertMessageId("timeout", "queue-a")
        #check the correct codes are returned when we try to complete the txn
        self.assertEqual(self.XA_RBTIMEOUT, session.dtx_end(xid=tx).status)
        self.assertEqual(self.XA_RBTIMEOUT, session.dtx_rollback(xid=tx).status)
+
+ def test_set_timeout_too_high(self):
+ """
+ Test the timeout can't be more than --dtx-max-timeout
+ broker option
+ """
+ session = self.session
+ tx = self.xid("dummy")
+
+ session.dtx_select()
+ session.dtx_start(xid=tx)
+ try:
+ session.dtx_set_timeout(xid=tx, timeout=3601)
+ except SessionException, e:
+ self.assertEquals(542, e.args[0].error_code)
+
+
+
    def test_recover(self):
        """
        Test basic recover behaviour
        """
        session = self.session

        session.dtx_select()
        session.queue_declare(queue="dummy", exclusive=True, auto_delete=True)

        #prepare a subset of transactions, roll the rest back:
        prepared = []
        for i in range(1, 10):
            tx = self.xid("tx%s" % (i))
            session.dtx_start(xid=tx)
            session.message_transfer(self.createMessage(session, "dummy", "message%s" % (i), "message%s" % (i)))
            session.dtx_end(xid=tx)
            if i in [2, 5, 6, 8]:
                session.dtx_prepare(xid=tx)
                prepared.append(tx)
            else:
                session.dtx_rollback(xid=tx)

        #recover must report the in-doubt (prepared) transactions:
        xids = session.dtx_recover().in_doubt

        #rollback the prepared transactions returned by recover
        for x in xids:
            session.dtx_rollback(xid=x)

        #validate against the expected list of prepared transactions
        actual = set([x.global_id for x in xids]) #TODO: come up with nicer way to test these
        expected = set([x.global_id for x in prepared])
        intersection = actual.intersection(expected)

        if intersection != expected:
            missing = expected.difference(actual)
            extra = actual.difference(expected)
            self.fail("Recovered xids not as expected. missing: %s; extra: %s" % (missing, extra))
+
+ def test_bad_resume(self):
+ """
+ Test that a resume on a session not selected for use with dtx fails
+ """
+ session = self.session
+ try:
+ session.dtx_start(resume=True)
+ except SessionException, e:
+ self.assertEquals(503, e.args[0].error_code)
+
+ def test_prepare_unknown(self):
+ session = self.session
+ try:
+ session.dtx_prepare(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_commit_unknown(self):
+ session = self.session
+ try:
+ session.dtx_commit(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_rollback_unknown(self):
+ session = self.session
+ try:
+ session.dtx_rollback(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_get_timeout_unknown(self):
+ session = self.session
+ try:
+ session.dtx_get_timeout(xid=self.xid("unknown"))
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
    def xid(self, txid):
        """
        Build a xid with the given global id and a branch qualifier
        made unique by a class-level counter (so repeated tests using
        the same global id get distinct branches).
        """
        DtxTests.tx_counter += 1
        branchqual = "v%s" % DtxTests.tx_counter
        return self.session.xid(format=0, global_id=txid, branch_id=branchqual)
+
    def txswap(self, tx, id):
        """
        Start transaction tx, swap the message with the given
        correlation id from queue-a to queue-b under it, and end the
        association (leaving tx ready to prepare/commit/rollback).
        """
        session = self.session
        #declare two queues:
        session.queue_declare(queue="queue-a", auto_delete=True)
        session.queue_declare(queue="queue-b", auto_delete=True)

        #put message with specified id on one queue:
        dp=session.delivery_properties(routing_key="queue-a")
        mp=session.message_properties(correlation_id=id)
        session.message_transfer(message=Message(dp, mp, "DtxMessage"))

        #start the transaction:
        session.dtx_select()
        self.assertEqual(self.XA_OK, self.session.dtx_start(xid=tx).status)

        #'swap' the message from one queue to the other, under that transaction:
        self.swap(self.session, "queue-a", "queue-b")

        #mark the end of the transactional work:
        self.assertEqual(self.XA_OK, self.session.dtx_end(xid=tx).status)
+
    def swap(self, session, src, dest):
        """
        Move one message from queue src to queue dest on the given
        session, preserving its correlation id. Under an active dtx
        association both the consume and the publish are transactional.
        """
        #consume from src:
        session.message_subscribe(destination="temp-swap", queue=src)
        #credit for exactly one message, unlimited bytes:
        session.message_flow(destination="temp-swap", unit=session.credit_unit.message, value=1)
        session.message_flow(destination="temp-swap", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
        msg = session.incoming("temp-swap").get(timeout=1)
        session.message_cancel(destination="temp-swap")
        session.message_accept(RangedSet(msg.id))
        #todo: also complete at this point?

        #re-publish to dest:
        dp=session.delivery_properties(routing_key=dest)
        mp=session.message_properties(correlation_id=self.getMessageProperty(msg, 'correlation_id'))
        session.message_transfer(message=Message(dp, mp, msg.body))
+
+ def assertMessageCount(self, expected, queue):
+ self.assertEqual(expected, self.session.queue_query(queue=queue).message_count)
+
    def assertMessageId(self, expected, queue):
        """
        Assert that the next message on the named queue has the
        expected correlation id (consumes credit for one message only;
        the subscription is cancelled afterwards).
        """
        self.session.message_subscribe(queue=queue, destination="results")
        self.session.message_flow(destination="results", unit=self.session.credit_unit.message, value=1)
        self.session.message_flow(destination="results", unit=self.session.credit_unit.byte, value=0xFFFFFFFFL)
        self.assertEqual(expected, self.getMessageProperty(self.session.incoming("results").get(timeout=1), 'correlation_id'))
        self.session.message_cancel(destination="results")
+
+ def getMessageProperty(self, msg, prop):
+ for h in msg.headers:
+ if hasattr(h, prop): return getattr(h, prop)
+ return None
+
+ def keepQueuesAlive(self, names):
+ session = self.conn.session("nasty", 99)
+ for n in names:
+ session.queue_declare(queue=n, auto_delete=True)
+ session.message_subscribe(destination=n, queue=n)
+ return session
+
+ def createMessage(self, session, key, id, body):
+ dp=session.delivery_properties(routing_key=key)
+ mp=session.message_properties(correlation_id=id)
+ session.message_transfer(message=Message(dp, mp, body))
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/example.py b/qpid/tests/src/py/qpid_tests/broker_0_10/example.py
new file mode 100644
index 0000000000..e36907d501
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/example.py
@@ -0,0 +1,95 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+
class ExampleTest (TestBase010):
    """
    An example Qpid test, illustrating the unittest framework and the
    python Qpid client. The test class must inherit TestBase. The
    test code uses the Qpid client to interact with a qpid broker and
    verify it behaves as expected.
    """

    def test_example(self):
        """
        An example test. Note that test functions must start with 'test_'
        to be recognized by the test framework.
        """

        # By inheriting TestBase, self.client is automatically connected
        # and self.session is automatically opened as session(1)
        # Other session methods mimic the protocol.
        session = self.session

        # Now we can send regular commands. If you want to see what the method
        # arguments mean or what other commands are available, you can use the
        # python builtin help() method. For example:
        #help(session)
        #help(session.exchange_declare)

        # If you want browse the available protocol methods without being
        # connected to a live server you can use the amqp-doc utility:
        #
        #   Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
        #
        #   Options:
        #       -e, --regexp    use regex instead of glob when matching

        # Now that we know what commands are available we can use them to
        # interact with the server.

        # Here we use ordinal arguments.
        session.exchange_declare("test", "direct")

        # Here we use keyword arguments.
        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
        session.exchange_bind(queue="test-queue", exchange="test", binding_key="key")

        # Call Session.subscribe to register as a consumer.
        # All the protocol methods return a message object. The message object
        # has fields corresponding to the reply method fields, plus a content
        # field that is filled if the reply includes content. In this case the
        # interesting field is the consumer_tag.
        session.message_subscribe(queue="test-queue", destination="consumer_tag")
        session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
        session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)

        # We can use the session.incoming(...) method to access the messages
        # delivered for our consumer_tag.
        queue = session.incoming("consumer_tag")

        # Now lets publish a message and see if our consumer gets it. To do
        # this we need to import the Message class.
        delivery_properties = session.delivery_properties(routing_key="key")
        sent = Message(delivery_properties, "Hello World!")
        session.message_transfer(destination="test", message=sent)

        # Now we'll wait for the message to arrive. We can use the timeout
        # argument in case the server hangs. By default queue.get() will wait
        # until a message arrives or the connection to the server dies.
        msg = queue.get(timeout=10)

        # And check that we got the right response with assertEqual
        self.assertEqual(sent.body, msg.body)

        # Now acknowledge the message.
        session.message_accept(RangedSet(msg.id))
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/exchange.py b/qpid/tests/src/py/qpid_tests/broker_0_10/exchange.py
new file mode 100644
index 0000000000..916f9d8b85
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/exchange.py
@@ -0,0 +1,558 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for exchange behaviour.
+
+Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
+"""
+
+import Queue, logging, traceback
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message
+from qpid.client import Closed
+from qpid.session import SessionException
+
+
+class TestHelper(TestBase010):
+ def setUp(self):
+ TestBase010.setUp(self)
+ self.queues = []
+ self.exchanges = []
+ self.subscriptions = []
+
+ def tearDown(self):
+ try:
+ for s in self.subscriptions:
+ self.session.message_cancel(destination=s)
+ for ssn, q in self.queues:
+ ssn.queue_delete(queue=q)
+ for ssn, ex in self.exchanges:
+ ssn.exchange_delete(exchange=ex)
+ except:
+ print "Error on tearDown:"
+ print traceback.print_exc()
+ TestBase010.tearDown(self)
+
+ def createMessage(self, key="", body=""):
+ return Message(self.session.delivery_properties(routing_key=key), body)
+
+ def getApplicationHeaders(self, msg):
+ for h in msg.headers:
+ if hasattr(h, 'application_headers'): return getattr(h, 'application_headers')
+ return None
+
+ def assertPublishGet(self, queue, exchange="", routing_key="", properties=None):
+ """
+ Publish to exchange and assert queue.get() returns the same message.
+ """
+ body = self.uniqueString()
+ dp=self.session.delivery_properties(routing_key=routing_key)
+ mp=self.session.message_properties(application_headers=properties)
+ self.session.message_transfer(destination=exchange, message=Message(dp, mp, body))
+ msg = queue.get(timeout=1)
+ self.assertEqual(body, msg.body)
+ if (properties):
+ self.assertEqual(properties, self.getApplicationHeaders(msg))
+
+ def assertPublishConsume(self, queue="", exchange="", routing_key="", properties=None):
+ """
+ Publish a message and consume it, assert it comes back intact.
+ Return the Queue object used to consume.
+ """
+ self.assertPublishGet(self.consume(queue), exchange, routing_key, properties)
+
+ def assertEmpty(self, queue):
+ """Assert that the queue is empty"""
+ try:
+ queue.get(timeout=1)
+ self.fail("Queue is not empty.")
+ except Queue.Empty: None # Ignore
+
+ def queue_declare(self, session=None, *args, **keys):
+ session = session or self.session
+ reply = session.queue_declare(*args, **keys)
+ self.queues.append((session, keys["queue"]))
+ return reply
+
+ def exchange_declare(self, session=None, ticket=0, exchange='',
+ type='', passive=False, durable=False,
+ auto_delete=False,
+ arguments={}):
+ session = session or self.session
+ reply = session.exchange_declare(exchange=exchange, type=type, passive=passive, durable=durable, auto_delete=auto_delete, arguments=arguments)
+ if exchange and not exchange.startswith("amq."):
+ self.exchanges.append((session,exchange))
+ return reply
+
+ def uniqueString(self):
+ """Generate a unique string, unique for this TestBase instance"""
+ if not "uniqueCounter" in dir(self): self.uniqueCounter = 1;
+ return "Test Message " + str(self.uniqueCounter)
+
+ def consume(self, queueName):
+ """Consume from named queue returns the Queue object."""
+ if not "uniqueTag" in dir(self): self.uniqueTag = 1
+ else: self.uniqueTag += 1
+ consumer_tag = "tag" + str(self.uniqueTag)
+ self.session.message_subscribe(queue=queueName, destination=consumer_tag)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.message, value=0xFFFFFFFFL)
+ self.session.message_flow(destination=consumer_tag, unit=self.session.credit_unit.byte, value=0xFFFFFFFFL)
+ self.subscriptions.append(consumer_tag)
+ return self.session.incoming(consumer_tag)
+
+
class StandardExchangeVerifier:
    """Verifies standard exchange behavior.

    Used as base class for classes that test standard exchanges."""

    def verifyDirectExchange(self, ex, unbind=False):
        """Verify that ex behaves like a direct exchange."""
        self.queue_declare(queue="q")
        self.session.exchange_bind(queue="q", exchange=ex, binding_key="k")
        try:
            #exact key matches are delivered:
            self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
            try:
                #non-matching keys are dropped:
                self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
                self.fail("Expected Empty exception")
            except Queue.Empty: None # Expected
        finally:
            if unbind:
                self.session.exchange_unbind(queue="q", exchange=ex, binding_key="k")

    def verifyFanOutExchange(self, ex, unbind=False):
        """Verify that ex behaves like a fanout exchange."""
        self.queue_declare(queue="q")
        self.session.exchange_bind(queue="q", exchange=ex)
        self.queue_declare(queue="p")
        self.session.exchange_bind(queue="p", exchange=ex)
        try:
            #every bound queue receives every message:
            for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
        finally:
            if unbind:
                self.session.exchange_unbind(queue="q", exchange=ex, binding_key="")
                self.session.exchange_unbind(queue="p", exchange=ex, binding_key="")


    def verifyTopicExchange(self, ex, unbind=False):
        """Verify that ex behaves like a topic exchange"""
        self.queue_declare(queue="a")
        #'#' matches zero or more words, '*' exactly one word:
        self.session.exchange_bind(queue="a", exchange=ex, binding_key="a.#.b.*")
        try:
            q = self.consume("a")
            self.assertPublishGet(q, ex, "a.b.x")
            self.assertPublishGet(q, ex, "a.x.b.x")
            self.assertPublishGet(q, ex, "a.x.x.b.x")
            # Shouldn't match
            self.session.message_transfer(destination=ex, message=self.createMessage("a.b"))
            self.session.message_transfer(destination=ex, message=self.createMessage("a.b.x.y"))
            self.session.message_transfer(destination=ex, message=self.createMessage("x.a.b.x"))
            self.session.message_transfer(destination=ex, message=self.createMessage("a.b")) # (duplicate of the first non-matching publish - harmless)
            self.assert_(q.empty())
        finally:
            if unbind:
                self.session.exchange_unbind(queue="a", exchange=ex, binding_key="a.#.b.*")

    def verifyHeadersExchange(self, ex, unbind=False):
        """Verify that ex is a headers exchange"""
        self.queue_declare(queue="q")
        #x-match=all: every listed header must be present and equal
        self.session.exchange_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
        try:
            q = self.consume("q")
            headers = {"name":"fred", "age":3}
            self.assertPublishGet(q, exchange=ex, properties=headers)
            self.session.message_transfer(destination=ex) # No headers, won't deliver
            self.assertEmpty(q);
        finally:
            if unbind:
                self.session.exchange_unbind(queue="q", exchange=ex, binding_key="")
+
+
class RecommendedTypesRuleTests(TestHelper, StandardExchangeVerifier):
    """
    The server SHOULD implement these standard exchange types: topic, headers.

    Client attempts to declare an exchange with each of these standard types.
    """
    # fixed: the stray positional literal 0 previously passed to
    # exchange_declare landed in its 'session' parameter (first
    # positional after self) and only worked because 0 is falsy,
    # triggering the 'session or self.session' fallback.

    def testDirect(self):
        """Declare and test a direct exchange"""
        self.exchange_declare(exchange="d", type="direct")
        self.verifyDirectExchange("d")

    def testFanout(self):
        """Declare and test a fanout exchange"""
        self.exchange_declare(exchange="f", type="fanout")
        self.verifyFanOutExchange("f")

    def testTopic(self):
        """Declare and test a topic exchange"""
        self.exchange_declare(exchange="t", type="topic")
        self.verifyTopicExchange("t")

    def testHeaders(self):
        """Declare and test a headers exchange"""
        self.exchange_declare(exchange="h", type="headers")
        self.verifyHeadersExchange("h")
+
+
class RequiredInstancesRuleTests(TestHelper, StandardExchangeVerifier):
    """
    The server MUST, in each virtual host, pre-declare an exchange instance
    for each standard exchange type that it implements, where the name of the
    exchange instance is amq. followed by the exchange type name.

    Client creates a temporary queue and attempts to bind to each required
    exchange instance (amq.fanout, amq.direct, and amq.topic, amq.match if
    those types are defined).
    """

    def testAmqDirect(self):
        """amq.direct must exist and route as a direct exchange."""
        self.verifyDirectExchange("amq.direct")

    def testAmqFanOut(self):
        """amq.fanout must exist and route as a fanout exchange."""
        self.verifyFanOutExchange("amq.fanout")

    def testAmqTopic(self):
        """amq.topic must exist and route as a topic exchange."""
        self.verifyTopicExchange("amq.topic")

    def testAmqMatch(self):
        """amq.match must exist and route as a headers exchange."""
        self.verifyHeadersExchange("amq.match")
+
class DefaultExchangeRuleTests(TestHelper, StandardExchangeVerifier):
    """
    The server MUST predeclare a direct exchange to act as the default exchange
    for content Publish methods and for default queue bindings.

    Client checks that the default exchange is active by specifying a queue
    binding with no exchange name, and publishing a message with a suitable
    routing key but without specifying the exchange name, then ensuring that
    the message arrives in the queue correctly.
    """
    def testDefaultExchange(self):
        # Test automatic binding by queue name.
        self.queue_declare(queue="d")
        #publish with no exchange name; the default exchange routes by
        #queue name:
        self.assertPublishConsume(queue="d", routing_key="d")
+
+
+# TODO aconway 2006-09-27: Fill in empty tests:
+
+class DefaultAccessRuleTests(TestHelper):
+    """
+    The server MUST NOT allow clients to access the default exchange except
+    by specifying an empty exchange name in the Queue.Bind and content Publish
+    methods.
+    """
+    # TODO: no test implemented yet (see TODO above); related behaviour is
+    # partly exercised by MiscellaneousErrorsTests.testDefaultAccess* below.
+
+class ExtensionsRuleTests(TestHelper):
+    """
+    The server MAY implement other exchange types as wanted.
+    """
+    # TODO: placeholder -- no test implemented (MAY rules are optional).
+
+
+class DeclareMethodMinimumRuleTests(TestHelper):
+    """
+    The server SHOULD support a minimum of 16 exchanges per virtual host and
+    ideally, impose no limit except as defined by available resources.
+
+    The client creates as many exchanges as it can until the server reports
+    an error; the number of exchanges successfully created must be at least
+    sixteen.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeclareMethodTicketFieldValidityRuleTests(TestHelper):
+    """
+    The client MUST provide a valid access ticket giving "active" access to
+    the realm in which the exchange exists or will be created, or "passive"
+    access if the if-exists flag is set.
+
+    Client creates access ticket with wrong access rights and attempts to use
+    in this method.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeclareMethodExchangeFieldReservedRuleTests(TestHelper):
+    """
+    Exchange names starting with "amq." are reserved for predeclared and
+    standardised exchanges. The client MUST NOT attempt to create an exchange
+    starting with "amq.".
+
+    Similarly, exchanges starting with "qpid." are reserved for Qpid
+    implementation-specific system exchanges (such as the management exchange).
+    The client must not attempt to create an exchange starting with the string
+    "qpid.".
+    """
+    def template(self, reservedString, exchangeType):
+        """Declaring an exchange whose name starts with reservedString must
+        fail with not-allowed (530); names that merely contain the reserved
+        string (but do not start with it) must be accepted."""
+        try:
+            self.session.exchange_declare(exchange=reservedString, type=exchangeType)
+            self.fail("Expected not allowed error (530) for exchanges starting with \"" + reservedString + "\".")
+        except SessionException, e:
+            self.assertEquals(e.args[0].error_code, 530)
+        # connection closed, reopen it
+        self.tearDown()
+        self.setUp()
+        try:
+            self.session.exchange_declare(exchange=reservedString + "abc123", type=exchangeType)
+            self.fail("Expected not allowed error (530) for exchanges starting with \"" + reservedString + "\".")
+        except SessionException, e:
+            self.assertEquals(e.args[0].error_code, 530)
+        # connection closed, reopen it
+        self.tearDown()
+        self.setUp()
+        # The following should be legal: names that contain, but do not start
+        # with, the reserved prefix.
+        self.session.exchange_declare(exchange=reservedString[:-1], type=exchangeType)
+        self.session.exchange_delete(exchange=reservedString[:-1])
+        self.session.exchange_declare(exchange=reservedString[1:], type=exchangeType)
+        self.session.exchange_delete(exchange=reservedString[1:])
+        self.session.exchange_declare(exchange="." + reservedString, type=exchangeType)
+        self.session.exchange_delete(exchange="." + reservedString)
+        self.session.exchange_declare(exchange="abc." + reservedString, type=exchangeType)
+        self.session.exchange_delete(exchange="abc." + reservedString)
+        self.session.exchange_declare(exchange="abc." + reservedString + "def", type=exchangeType)
+        self.session.exchange_delete(exchange="abc." + reservedString + "def")
+
+    def test_amq(self):
+        # "amq." prefix is reserved regardless of exchange type.
+        self.template("amq.", "direct")
+        self.template("amq.", "topic")
+        self.template("amq.", "fanout")
+
+    def test_qpid(self):
+        # "qpid." prefix is reserved for broker-internal exchanges.
+        self.template("qpid.", "direct")
+        self.template("qpid.", "topic")
+        self.template("qpid.", "fanout")
+
+
+class DeclareMethodTypeFieldTypedRuleTests(TestHelper):
+    """
+    Exchanges cannot be redeclared with different types. The client MUST not
+    attempt to redeclare an existing exchange with a different type than used
+    in the original Exchange.Declare method.
+    """
+    # TODO: placeholder -- this rule is exercised by
+    # MiscellaneousErrorsTests.testDifferentDeclaredType below.
+
+
+class DeclareMethodTypeFieldSupportRuleTests(TestHelper):
+    """
+    The client MUST NOT attempt to create an exchange with a type that the
+    server does not support.
+    """
+    # TODO: placeholder -- related behaviour is exercised by
+    # MiscellaneousErrorsTests.testTypeNotKnown below.
+
+
+class DeclareMethodPassiveFieldNotFoundRuleTests(TestHelper):
+    """
+    If set, and the exchange does not already exist, the server MUST raise a
+    channel exception with reply code 404 (not found).
+    """
+    def test(self):
+        # Passive declare of a never-declared exchange must raise 404.
+        try:
+            self.session.exchange_declare(exchange="humpty_dumpty", passive=True)
+            self.fail("Expected 404 for passive declaration of unknown exchange.")
+        except SessionException, e:
+            self.assertEquals(404, e.args[0].error_code)
+
+
+class DeclareMethodDurableFieldSupportRuleTests(TestHelper):
+    """
+    The server MUST support both durable and transient exchanges.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeclareMethodDurableFieldStickyRuleTests(TestHelper):
+    """
+    The server MUST ignore the durable field if the exchange already exists.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeclareMethodAutoDeleteFieldStickyRuleTests(TestHelper):
+    """
+    The server MUST ignore the auto-delete field if the exchange already
+    exists.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeleteMethodTicketFieldValidityRuleTests(TestHelper):
+    """
+    The client MUST provide a valid access ticket giving "active" access
+    rights to the exchange's access realm.
+
+    Client creates access ticket with wrong access rights and attempts to use
+    in this method.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class DeleteMethodExchangeFieldExistsRuleTests(TestHelper):
+    """
+    The client MUST NOT attempt to delete an exchange that does not exist.
+    """
+    # TODO: placeholder -- no test implemented yet.
+
+
+class HeadersExchangeTests(TestHelper):
+    """
+    Tests for headers exchange functionality.
+    """
+    def setUp(self):
+        # Every test gets a fresh queue "q" with a consumer attached.
+        TestHelper.setUp(self)
+        self.queue_declare(queue="q")
+        self.q = self.consume("q")
+
+    def myAssertPublishGet(self, headers):
+        """Publish to amq.match with the given headers; assert delivery to q."""
+        self.assertPublishGet(self.q, exchange="amq.match", properties=headers)
+
+    def myBasicPublish(self, headers):
+        """Publish to amq.match with the given headers (no delivery check)."""
+        mp=self.session.message_properties(application_headers=headers)
+        self.session.message_transfer(destination="amq.match", message=Message(mp, "foobar"))
+
+    def testMatchAll(self):
+        # x-match=all: every header in the binding must be present and equal.
+        self.session.exchange_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
+
+        # None of these should match (missing header, wrong value, wrong type)
+        self.myBasicPublish({})
+        self.myBasicPublish({"name":"barney"})
+        self.myBasicPublish({"name":10})
+        self.myBasicPublish({"name":"fred", "age":2})
+        self.assertEmpty(self.q)
+
+    def testMatchAny(self):
+        # x-match=any: at least one binding header must be present and equal.
+        self.session.exchange_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred"})
+        self.myAssertPublishGet({"name":"fred", "ignoreme":10})
+        self.myAssertPublishGet({"ignoreme":10, "age":3})
+
+        # Won't match
+        self.myBasicPublish({})
+        self.myBasicPublish({"irrelevant":0})
+        self.assertEmpty(self.q)
+
+    def testMatchVoidValue(self):
+        # A None value in the binding matches on header presence alone.
+        self.session.exchange_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":None})
+        self.myAssertPublishGet({"name":"fred"})
+        self.myAssertPublishGet({"name":"bob"})
+
+        # Won't match
+        self.myBasicPublish({})
+        self.myBasicPublish({"irrelevant":0})
+        self.assertEmpty(self.q)
+
+    def testMultipleBindings(self):
+        # Two bindings with distinct binding keys; a message matching both
+        # must be delivered exactly once (queue empty after the single get).
+        self.session.exchange_bind(queue="q", exchange="amq.match", binding_key="SomeKey", arguments={ 'x-match':'any', "name":"fred"})
+        self.session.exchange_bind(queue="q", exchange="amq.match", binding_key="AnotherKey", arguments={ 'x-match':'all', "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3})
+        self.assertEmpty(self.q)
+
+
+class MiscellaneousErrorsTests(TestHelper):
+    """
+    Test some miscellaneous error conditions
+    """
+    def testTypeNotKnown(self):
+        # Unknown exchange type must be rejected with not-found (404).
+        try:
+            self.session.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
+            self.fail("Expected 404 for declaration of unknown exchange type.")
+        except SessionException, e:
+            self.assertEquals(404, e.args[0].error_code)
+
+    def testDifferentDeclaredType(self):
+        # Redeclaring an existing exchange with a different type must fail
+        # with not-allowed (530); a second session is used because the
+        # exception closes the session it occurs on.
+        self.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
+        try:
+            session = self.conn.session("alternate", 2)
+            session.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+            self.fail("Expected 530 for redeclaration of exchange with different type.")
+        except SessionException, e:
+            self.assertEquals(530, e.args[0].error_code)
+
+    def testReservedExchangeRedeclaredSameType(self):
+        # Passive redeclare of a reserved exchange with its real type is legal.
+        self.exchange_declare(exchange="amq.direct", type="direct", passive=True)
+
+    def testReservedExchangeRedeclaredDifferentType(self):
+        try:
+            self.exchange_declare(exchange="amq.direct", type="topic")
+            self.fail("Expected 530 for redeclaration of exchange with different type.")
+        except SessionException, e:
+            self.assertEquals(530, e.args[0].error_code)
+
+    def testDefaultAccessBind(self):
+        # Explicit bind to the default exchange (empty name) must be refused.
+        try:
+            self.session.queue_declare(queue="my-queue", auto_delete=True, exclusive=True)
+            self.session.exchange_bind(exchange="", queue="my-queue", binding_key="another-key")
+            self.fail("Expected 542 (invalid-argument) code for bind to default exchange.")
+        except SessionException, e:
+            self.assertEquals(542, e.args[0].error_code)
+
+    def testDefaultAccessUnbind(self):
+        # Unbinding from the default exchange must likewise be refused.
+        try:
+            self.session.queue_declare(queue="my-queue", auto_delete=True, exclusive=True)
+            self.session.exchange_unbind(exchange="", queue="my-queue", binding_key="my-queue")
+            self.fail("Expected 542 (invalid-argument) code for unbind from default exchange.")
+        except SessionException, e:
+            self.assertEquals(542, e.args[0].error_code)
+
+    def testDefaultAccessDelete(self):
+        # The default exchange cannot be deleted.
+        try:
+            self.session.exchange_delete(exchange="")
+            self.fail("Expected 542 (invalid-argument) code for delete of default exchange.")
+        except SessionException, e:
+            self.assertEquals(542, e.args[0].error_code)
+
+class ExchangeTests(TestHelper):
+    def testHeadersBindNoMatchArg(self):
+        """Binding to amq.match without the mandatory x-match argument must
+        fail (541, not-implemented/invalid per this broker's behaviour)."""
+        self.session.queue_declare(queue="q", exclusive=True, auto_delete=True)
+        try:
+            self.session.exchange_bind(queue="q", exchange="amq.match", arguments={"name":"fred" , "age":3} )
+            self.fail("Expected failure for missing x-match arg.")
+        except SessionException, e:
+            self.assertEquals(541, e.args[0].error_code)
+
+class AutodeleteTests(TestHelper, StandardExchangeVerifier):
+    """Verify that auto_delete exchanges disappear once the verifier has
+    removed its bindings (verify* helpers called with unbind=True)."""
+    def checkNotExists(self, e):
+        """Assert that exchange named e is gone: passive declare must 404."""
+        try:
+            s = self.conn.session("verifier")
+            s.exchange_declare(exchange=e, passive=True)
+            s.exchange_delete(exchange=e)
+            self.fail("Expected failure for passive declare of %s" % e)
+        except SessionException, e:
+            # NOTE(review): this except clause rebinds the parameter 'e' to
+            # the exception object (Python 2 'except X, e' syntax).
+            self.assertEquals(404, e.args[0].error_code)
+
+
+    def testAutodeleteFanout(self):
+        self.session.exchange_declare(exchange="e", type="fanout", auto_delete=True)
+        self.verifyFanOutExchange("e", unbind=True)
+        self.checkNotExists("e");
+
+    def testAutodeleteDirect(self):
+        self.session.exchange_declare(exchange="e", type="direct", auto_delete=True)
+        self.verifyDirectExchange("e", unbind=True)
+        self.checkNotExists("e");
+
+    def testAutodeleteTopic(self):
+        self.session.exchange_declare(exchange="e", type="topic", auto_delete=True)
+        self.verifyTopicExchange("e", unbind=True)
+        self.checkNotExists("e");
+
+    def testAutodeleteHeaders(self):
+        self.session.exchange_declare(exchange="e", type="headers", auto_delete=True)
+        self.verifyHeadersExchange("e", unbind=True)
+        self.checkNotExists("e");
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/extensions.py b/qpid/tests/src/py/qpid_tests/broker_0_10/extensions.py
new file mode 100644
index 0000000000..50c0aa3dd1
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/extensions.py
@@ -0,0 +1,87 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase010
+from qpid.session import SessionException
+from qpid.datatypes import uuid4
+from time import sleep
+
+class ExtensionTests(TestBase010):
+    """Tests for various extensions to AMQP 0-10"""
+
+    def test_timed_autodelete(self):
+        # With qpid.auto_delete_timeout=3 the exclusive queue must survive its
+        # owning session for ~3s, then be deleted; queried at 0s and after 5s.
+        session = self.session
+        session2 = self.conn.session("another-session")
+        name=str(uuid4())
+        session2.queue_declare(queue=name, exclusive=True, auto_delete=True, arguments={"qpid.auto_delete_timeout":3})
+        session2.close()
+        result = session.queue_query(queue=name)
+        self.assertEqual(name, result.queue)
+        sleep(5)
+        result = session.queue_query(queue=name)
+        self.assert_(not result.queue)
+
+    def valid_policy_args(self, args, name="test-queue"):
+        """Declare a queue with the given policy arguments and expect success;
+        replaces self.session afterwards so later calls get a clean session."""
+        try:
+            self.session.queue_declare(queue=name, arguments=args)
+            self.session.queue_delete(queue=name) # cleanup
+        except SessionException, e:
+            self.fail("declare with valid policy args failed: %s" % (args))
+        self.session = self.conn.session("replacement", 2)
+
+    def invalid_policy_args(self, args, name="test-queue"):
+        """Declare a queue with the given policy arguments and expect failure."""
+        # go through invalid declare attempts twice to make sure that
+        # the queue doesn't actually get created first time around
+        # even if exception is thrown
+        for i in range(1, 3):
+            try:
+                self.session.queue_declare(queue=name, arguments=args)
+                self.session.queue_delete(queue=name) # cleanup
+                self.fail("declare with invalid policy args suceeded: %s (iteration %d)" % (args, i))
+            except SessionException, e:
+                # the exception closed the session; open a fresh one
+                self.session = self.conn.session(str(uuid4()))
+
+    def test_policy_max_size_as_valid_string(self):
+        self.valid_policy_args({"qpid.max_size":"3"})
+
+    def test_policy_max_count_as_valid_string(self):
+        self.valid_policy_args({"qpid.max_count":"3"})
+
+    def test_policy_max_count_and_size_as_valid_strings(self):
+        self.valid_policy_args({"qpid.max_count":"3","qpid.max_size":"0"})
+
+    def test_policy_negative_count(self):
+        self.invalid_policy_args({"qpid.max_count":-1})
+
+    def test_policy_negative_size(self):
+        self.invalid_policy_args({"qpid.max_size":-1})
+
+    def test_policy_size_as_invalid_string(self):
+        self.invalid_policy_args({"qpid.max_size":"foo"})
+
+    def test_policy_count_as_invalid_string(self):
+        self.invalid_policy_args({"qpid.max_count":"foo"})
+
+    def test_policy_size_as_float(self):
+        self.invalid_policy_args({"qpid.max_size":3.14159})
+
+    def test_policy_count_as_float(self):
+        # NOTE(review): unlike test_policy_size_as_float this passes a string,
+        # not a float -- the name and the value disagree; confirm intent.
+        self.invalid_policy_args({"qpid.max_count":"2222222.22222"})
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py b/qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py
new file mode 100644
index 0000000000..07a8906fe7
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/lvq.py
@@ -0,0 +1,122 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import Base
+import math
+import random
+
+class LVQTests (Base):
+    """
+    Test last value queue behaviour
+    """
+
+    def setup_connection(self):
+        return Connection.establish(self.broker, **self.connection_options())
+
+    def setup_session(self):
+        return self.conn.session()
+
+    def test_simple(self):
+        # Messages are keyed by the "lvq-key" property; for each key only the
+        # most recent message should remain visible on the queue.
+        snd = self.ssn.sender("lvq; {create: sender, delete: sender, node: {x-declare:{arguments:{'qpid.last_value_queue_key':lvq-key}}}}",
+                              durable=self.durable())
+        snd.send(create_message("a", "a-1"))
+        snd.send(create_message("b", "b-1"))
+        snd.send(create_message("a", "a-2"))
+        snd.send(create_message("a", "a-3"))
+        snd.send(create_message("c", "c-1"))
+        snd.send(create_message("c", "c-2"))
+
+        rcv = self.ssn.receiver("lvq; {mode: browse}")
+        assert fetch_all(rcv) == ["b-1", "a-3", "c-2"]
+
+        snd.send(create_message("b", "b-2"))
+        assert fetch_all(rcv) == ["b-2"]
+
+        snd.send(create_message("c", "c-3"))
+        snd.send(create_message("d", "d-1"))
+        assert fetch_all(rcv) == ["c-3", "d-1"]
+
+        snd.send(create_message("b", "b-3"))
+        assert fetch_all(rcv) == ["b-3"]
+
+        rcv.close()
+        # A fresh browse sees the queue's full current state.
+        rcv = self.ssn.receiver("lvq; {mode: browse}")
+        assert (fetch_all(rcv) == ["a-3", "c-3", "d-1", "b-3"])
+
+    def check_ring_lvq(self, ring_size, keys, message_count):
+        """Send message_count randomly-keyed messages through an LVQ with ring
+        policy of size ring_size; verify each surviving message is the latest
+        for its key and the depth never exceeds the ring size."""
+        address = "lvq; {create: sender, delete: sender, node: {x-declare:{arguments:{'qpid.last_value_queue_key':lvq-key,'qpid.policy_type':'ring','qpid.max_count':%i}}}}" % ring_size
+        snd = self.ssn.sender(address, durable=self.durable())
+        counters = {}
+        for k in keys:
+            counters[k] = 0
+        messages = []
+        for i in range(message_count):
+            k = random.choice(keys)
+            counters[k] += 1
+            messages.append(create_message(k, "%s-%i" % (k, counters[k])))
+        # make sure we have sent at least one message for every key
+        for k, v in counters.iteritems():
+            if v == 0:
+                counters[k] += 1
+                messages.append(create_message(k, "%s-%i" % (k, counters[k])))
+
+        for m in messages:
+            snd.send(m)
+
+        rcv = self.ssn.receiver("lvq; {mode: browse}")
+        retrieved = fetch_all_as_tuples(rcv)
+        # NOTE(review): debug print left in -- shows on test stdout.
+        print [v for k, v in retrieved]
+
+        for k, v in retrieved:
+            assert v == "%s-%i" % (k, counters[k])
+        assert len(retrieved) <= ring_size
+
+    def test_ring_lvq1(self):
+        self.check_ring_lvq(25, ["a","b","c","d"], 50)
+
+    def test_ring_lvq2(self):
+        self.check_ring_lvq(5, ["a","b","c","d","e","f","g"], 50)
+
+    def test_ring_lvq3(self):
+        self.check_ring_lvq(49, ["a"], 50)
+
+def create_message(key, content):
+    """Build a Message with the given content, keyed for LVQ via "lvq-key"."""
+    msg = Message(content=content, properties={"lvq-key":key})
+    return msg
+
+def fetch_all(rcv):
+    """Drain receiver rcv without blocking; return the message contents in
+    arrival order."""
+    content = []
+    while True:
+        try:
+            content.append(rcv.fetch(0).content)
+        except Empty:
+            break
+    return content
+
+def fetch_all_as_tuples(rcv):
+    """Drain receiver rcv without blocking; return (lvq-key, content) tuples
+    in arrival order."""
+    content = []
+    while True:
+        try:
+            m = rcv.fetch(0)
+            k = m.properties["lvq-key"]
+            content.append((k, m.content))
+        except Empty:
+            break
+    return content
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/management.py b/qpid/tests/src/py/qpid_tests/broker_0_10/management.py
new file mode 100644
index 0000000000..751839291b
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/management.py
@@ -0,0 +1,726 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+from qpid.management import managementChannel, managementClient
+from threading import Condition
+from time import sleep
+import qmf.console
+import qpid.messaging
+from qpidtoollibs import BrokerAgent
+
+class ManagementTest (TestBase010):
+
+    def setup_access(self):
+        """Lazily create and cache a BrokerAgent over a second messaging
+        connection; returns the cached agent on subsequent calls."""
+        if 'broker_agent' not in self.__dict__:
+            self.conn2 = qpid.messaging.Connection(self.broker)
+            self.conn2.open()
+            self.broker_agent = BrokerAgent(self.conn2)
+        return self.broker_agent
+
+ """
+ Tests for the management hooks
+ """
+
+    def test_broker_connectivity_oldAPI (self):
+        """
+        Call the "echo" method on the broker to verify it is alive and talking,
+        using the old managementClient API.
+        """
+        session = self.session
+
+        mc = managementClient ()
+        mch = mc.addChannel (session)
+
+        mc.syncWaitForStable (mch)
+        brokers = mc.syncGetObjects (mch, "broker")
+        self.assertEqual (len (brokers), 1)
+        broker = brokers[0]
+        args = {}
+        body = "Echo Message Body"
+        args["body"] = body
+
+        # echo must round-trip both the sequence number and the body
+        for seq in range (1, 5):
+            args["sequence"] = seq
+            res = mc.syncCallMethod (mch, broker.id, broker.classKey, "echo", args)
+            self.assertEqual (res.status, 0)
+            self.assertEqual (res.statusText, "OK")
+            self.assertEqual (res.sequence, seq)
+            self.assertEqual (res.body, body)
+        mc.removeChannel (mch)
+
+    def test_methods_sync (self):
+        """
+        Call the "echo" method on the broker to verify it is alive and talking,
+        using the QMF console API.
+        """
+        session = self.session
+        self.startQmf()
+
+        brokers = self.qmf.getObjects(_class="broker")
+        self.assertEqual(len(brokers), 1)
+        broker = brokers[0]
+
+        body = "Echo Message Body"
+        for seq in range(1, 20):
+            res = broker.echo(seq, body)
+            self.assertEqual(res.status, 0)
+            self.assertEqual(res.text, "OK")
+            self.assertEqual(res.sequence, seq)
+            self.assertEqual(res.body, body)
+
+    def test_get_objects(self):
+        """QMF schema discovery: package list, class keys, and object lookup
+        both by class name and by schema key."""
+        self.startQmf()
+
+        # get the package list, verify that the qpid broker package is there
+        packages = self.qmf.getPackages()
+        assert 'org.apache.qpid.broker' in packages
+
+        # get the schema class keys for the broker, verify the broker table and link-down event
+        keys = self.qmf.getClasses('org.apache.qpid.broker')
+        broker = None
+        linkDown = None
+        for key in keys:
+            if key.getClassName() == "broker": broker = key
+            if key.getClassName() == "brokerLinkDown" : linkDown = key
+        assert broker
+        assert linkDown
+
+        # looking up by class name and by key must find the same single broker
+        brokerObjs = self.qmf.getObjects(_class="broker")
+        assert len(brokerObjs) == 1
+        brokerObjs = self.qmf.getObjects(_key=broker)
+        assert len(brokerObjs) == 1
+
+    def test_self_session_id (self):
+        """The QMF session used by this test must appear in the broker's own
+        session list (matched by session-name suffix)."""
+        self.startQmf()
+        sessionId = self.qmf_broker.getSessionId()
+        brokerSessions = self.qmf.getObjects(_class="session")
+
+        found = False
+        for bs in brokerSessions:
+            if bs.name.endswith(sessionId):
+                found = True
+        self.assertEqual (found, True)
+
+    def test_standard_exchanges (self):
+        """All pre-declared exchanges must be visible via QMF with the
+        expected types, including the default ("") and qpid.management."""
+        self.startQmf()
+
+        exchanges = self.qmf.getObjects(_class="exchange")
+        exchange = self.findExchange (exchanges, "")
+        self.assertEqual (exchange.type, "direct")
+        exchange = self.findExchange (exchanges, "amq.direct")
+        self.assertEqual (exchange.type, "direct")
+        exchange = self.findExchange (exchanges, "amq.topic")
+        self.assertEqual (exchange.type, "topic")
+        exchange = self.findExchange (exchanges, "amq.fanout")
+        self.assertEqual (exchange.type, "fanout")
+        exchange = self.findExchange (exchanges, "amq.match")
+        self.assertEqual (exchange.type, "headers")
+        exchange = self.findExchange (exchanges, "qpid.management")
+        self.assertEqual (exchange.type, "topic")
+
+    def findExchange (self, exchanges, name):
+        """Return the exchange object with the given name, or None."""
+        for exchange in exchanges:
+            if exchange.name == name:
+                return exchange
+        return None
+
+    def test_move_queued_messages_empty(self):
+        """
+        Test that moving messages from an empty queue does not cause an error.
+        """
+        self.startQmf()
+        session = self.session
+        "Set up source queue"
+        session.queue_declare(queue="src-queue-empty", exclusive=True, auto_delete=True)
+
+        "Set up destination queue"
+        session.queue_declare(queue="dest-queue-empty", exclusive=True, auto_delete=True)
+
+        queues = self.qmf.getObjects(_class="queue")
+
+        "Move all messages from src-queue-empty to dest-queue-empty"
+        # qty=0 means "move all"; must succeed (status 0) even with nothing to move
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue-empty", "dest-queue-empty", 0, {})
+        self.assertEqual (result.status, 0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue-empty")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue-empty")[0]
+
+        self.assertEqual (sq.msgDepth,0)
+        self.assertEqual (dq.msgDepth,0)
+
+    def test_move_queued_messages(self):
+        """
+        Test ability to move messages from the head of one queue to another.
+        Need to test moving all and N messages.
+        """
+        self.startQmf()
+        session = self.session
+        "Set up source queue"
+        session.queue_declare(queue="src-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="src-queue", exchange="amq.direct", binding_key="routing_key")
+
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        for count in twenty:
+            body = "Move Message %d" % count
+            src_msg = Message(props, body)
+            session.message_transfer(destination="amq.direct", message=src_msg)
+
+        "Set up destination queue"
+        session.queue_declare(queue="dest-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="dest-queue", exchange="amq.direct")
+
+        queues = self.qmf.getObjects(_class="queue")
+
+        "Move 10 messages from src-queue to dest-queue"
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 10, {})
+        self.assertEqual (result.status, 0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,10)
+        self.assertEqual (dq.msgDepth,10)
+
+        "Move all remaining messages to destination"
+        # qty=0 means "move all"
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "dest-queue", 0, {})
+        self.assertEqual (result.status,0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,0)
+        self.assertEqual (dq.msgDepth,20)
+
+        "Use a bad source queue name"
+        # status 4 indicates the object-not-found error from the method call
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("bad-src-queue", "dest-queue", 0, {})
+        self.assertEqual (result.status,4)
+
+        "Use a bad destination queue name"
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("src-queue", "bad-dest-queue", 0, {})
+        self.assertEqual (result.status,4)
+
+        " Use a large qty (40) to move from dest-queue back to "
+        " src-queue- should move all "
+        result = self.qmf.getObjects(_class="broker")[0].queueMoveMessages("dest-queue", "src-queue", 40, {})
+        self.assertEqual (result.status,0)
+
+        sq = self.qmf.getObjects(_class="queue", name="src-queue")[0]
+        dq = self.qmf.getObjects(_class="queue", name="dest-queue")[0]
+
+        self.assertEqual (sq.msgDepth,20)
+        self.assertEqual (dq.msgDepth,0)
+
+        "Consume the messages of the queue and check they are all there in order"
+        session.message_subscribe(queue="src-queue", destination="tag")
+        session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+        session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+        queue = session.incoming("tag")
+        for count in twenty:
+            consumed_msg = queue.get(timeout=1)
+            body = "Move Message %d" % count
+            self.assertEqual(body, consumed_msg.body)
+
+    def test_purge_queue(self):
+        """
+        Test ability to purge messages from the head of a queue.
+        Need to test moving all, 1 (top message) and N messages.
+        """
+        self.startQmf()
+        session = self.session
+        "Set up purge queue"
+        session.queue_declare(queue="purge-queue", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="purge-queue", exchange="amq.direct", binding_key="routing_key")
+
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        for count in twenty:
+            body = "Purge Message %d" % count
+            msg = Message(props, body)
+            session.message_transfer(destination="amq.direct", message=msg)
+
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+
+        "Purge top message from purge-queue"
+        result = pq.purge(1, {})
+        self.assertEqual (result.status, 0)
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,19)
+
+        "Purge top 9 messages from purge-queue"
+        result = pq.purge(9, {})
+        self.assertEqual (result.status, 0)
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,10)
+
+        "Purge all messages from purge-queue"
+        # qty=0 means "purge all"
+        result = pq.purge(0, {})
+        self.assertEqual (result.status, 0)
+        pq = self.qmf.getObjects(_class="queue", name="purge-queue")[0]
+        self.assertEqual (pq.msgDepth,0)
+
+    def test_reroute_priority_queue(self):
+        """Reroute from a priority queue must preserve priority ordering in
+        the destination (also a priority queue)."""
+        self.startQmf()
+        session = self.session
+
+        #setup test queue supporting multiple priority levels
+        session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True, arguments={'x-qpid-priorities':10})
+
+        #send some messages of varying priority to that queue:
+        for i in range(0, 5):
+            deliveryProps = session.delivery_properties(routing_key="test-queue", priority=i+5)
+            session.message_transfer(message=Message(deliveryProps, "Message %d" % (i+1)))
+
+
+        #declare and bind a queue to amq.fanout through which rerouted
+        #messages can be verified:
+        session.queue_declare(queue="rerouted", exclusive=True, auto_delete=True, arguments={'x-qpid-priorities':10})
+        session.exchange_bind(queue="rerouted", exchange="amq.fanout")
+
+        #reroute messages from test queue to amq.fanout (and hence to
+        #rerouted queue):
+        pq = self.qmf.getObjects(_class="queue", name="test-queue")[0]
+        result = pq.reroute(0, False, "amq.fanout", {})
+        self.assertEqual(result.status, 0)
+
+        #verify messages are all rerouted; highest priority ("Message 5",
+        #priority 9) is expected first, hence the (5-i) reversal:
+        self.subscribe(destination="incoming", queue="rerouted")
+        incoming = session.incoming("incoming")
+        for i in range(0, 5):
+            msg = incoming.get(timeout=1)
+            self.assertEqual("Message %d" % (5-i), msg.body)
+
+
+    def test_reroute_queue(self):
+        """
+        Test ability to reroute messages from the head of a queue.
+        Need to test moving all, 1 (top message) and N messages.
+        """
+        self.startQmf()
+        session = self.session
+        "Set up test queue"
+        session.exchange_declare(exchange="alt.direct1", type="direct")
+        session.queue_declare(queue="alt-queue1", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="alt-queue1", exchange="alt.direct1", binding_key="routing_key")
+        session.exchange_declare(exchange="alt.direct2", type="direct")
+        session.queue_declare(queue="alt-queue2", exclusive=True, auto_delete=True)
+        session.exchange_bind(queue="alt-queue2", exchange="alt.direct2", binding_key="routing_key")
+        session.queue_declare(queue="reroute-queue", exclusive=True, auto_delete=True, alternate_exchange="alt.direct1")
+        session.exchange_bind(queue="reroute-queue", exchange="amq.direct", binding_key="routing_key")
+
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        # set a trace header so we can check it is cleared on reroute
+        mp = session.message_properties(application_headers={'x-qpid.trace' : 'A,B,C'})
+        for count in twenty:
+            body = "Reroute Message %d" % count
+            msg = Message(props, mp, body)
+            session.message_transfer(destination="amq.direct", message=msg)
+
+        pq = self.qmf.getObjects(_class="queue", name="reroute-queue")[0]
+
+        "Reroute top message from reroute-queue to alternate exchange"
+        # useAltExchange=True with empty exchange name targets alt.direct1
+        result = pq.reroute(1, True, "", {})
+        self.assertEqual(result.status, 0)
+        pq.update()
+        aq = self.qmf.getObjects(_class="queue", name="alt-queue1")[0]
+        self.assertEqual(pq.msgDepth,19)
+        self.assertEqual(aq.msgDepth,1)
+
+        "Verify that the trace was cleared on the rerouted message"
+        url = "%s://%s:%d" % (self.broker.scheme or "amqp", self.broker.host, self.broker.port)
+        conn = qpid.messaging.Connection(url)
+        conn.open()
+        sess = conn.session()
+        rx = sess.receiver("alt-queue1;{mode:browse}")
+        rm = rx.fetch(1)
+        self.assertEqual(rm.properties['x-qpid.trace'], '')
+        conn.close()
+
+        "Reroute top 9 messages from reroute-queue to alt.direct2"
+        result = pq.reroute(9, False, "alt.direct2", {})
+        self.assertEqual(result.status, 0)
+        pq.update()
+        aq = self.qmf.getObjects(_class="queue", name="alt-queue2")[0]
+        self.assertEqual(pq.msgDepth,10)
+        self.assertEqual(aq.msgDepth,9)
+
+        "Reroute using a non-existent exchange"
+        result = pq.reroute(0, False, "amq.nosuchexchange", {})
+        self.assertEqual(result.status, 4)
+
+        "Reroute all messages from reroute-queue"
+        result = pq.reroute(0, False, "alt.direct2", {})
+        self.assertEqual(result.status, 0)
+        pq.update()
+        aq = self.qmf.getObjects(_class="queue", name="alt-queue2")[0]
+        self.assertEqual(pq.msgDepth,0)
+        self.assertEqual(aq.msgDepth,19)
+
+        "Make more messages"
+        twenty = range(1,21)
+        props = session.delivery_properties(routing_key="routing_key")
+        for count in twenty:
+            body = "Reroute Message %d" % count
+            msg = Message(props, body)
+            session.message_transfer(destination="amq.direct", message=msg)
+
+        "Reroute onto the same queue"
+        result = pq.reroute(0, False, "amq.direct", {})
+        self.assertEqual(result.status, 0)
+        pq.update()
+        self.assertEqual(pq.msgDepth,20)
+
+ def test_reroute_alternate_exchange(self):
+ """
+ Test that when rerouting, the alternate-exchange is considered if relevant
+ """
+ self.startQmf()
+ session = self.session
+ # 1. Create 2 exchanges A and B (fanout) where B is the
+ # alternate exchange for A
+ session.exchange_declare(exchange="B", type="fanout")
+ session.exchange_declare(exchange="A", type="fanout", alternate_exchange="B")
+
+ # 2. Bind queue X to B
+ session.queue_declare(queue="X", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue="X", exchange="B")
+
+ # 3. Send 1 message to queue Y
+ session.queue_declare(queue="Y", exclusive=True, auto_delete=True)
+ props = session.delivery_properties(routing_key="Y")
+ session.message_transfer(message=Message(props, "reroute me!"))
+
+ # 4. Call reroute on queue Y and specify that messages should
+ # be sent to exchange A
+ # reroute args: (count, use-alt-exchange, exchange-name, filter-args);
+ # here exactly 1 message is routed to the named exchange "A".
+ y = self.qmf.getObjects(_class="queue", name="Y")[0]
+ result = y.reroute(1, False, "A", {})
+ self.assertEqual(result.status, 0)
+
+ # 5. verify that the message is rerouted through B (as A has
+ # no matching bindings) to X
+ self.subscribe(destination="x", queue="X")
+ self.assertEqual("reroute me!", session.incoming("x").get(timeout=1).body)
+
+ # Cleanup
+ # (queues X and Y are exclusive+auto-delete and clean up with the session)
+ for e in ["A", "B"]: session.exchange_delete(exchange=e)
+
+ def test_reroute_invalid_alt_exchange(self):
+ """
+ Test that an error is returned for an attempt to reroute to
+ alternate exchange on a queue for which no such exchange has
+ been defined.
+ """
+ self.startQmf()
+ session = self.session
+ # create queue with no alt-exchange, and send a message to it
+ session.queue_declare(queue="q", exclusive=True, auto_delete=True)
+ props = session.delivery_properties(routing_key="q")
+ session.message_transfer(message=Message(props, "don't reroute me!"))
+
+ # attempt to reroute the message to alt-exchange
+ # (second arg True = use the queue's alternate exchange, which is unset here)
+ q = self.qmf.getObjects(_class="queue", name="q")[0]
+ result = q.reroute(1, True, "", {})
+ # verify the attempt fails...
+ self.assertEqual(result.status, 4) #invalid parameter
+
+ # ...and message is still on the queue
+ self.subscribe(destination="d", queue="q")
+ self.assertEqual("don't reroute me!", session.incoming("d").get(timeout=1).body)
+
+
+ def test_methods_async (self):
+ """
+ Invoke the broker's echo method asynchronously many times and verify
+ every request receives exactly one matching response (no lost,
+ mismatched, or spurious replies).
+ """
+ class Handler (qmf.console.Console):
+ # QMF console callback handler: records sent sequence numbers and
+ # the responses received for them.
+ def __init__(self):
+ self.cv = Condition()
+ self.xmtList = {}
+ self.rcvList = {}
+
+ def methodResponse(self, broker, seq, response):
+ # Called by the QMF console thread; guard shared state with the lock.
+ self.cv.acquire()
+ try:
+ self.rcvList[seq] = response
+ finally:
+ self.cv.release()
+
+ def request(self, broker, count):
+ # Fire off 'count' async echo calls, remembering each sequence number.
+ self.count = count
+ for idx in range(count):
+ self.cv.acquire()
+ try:
+ seq = broker.echo(idx, "Echo Message", _async = True)
+ self.xmtList[seq] = idx
+ finally:
+ self.cv.release()
+
+ def check(self):
+ # Reconcile sent vs received; returns "pass" or a failure summary.
+ if self.count != len(self.xmtList):
+ return "fail (attempted send=%d, actual sent=%d)" % (self.count, len(self.xmtList))
+ lost = 0
+ mismatched = 0
+ for seq in self.xmtList:
+ value = self.xmtList[seq]
+ if seq in self.rcvList:
+ result = self.rcvList.pop(seq)
+ if result.sequence != value:
+ mismatched += 1
+ else:
+ lost += 1
+ spurious = len(self.rcvList)
+ if lost == 0 and mismatched == 0 and spurious == 0:
+ return "pass"
+ else:
+ return "fail (lost=%d, mismatch=%d, spurious=%d)" % (lost, mismatched, spurious)
+
+ handler = Handler()
+ self.startQmf(handler)
+ brokers = self.qmf.getObjects(_class="broker")
+ self.assertEqual(len(brokers), 1)
+ broker = brokers[0]
+ handler.request(broker, 20)
+ # NOTE(review): fixed sleep to let async responses arrive — could be
+ # flaky on a slow broker; a condition-wait would be more robust.
+ sleep(1)
+ self.assertEqual(handler.check(), "pass")
+
+ def test_connection_close(self):
+ """
+ Test management method for closing connection
+ """
+ self.startQmf()
+ conn = self.connect()
+ session = conn.session("my-named-session")
+
+ #using qmf find named session and close the corresponding connection:
+ qmf_ssn_object = [s for s in self.qmf.getObjects(_class="session") if s.name.endswith("my-named-session")][0]
+ qmf_ssn_object._connectionRef_.close()
+
+ #check that connection is closed
+ # NOTE(review): bare except swallows any error type here; the intent is
+ # only that creating a session on the closed connection must not succeed.
+ try:
+ conn.session("another-session")
+ self.fail("Expected failure from closed connection")
+ except: None
+
+ #make sure that the named session has been closed and the name can be re-used
+ conn = self.connect()
+ session = conn.session("my-named-session")
+ session.queue_declare(queue="whatever", exclusive=True, auto_delete=True)
+
+ def test_immediate_method(self):
+ """
+ Invoke a QMFv2 method (queue reroute) directly over the messaging API
+ by sending a '_method_request' map message to the broker agent and
+ verifying a '_method_response' comes back on the reply address.
+ """
+ url = "%s://%s:%d" % (self.broker.scheme or "amqp", self.broker.host or "localhost", self.broker.port or 5672)
+ conn = qpid.messaging.Connection(url)
+ conn.open()
+ sess = conn.session()
+ # reply address for the method response (QMFv2 default direct exchange)
+ replyTo = "qmf.default.direct/reply_immediate_method_test;{node:{type:topic}}"
+ agent_sender = sess.sender("qmf.default.direct/broker")
+ agent_receiver = sess.receiver(replyTo)
+ # create the target queue as a side effect of opening this sender
+ queue_create = sess.sender("test-queue-imm-method;{create:always,delete:always,node:{type:queue,durable:False,x-declare:{auto-delete:True}}}")
+
+ # build the QMFv2 method-request map: method name plus the object id of
+ # the queue management object, then the reroute arguments
+ method_request = {'_method_name':'reroute','_object_id':{'_object_name':'org.apache.qpid.broker:queue:test-queue-imm-method'}}
+ method_request['_arguments'] = {'request':0, 'useAltExchange':False, 'exchange':'amq.fanout'}
+
+ reroute_call = qpid.messaging.Message(method_request)
+ reroute_call.properties['qmf.opcode'] = '_method_request'
+ reroute_call.properties['x-amqp-0-10.app-id'] = 'qmf2'
+ reroute_call.reply_to = replyTo
+
+ agent_sender.send(reroute_call)
+ result = agent_receiver.fetch(3)
+ self.assertEqual(result.properties['qmf.opcode'], '_method_response')
+
+ conn.close()
+
+ def test_binding_count_on_queue(self):
+ """
+ Verify that the QMF queue object's bindingCount statistic tracks
+ bindings as they are added, unbound individually, and removed
+ implicitly by deleting the exchanges.
+ """
+ self.startQmf()
+ conn = self.connect()
+ session = self.session
+
+ QUEUE = "binding_test_queue"
+ EX_DIR = "binding_test_exchange_direct"
+ EX_FAN = "binding_test_exchange_fanout"
+ EX_TOPIC = "binding_test_exchange_topic"
+ EX_HDR = "binding_test_exchange_headers"
+
+ #
+ # Create a test queue
+ #
+ session.queue_declare(queue=QUEUE, exclusive=True, auto_delete=True)
+ queue = self.qmf.getObjects(_class="queue", name=QUEUE)[0]
+ if not queue:
+ self.fail("Queue not found")
+ # every queue starts with one binding (to the default exchange)
+ self.assertEqual(queue.bindingCount, 1, "wrong initial binding count")
+
+ #
+ # Create an exchange of each supported type
+ #
+ session.exchange_declare(exchange=EX_DIR, type="direct")
+ session.exchange_declare(exchange=EX_FAN, type="fanout")
+ session.exchange_declare(exchange=EX_TOPIC, type="topic")
+ session.exchange_declare(exchange=EX_HDR, type="headers")
+
+ #
+ # Bind each exchange to the test queue
+ # (headers exchange requires match arguments; 2 direct, 1 fanout,
+ # 2 topic and 2 headers bindings = 7 new bindings)
+ #
+ match = {}
+ match['x-match'] = "all"
+ match['key'] = "value"
+ session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key="key1")
+ session.exchange_bind(exchange=EX_DIR, queue=QUEUE, binding_key="key2")
+ session.exchange_bind(exchange=EX_FAN, queue=QUEUE)
+ session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key1.#")
+ session.exchange_bind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key2.#")
+ session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key="key1", arguments=match)
+ match['key2'] = "value2"
+ session.exchange_bind(exchange=EX_HDR, queue=QUEUE, binding_key="key2", arguments=match)
+
+ #
+ # Verify that the queue's binding count accounts for the new bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 8,
+ "added bindings not accounted for (expected 8, got %d)" % queue.bindingCount)
+
+ #
+ # Remove some of the bindings
+ #
+ session.exchange_unbind(exchange=EX_DIR, queue=QUEUE, binding_key="key2")
+ session.exchange_unbind(exchange=EX_TOPIC, queue=QUEUE, binding_key="key2.#")
+ session.exchange_unbind(exchange=EX_HDR, queue=QUEUE, binding_key="key2")
+
+ #
+ # Verify that the queue's binding count accounts for the deleted bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 5,
+ "deleted bindings not accounted for (expected 5, got %d)" % queue.bindingCount)
+ #
+ # Delete the exchanges
+ # (deleting an exchange implicitly drops its remaining bindings)
+ #
+ session.exchange_delete(exchange=EX_DIR)
+ session.exchange_delete(exchange=EX_FAN)
+ session.exchange_delete(exchange=EX_TOPIC)
+ session.exchange_delete(exchange=EX_HDR)
+
+ #
+ # Verify that the queue's binding count accounts for the lost bindings
+ #
+ queue.update()
+ self.assertEqual(queue.bindingCount, 1,
+ "deleted bindings not accounted for (expected 1, got %d)" % queue.bindingCount)
+
+ def test_connection_stats(self):
+ """
+ Test message in/out stats for connection
+ """
+ agent = self.setup_access()
+ conn = self.connect()
+ session = conn.session("stats-session")
+
+ #using qmf find named session and the corresponding connection:
+ conn_qmf = None
+ sessions = agent.getAllSessions()
+ for s in sessions:
+ if s.name.endswith("stats-session"):
+ conn_qmf = agent.getConnection(s.connectionRef)
+
+ assert(conn_qmf)
+
+ #send a message to a queue
+ session.queue_declare(queue="stats-q", exclusive=True, auto_delete=True)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="stats-q"), "abc"))
+
+ #check the 'msgs sent from' stat for this connection
+ # (msgsFromClient counts messages the client published on this connection)
+ conn_qmf.update()
+ self.assertEqual(conn_qmf.msgsFromClient, 1)
+
+ #receive message from queue
+ session.message_subscribe(destination="d", queue="stats-q")
+ incoming = session.incoming("d")
+ incoming.start()
+ self.assertEqual("abc", incoming.get(timeout=1).body)
+
+ #check the 'msgs sent to' stat for this connection
+ # (msgsToClient counts messages the broker delivered to this connection)
+ conn_qmf.update()
+ self.assertEqual(conn_qmf.msgsToClient, 1)
+
+ def test_timestamp_config(self):
+ """
+ Test message timestamping control.
+
+ Toggles the broker's receive-timestamping via the QMF
+ set/getTimestampConfig methods and checks that the
+ 'x-amqp-0-10.timestamp' property appears on delivered messages only
+ while timestamping is enabled.
+ """
+ self.startQmf()
+ conn = self.connect()
+ session = conn.session("timestamp-session")
+
+ #verify that receive message timestamping is OFF by default
+ broker = self.qmf.getObjects(_class="broker")[0]
+ rc = broker.getTimestampConfig()
+ self.assertEqual(rc.status, 0)
+ self.assertEqual(rc.text, "OK")
+
+ #try to enable it
+ rc = broker.setTimestampConfig(True)
+ self.assertEqual(rc.status, 0)
+ self.assertEqual(rc.text, "OK")
+
+ rc = broker.getTimestampConfig()
+ self.assertEqual(rc.status, 0)
+ self.assertEqual(rc.text, "OK")
+ self.assertEqual(rc.receive, True)
+
+ # setup a connection & session to the broker
+ url = "%s://%s:%d" % (self.broker.scheme or "amqp", self.broker.host or "localhost", self.broker.port or 5672)
+ conn = qpid.messaging.Connection(url)
+ conn.open()
+ sess = conn.session()
+
+ #send a message to a queue
+ sender = sess.sender("ts-q; {create:sender, delete:receiver}")
+ sender.send( qpid.messaging.Message(content="abc") )
+
+ #receive message from queue, and verify timestamp is present
+ receiver = sess.receiver("ts-q")
+ try:
+ msg = receiver.fetch(timeout=1)
+ except Empty:
+ assert(False)
+ self.assertEqual("abc", msg.content)
+ self.assertEqual(True, "x-amqp-0-10.timestamp" in msg.properties)
+ assert(msg.properties["x-amqp-0-10.timestamp"])
+
+ #try to disable it
+ rc = broker.setTimestampConfig(False)
+ self.assertEqual(rc.status, 0)
+ self.assertEqual(rc.text, "OK")
+
+ rc = broker.getTimestampConfig()
+ self.assertEqual(rc.status, 0)
+ self.assertEqual(rc.text, "OK")
+ self.assertEqual(rc.receive, False)
+
+ #send another message to the queue
+ sender.send( qpid.messaging.Message(content="def") )
+
+ #receive message from queue, and verify timestamp is NOT PRESENT
+ receiver = sess.receiver("ts-q")
+ try:
+ msg = receiver.fetch(timeout=1)
+ except Empty:
+ assert(False)
+ self.assertEqual("def", msg.content)
+ self.assertEqual(False, "x-amqp-0-10.timestamp" in msg.properties)
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/message.py b/qpid/tests/src/py/qpid_tests/broker_0_10/message.py
new file mode 100644
index 0000000000..c6095a0579
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/message.py
@@ -0,0 +1,1108 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message, RangedSet
+from qpid.session import SessionException
+
+from qpid.content import Content
+from time import sleep
+
+class MessageTests(TestBase010):
+ """Tests for 'methods' on the amqp message 'class'"""
+
+ def test_no_local(self):
+ """
+ NOTE: this is a test of a QPID specific feature
+
+ Test that the qpid specific no_local arg is honoured.
+ """
+ session = self.session
+ #setup, declare two queues one of which excludes delivery of locally sent messages
+ session.queue_declare(queue="test-queue-1a", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="test-queue-1b", exclusive=True, auto_delete=True, arguments={'no-local':'true'})
+ #establish two consumers
+ self.subscribe(destination="local_included", queue="test-queue-1a")
+ self.subscribe(destination="local_excluded", queue="test-queue-1b")
+
+ #send a message
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1a"), "deliver-me"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me"))
+
+ #send a message from another session on the same connection to each queue
+ # (no-local is per-connection, so these must also be excluded from 1b)
+ session2 = self.conn.session("my-local-session")
+ session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1a"), "deliver-me-as-well"))
+ session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me-either"))
+
+ #send a message from a session on another connection to each queue
+ for q in ["test-queue-1a", "test-queue-1b"]:
+ session.exchange_bind(queue=q, exchange="amq.fanout", binding_key="my-key")
+ other = self.connect()
+ session3 = other.session("my-other-session")
+ session3.message_transfer(destination="amq.fanout", message=Message("i-am-not-local"))
+ other.close()
+
+ #check the queues of the two consumers
+ excluded = session.incoming("local_excluded")
+ included = session.incoming("local_included")
+ for b in ["deliver-me", "deliver-me-as-well", "i-am-not-local"]:
+ msg = included.get(timeout=1)
+ self.assertEqual(b, msg.body)
+ #the no-local queue should deliver only the foreign message
+ msg = excluded.get(timeout=1)
+ self.assertEqual("i-am-not-local", msg.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+ def test_no_local_awkward(self):
+
+ """
+ NOTE: this is a test of a QPID specific feature
+
+ Check that messages which will be excluded through no-local
+ processing will not block subsequent deliveries
+ """
+
+ session = self.session
+ #setup:
+ session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True, arguments={'no-local':'true'})
+ #establish consumer which excludes delivery of locally sent messages
+ self.subscribe(destination="local_excluded", queue="test-queue")
+
+ #send a 'local' message
+ # (same connection as the subscriber, so it must be filtered out)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "local"))
+
+ #send a non local message
+ other = self.connect()
+ session2 = other.session("my-session", 1)
+ session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue"), "foreign"))
+ session2.close()
+ other.close()
+
+ #check that the second message only is delivered
+ # — the skipped 'local' message must not block the 'foreign' one
+ excluded = session.incoming("local_excluded")
+ msg = excluded.get(timeout=1)
+ self.assertEqual("foreign", msg.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received extra message")
+ except Empty: None
+ #check queue is empty
+ self.assertEqual(0, session.queue_query(queue="test-queue").message_count)
+
+ def test_no_local_exclusive_subscribe(self):
+ """
+ NOTE: this is a test of a QPID specific feature
+
+ Test that the no_local processing works on queues not declared
+ as exclusive, but with an exclusive subscription
+ """
+ session = self.session
+
+ #setup, declare two queues one of which excludes delivery of
+ #locally sent messages but is not declared as exclusive
+ session.queue_declare(queue="test-queue-1a", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="test-queue-1b", auto_delete=True, arguments={'no-local':'true'})
+ #establish two consumers
+ # (exclusive subscription, not exclusive queue, on the no-local queue)
+ self.subscribe(destination="local_included", queue="test-queue-1a")
+ self.subscribe(destination="local_excluded", queue="test-queue-1b", exclusive=True)
+
+ #send a message from the same session to each queue
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1a"), "deliver-me"))
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me"))
+
+ #send a message from another session on the same connection to each queue
+ session2 = self.conn.session("my-session")
+ session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1a"), "deliver-me-as-well"))
+ session2.message_transfer(message=Message(session2.delivery_properties(routing_key="test-queue-1b"), "dont-deliver-me-either"))
+
+ #send a message from a session on another connection to each queue
+ for q in ["test-queue-1a", "test-queue-1b"]:
+ session.exchange_bind(queue=q, exchange="amq.fanout", binding_key="my-key")
+ other = self.connect()
+ session3 = other.session("my-other-session")
+ session3.message_transfer(destination="amq.fanout", message=Message("i-am-not-local"))
+ other.close()
+
+ #check the queues of the two consumers
+ excluded = session.incoming("local_excluded")
+ included = session.incoming("local_included")
+ for b in ["deliver-me", "deliver-me-as-well", "i-am-not-local"]:
+ msg = included.get(timeout=1)
+ self.assertEqual(b, msg.body)
+ #only the message from the other connection may arrive on the no-local queue
+ msg = excluded.get(timeout=1)
+ self.assertEqual("i-am-not-local", msg.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+
+ def test_consume_exclusive(self):
+ """
+ Test an exclusive consumer prevents other consumer being created
+ """
+ session = self.session
+ session.queue_declare(queue="test-queue-2", exclusive=True, auto_delete=True)
+ session.message_subscribe(destination="first", queue="test-queue-2", exclusive=True)
+ try:
+ session.message_subscribe(destination="second", queue="test-queue-2")
+ self.fail("Expected consume request to fail due to previous exclusive consumer")
+ except SessionException, e:
+ # 405 = resource-locked
+ self.assertEquals(405, e.args[0].error_code)
+
+ def test_consume_exclusive2(self):
+ """
+ Check that an exclusive consumer cannot be created if a consumer already exists:
+ """
+ session = self.session
+ session.queue_declare(queue="test-queue-2", exclusive=True, auto_delete=True)
+ session.message_subscribe(destination="first", queue="test-queue-2")
+ try:
+ session.message_subscribe(destination="second", queue="test-queue-2", exclusive=True)
+ self.fail("Expected exclusive consume request to fail due to previous consumer")
+ except SessionException, e:
+ # 405 = resource-locked
+ self.assertEquals(405, e.args[0].error_code)
+
+ def test_consume_queue_not_found(self):
+ """
+ Test error conditions associated with the queue field of the consume method:
+ """
+ session = self.session
+ try:
+ #queue specified but doesn't exist:
+ session.message_subscribe(queue="invalid-queue", destination="a")
+ self.fail("Expected failure when consuming from non-existent queue")
+ except SessionException, e:
+ # 404 = not-found
+ self.assertEquals(404, e.args[0].error_code)
+
+ def test_consume_queue_not_specified(self):
+ """
+ Check that subscribing without naming a queue is rejected when no
+ default queue has been established for the session.
+ """
+ session = self.session
+ try:
+ #queue not specified and none previously declared for channel:
+ session.message_subscribe(destination="a")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except SessionException, e:
+ # 531 = illegal-argument
+ self.assertEquals(531, e.args[0].error_code)
+
+ def test_consume_unique_consumers(self):
+ """
+ Ensure unique consumer tags are enforced
+ """
+ session = self.session
+ #setup, declare a queue:
+ session.queue_declare(queue="test-queue-3", exclusive=True, auto_delete=True)
+
+ #check that attempts to use duplicate tags are detected and prevented:
+ session.message_subscribe(destination="first", queue="test-queue-3")
+ try:
+ session.message_subscribe(destination="first", queue="test-queue-3")
+ self.fail("Expected consume request to fail due to non-unique tag")
+ except SessionException, e:
+ # 530 = not-allowed
+ self.assertEquals(530, e.args[0].error_code)
+
+ def test_cancel(self):
+ """
+ Test compliance of the basic.cancel method
+
+ A message sent before cancellation is delivered; one sent after is
+ not, and cancelling an unknown/already-cancelled destination raises
+ a 404 session exception.
+ """
+ session = self.session
+ #setup, declare a queue:
+ session.queue_declare(queue="test-queue-4", exclusive=True, auto_delete=True)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-4"), "One"))
+
+ session.message_subscribe(destination="my-consumer", queue="test-queue-4")
+ myqueue = session.incoming("my-consumer")
+ # grant effectively unlimited credit so the first message flows
+ session.message_flow(destination="my-consumer", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="my-consumer", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+ #should flush here
+
+ #cancel should stop messages being delivered
+ session.message_cancel(destination="my-consumer")
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue-4"), "Two"))
+ msg = myqueue.get(timeout=1)
+ self.assertEqual("One", msg.body)
+ try:
+ msg = myqueue.get(timeout=1)
+ self.fail("Got message after cancellation: " + msg)
+ except Empty: None
+
+ #cancellation of non-existant consumers should be result in 404s
+ try:
+ session.message_cancel(destination="my-consumer")
+ self.fail("Expected 404 for recancellation of subscription.")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+ # the previous exception invalidated the session; use a fresh one
+ session = self.conn.session("alternate-session", timeout=10)
+ try:
+ session.message_cancel(destination="this-never-existed")
+ self.fail("Expected 404 for cancellation of unknown subscription.")
+ except SessionException, e:
+ self.assertEquals(404, e.args[0].error_code)
+
+
+ def test_ack(self):
+ """
+ Test basic ack/recover behaviour using a combination of implicit and
+ explicit accept subscriptions.
+
+ Also cross-checks the broker's QMF queue statistics (msgDepth,
+ msgTotalEnqueues, msgTotalDequeues) against session queue-query
+ counts at each stage: QMF counts a dequeue only once a message is
+ accepted, while queue-query reflects acquisition immediately.
+ """
+ self.startQmf()
+ session1 = self.conn.session("alternate-session", timeout=10)
+ session1.queue_declare(queue="test-ack-queue", auto_delete=True)
+
+ delivery_properties = session1.delivery_properties(routing_key="test-ack-queue")
+ for i in ["One", "Two", "Three", "Four", "Five"]:
+ session1.message_transfer(message=Message(delivery_properties, i))
+
+ # verify enqueued message count, use both QMF and session query to verify consistency
+ self.assertEqual(5, session1.queue_query(queue="test-ack-queue").message_count)
+ queueObj = self.qmf.getObjects(_class="queue", name="test-ack-queue")[0]
+ self.assertEquals(queueObj.msgDepth, 5)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 0)
+
+ # subscribe with implied acquire, explicit accept:
+ session1.message_subscribe(queue = "test-ack-queue", destination = "consumer")
+ session1.message_flow(destination="consumer", unit=session1.credit_unit.message, value=0xFFFFFFFFL)
+ session1.message_flow(destination="consumer", unit=session1.credit_unit.byte, value=0xFFFFFFFFL)
+ queue = session1.incoming("consumer")
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.body)
+ self.assertEqual("Two", msg2.body)
+ self.assertEqual("Three", msg3.body)
+ self.assertEqual("Four", msg4.body)
+ self.assertEqual("Five", msg5.body)
+
+ # messages should not be on the queue:
+ self.assertEqual(0, session1.queue_query(queue="test-ack-queue").message_count)
+ # QMF shows the dequeues as not having happened yet, since they are have
+ # not been accepted
+ queueObj.update()
+ self.assertEquals(queueObj.msgDepth, 5)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 0)
+
+ session1.message_accept(RangedSet(msg1.id, msg2.id, msg4.id))#One, Two and Four
+
+ # QMF should now reflect the accepted messages as being dequeued
+ self.assertEqual(0, session1.queue_query(queue="test-ack-queue").message_count)
+ queueObj.update()
+ self.assertEquals(queueObj.msgDepth, 2)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 3)
+
+ #subscribe from second session here to ensure queue is not auto-deleted
+ #when alternate session closes. Use implicit accept mode to test that
+ #we don't need to explicitly accept
+ session2 = self.conn.session("alternate-session-2", timeout=10)
+ session2.message_subscribe(queue = "test-ack-queue", destination = "checker", accept_mode=1)
+
+ #now close the first session, and see that the unaccepted messages are
+ #then redelivered to another subscriber:
+ session1.close(timeout=10)
+
+ # check the statistics - the queue_query will show the non-accepted
+ # messages have been released. QMF never considered them dequeued, so
+ # those counts won't change
+ self.assertEqual(2, session2.queue_query(queue="test-ack-queue").message_count)
+ queueObj.update()
+ self.assertEquals(queueObj.msgDepth, 2)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 3)
+
+ session2.message_flow(destination="checker", unit=session2.credit_unit.message, value=0xFFFFFFFFL)
+ session2.message_flow(destination="checker", unit=session2.credit_unit.byte, value=0xFFFFFFFFL)
+ queue = session2.incoming("checker")
+
+ # the two unaccepted messages (Three, Five) are redelivered in order
+ msg3b = queue.get(timeout=1)
+ msg5b = queue.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.body)
+ self.assertEqual("Five", msg5b.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ self.assertEqual(0, session2.queue_query(queue="test-ack-queue").message_count)
+ queueObj.update()
+ self.assertEquals(queueObj.msgDepth, 0)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 5)
+
+ # Subscribe one last time to keep the queue available, and to verify
+ # that the implied accept worked by verifying no messages have been
+ # returned when session2 is closed.
+ self.session.message_subscribe(queue = "test-ack-queue", destination = "final-checker")
+
+ session2.close(timeout=10)
+
+ # check the statistics - they should not have changed
+ self.assertEqual(0, self.session.queue_query(queue="test-ack-queue").message_count)
+ queueObj.update()
+ self.assertEquals(queueObj.msgDepth, 0)
+ self.assertEquals(queueObj.msgTotalEnqueues, 5)
+ self.assertEquals(queueObj.msgTotalDequeues, 5)
+
+ self.session.message_flow(destination="final-checker", unit=self.session.credit_unit.message, value=0xFFFFFFFFL)
+ self.session.message_flow(destination="final-checker", unit=self.session.credit_unit.byte, value=0xFFFFFFFFL)
+ try:
+ extra = self.session.incoming("final-checker").get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ def test_reject(self):
+ """
+ Verify that a rejected message is routed to the queue's
+ alternate-exchange: q's alternate is amq.fanout, which is bound to r,
+ so a message rejected from q must show up on r.
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True, alternate_exchange="amq.fanout")
+ session.queue_declare(queue = "r", exclusive=True, auto_delete=True)
+ session.exchange_bind(queue = "r", exchange = "amq.fanout")
+
+ session.message_subscribe(queue = "q", destination = "consumer")
+ session.message_flow(destination="consumer", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="consumer", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "blah, blah"))
+ msg = session.incoming("consumer").get(timeout = 1)
+ self.assertEquals(msg.body, "blah, blah")
+ session.message_reject(RangedSet(msg.id))
+
+ # the rejected message should now be available via r
+ session.message_subscribe(queue = "r", destination = "checker")
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ msg = session.incoming("checker").get(timeout = 1)
+ self.assertEquals(msg.body, "blah, blah")
+
+ def test_credit_flow_messages(self):
+ """
+ Test basic credit based flow control with unit = message
+ """
+ #declare an exclusive queue
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer (for now that defaults to infinite credit)
+ session.message_subscribe(queue = "q", destination = "c")
+ # flow_mode 0 = credit mode (credit is consumed, not replenished by a window)
+ session.message_set_flow_mode(flow_mode = 0, destination = "c")
+ #send batch of messages to queue
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %d" % i))
+
+ #set message credit to finite amount (less than enough for all messages)
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+ #set infinite byte credit
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
+ #check that expected number were received
+ q = session.incoming("c")
+ for i in range(1, 6):
+ self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+ self.assertEmpty(q)
+
+ #increase credit again and check more are received
+ # (one unit of credit at a time => exactly one more message each time)
+ for i in range(6, 11):
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "c")
+ self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+ self.assertEmpty(q)
+
+ def test_credit_flow_bytes(self):
+ """
+ Test basic credit based flow control with unit = bytes
+ """
+ #declare an exclusive queue
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer (for now that defaults to infinite credit)
+ session.message_subscribe(queue = "q", destination = "c")
+ # flow_mode 0 = credit mode
+ session.message_set_flow_mode(flow_mode = 0, destination = "c")
+ #send batch of messages to queue
+ for i in range(10):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "abcdefgh"))
+
+ #each message is currently interpreted as requiring msg_size bytes of credit
+ # NOTE(review): 19 = 8 body bytes plus broker-side per-message overhead;
+ # this value is coupled to the broker's credit accounting — confirm if
+ # the broker implementation changes.
+ msg_size = 19
+
+ #set byte credit to finite amount (less than enough for all messages)
+ session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
+ #set infinite message credit
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "c")
+ #check that expected number were received
+ q = session.incoming("c")
+ for i in range(5):
+ self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+ self.assertEmpty(q)
+
+ #increase credit again and check more are received
+ for i in range(5):
+ session.message_flow(unit = session.credit_unit.byte, value = msg_size, destination = "c")
+ self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+ self.assertEmpty(q)
+
+
+ def test_window_flow_messages(self):
+ """
+ Test basic window based flow control with unit = message
+
+ In window mode credit is restored as deliveries are completed, so
+ acknowledging the first batch should allow the remainder to flow.
+ """
+ #declare an exclusive queue
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer (for now that defaults to infinite credit)
+ session.message_subscribe(queue = "q", destination = "c")
+ # flow_mode 1 = window mode
+ session.message_set_flow_mode(flow_mode = 1, destination = "c")
+ #send batch of messages to queue
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %d" % i))
+
+ #set message credit to finite amount (less than enough for all messages)
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+ #set infinite byte credit
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
+ #check that expected number were received
+ q = session.incoming("c")
+ for i in range(1, 6):
+ msg = q.get(timeout = 1)
+ session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+ self.assertDataEquals(session, msg, "Message %d" % i)
+ self.assertEmpty(q)
+
+ #acknowledge messages and check more are received
+ #TODO: there may be a nicer way of doing this
+ # sending session-completed moves the window, restoring credit
+ session.channel.session_completed(session.receiver._completed)
+
+ for i in range(6, 11):
+ self.assertDataEquals(session, q.get(timeout = 1), "Message %d" % i)
+ self.assertEmpty(q)
+
+
+ def test_window_flow_bytes(self):
+ """
+ Test basic window based flow control with unit = bytes
+ """
+ #declare an exclusive queue
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer (for now that defaults to infinite credit)
+ session.message_subscribe(queue = "q", destination = "c")
+ session.message_set_flow_mode(flow_mode = 1, destination = "c")
+ #send batch of messages to queue
+ for i in range(10):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "abcdefgh"))
+
+ #each message is currently interpreted as requiring msg_size bytes of credit
+ msg_size = 19
+
+ #set byte credit to finite amount (less than enough for all messages)
+ session.message_flow(unit = session.credit_unit.byte, value = msg_size*5, destination = "c")
+ #set infinite message credit
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "c")
+ #check that expected number were received
+ q = session.incoming("c")
+ msgs = []
+ for i in range(5):
+ msg = q.get(timeout = 1)
+ msgs.append(msg)
+ self.assertDataEquals(session, msg, "abcdefgh")
+ self.assertEmpty(q)
+
+ #ack each message individually and check more are received
+ for i in range(5):
+ msg = msgs.pop()
+ #TODO: there may be a nicer way of doing this
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ self.assertDataEquals(session, q.get(timeout = 1), "abcdefgh")
+ self.assertEmpty(q)
+
+ def test_window_flush_ack_flow(self):
+ """
+ Test window-mode flow control combined with message_flush and explicit accept
+ """
+ #declare an exclusive queue
+ ssn = self.session
+ ssn.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ #create consumer
+ ssn.message_subscribe(queue = "q", destination = "c",
+ accept_mode=ssn.accept_mode.explicit)
+ ssn.message_set_flow_mode(flow_mode = ssn.flow_mode.window, destination = "c")
+
+ #send message A
+ ssn.message_transfer(message=Message(ssn.delivery_properties(routing_key="q"), "A"))
+
+ for unit in ssn.credit_unit.VALUES:
+ ssn.message_flow("c", unit, 0xFFFFFFFFL)
+
+ q = ssn.incoming("c")
+ msgA = q.get(timeout=10)
+
+ ssn.message_flush(destination="c")
+
+ # XXX
+ ssn.receiver._completed.add(msgA.id)
+ ssn.channel.session_completed(ssn.receiver._completed)
+ ssn.message_accept(RangedSet(msgA.id))
+
+ for unit in ssn.credit_unit.VALUES:
+ ssn.message_flow("c", unit, 0xFFFFFFFFL)
+
+ #send message B
+ ssn.message_transfer(message=Message(ssn.delivery_properties(routing_key="q"), "B"))
+
+ msgB = q.get(timeout=10)
+
+ def test_window_stop(self):
+ """
+ Ensure window based flow control reacts to stop correctly
+ """
+ session = self.session
+ #setup subscriber on a test queue
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ session.message_subscribe(queue = "q", destination = "c")
+ session.message_set_flow_mode(flow_mode = 1, destination = "c")
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
+
+
+ #send batch of messages to queue
+ for i in range(0, 10):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %d" % (i+1)))
+
+ #retrieve all delivered messages
+ q = session.incoming("c")
+ for i in range(0, 5):
+ msg = q.get(timeout = 1)
+ session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+ self.assertDataEquals(session, msg, "Message %d" % (i+1))
+
+ session.message_stop(destination = "c")
+
+ #now send completions, normally used to move window forward,
+ #but after a stop should not do so
+ session.channel.session_completed(session.receiver._completed)
+
+ #check no more messages are sent
+ self.assertEmpty(q)
+
+ #re-establish window and check remaining messages
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
+ for i in range(0, 5):
+ msg = q.get(timeout = 1)
+ self.assertDataEquals(session, msg, "Message %d" % (i+6))
+
+ def test_credit_window_after_messagestop(self):
+ """
+ Tests that the broker's credit window size doesn't exceed the requested value when completing
+ previous messageTransfer commands after a message_stop and message_flow.
+ """
+
+ session = self.session
+
+ #create queue
+ session.queue_declare(queue = self.test_queue_name, exclusive=True, auto_delete=True)
+
+ #send 11 messages
+ for i in range(1, 12):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key=self.test_queue_name), "message-%d" % (i)))
+
+
+ #subscribe:
+ session.message_subscribe(queue=self.test_queue_name, destination="a")
+ a = session.incoming("a")
+ session.message_set_flow_mode(flow_mode = 1, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ # issue 5 message credits
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "a")
+
+ # get 5 messages
+ ids = RangedSet()
+ for i in range(1, 6):
+ msg = a.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ ids.add(msg.id)
+
+ # now try and read a 6th message. we expect this to fail due to exhausted message credit.
+ try:
+ extra = a.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ session.message_stop(destination = "a")
+
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "a")
+
+ # complete earlier messages after setting the window to 5 message credits
+ session.channel.session_completed(ids)
+
+ # Now continue to read the next 5 messages
+ for i in range(6, 11):
+ msg = a.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+
+ # now try and read the 11th message. we expect this to fail due to exhausted message credit. If we receive an
+ # 11th this indicates the broker is not respecting the client's requested window size.
+ try:
+ extra = a.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ def test_no_credit_wrap(self):
+ """
+ Ensure that adding credit does not result in wraparound, lowering the balance.
+ """
+ session = self.session
+
+ session.queue_declare(queue = self.test_queue_name, exclusive=True, auto_delete=True)
+ session.message_subscribe(queue=self.test_queue_name, destination="a")
+ a = session.incoming("a")
+ session.message_set_flow_mode(flow_mode = session.flow_mode.credit, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFAL, destination = "a")
+ #test wraparound of credit balance does not occur
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ for i in range(1, 50):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key=self.test_queue_name), "message-%d" % (i)))
+ session.message_flush(destination = "a")
+ for i in range(1, 50):
+ msg = a.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+
+
+ def test_subscribe_not_acquired(self):
+ """
+ Test the not-acquired modes works as expected for a simple case
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range(1, 6):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
+
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode = 1)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "b")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
+
+ for i in range(6, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "Message %s" % i))
+
+ #both subscribers should see all messages
+ qA = session.incoming("a")
+ qB = session.incoming("b")
+ for i in range(1, 11):
+ for q in [qA, qB]:
+ msg = q.get(timeout = 1)
+ self.assertEquals("Message %s" % i, msg.body)
+ #TODO: tidy up completion
+ session.receiver._completed.add(msg.id)
+
+ #TODO: tidy up completion
+ session.channel.session_completed(session.receiver._completed)
+ #messages should still be on the queue:
+ self.assertEquals(10, session.queue_query(queue = "q").message_count)
+
+ def test_acquire_with_no_accept_and_credit_flow(self):
+ """
+ Test that messages received unacquired, with accept not
+ required in windowing mode can be acquired.
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "acquire me"))
+
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1, accept_mode = 1)
+ session.message_set_flow_mode(flow_mode = session.flow_mode.credit, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ msg = session.incoming("a").get(timeout = 1)
+ self.assertEquals("acquire me", msg.body)
+ #message should still be on the queue:
+ self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+ transfers = RangedSet(msg.id)
+ response = session.message_acquire(transfers)
+ #check that we get notification (i.e. message_acquired)
+ self.assert_(msg.id in response.transfers)
+ #message should have been removed from the queue:
+ self.assertEquals(0, session.queue_query(queue = "q").message_count)
+
+ def test_acquire(self):
+ """
+ Test explicit acquire function
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "acquire me"))
+
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode = 1)
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ msg = session.incoming("a").get(timeout = 1)
+ self.assertEquals("acquire me", msg.body)
+ #message should still be on the queue:
+ self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+ transfers = RangedSet(msg.id)
+ response = session.message_acquire(transfers)
+ #check that we get notification (i.e. message_acquired)
+ self.assert_(msg.id in response.transfers)
+ #message should have been removed from the queue:
+ self.assertEquals(0, session.queue_query(queue = "q").message_count)
+ session.message_accept(transfers)
+
+
+ def test_release(self):
+ """
+ Test explicit release function
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "release me"))
+
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_flow(destination="a", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="a", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ msg = session.incoming("a").get(timeout = 1)
+ self.assertEquals("release me", msg.body)
+ session.message_cancel(destination = "a")
+ session.message_release(RangedSet(msg.id))
+
+ #message should not have been removed from the queue:
+ self.assertEquals(1, session.queue_query(queue = "q").message_count)
+
+ def test_release_ordering(self):
+ """
+ Test order of released messages is as expected
+ """
+ session = self.session
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range (1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "released message %s" % (i)))
+
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ queue = session.incoming("a")
+ first = queue.get(timeout = 1)
+ for i in range(2, 10):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("released message %s" % (i), msg.body)
+
+ last = queue.get(timeout = 1)
+ self.assertEmpty(queue)
+ released = RangedSet()
+ released.add(first.id, last.id)
+ session.message_release(released)
+
+ #TODO: may want to clean this up...
+ session.receiver._completed.add(first.id, last.id)
+ session.channel.session_completed(session.receiver._completed)
+
+ for i in range(1, 11):
+ self.assertEquals("released message %s" % (i), queue.get(timeout = 1).body)
+
+ def test_ranged_ack(self):
+ """
+ Test acking of messages ranges
+ """
+ session = self.conn.session("alternate-session", timeout=10)
+
+ session.queue_declare(queue = "q", auto_delete=True)
+ delivery_properties = session.delivery_properties(routing_key="q")
+ for i in range (1, 11):
+ session.message_transfer(message=Message(delivery_properties, "message %s" % (i)))
+
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ queue = session.incoming("a")
+ ids = []
+ for i in range (1, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message %s" % (i), msg.body)
+ ids.append(msg.id)
+
+ self.assertEmpty(queue)
+
+ #ack all but the fourth message (command id 2)
+ accepted = RangedSet()
+ accepted.add(ids[0], ids[2])
+ accepted.add(ids[4], ids[9])
+ session.message_accept(accepted)
+
+ #subscribe from second session here to ensure queue is not
+ #auto-deleted when alternate session closes (no need to ack on these):
+ self.session.message_subscribe(queue = "q", destination = "checker")
+
+ #now close the session, and see that the unacked messages are
+ #then redelivered to another subscriber:
+ session.close(timeout=10)
+
+ session = self.session
+ session.message_flow(destination="checker", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="checker", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ queue = session.incoming("checker")
+
+ self.assertEquals("message 4", queue.get(timeout = 1).body)
+ self.assertEmpty(queue)
+
+ def test_subscribe_not_acquired_2(self):
+ session = self.session
+
+ #publish some messages
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #consume some of them
+ session.message_subscribe(queue = "q", destination = "a")
+ session.message_set_flow_mode(flow_mode = 0, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 5, destination = "a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+
+ queue = session.incoming("a")
+ for i in range(1, 6):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ #complete and accept
+ session.message_accept(RangedSet(msg.id))
+ #TODO: tidy up completion
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ self.assertEmpty(queue)
+
+ #now create a not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
+
+ #check it gets those not consumed
+ queue = session.incoming("b")
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ for i in range(6, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ session.message_release(RangedSet(msg.id))
+ #TODO: tidy up completion
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ self.assertEmpty(queue)
+
+ #check all 'browsed' messages are still on the queue
+ self.assertEqual(5, session.queue_query(queue="q").message_count)
+
+ def test_subscribe_not_acquired_3(self):
+ session = self.session
+
+ #publish some messages
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #create a not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+
+ #browse through messages
+ queue = session.incoming("a")
+ for i in range(1, 11):
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ if (i % 2):
+ #try to acquire every second message
+ response = session.message_acquire(RangedSet(msg.id))
+ #check that acquire succeeds
+ self.assert_(msg.id in response.transfers)
+ session.message_accept(RangedSet(msg.id))
+ else:
+ session.message_release(RangedSet(msg.id))
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ self.assertEmpty(queue)
+
+ #create a second not-acquired subscriber
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ #check it gets those not consumed
+ queue = session.incoming("b")
+ for i in [2,4,6,8,10]:
+ msg = queue.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ session.message_release(RangedSet(msg.id))
+ session.receiver._completed.add(msg.id)
+ session.channel.session_completed(session.receiver._completed)
+ session.message_flow(unit = session.credit_unit.message, value = 1, destination = "b")
+ self.assertEmpty(queue)
+
+ #check all 'browsed' messages are still on the queue
+ self.assertEqual(5, session.queue_query(queue="q").message_count)
+
+ def test_release_unacquired(self):
+ session = self.session
+
+ #create queue
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ #send message
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "my-message"))
+
+ #create two 'browsers'
+ session.message_subscribe(queue = "q", destination = "a", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+ queueA = session.incoming("a")
+
+ session.message_subscribe(queue = "q", destination = "b", acquire_mode=1)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "b")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "b")
+ queueB = session.incoming("b")
+
+ #have each browser release the message
+ msgA = queueA.get(timeout = 1)
+ session.message_release(RangedSet(msgA.id))
+
+ msgB = queueB.get(timeout = 1)
+ session.message_release(RangedSet(msgB.id))
+
+ #cancel browsers
+ session.message_cancel(destination = "a")
+ session.message_cancel(destination = "b")
+
+ #create consumer
+ session.message_subscribe(queue = "q", destination = "c")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "c")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "c")
+ queueC = session.incoming("c")
+ #consume the message then ack it
+ msgC = queueC.get(timeout = 1)
+ session.message_accept(RangedSet(msgC.id))
+ #ensure there are no other messages
+ self.assertEmpty(queueC)
+
+ def test_release_order(self):
+ session = self.session
+
+ #create queue
+ session.queue_declare(queue = "q", exclusive=True, auto_delete=True)
+
+ #send messages
+ for i in range(1, 11):
+ session.message_transfer(message=Message(session.delivery_properties(routing_key="q"), "message-%d" % (i)))
+
+ #subscribe:
+ session.message_subscribe(queue="q", destination="a")
+ a = session.incoming("a")
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
+ session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
+
+ for i in range(1, 11):
+ msg = a.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+ if (i % 2):
+ #accept all odd messages
+ session.message_accept(RangedSet(msg.id))
+ else:
+ #release all even messages
+ session.message_release(RangedSet(msg.id))
+
+ session.message_subscribe(queue="q", destination="b", acquire_mode=0)
+ b = session.incoming("b")
+ b.start()
+ for i in [2, 4, 6, 8, 10]:
+ msg = b.get(timeout = 1)
+ self.assertEquals("message-%d" % (i), msg.body)
+
+
+ def test_empty_body(self):
+ session = self.session
+ session.queue_declare(queue="xyz", exclusive=True, auto_delete=True)
+ props = session.delivery_properties(routing_key="xyz")
+ session.message_transfer(message=Message(props, ""))
+
+ consumer_tag = "tag1"
+ session.message_subscribe(queue="xyz", destination=consumer_tag)
+ session.message_flow(unit = session.credit_unit.message, value = 0xFFFFFFFFL, destination = consumer_tag)
+ session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = consumer_tag)
+ queue = session.incoming(consumer_tag)
+ msg = queue.get(timeout=1)
+ self.assertEquals("", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ def test_incoming_start(self):
+ q = "test_incoming_start"
+ session = self.session
+
+ session.queue_declare(queue=q, exclusive=True, auto_delete=True)
+ session.message_subscribe(queue=q, destination="msgs")
+ messages = session.incoming("msgs")
+ assert messages.destination == "msgs"
+
+ dp = session.delivery_properties(routing_key=q)
+ session.message_transfer(message=Message(dp, "test"))
+
+ messages.start()
+ msg = messages.get()
+ assert msg.body == "test"
+
+ def test_ttl(self):
+ q = "test_ttl"
+ session = self.session
+
+ session.queue_declare(queue=q, exclusive=True, auto_delete=True)
+
+ dp = session.delivery_properties(routing_key=q, ttl=500)#expire in half a second
+ session.message_transfer(message=Message(dp, "first"))
+
+ dp = session.delivery_properties(routing_key=q, ttl=300000)#expire in five minutes
+ session.message_transfer(message=Message(dp, "second"))
+
+ d = "msgs"
+ session.message_subscribe(queue=q, destination=d)
+ messages = session.incoming(d)
+ sleep(1)
+ session.message_flow(unit = session.credit_unit.message, value=2, destination=d)
+ session.message_flow(unit = session.credit_unit.byte, value=0xFFFFFFFFL, destination=d)
+ assert messages.get(timeout=1).body == "second"
+ self.assertEmpty(messages)
+
+ def assertDataEquals(self, session, msg, expected):
+ self.assertEquals(expected, msg.body)
+
+ def assertEmpty(self, queue):
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Queue not empty, contains: " + extra.body)
+ except Empty: None
+
+class SizelessContent(Content):
+
+ def size(self):
+ return None
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/msg_groups.py b/qpid/tests/src/py/qpid_tests/broker_0_10/msg_groups.py
new file mode 100644
index 0000000000..ec015e1be4
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/msg_groups.py
@@ -0,0 +1,1195 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.messaging import *
+from qpid.tests.messaging import Base
+import qmf.console
+
+from time import sleep
+#
+# Tests the Broker's support for message groups
+#
+
+class MultiConsumerMsgGroupTests(Base):
+ """
+ Tests for the behavior of multi-consumer message groups. These tests allow
+ messages from the same group to be consumed by multiple different clients as
+ long as each message is processed "in sequence". See QPID-3346 for
+ details.
+ """
+
+ def setup_connection(self):
+ return Connection.establish(self.broker, **self.connection_options())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def test_simple(self):
+ """ Verify simple acquire/accept actions on a set of grouped
+ messages shared between two receivers.
+ """
+ ## Create a msg group queue
+
+ snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
+ " node: {x-declare: {arguments:" +
+ " {'qpid.group_header_key':'THE-GROUP'," +
+ "'qpid.shared_msg_group':1}}}}")
+
+ groups = ["A","A","A","B","B","B","C","C","C"]
+ messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
+ index = 0
+ for m in messages:
+ m.content['index'] = index
+ index += 1
+ snd.send(m)
+
+ ## Queue = a-0, a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
+ ## Owners= ---, ---, ---, ---, ---, ---, ---, ---, ---,
+
+ # create consumers on separate sessions: C1,C2
+ s1 = self.setup_session()
+ c1 = s1.receiver("msg-group-q", options={"capacity":0})
+ s2 = self.setup_session()
+ c2 = s2.receiver("msg-group-q", options={"capacity":0})
+
+ # C1 should acquire A-0, then C2 should acquire B-3
+
+ m1 = c1.fetch(0);
+ assert m1.properties['THE-GROUP'] == 'A'
+ assert m1.content['index'] == 0
+
+ m2 = c2.fetch(0);
+ assert m2.properties['THE-GROUP'] == 'B'
+ assert m2.content['index'] == 3
+
+ # C1 Acknowledge A-0
+ c1.session.acknowledge(m1);
+
+ # C2 should next acquire A-1
+ m3 = c2.fetch(0);
+ assert m3.properties['THE-GROUP'] == 'A'
+ assert m3.content['index'] == 1
+
+ # C1 should next acquire C-6, since groups A&B are held by c2
+ m4 = c1.fetch(0);
+ assert m4.properties['THE-GROUP'] == 'C'
+ assert m4.content['index'] == 6
+
+ ## Queue = XXX, a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
+ ## Owners= ---, ^C2, +C2, ^C2, +C2, +C2, ^C1, +C1, +C1,
+
+ # C2 Acknowledge B-3, freeing up the rest of B group
+ c2.session.acknowledge(m2);
+
+ ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, c-6, c-7, c-8...
+ ## Owners= ---, ^C2, +C2, ---, ---, ---, ^C1, +C1, +C1,
+
+ # C1 should now acquire B-4, since it is next "free"
+ m5 = c1.fetch(0);
+ assert m5.properties['THE-GROUP'] == 'B'
+ assert m5.content['index'] == 4
+
+ ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, c-6, c-7, c-8...
+ ## Owners= ---, ^C2, +C2, ---, ^C1, +C1, ^C1, +C1, +C1,
+
+ # C1 acknowledges C-6, freeing the C group
+ c1.session.acknowledge(m4)
+
+ ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, XXX, c-7, c-8...
+ ## Owners= ---, ^C2, +C2, ---, ^C1, +C1, ---, ---, ---
+
+ # C2 should next fetch A-2, followed by C-7
+ m7 = c2.fetch(0);
+ assert m7.properties['THE-GROUP'] == 'A'
+ assert m7.content['index'] == 2
+
+ m8 = c2.fetch(0);
+ assert m8.properties['THE-GROUP'] == 'C'
+ assert m8.content['index'] == 7
+
+ ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, XXX, c-7, c-8...
+ ## Owners= ---, ^C2, ^C2, ---, ^C1, +C1, ---, ^C2, +C2
+
+ # have C2 ack all fetched messages, freeing C-8
+ c2.session.acknowledge()
+
+ ## Queue = XXX, XXX, XXX, XXX, b-4, b-5, XXX, XXX, c-8...
+ ## Owners= ---, ---, ---, ---, ^C1, +C1, ---, ---, ---
+
+ # the next fetch of C2 would get C-8, since B-5 is "owned"
+ m9 = c2.fetch(0);
+ assert m9.properties['THE-GROUP'] == 'C'
+ assert m9.content['index'] == 8
+
+ ## Queue = XXX, XXX, XXX, XXX, b-4, b-5, XXX, XXX, c-8...
+ ## Owners= ---, ---, ---, ---, ^C1, +C1, ---, ---, ^C2
+
+ # C1 acks B-4, freeing B-5 for consumption
+ c1.session.acknowledge(m5)
+
+ ## Queue = XXX, XXX, XXX, XXX, XXX, b-5, XXX, XXX, c-8...
+ ## Owners= ---, ---, ---, ---, ---, ^C2, ---, ---, ^C2
+
+ # the next fetch of C2 would get B-5
+ m10 = c2.fetch(0);
+ assert m10.properties['THE-GROUP'] == 'B'
+ assert m10.content['index'] == 5
+
+ # there should be no more left for C1:
+ try:
+ mx = c1.fetch(0)
+ assert False # should never get here
+ except Empty:
+ pass
+
+ c1.session.acknowledge()
+ c2.session.acknowledge()
+ c1.close()
+ c2.close()
+ snd.close()
+
    def test_simple_browse(self):
        """ Test the behavior of a browsing subscription on a message grouping
        queue.

        A browser must be able to see every message on the queue, including
        messages whose group is currently owned by another consumer.
        """

        ## Create a msg group queue

        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        # send 5 messages, each tagged with its group id and queue position
        groups = ["A","B","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ---, ---, ---, ---, ---

        # create consumer and browser
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})

        # browser sees the head message without acquiring it
        m2 = b1.fetch(0);
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0

        # C1 should acquire A-0

        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ^C1, ---, +C1, ---, ---

        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1

        # verify that the browser may see A-2, even though its group is owned
        # by C1
        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 2

        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 3

        # verify the consumer can own groups currently seen by the browser
        m3 = c1.fetch(0);
        assert m3.properties['THE-GROUP'] == 'B'
        assert m3.content['index'] == 1

        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 4
+
    def test_release(self):
        """ Verify that releasing a message frees its associated group,
        allowing another consumer to acquire it.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 acquires A-0, taking ownership of group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # C2 must skip the owned A group and acquire B-2
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2

        # C1 release m1, and the first group

        s1.acknowledge(m1, Disposition(RELEASED, set_redelivered=True))

        # C2 should be able to get group 'A', msg 'A-0' now
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0
+
    def test_reject(self):
        """ Verify rejecting a message can free its associated group.

        Unlike release, the rejected message itself is removed from the
        queue, so the next consumer sees the following member of the group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 acquires A-0, owning group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # C2 skips the owned A group and acquires B-2
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2

        # C1 rejects m1, and the first group is released
        s1.acknowledge(m1, Disposition(REJECTED))

        # C2 should be able to get group 'A', msg 'A-1' now
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 1
+
    def test_close(self):
        """ Verify behavior when a consumer that 'owns' a group closes.

        Closing only the receiver is not enough to free the group — the
        owning session must close before other consumers may acquire it.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 will own group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # C2 will own group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2

        # C1 shuffles off the mortal coil...
        c1.close();

        # but the session (s1) remains active, so "A" remains blocked
        # from c2, c2 should fetch the next B-3

        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 3

        # and there should be no more messages available for C2
        try:
            m2 = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass

        # close session s1, releasing the A group
        s1.close()

        # C2 may now consume the released A messages, in order
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0

        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 1

        # and there should be no more messages now
        try:
            m2 = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass
+
    def test_transaction(self):
        """ Verify behavior when using transactions.

        Group ownership follows the transaction: acknowledged messages
        remain "owned" until commit, and rollback returns both the
        messages and their group ownership to the queue.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","A","B","B","A","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        # both consumers use transactional sessions
        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.conn.session(transactional=True)
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 gets group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # C2 gets group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2

        s1.acknowledge(m1) # A-0 consumed, A group freed
        s2.acknowledge(m2) # B-2 consumed, B group freed

        s1.commit() # A-0 consumption done, A group now free
        s2.rollback() # releases B-2, and group B

        ## Q: ["A1","B2","B3","A4","B5"]

        # C2 should be able to get the next A
        m3 = c2.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 1

        # C1 should be able to get B-2
        m4 = c1.fetch(0)
        assert m4.properties['THE-GROUP'] == 'B'
        assert m4.content['index'] == 2

        s2.acknowledge(m3) # C2 consumes A-1
        s1.acknowledge(m4) # C1 consumes B-2
        s1.commit() # C1 consume B-2 occurs, free group B

        ## Q: [["A1",]"B3","A4","B5"]

        # A-1 is still considered owned by C2, since the commit has yet to
        # occur, so the next available to C1 would be B-3
        m5 = c1.fetch(0) # B-3
        assert m5.properties['THE-GROUP'] == 'B'
        assert m5.content['index'] == 3

        # and C2 should find A-4 available, since it owns the A group
        m6 = c2.fetch(0) # A-4
        assert m6.properties['THE-GROUP'] == 'A'
        assert m6.content['index'] == 4

        s2.acknowledge(m6) # C2 consumes A-4

        # uh-oh, A-1 and A-4 released, along with A group
        s2.rollback()

        ## Q: ["A1",["B3"],"A4","B5"]
        m7 = c1.fetch(0) # A-1 is found
        assert m7.properties['THE-GROUP'] == 'A'
        assert m7.content['index'] == 1

        ## Q: [["A1"],["B3"],"A4","B5"]
        # since C1 "owns" both A and B group, C2 should find nothing available
        try:
            m8 = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass

        # C1 next gets A4
        m9 = c1.fetch(0)
        assert m9.properties['THE-GROUP'] == 'A'
        assert m9.content['index'] == 4

        s1.acknowledge()

        ## Q: [["A1"],["B3"],["A4"],"B5"]
        # even though C1 acknowledges A1,B3, and A4, B5 is still considered
        # owned as the commit has yet to take place
        try:
            m10 = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass

        # now A1,B3,A4 dequeued, B5 should be free
        s1.commit()

        ## Q: ["B5"]
        m11 = c2.fetch(0)
        assert m11.properties['THE-GROUP'] == 'B'
        assert m11.content['index'] == 5

        s2.acknowledge()
        s2.commit()
+
    def test_send_transaction(self):
        """ Verify behavior when sender is using transactions.

        Messages sent but not yet committed must be invisible to consumers,
        and must become available (in order) once the sender commits.
        """
        ssn = self.conn.session(transactional=True)
        snd = ssn.sender("msg-group-q; {create:always, delete:sender," +
                         " node: {x-declare: {arguments:" +
                         " {'qpid.group_header_key':'THE-GROUP'," +
                         "'qpid.shared_msg_group':1}}}}")

        msg = Message(content={'index':0}, properties={"THE-GROUP": "A"})
        snd.send(msg)
        msg = Message(content={'index':1}, properties={"THE-GROUP": "B"})
        snd.send(msg)
        snd.session.commit()
        msg = Message(content={'index':2}, properties={"THE-GROUP": "A"})
        snd.send(msg)

        # Queue: [A0,B1, (uncommitted: A2) ]

        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.conn.session(transactional=True)
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 gets A0, group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # C2 gets B1, group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1

        # Since A2 uncommitted, there should be nothing left to fetch
        try:
            mX = c1.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        try:
            mX = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass

        # commit A2, then send (but do not commit) B3
        snd.session.commit()
        msg = Message(content={'index':3}, properties={"THE-GROUP": "B"})
        snd.send(msg)

        # Queue: [A2, (uncommitted: B3) ]

        # B3 has yet to be committed, so C2 should see nothing available:
        try:
            mX = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass

        # but A2 should be available to C1
        m3 = c1.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 2

        # now make B3 available
        snd.session.commit()

        # C1 should still be done:
        try:
            mX = c1.fetch(0)
            assert False # should not get here
        except Empty:
            pass

        # but C2 should find the new B
        m4 = c2.fetch(0)
        assert m4.properties['THE-GROUP'] == 'B'
        assert m4.content['index'] == 3

        # extra: have C1 rollback, verify C2 finds the released 'A' messages
        c1.session.rollback()

        ## Q: ["A0","A2"]

        # C2 should be able to get the next A
        m5 = c2.fetch(0)
        assert m5.properties['THE-GROUP'] == 'A'
        assert m5.content['index'] == 0

        m6 = c2.fetch(0)
        assert m6.properties['THE-GROUP'] == 'A'
        assert m6.content['index'] == 2

        c2.session.acknowledge()
        c2.session.commit()
+
    def test_query(self):
        """ Verify the queue query method against message groups.

        The QMF queue query must report, per group, the message count and
        whether a consumer currently owns the group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","C","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        m1 = c1.fetch(0)
        m2 = c2.fetch(0)

        # at this point, group A should be owned by C1, group B by C2, and
        # group C should be available

        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]

        # verify the query method call's group information
        rc = broker.query("queue", "msg-group-q")
        assert rc.status == 0
        assert rc.text == "OK"
        results = rc.outArgs['results']
        assert 'qpid.message_group_queue' in results
        q_info = results['qpid.message_group_queue']
        assert 'group_header_key' in q_info and q_info['group_header_key'] == "THE-GROUP"
        assert 'group_state' in q_info and len(q_info['group_state']) == 3
        # A and B are owned (non-empty 'consumer'); C is unowned.
        # msg_count includes messages currently acquired by the owner.
        for g_info in q_info['group_state']:
            assert 'group_id' in g_info
            if g_info['group_id'] == "A":
                assert g_info['msg_count'] == 3
                assert g_info['consumer'] != ""
            elif g_info['group_id'] == "B":
                assert g_info['msg_count'] == 2
                assert g_info['consumer'] != ""
            elif g_info['group_id'] == "C":
                assert g_info['msg_count'] == 2
                assert g_info['consumer'] == ""
            else:
                assert(False) # should never get here
        self.qmf_session.delBroker(self.qmf_broker)
+
    def test_purge_free(self):
        """ Verify we can purge a queue of all messages of a given "unowned"
        group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
        assert queue
        # purge only messages whose group header matches "B";
        # a count of 0 means "purge all matching messages"
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "B" }}
        assert queue.msgDepth == 6
        rc = queue.purge(0, msg_filter)
        assert rc.status == 0
        queue.update()
        assert queue.msgDepth == 4

        # verify all B's removed....
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'B'
                count += 1
        except Empty:
            pass
        assert count == 4

        self.qmf_session.delBroker(self.qmf_broker)
+
+ def test_purge_acquired(self):
+ """ Verify we can purge messages from an acquired group.
+ """
+ snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
+ " node: {x-declare: {arguments:" +
+ " {'qpid.group_header_key':'THE-GROUP'," +
+ "'qpid.shared_msg_group':1}}}}")
+
+ groups = ["A","B","A","B","C","A"]
+ messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
+ index = 0
+ for m in messages:
+ m.content['index'] = index
+ index += 1
+ snd.send(m)
+
+ # acquire group "A"
+ s1 = self.setup_session()
+ c1 = s1.receiver("msg-group-q", options={"capacity":0})
+ m1 = c1.fetch(0)
+ assert m1.properties['THE-GROUP'] == 'A'
+ assert m1.content['index'] == 0
+
+ # now setup a QMF session, so we can purge group A
+ self.qmf_session = qmf.console.Session()
+ self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
+ queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
+ assert queue
+ msg_filter = { 'filter_type' : 'header_match_str',
+ 'filter_params' : { 'header_key' : "THE-GROUP",
+ 'header_value' : "A" }}
+ assert queue.msgDepth == 6
+ rc = queue.purge(0, msg_filter)
+ assert rc.status == 0
+ queue.update()
+ queue.msgDepth == 4 # the pending acquired A still counts!
+ s1.acknowledge()
+
+ # verify all other A's removed....
+ s2 = self.setup_session()
+ b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
+ count = 0
+ try:
+ while True:
+ m2 = b1.fetch(0)
+ assert m2.properties['THE-GROUP'] != 'A'
+ count += 1
+ except Empty:
+ pass
+ assert count == 3 # only 3 really available
+ s1.acknowledge() # ack the consumed A-0
+ self.qmf_session.delBroker(self.qmf_broker)
+
+ def test_purge_count(self):
+ """ Verify we can purge a fixed number of messages from an acquired
+ group.
+ """
+ snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
+ " node: {x-declare: {arguments:" +
+ " {'qpid.group_header_key':'THE-GROUP'," +
+ "'qpid.shared_msg_group':1}}}}")
+
+ groups = ["A","B","A","B","C","A"]
+ messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
+ index = 0
+ for m in messages:
+ m.content['index'] = index
+ index += 1
+ snd.send(m)
+
+ # acquire group "A"
+ s1 = self.setup_session()
+ c1 = s1.receiver("msg-group-q", options={"capacity":0})
+ m1 = c1.fetch(0)
+ assert m1.properties['THE-GROUP'] == 'A'
+ assert m1.content['index'] == 0
+
+ # now setup a QMF session, so we can purge group A
+ self.qmf_session = qmf.console.Session()
+ self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
+ queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
+ assert queue
+ msg_filter = { 'filter_type' : 'header_match_str',
+ 'filter_params' : { 'header_key' : "THE-GROUP",
+ 'header_value' : "A" }}
+ assert queue.msgDepth == 6
+ rc = queue.purge(1, msg_filter)
+ assert rc.status == 0
+ queue.update()
+ queue.msgDepth == 5 # the pending acquired A still counts!
+
+ # verify all other A's removed....
+ s2 = self.setup_session()
+ b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
+ count = 0
+ a_count = 0
+ try:
+ while True:
+ m2 = b1.fetch(0)
+ if m2.properties['THE-GROUP'] != 'A':
+ count += 1
+ else:
+ a_count += 1
+ except Empty:
+ pass
+ assert count == 3 # non-A's
+ assert a_count == 1 # assumes the acquired message was not the one purged and regular browsers don't get acquired messages
+ s1.acknowledge() # ack the consumed A-0
+ self.qmf_session.delBroker(self.qmf_broker)
+
    def test_move_all(self):
        """ Verify we can move messages from an acquired group.

        All unacquired members of the group are moved to the destination
        queue; the acquired member remains on the source queue.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        # set up destination queue
        rcvr = self.ssn.receiver("dest-q; {create:always, delete:receiver," +
                                 " node: {x-declare: {arguments:" +
                                 " {'qpid.group_header_key':'THE-GROUP'," +
                                 "'qpid.shared_msg_group':1}}}}")

        # acquire group "A"
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # now setup a QMF session, so we can move what's left of group A
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        # move every message matching group "A" (count of 0 == all)
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "A" }}
        rc = broker.queueMoveMessages("msg-group-q", "dest-q", 0, msg_filter)
        assert rc.status == 0

        # verify all other A's removed from msg-group-q
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'A'
                count += 1
        except Empty:
            pass
        assert count == 3 # only 3 really available

        # verify the moved A's are at the dest-q
        s2 = self.setup_session()
        b1 = s2.receiver("dest-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] == 'A'
                assert m2.content['index'] == 2 or m2.content['index'] == 5
                count += 1
        except Empty:
            pass
        assert count == 2 # two A's moved

        s1.acknowledge() # ack the consumed A-0
        self.qmf_session.delBroker(self.qmf_broker)
+
    def test_move_count(self):
        """ Verify we can move a fixed number of messages from an acquired group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        # set up destination queue
        rcvr = self.ssn.receiver("dest-q; {create:always, delete:receiver," +
                                 " node: {x-declare: {arguments:" +
                                 " {'qpid.group_header_key':'THE-GROUP'," +
                                 "'qpid.shared_msg_group':1}}}}")

        # now setup a QMF session, so we can move group B
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        # request up to 3 matching messages; only 2 B's exist on the queue
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "B" }}
        rc = broker.queueMoveMessages("msg-group-q", "dest-q", 3, msg_filter)
        assert rc.status == 0

        # verify all B's removed from msg-group-q
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'B'
                count += 1
        except Empty:
            pass
        assert count == 4

        # verify the moved B's are at the dest-q
        s2 = self.setup_session()
        b1 = s2.receiver("dest-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] == 'B'
                assert m2.content['index'] == 1 or m2.content['index'] == 3
                count += 1
        except Empty:
            pass
        assert count == 2

        self.qmf_session.delBroker(self.qmf_broker)
+
+ def test_reroute(self):
+ """ Verify we can reroute messages from an acquired group.
+ """
+ snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
+ " node: {x-declare: {arguments:" +
+ " {'qpid.group_header_key':'THE-GROUP'," +
+ "'qpid.shared_msg_group':1}}}}")
+
+ groups = ["A","B","A","B","C","A"]
+ messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
+ index = 0
+ for m in messages:
+ m.content['index'] = index
+ index += 1
+ snd.send(m)
+
+ # create a topic exchange for the reroute
+ rcvr = self.ssn.receiver("reroute-q; {create: always, delete:receiver," +
+ " node: {type: topic}}")
+
+ # acquire group "A"
+ s1 = self.setup_session()
+ c1 = s1.receiver("msg-group-q", options={"capacity":0})
+ m1 = c1.fetch(0)
+ assert m1.properties['THE-GROUP'] == 'A'
+ assert m1.content['index'] == 0
+
+ # now setup a QMF session, so we can reroute group A
+ self.qmf_session = qmf.console.Session()
+ self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
+ queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
+ assert queue
+ msg_filter = { 'filter_type' : 'header_match_str',
+ 'filter_params' : { 'header_key' : "THE-GROUP",
+ 'header_value' : "A" }}
+ assert queue.msgDepth == 6
+ rc = queue.reroute(0, False, "reroute-q", msg_filter)
+ assert rc.status == 0
+ queue.update()
+ queue.msgDepth == 4 # the pending acquired A still counts!
+
+ # verify all other A's removed....
+ s2 = self.setup_session()
+ b1 = s2.receiver("msg-group-q", options={"capacity":0})
+ count = 0
+ try:
+ while True:
+ m2 = b1.fetch(0)
+ assert m2.properties['THE-GROUP'] != 'A'
+ count += 1
+ except Empty:
+ pass
+ assert count == 3 # only 3 really available
+
+ # and what of reroute-q?
+ count = 0
+ try:
+ while True:
+ m2 = rcvr.fetch(0)
+ assert m2.properties['THE-GROUP'] == 'A'
+ assert m2.content['index'] == 2 or m2.content['index'] == 5
+ count += 1
+ except Empty:
+ pass
+ assert count == 2
+
+ s1.acknowledge() # ack the consumed A-0
+ self.qmf_session.delBroker(self.qmf_broker)
+
    def test_queue_delete(self):
        """ Test deleting a queue while consumers are active.

        The broker must cleanly tear down group state when the queue is
        deleted out from under consumers that own groups.
        """

        ## Create a msg group queue

        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ---, ---, ---, ---, ---

        # create consumers
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 should acquire A-0
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        # c2 acquires B-1
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1

        # with group A and B owned, and C free, delete the
        # queue (closing the sender triggers delete:sender)
        snd.close()
        self.ssn.close()
+
    def test_default_group_id(self):
        """ Verify the queue assigns the default group id should a message
        arrive without a group identifier.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        # deliberately send a message with no "THE-GROUP" header
        m = Message(content={}, properties={"NO-GROUP-HEADER":"HA-HA"})
        snd.send(m)

        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]

        # grab the group state off the queue, and verify the default group is
        # present ("qpid.no-group" is the broker default)
        rc = broker.query("queue", "msg-group-q")
        assert rc.status == 0
        assert rc.text == "OK"
        results = rc.outArgs['results']
        assert 'qpid.message_group_queue' in results
        q_info = results['qpid.message_group_queue']
        assert 'group_header_key' in q_info and q_info['group_header_key'] == "THE-GROUP"
        assert 'group_state' in q_info and len(q_info['group_state']) == 1
        g_info = q_info['group_state'][0]
        assert 'group_id' in g_info
        assert g_info['group_id'] == 'qpid.no-group'

        self.qmf_session.delBroker(self.qmf_broker)
+
+
    def test_transaction_order(self):
        """ Verify that rollback does not reorder the messages with respect to
        the consumer (QPID-3804)
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)

        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})

        # C1 gets group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        s1.acknowledge(m1)

        s1.rollback() # release A back to the queue

        # the order should be preserved as follows:

        # A-0 must come back first, not after B-1 or A-2
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        m2 = c1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1

        m3 = c1.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 2

        s1.commit()

        c1.close()
        s1.close()
        snd.close()
+
+
    def test_ttl_expire(self):
        """ Verify that expired (TTL) group messages are skipped correctly.

        All members of group B carry a short TTL and are allowed to expire
        before the consumers start; they must never be delivered.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")

        groups = ["A","B","C","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            if m.properties['THE-GROUP'] == 'B':
                m.ttl = 1;
            snd.send(m)

        sleep(2) # let all B's expire

        # create consumers on separate sessions: C1,C2
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})

        # C1 should acquire A-0, then C2 should acquire C-2, Group B should
        # expire and never be fetched

        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0

        m2 = c2.fetch(0);
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 2

        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 3

        m2 = c2.fetch(0);
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 5

        # there should be no more left for either consumer
        try:
            mx = c1.fetch(0)
            assert False # should never get here
        except Empty:
            pass
        try:
            mx = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass

        c1.session.acknowledge()
        c2.session.acknowledge()
        c1.close()
        c2.close()
        snd.close()
+
+
class StickyConsumerMsgGroupTests(Base):
    """
    Placeholder for sticky-consumer message group tests.  With sticky
    grouping, every message belonging to a given group is expected to be
    delivered to the same client.  See QPID-3347 for details.
    """
    pass # TBD
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/new_api.py b/qpid/tests/src/py/qpid_tests/broker_0_10/new_api.py
new file mode 100644
index 0000000000..4e94395121
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/new_api.py
@@ -0,0 +1,358 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import Base
+from qpidtoollibs import BrokerAgent
+from time import sleep
+
+#
+# Broker tests using the new messaging API
+#
+
+class GeneralTests(Base):
+ """
+ Tests of the API and broker via the new API.
+ """
+
+ def assertEqual(self, left, right, text=None):
+ if not left == right:
+ print "assertEqual failure: %r != %r" % (left, right)
+ if text:
+ print " %r" % text
+ assert None
+
+ def fail(self, text=None):
+ if text:
+ print "Fail: %r" % text
+ assert None
+
+ def setup_connection(self):
+ return Connection.establish(self.broker, **self.connection_options())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def test_not_found(self):
+ ssn = self.setup_session()
+ try:
+ ssn.receiver("does-not-exist")
+ self.fail("Expected non-existent node to cause NotFound exception")
+ except NotFound, e: None
+
+ def test_qpid_3481_acquired_to_alt_exchange(self):
+ """
+ Verify that acquired messages are routed to the alternate when the queue is deleted.
+ """
+ sess1 = self.setup_session()
+ sess2 = self.setup_session()
+
+ tx = sess1.sender("amq.direct/key")
+ rx_main = sess1.receiver("amq.direct/key;{link:{reliability:at-least-once,x-declare:{alternate-exchange:'amq.fanout'}}}")
+ rx_alt = sess2.receiver("amq.fanout")
+ rx_alt.capacity = 10
+
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+
+ msg = rx_main.fetch()
+ msg = rx_main.fetch()
+ msg = rx_main.fetch()
+
+ self.assertEqual(rx_alt.available(), 0, "No messages should have been routed to the alt_exchange")
+
+ sess1.close()
+ sleep(1)
+ self.assertEqual(rx_alt.available(), 5, "All 5 messages should have been routed to the alt_exchange")
+
+ sess2.close()
+
+ def test_qpid_3481_acquired_to_alt_exchange_2_consumers(self):
+ """
+ Verify that acquired messages are routed to the alternate when the queue is deleted.
+ """
+ sess1 = self.setup_session()
+ sess2 = self.setup_session()
+ sess3 = self.setup_session()
+ sess4 = self.setup_session()
+
+ tx = sess1.sender("test_acquired;{create:always,delete:always,node:{x-declare:{alternate-exchange:'amq.fanout'}}}")
+ rx_main1 = sess2.receiver("test_acquired")
+ rx_main2 = sess3.receiver("test_acquired")
+ rx_alt = sess4.receiver("amq.fanout")
+ rx_alt.capacity = 10
+
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+ tx.send("DATA")
+
+ msg = rx_main1.fetch()
+ msg = rx_main1.fetch()
+ msg = rx_main1.fetch()
+
+ self.assertEqual(rx_alt.available(), 0, "No messages should have been routed to the alt_exchange")
+
+ # Close sess1; This will cause the queue to be deleted and all its messages (including those acquired) to be reouted to the alternate exchange
+ sess1.close()
+ sleep(1)
+ self.assertEqual(rx_alt.available(), 5, "All the messages should have been routed to the alt_exchange")
+
+ # Close sess2; This will cause the acquired messages to be requeued and routed to the alternate
+ sess2.close()
+ for i in range(5):
+ try:
+ m = rx_alt.fetch(0)
+ except:
+ self.fail("failed to receive all 5 messages via alternate exchange")
+
+ sess3.close()
+ self.assertEqual(rx_alt.available(), 0, "No further messages should be received via the alternate exchange")
+
+ sess4.close()
+
+ def test_next_receiver(self):
+ keys = ["a", "b", "c"]
+ receivers = [self.ssn.receiver("amq.direct/%s" % k) for k in keys]
+ for r in receivers:
+ r.capacity = 10
+
+ snd = self.ssn.sender("amq.direct")
+
+ for k in keys:
+ snd.send(Message(subject=k, content=k))
+
+ expected = keys
+ while len(expected):
+ rcv = self.ssn.next_receiver(timeout=self.delay())
+ c = rcv.fetch().content
+ assert c in expected
+ expected.remove(c)
+ self.ssn.acknowledge()
+
+ def test_nolocal_rerouted(self):
+ conn2 = Connection.establish(self.broker, **self.connection_options())
+ ssn2 = conn2.session()
+
+ s1 = self.ssn.sender("holding_q; {create:always, delete:always, node:{x-declare:{alternate-exchange:'amq.fanout'}}}");
+ s2 = ssn2.sender("holding_q");
+
+ s2.send(Message("a"));
+ s1.send(Message("b"));
+ s2.send(Message("c"));
+
+ r = self.ssn.receiver("amq.fanout; {link:{x-declare:{arguments:{'no-local':True}}}}")
+
+ # close connection of one of the publishers
+ conn2.close()
+
+ # close sender which should cause the orphaned messages on
+ # holding_q to be rerouted through alternate exchange onto the
+ # subscription queue of the receiver above
+ s1.close()
+
+ received = []
+ try:
+ while True:
+ received.append(r.fetch(0).content)
+ except Empty: pass
+ self.assertEqual(received, ["a", "c"])
+
+ def _node_disambiguation_test(self, e, q, ambiguous_send=False):
+ s1 = self.ssn.sender("ambiguous; {node:{type:topic}}");
+ s2 = self.ssn.sender("ambiguous; {node:{type:queue}}");
+ s1.send(Message("a"))
+ s2.send(Message("b"))
+ if ambiguous_send:
+ # pure python client defaults to using the queue when the
+ # node name is ambiguous and no type is specified; the
+ # swigged version treats this as an error
+ s3 = self.ssn.sender("ambiguous");
+ s3.send(Message("c"))
+ self.assertEqual(e.fetch().content, "a")
+ self.assertEqual(q.fetch().content, "b")
+ if ambiguous_send:
+ self.assertEqual(q.fetch().content, "c")
+ for r in [e, q]:
+ try:
+ m = r.fetch(timeout=0)
+ self.fail("Found unexpected message %s")
+ except Empty: pass
+
+ def _node_disambiguation_precreated(self, ambiguous_send):
+ agent = BrokerAgent(self.conn)
+ agent.addExchange("fanout", "ambiguous")
+ agent.addQueue("ambiguous")
+ try:
+ r1 = self.ssn.receiver("ambiguous; {node:{type:topic}}")
+ r2 = self.ssn.receiver("ambiguous; {node:{type:queue}}")
+ self._node_disambiguation_test(r1, r2, ambiguous_send=ambiguous_send)
+ finally:
+ agent.delExchange("ambiguous")
+ agent.delQueue("ambiguous", False, False)
+
+ def test_node_disambiguation_1(self):
+ self._node_disambiguation_precreated(False)
+
+ def test_node_disambiguation_2(self):
+ self._node_disambiguation_precreated(True)
+
+ def test_ambiguous_create_1(self):
+ #create queue first, then exchange
+ r1 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:queue}}")
+ r2 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:topic}}")
+ agent = BrokerAgent(self.conn)
+ exchange = agent.getExchange("ambiguous")
+ queue = agent.getQueue("ambiguous")
+ try:
+ assert(exchange)
+ assert(queue)
+ self._node_disambiguation_test(r2, r1)
+ finally:
+ if exchange: agent.delExchange("ambiguous")
+ if queue: agent.delQueue("ambiguous", False, False)
+
+ def test_ambiguous_create_2(self):
+ #create exchange first, then queue
+ r1 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:topic}}")
+ r2 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:queue}}")
+ agent = BrokerAgent(self.conn)
+ exchange = agent.getExchange("ambiguous")
+ queue = agent.getQueue("ambiguous")
+ try:
+ assert(exchange)
+ assert(queue)
+ self._node_disambiguation_test(r1, r2)
+ finally:
+ if exchange: agent.delExchange("ambiguous")
+ if queue: agent.delQueue("ambiguous", False, False)
+
+ def test_ambiguous_delete_1(self):
+ agent = BrokerAgent(self.conn)
+ agent.addExchange("fanout", "ambiguous")
+ agent.addQueue("ambiguous")
+ self.ssn.receiver("ambiguous; {delete:receiver, node:{type:topic}}").close()
+ exchange = agent.getExchange("ambiguous")
+ queue = agent.getQueue("ambiguous")
+ try:
+ assert(not exchange)
+ assert(queue)
+ finally:
+ if exchange: agent.delExchange("ambiguous")
+ if queue: agent.delQueue("ambiguous", False, False)
+
+ def test_ambiguous_delete_2(self):
+ agent = BrokerAgent(self.conn)
+ agent.addExchange("fanout", "ambiguous")
+ agent.addQueue("ambiguous")
+ self.ssn.receiver("ambiguous; {delete:receiver, node:{type:queue}}").close()
+ exchange = agent.getExchange("ambiguous")
+ queue = agent.getQueue("ambiguous")
+ try:
+ assert(exchange)
+ assert(not queue)
+ finally:
+ if exchange: agent.delExchange("ambiguous")
+ if queue: agent.delQueue("ambiguous", False, False)
+
+
+class SequenceNumberTests(Base):
+ """
+ Tests of ring queue sequence number
+ """
+
+ def fail(self, text=None):
+ if text:
+ print "Fail: %r" % text
+ assert None
+
+ def setup_connection(self):
+ return Connection.establish(self.broker, **self.connection_options())
+
+ def setup_session(self):
+ return self.conn.session()
+
+ def setup_sender(self, name="ring-sequence-queue", key="qpid.queue_msg_sequence"):
+ addr = "%s; {create:sender, node: {x-declare: {auto-delete: True, arguments: {'qpid.queue_msg_sequence':'%s', 'qpid.policy_type':'ring', 'qpid.max_count':4}}}}" % (name, key)
+ sender = self.ssn.sender(addr)
+ return sender
+
+ def test_create_sequence_queue(self):
+ """
+ Test a queue with sequencing can be created
+ """
+
+ #setup, declare a queue
+ try:
+ sender = self.setup_sender()
+ except:
+ self.fail("Unable to create ring queue with sequencing enabled")
+
+ def test_get_sequence_number(self):
+ """
+ Test retrieving sequence number for queues
+ """
+
+ key = "k"
+ sender = self.setup_sender("ring-sequence-queue2", key=key)
+
+ # send and receive 1 message and test the sequence number
+ msg = Message()
+ sender.send(msg)
+
+ receiver = self.ssn.receiver("ring-sequence-queue2")
+ msg = receiver.fetch(1)
+ try:
+ seqNo = msg.properties[key]
+ if int(seqNo) != 1:
+ txt = "Unexpected sequence number. Should be 1. Received (%s)" % seqNo
+ self.fail(txt)
+ except:
+ txt = "Unable to get key (%s) from message properties" % key
+ self.fail(txt)
+ receiver.close()
+
+ def test_sequence_number_gap(self):
+ """
+ Test that sequence number for ring queues shows gaps when queue
+ messages are overwritten
+ """
+ key = "qpid.seq"
+ sender = self.setup_sender("ring-sequence-queue3", key=key)
+ receiver = self.ssn.receiver("ring-sequence-queue3")
+
+ msg = Message()
+ sender.send(msg)
+ msg = receiver.fetch(1)
+
+ # send 5 more messages to overflow the queue
+ for i in range(5):
+ sender.send(msg)
+
+ msg = receiver.fetch(1)
+ seqNo = msg.properties[key]
+ if int(seqNo) != 3:
+ txt = "Unexpected sequence number. Should be 3. Received (%s)" % seqNo
+ self.fail(txt)
+ receiver.close()
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/persistence.py b/qpid/tests/src/py/qpid_tests/broker_0_10/persistence.py
new file mode 100644
index 0000000000..e9cf9b7caa
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/persistence.py
@@ -0,0 +1,68 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.datatypes import Message, RangedSet
+#from qpid.testlib import testrunner, TestBase010
+from qpid.testlib import TestBase010
+
class PersistenceTests(TestBase010):
    """Tests exercising durable queues/messages around deletion (0-10 protocol level)."""

    def test_delete_queue_after_publish(self):
        """Deleting a durable auto-delete queue right after async publishes must succeed."""
        session = self.session
        # async mode so the transfers are still in flight when the delete is issued
        session.auto_sync = False

        #create queue
        session.queue_declare(queue = "q", auto_delete=True, durable=True)

        #send message
        for i in range(1, 10):
            dp = session.delivery_properties(routing_key="q", delivery_mode=2)
            session.message_transfer(message=Message(dp, "my-message"))

        session.auto_sync = True
        #explicitly delete queue
        session.queue_delete(queue = "q")

    def test_ack_message_from_deleted_queue(self):
        """Accepting a message after its (auto-deleted) queue is gone must not fail."""
        session = self.session
        session.auto_sync = False

        #create queue
        session.queue_declare(queue = "q", auto_delete=True, durable=True)

        #send message
        dp = session.delivery_properties(routing_key="q", delivery_mode=2)
        session.message_transfer(message=Message(dp, "my-message"))

        #create consumer
        # accept_mode=1/acquire_mode=0 => explicit accept of pre-acquired messages
        session.message_subscribe(queue = "q", destination = "a", accept_mode = 1, acquire_mode=0)
        session.message_flow(unit = session.credit_unit.byte, value = 0xFFFFFFFFL, destination = "a")
        session.message_flow(unit = session.credit_unit.message, value = 10, destination = "a")
        queue = session.incoming("a")

        #consume the message, cancel subscription (triggering auto-delete), then ack it
        msg = queue.get(timeout = 5)
        session.message_cancel(destination = "a")
        session.message_accept(RangedSet(msg.id))

    def test_queue_deletion(self):
        """A durable exclusive queue with a binding and a persistent message can be deleted."""
        session = self.session
        session.queue_declare(queue = "durable-subscriber-queue", exclusive=True, durable=True)
        session.exchange_bind(exchange="amq.topic", queue="durable-subscriber-queue", binding_key="xyz")
        # delivery_mode=2 => persistent message
        dp = session.delivery_properties(routing_key="xyz", delivery_mode=2)
        session.message_transfer(destination="amq.topic", message=Message(dp, "my-message"))
        session.queue_delete(queue = "durable-subscriber-queue")
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/priority.py b/qpid/tests/src/py/qpid_tests/broker_0_10/priority.py
new file mode 100644
index 0000000000..47aae6dfd6
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/priority.py
@@ -0,0 +1,252 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import Base
+from qpid.compat import set
+import math
+
class PriorityTests (Base):
    """
    Test prioritised messaging
    """

    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    def prioritised_delivery(self, priorities, levels=10, key="x-qpid-priorities"):
        """
        Test that message on a queue are delivered in priority order.
        """
        msgs = [Message(content=str(uuid4()), priority = p) for p in priorities]

        snd = self.ssn.sender("priority-queue; {create: sender, delete: receiver, node: {x-declare:{arguments:{'%s':%s}}}}" % (key, levels),
                              durable=self.durable())
        for m in msgs: snd.send(m)

        # messages should come back highest effective priority first
        rcv = self.ssn.receiver(snd.target)
        for expected in sorted_(msgs, key=lambda m: priority_level(m.priority,levels), reverse=True):
            msg = rcv.fetch(0)
            #print "expected priority %s got %s" % (expected.priority, msg.priority)
            assert msg.content == expected.content
            self.ssn.acknowledge(msg)

    def fairshare_delivery(self, priorities, default_limit=5, limits=None, levels=10, level_key="x-qpid-priorities", fairshare_key="x-qpid-fairshare"):
        """Test fairshare delivery: at most 'limit' messages per level before yielding to the next."""
        msgs = [Message(content=str(uuid4()), priority = p) for p in priorities]

        # build the x-declare arguments: a default fairshare plus optional per-level overrides
        limit_policy = "'%s':%s" % (fairshare_key, default_limit)
        if limits:
            for k, v in limits.items():
                limit_policy += ", '%s-%s':%s" % (fairshare_key, k, v)

        snd = self.ssn.sender("priority-queue; {create: sender, delete: receiver, node: {x-declare:{arguments:{'%s':%s, %s}}}}"
                              % (level_key, levels, limit_policy),
                              durable=self.durable())
        for m in msgs: snd.send(m)

        rcv = self.ssn.receiver(snd.target)
        if limits:
            # per-level overrides: levels without an explicit limit are unlimited (0)
            limit_function = lambda x : limits.get(x, 0)
        else:
            limit_function = lambda x : default_limit
        # fairshare() computes the expected delivery order as the oracle
        for expected in fairshare(sorted_(msgs, key=lambda m: priority_level(m.priority,levels), reverse=True),
                                  limit_function, levels):
            msg = rcv.fetch(0)
            #print "expected priority %s got %s" % (expected.priority, msg.priority)
            assert msg.priority == expected.priority
            assert msg.content == expected.content
            self.ssn.acknowledge(msg)

    def test_prioritised_delivery_1(self):
        self.prioritised_delivery(priorities = [8,9,5,1,2,2,3,4,9,7,8,9,9,2], levels = 10)

    def test_prioritised_delivery_with_alias(self):
        # same behaviour expected via the 'qpid.priorities' argument alias
        self.prioritised_delivery(priorities = [8,9,5,1,2,2,3,4,15,7,8,10,10,2], levels = 10, key="qpid.priorities")

    def test_prioritised_delivery_2(self):
        self.prioritised_delivery(priorities = [8,9,5,1,2,2,3,4,9,7,8,9,9,2], levels = 5)

    def test_fairshare_1(self):
        self.fairshare_delivery(priorities = [4,5,3,6,10,10,2,10,2,10,10,1,10,10,10,3,3,3,10,10,3,10,3,10,10,10,10,10,10,2,3])

    def test_fairshare_with_alias(self):
        # same behaviour expected via the 'qpid.*' argument aliases
        self.fairshare_delivery(priorities = [4,5,3,6,10,10,2,10,2,10,10,1,10,10,10,3,3,3,10,10,2,3], level_key="qpid.priorities", fairshare_key="qpid.fairshare")

    def test_fairshare_2(self):
        self.fairshare_delivery(priorities = [10 for i in range(30)])

    def test_fairshare_3(self):
        self.fairshare_delivery(priorities = [4,5,3,7,8,8,2,8,2,8,8,16,6,6,6,6,6,6,8,3,5,8,3,5,5,3,3,8,8,3,7,3,7,7,7,8,8,8,2,3], limits={7:0,6:4,5:3,4:2,3:2,2:2,1:2}, levels=8)

    def test_browsing(self):
        """Browsing a priority queue must see every message, regardless of order."""
        priorities = [4,5,3,6,0,1,2,8,2,0,2,1,6,0,1,3,3,3,8,1,3,0,3,7,9,0,1,9,0,2,3]
        msgs = [Message(content=str(uuid4()), priority = p) for p in priorities]
        snd = self.ssn.sender("priority-queue; {create: sender, node: {x-declare:{arguments:{x-qpid-priorities:10}}}}",
                              durable=self.durable())
        for m in msgs: snd.send(m)

        rcv = self.ssn.receiver("priority-queue; {mode: browse, delete: receiver}")
        received = []
        try:
            while True: received.append(rcv.fetch(0))
        except Empty: None
        #check all messages on the queue were received by the browser; don't rely on any specific ordering at present
        assert set([m.content for m in msgs]) == set([m.content for m in received])

    def ring_queue_check(self, msgs, count=10):
        """
        Ensure that a ring queue removes lowest priority messages first.
        """
        snd = self.ssn.sender(address("priority-ring-queue", arguments="x-qpid-priorities:10, 'qpid.policy_type':ring, 'qpid.max_count':%s" % count),
                              durable=self.durable())
        for m in msgs: snd.send(m)

        rcv = self.ssn.receiver(snd.target)
        received = []
        try:
            while True: received.append(rcv.fetch(0))
        except Empty: None

        # expected survivors: the 'count' highest-priority messages, delivered highest first
        expected = sorted_(msgs, key=lambda x: priority_level(x.priority,10))[len(msgs)-count:]
        expected = sorted_(expected, key=lambda x: priority_level(x.priority,10), reverse=True)
        #print "sent %s; expected %s; got %s" % ([m.priority for m in msgs], [m.priority for m in expected], [m.priority for m in received])
        #print "sent %s; expected %s; got %s" % ([m.content for m in msgs], [m.content for m in expected], [m.content for m in received])
        assert [m.content for m in expected] == [m.content for m in received]

    def test_ring_queue_1(self):
        priorities = [4,5,3,6,9,9,2,9,2,9,9,1,9,9,9,3,3,3,9,9,3,9,3,9,9,9,9,9,9,2,3]
        seq = content("msg")
        self.ring_queue_check([Message(content=seq.next(), priority = p) for p in priorities])

    def test_ring_queue_2(self):
        priorities = [9,0,2,3,6,3,4,2,9,2,9,9,1,9,4,7,1,1,3,9,7,3,9,3,9,1,5,1,9,7,2,3,0,9]
        seq = content("msg")
        self.ring_queue_check([Message(content=seq.next(), priority = p) for p in priorities])

    def test_ring_queue_3(self):
        #test case given for QPID-3866
        priorities = [8,9,5,1,2,2,3,4,9,7,8,9,9,2]
        seq = content("msg")
        self.ring_queue_check([Message(content=seq.next(), priority = p) for p in priorities], 5)

    def test_ring_queue_4(self):
        priorities = [9,0,2,3,6,3,4,2,9,2,9,3,1,9,4,7,1,1,3,2,7,3,9,3,6,1,5,1,9,7,2,3,0,2]
        seq = content("msg")
        self.ring_queue_check([Message(content=seq.next(), priority = p) for p in priorities])

    def test_requeue(self):
        """Priority order must be preserved even after unacknowledged messages are requeued."""
        priorities = [4,5,3,6,9,9,2,9,2,9,9,1,9,9,9,3,3,3,9,9,3,9,3,9,9,9,9,9,9,2,3]
        msgs = [Message(content=str(uuid4()), priority = p) for p in priorities]

        snd = self.ssn.sender("priority-queue; {create: sender, delete: receiver, node: {x-declare:{arguments:{x-qpid-priorities:10}}}}",
                              durable=self.durable())
        #want to have some messages requeued so enable prefetch on a dummy receiver
        other = self.conn.session()
        dummy = other.receiver("priority-queue")
        dummy.capacity = 10

        for m in msgs: snd.send(m)

        #fetch some with dummy receiver on which prefetch is also enabled
        for i in range(5):
            msg = dummy.fetch(0)
        #close session without acknowledgements to requeue messages
        other.close()

        #now test delivery works as expected after that
        rcv = self.ssn.receiver(snd.target)
        for expected in sorted_(msgs, key=lambda m: priority_level(m.priority,10), reverse=True):
            msg = rcv.fetch(0)
            #print "expected priority %s got %s" % (expected.priority, msg.priority)
            #print "expected content %s got %s" % (expected.content, msg.content)
            assert msg.content == expected.content
            self.ssn.acknowledge(msg)
+
def content(base, counter=1):
    """Generate an endless sequence of distinct bodies: "<base>-<counter>", "<base>-<counter+1>", ..."""
    n = counter
    while True:
        yield "%s-%s" % (base, n)
        n += 1
+
def address(name, create_policy="sender", delete_policy="receiver", arguments=None):
    """Build an address string for 'name' with the given create/delete policies.

    When 'arguments' is a non-empty string it is embedded as the node's
    x-declare arguments; otherwise an empty node spec is used.
    """
    node_spec = arguments and "node: {x-declare:{arguments:{%s}}}" % arguments or "node: {}"
    return "%s; {create: %s, delete: %s, %s}" % (name, create_policy, delete_policy, node_spec)
+
def fairshare(msgs, limit, levels):
    """
    Generator to return prioritised messages in expected order for a given fairshare limit.

    'msgs' must already be sorted highest priority first; 'limit' maps a
    priority level to the maximum run length at that level (0/None = no
    limit); messages beyond the limit are postponed to a later pass.
    """
    count = 0
    last_priority = None
    postponed = []
    while msgs or postponed:
        if not msgs:
            # start a new pass over the messages postponed by the limit
            msgs = postponed
            count = 0
            last_priority = None
            postponed = []
        msg = msgs.pop(0)
        # compare against None explicitly: level 0 is falsy (0.0), so the
        # previous 'if last_priority and ...' reset the count on every
        # message at the lowest level and the limit was never applied there
        if last_priority is not None and priority_level(msg.priority, levels) == last_priority:
            count += 1
        else:
            last_priority = priority_level(msg.priority, levels)
            count = 1
        l = limit(last_priority)
        if (l and count > l):
            postponed.append(msg)
        else:
            yield msg
    return
+
def effective_priority(value, levels):
    """
    Method to determine effective priority given a distinct number of
    levels supported. Returns the lowest priority value that is of
    equivalent priority to the value passed in.
    """
    lowest = 5 - math.ceil(levels / 2.0)
    highest = 4 + math.floor(levels / 2.0)
    if value <= lowest:
        return 0
    if value >= highest:
        return highest
    return value
+
def priority_level(value, levels):
    """
    Method to determine which of a distinct number of priority levels
    a given value falls into.
    """
    lowest = 5 - math.ceil(levels / 2.0)
    level = value - lowest
    if level < 0:
        level = 0
    if level > levels - 1:
        level = levels - 1
    return level
+
def sorted_(msgs, key=None, reverse=False):
    """
    Workaround lack of sorted builtin function in python 2.3 and lack
    of keyword arguments to list.sort()
    """
    # sort a shallow copy so the caller's list is left untouched
    copied = list(msgs)
    copied.sort(key_to_cmp(key, reverse=reverse))
    return copied
+
def key_to_cmp(key, reverse=False):
    """Build a python 2 comparison function for list.sort() from a key function.

    Returns None when neither a key nor reverse ordering is requested (the
    default comparison can then be used).
    """
    if key:
        if reverse: return lambda a, b: cmp(key(b), key(a))
        else: return lambda a, b: cmp(key(a), key(b))
    elif reverse:
        # previously 'reverse' was silently ignored when no key was given
        # (None was returned, yielding an ascending sort)
        return lambda a, b: cmp(b, a)
    else:
        return None
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/qmf_events.py b/qpid/tests/src/py/qpid_tests/broker_0_10/qmf_events.py
new file mode 100644
index 0000000000..7ab7b0a1ac
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/qmf_events.py
@@ -0,0 +1,83 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.messaging import *
+from qpid.tests.messaging import Base
+from qpidtoollibs.broker import EventHelper
+import math
+
class EventTests (Base):
    """
    Test various qmf events
    """
    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())

    def setup_session(self):
        return self.conn.session()

    # NOTE: the checks below were originally written as 'assert x, "expected"',
    # which only tests the truthiness of x and uses the string as the failure
    # message; they are rewritten as the equality comparisons clearly intended.

    def test_queue_declare(self):
        """Creating a queue should raise a queueDeclare QMF event."""
        helper = EventHelper()

        # subscribe for queue declare events
        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDeclare"))
        # create a queue
        snd = self.ssn.sender("myq; {create:always, delete:always}")
        # ensure we got an event
        event = helper.event(rcv.fetch(timeout=1))
        assert event.name == "org_apache_qpid_broker:queueDeclare"
        assert event.qName == "myq"

    def test_queue_delete(self):
        """Deleting a queue should raise a queueDelete QMF event."""
        helper = EventHelper()

        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))
        snd = self.ssn.sender("myq; {create:always, delete:always}")
        snd.close()

        event = helper.event(rcv.fetch(timeout=1))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"

    def test_queue_autodelete_exclusive(self):
        """Auto-deletion of an exclusive queue should raise a queueDelete QMF event."""
        helper = EventHelper()

        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))

        #create new session
        ssn2 = self.setup_session()
        snd = ssn2.sender("myq; {create:always, node:{x-declare:{auto-delete:True, exclusive:True}}}")
        ssn2.close()

        event = helper.event(rcv.fetch(timeout=5))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"

    def test_queue_autodelete_shared(self):
        """Auto-deletion of a shared queue should raise a queueDelete QMF event."""
        helper = EventHelper()

        rcv = self.ssn.receiver(helper.eventAddress("org.apache.qpid.broker", "queueDelete"))

        rcv2 = self.ssn.receiver("myq; {create:always, node:{x-declare:{auto-delete:True}}}")
        rcv2.close()

        event = helper.event(rcv.fetch(timeout=5))
        assert event.name == "org_apache_qpid_broker:queueDelete"
        assert event.qName == "myq"
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/query.py b/qpid/tests/src/py/qpid_tests/broker_0_10/query.py
new file mode 100644
index 0000000000..fd741821d5
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/query.py
@@ -0,0 +1,247 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase010
+
+class QueryTests(TestBase010):
+ """Tests for various query methods"""
+
def test_queue_query(self):
    """A query on a declared queue returns that queue's name."""
    session = self.session
    session.queue_declare(queue="my-queue", exclusive=True, auto_delete=True)
    result = session.queue_query(queue="my-queue")
    self.assertEqual("my-queue", result.queue)
+
def test_queue_query_unknown(self):
    """Querying a non-existent queue yields an empty result rather than an error."""
    session = self.session
    result = session.queue_query(queue="I don't exist")
    self.assert_(not result.queue)
+
def test_exchange_query(self):
    """
    Test that the exchange_query method works as expected
    """
    session = self.session
    #check returned type for the standard exchanges
    self.assertEqual("direct", session.exchange_query(name="amq.direct").type)
    self.assertEqual("topic", session.exchange_query(name="amq.topic").type)
    self.assertEqual("fanout", session.exchange_query(name="amq.fanout").type)
    self.assertEqual("headers", session.exchange_query(name="amq.match").type)
    # empty name refers to the default (nameless) exchange, which is direct
    self.assertEqual("direct", session.exchange_query(name="").type)
    #declare an exchange
    session.exchange_declare(exchange="my-test-exchange", type= "direct", durable=False)
    #check that the result of a query is as expected
    response = session.exchange_query(name="my-test-exchange")
    self.assertEqual("direct", response.type)
    self.assert_(not response.durable)
    self.assert_(not response.not_found)
    #delete the exchange
    session.exchange_delete(exchange="my-test-exchange")
    #check that the query now reports not-found
    self.assert_(session.exchange_query(name="my-test-exchange").not_found)
+
def test_exchange_bound_direct(self):
    """
    Test that the exchange_bound method works as expected with the direct exchange
    """
    self.exchange_bound_with_key("amq.direct")
+
def test_exchange_bound_topic(self):
    """
    Test that the exchange_bound method works as expected with the topic exchange
    """
    self.exchange_bound_with_key("amq.topic")
+
def exchange_bound_with_key(self, exchange_name):
    """
    Exercise exchange_bound against the given exchange for every
    combination of known/unknown/matched/unmatched queue and binding key.
    """
    session = self.session
    #setup: create two queues
    session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
    session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)

    session.exchange_bind(exchange=exchange_name, queue="used-queue", binding_key="used-key")

    # test detection of any binding to specific queue
    response = session.exchange_bound(exchange=exchange_name, queue="used-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)

    # test detection of specific binding to any queue
    response = session.exchange_bound(exchange=exchange_name, binding_key="used-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.key_not_matched)

    # test detection of specific binding to specific queue
    response = session.exchange_bound(exchange=exchange_name, queue="used-queue", binding_key="used-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)
    self.assert_(not response.key_not_matched)

    # test unmatched queue, unspecified binding
    response = session.exchange_bound(exchange=exchange_name, queue="unused-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)

    # test unspecified queue, unmatched binding
    response = session.exchange_bound(exchange=exchange_name, binding_key="unused-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.key_not_matched)

    # test matched queue, unmatched binding
    response = session.exchange_bound(exchange=exchange_name, queue="used-queue", binding_key="unused-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)
    self.assertEqual(True, response.key_not_matched)

    # test unmatched queue, matched binding
    response = session.exchange_bound(exchange=exchange_name, queue="unused-queue", binding_key="used-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)
    self.assert_(not response.key_not_matched)

    # test unmatched queue, unmatched binding
    response = session.exchange_bound(exchange=exchange_name, queue="unused-queue", binding_key="unused-key")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)
    self.assertEqual(True, response.key_not_matched)

    #test exchange not found
    self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)

    #test exchange found, queue not found
    response = session.exchange_bound(exchange=exchange_name, queue="unknown-queue")
    self.assertEqual(False, response.exchange_not_found)
    self.assertEqual(True, response.queue_not_found)

    #test exchange not found, queue found
    response = session.exchange_bound(exchange="unknown-exchange", queue="used-queue")
    self.assertEqual(True, response.exchange_not_found)
    self.assertEqual(False, response.queue_not_found)

    #test exchange not found, queue not found
    response = session.exchange_bound(exchange="unknown-exchange", queue="unknown-queue")
    self.assertEqual(True, response.exchange_not_found)
    self.assertEqual(True, response.queue_not_found)
+
+
def test_exchange_bound_fanout(self):
    """
    Test that the exchange_bound method works as expected with fanout exchange
    """
    session = self.session
    #setup
    session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
    session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)
    # fanout bindings carry no binding key
    session.exchange_bind(exchange="amq.fanout", queue="used-queue")

    # test detection of any binding to specific queue
    response = session.exchange_bound(exchange="amq.fanout", queue="used-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)

    # test unmatched queue, unspecified binding
    response = session.exchange_bound(exchange="amq.fanout", queue="unused-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)

    #test exchange not found
    self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)

    #test queue not found
    self.assertEqual(True, session.exchange_bound(exchange="amq.fanout", queue="unknown-queue").queue_not_found)
+
def test_exchange_bound_header(self):
    """
    Test that the exchange_bound method works as expected with headers exchanges
    """
    session = self.session
    #setup: headers-exchange bindings match on arguments rather than keys
    session.queue_declare(queue="used-queue", exclusive=True, auto_delete=True)
    session.queue_declare(queue="unused-queue", exclusive=True, auto_delete=True)
    session.exchange_bind(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"} )

    # test detection of any binding to specific queue
    response = session.exchange_bound(exchange="amq.match", queue="used-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)

    # test detection of specific binding to any queue
    response = session.exchange_bound(exchange="amq.match", arguments={"x-match":"all", "a":"A"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.args_not_matched)

    # test detection of specific binding to specific queue
    response = session.exchange_bound(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)
    self.assert_(not response.args_not_matched)

    # test unmatched queue, unspecified binding
    response = session.exchange_bound(exchange="amq.match", queue="unused-queue")
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)

    # test unspecified queue, unmatched binding
    response = session.exchange_bound(exchange="amq.match", arguments={"x-match":"all", "b":"B"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.args_not_matched)

    # test matched queue, unmatched binding
    response = session.exchange_bound(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "b":"B"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assert_(not response.queue_not_matched)
    self.assertEqual(True, response.args_not_matched)

    # test unmatched queue, matched binding
    response = session.exchange_bound(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "a":"A"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)
    self.assert_(not response.args_not_matched)

    # test unmatched queue, unmatched binding
    response = session.exchange_bound(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "b":"B"})
    self.assert_(not response.exchange_not_found)
    self.assert_(not response.queue_not_found)
    self.assertEqual(True, response.queue_not_matched)
    self.assertEqual(True, response.args_not_matched)

    #test exchange not found
    self.assertEqual(True, session.exchange_bound(exchange="unknown-exchange").exchange_not_found)

    #test queue not found
    self.assertEqual(True, session.exchange_bound(exchange="amq.match", queue="unknown-queue").queue_not_found)
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/queue.py b/qpid/tests/src/py/qpid_tests/broker_0_10/queue.py
new file mode 100644
index 0000000000..132bd7b987
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/queue.py
@@ -0,0 +1,436 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.testlib import TestBase010
+from qpid.datatypes import Message, RangedSet
+from qpid.session import SessionException
+
+class QueueTests(TestBase010):
+ """Tests for 'methods' on the amqp queue 'class'"""
+
def test_purge(self):
    """
    Test that the purge method removes messages from the queue
    """
    session = self.session
    #setup, declare a queue and add some messages to it:
    session.queue_declare(queue="test-queue", exclusive=True, auto_delete=True)
    session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "one"))
    session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "two"))
    session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "three"))

    #check that the queue now reports 3 messages:
    session.queue_declare(queue="test-queue")
    reply = session.queue_query(queue="test-queue")
    self.assertEqual(3, reply.message_count)

    #now do the purge, then test that three messages are purged and the count drops to 0
    session.queue_purge(queue="test-queue");
    reply = session.queue_query(queue="test-queue")
    self.assertEqual(0, reply.message_count)

    #send a further message and consume it, ensuring that the other messages are really gone
    session.message_transfer(message=Message(session.delivery_properties(routing_key="test-queue"), "four"))
    session.message_subscribe(queue="test-queue", destination="tag")
    # grant unlimited message/byte credit so delivery starts immediately
    session.message_flow(destination="tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
    session.message_flow(destination="tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
    queue = session.incoming("tag")
    msg = queue.get(timeout=1)
    self.assertEqual("four", msg.body)
+
def test_purge_queue_exists(self):
    """
    Test that the correct exception is thrown if no queue exists
    for the name specified in purge
    """
    session = self.session
    try:
        #queue specified but doesn't exist:
        session.queue_purge(queue="invalid-queue")
        self.fail("Expected failure when purging non-existent queue")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code) #not-found
+
def test_purge_empty_name(self):
    """
    Test that the correct exception is thrown if no queue name
    is specified for purge
    """
    session = self.session
    try:
        #queue not specified and none previously declared for channel:
        session.queue_purge()
        self.fail("Expected failure when purging unspecified queue")
    except SessionException, e:
        self.assertEquals(531, e.args[0].error_code) #illegal-argument
+
def test_declare_exclusive(self):
    """
    Test that the exclusive field is honoured in queue.declare
    """
    # TestBase.setUp has already opened session(1)
    s1 = self.session
    # Here we open a second separate connection:
    s2 = self.conn.session("other")

    #declare an exclusive queue:
    s1.queue_declare(queue="exclusive-queue", exclusive=True, auto_delete=True)
    s1.exchange_bind(exchange="amq.fanout", queue="exclusive-queue")
    try:
        #other session should not be allowed to declare this:
        s2.queue_declare(queue="exclusive-queue", exclusive=True, auto_delete=True)
        self.fail("Expected second exclusive queue_declare to raise a channel exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)

    s3 = self.conn.session("subscriber")
    try:
        #other session should not be allowed to subscribe to this:
        s3.message_subscribe(queue="exclusive-queue")
        self.fail("Expected message_subscribe on an exclusive queue to raise a channel exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)

    s4 = self.conn.session("deleter")
    try:
        #other session should not be allowed to delete this:
        s4.queue_delete(queue="exclusive-queue")
        self.fail("Expected queue_delete on an exclusive queue to raise a channel exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)

    s5 = self.conn.session("binder")
    try:
        #other session should not be allowed to bind this:
        s5.exchange_bind(exchange="amq.direct", queue="exclusive-queue", binding_key="abc")
        self.fail("Expected exchange_bind on an exclusive queue to raise an exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)

    s6 = self.conn.session("unbinder")
    try:
        #other session should not be allowed to unbind this:
        s6.exchange_unbind(exchange="amq.fanout", queue="exclusive-queue")
        self.fail("Expected exchange_unbind on an exclusive queue to raise an exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)
+
def test_declare_exclusive_alreadyinuse(self):
    """
    Test that exclusivity is real if granted
    """
    # TestBase.setUp has already opened session(1)
    s1 = self.session
    # Here we open a second separate connection:
    s2 = self.conn.session("other")

    #declare a non-exclusive queue and subscribe to it:
    s1.queue_declare(queue="a-queue", auto_delete=True)
    s1.message_subscribe(queue="a-queue")
    try:
        #other session should not be able to declare this exclusively while it is in use:
        s2.queue_declare(queue="a-queue", exclusive=True, auto_delete=True)
        self.fail("Expected request for exclusivity to fail")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code)
+
def test_declare_passive(self):
    """
    Test that the passive field is honoured in queue.declare
    """
    s1 = self.session
    s2 = self.conn.session("other")

    s1.queue_declare(queue="passive-queue-1")

    #ensure that same/separate sessions can passively declare same queue
    s1.queue_declare(queue="passive-queue-1", passive=True)
    s2.queue_declare(queue="passive-queue-1", passive=True)

    s1.queue_delete(queue="passive-queue-1")
+
def test_declare_passive_queue_not_found(self):
    """
    Test that a passive declare of a non-existent queue fails with not-found
    """
    s1 = self.session

    try:
        s1.queue_declare(queue="passive-queue-not-found", passive=True)
        self.fail("Expected passive declaration of non-existent queue to raise a channel exception")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code) #not-found
+
+
def test_declare_passive_with_exclusive(self):
    """
    Test the interaction of the passive and exclusive fields in queue.declare
    """
    s1 = self.session
    s2 = self.conn.session("other")

    #declare exclusive/non-exclusive queues:
    s1.queue_declare(queue="passive-queue-exc", exclusive=True, auto_delete=True)
    s1.queue_declare(queue="passive-queue-nonexc", exclusive=False, auto_delete=True)

    #ensure that same/separate sessions can passively declare same queue *without* the exclusive flag
    #this is important for the request/reply pattern
    s1.queue_declare(queue="passive-queue-exc", passive=True)
    s2.queue_declare(queue="passive-queue-exc", passive=True)

    try:
        s2.queue_declare(queue="passive-queue-nonexc", exclusive=True, passive=True)
        self.fail("Expected exclusive passive declaration of existing queue to raise a channel exception")
    except SessionException, e:
        self.assertEquals(405, e.args[0].error_code) # resource locked
+
def test_bind(self):
    """
    Test various permutations of the queue.bind method
    """
    session = self.session
    session.queue_declare(queue="queue-1", exclusive=True, auto_delete=True)

    #straightforward case, both exchange & queue exist so no errors expected:
    session.exchange_bind(queue="queue-1", exchange="amq.direct", binding_key="key1")

    #use the queue name where the routing key is not specified:
    session.exchange_bind(queue="queue-1", exchange="amq.direct")

    #try and bind to non-existent exchange
    try:
        session.exchange_bind(queue="queue-1", exchange="an-invalid-exchange", binding_key="key1")
        self.fail("Expected bind to non-existant exchange to fail")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
+
def test_bind_queue_existence(self):
    """Binding a non-existent queue must fail with not-found."""
    session = self.session
    #try and bind non-existent queue:
    try:
        session.exchange_bind(queue="queue-2", exchange="amq.direct", binding_key="key1")
        self.fail("Expected bind of non-existant queue to fail")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
def test_unbind_direct(self):
    """Unbind behaviour against the direct exchange."""
    self.unbind_test(exchange="amq.direct", routing_key="key")
+
def test_unbind_topic(self):
    """Unbind behaviour against the topic exchange."""
    self.unbind_test(exchange="amq.topic", routing_key="key")
+
def test_unbind_fanout(self):
    """Unbind behaviour against the fanout exchange (no routing key)."""
    self.unbind_test(exchange="amq.fanout")
+
def test_unbind_headers(self):
    """Unbind behaviour against the headers exchange (matches on arguments)."""
    self.unbind_test(exchange="amq.match", args={ "x-match":"all", "a":"b"}, headers={"a":"b"})
+
+ def unbind_test(self, exchange, routing_key="", args=None, headers=None):
+ #bind two queues and consume from them
+ session = self.session
+
+ session.queue_declare(queue="queue-1", exclusive=True, auto_delete=True)
+ session.queue_declare(queue="queue-2", exclusive=True, auto_delete=True)
+
+ session.message_subscribe(queue="queue-1", destination="queue-1")
+ session.message_flow(destination="queue-1", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="queue-1", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+ session.message_subscribe(queue="queue-2", destination="queue-2")
+ session.message_flow(destination="queue-2", unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination="queue-2", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+ queue1 = session.incoming("queue-1")
+ queue2 = session.incoming("queue-2")
+
+ session.exchange_bind(exchange=exchange, queue="queue-1", binding_key=routing_key, arguments=args)
+ session.exchange_bind(exchange=exchange, queue="queue-2", binding_key=routing_key, arguments=args)
+
+ dp = session.delivery_properties(routing_key=routing_key)
+ if (headers):
+ mp = session.message_properties(application_headers=headers)
+ msg1 = Message(dp, mp, "one")
+ msg2 = Message(dp, mp, "two")
+ else:
+ msg1 = Message(dp, "one")
+ msg2 = Message(dp, "two")
+
+ #send a message that will match both bindings
+ session.message_transfer(destination=exchange, message=msg1)
+
+ #unbind first queue
+ session.exchange_unbind(exchange=exchange, queue="queue-1", binding_key=routing_key)
+
+ #send another message
+ session.message_transfer(destination=exchange, message=msg2)
+
+ #check one queue has both messages and the other has only one
+ self.assertEquals("one", queue1.get(timeout=1).body)
+ try:
+ msg = queue1.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+ self.assertEquals("one", queue2.get(timeout=1).body)
+ self.assertEquals("two", queue2.get(timeout=1).body)
+ try:
+ msg = queue2.get(timeout=1)
+ self.fail("Got extra message: " + msg)
+ except Empty: pass
+
+
def test_delete_simple(self):
    """
    Test core queue deletion behaviour
    """
    session = self.session

    #straight-forward case:
    session.queue_declare(queue="delete-me")
    session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "a"))
    session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "b"))
    session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me"), "c"))
    session.queue_delete(queue="delete-me")
    #check that it has gone by declaring passively
    try:
        session.queue_declare(queue="delete-me", passive=True)
        self.fail("Queue has not been deleted")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
def test_delete_queue_exists(self):
    """
    Test that deletion of a non-existent queue is rejected with not-found
    """
    #check attempted deletion of non-existent queue is handled correctly:
    session = self.session
    try:
        session.queue_delete(queue="i-dont-exist", if_empty=True)
        self.fail("Expected delete of non-existant queue to fail")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
+
+
def test_delete_ifempty(self):
    """
    Test that if_empty field of queue_delete is honoured
    """
    session = self.session

    #create a queue and add a message to it (use default binding):
    session.queue_declare(queue="delete-me-2")
    session.queue_declare(queue="delete-me-2", passive=True)
    session.message_transfer(message=Message(session.delivery_properties(routing_key="delete-me-2"), "message"))

    #try to delete, but only if empty:
    try:
        session.queue_delete(queue="delete-me-2", if_empty=True)
        self.fail("Expected delete if_empty to fail for non-empty queue")
    except SessionException, e:
        self.assertEquals(406, e.args[0].error_code)

    #need new session now (the exception closed the previous one):
    session = self.conn.session("replacement", 2)

    #empty queue:
    session.message_subscribe(destination="consumer_tag", queue="delete-me-2")
    session.message_flow(destination="consumer_tag", unit=session.credit_unit.message, value=0xFFFFFFFFL)
    session.message_flow(destination="consumer_tag", unit=session.credit_unit.byte, value=0xFFFFFFFFL)
    queue = session.incoming("consumer_tag")
    msg = queue.get(timeout=1)
    self.assertEqual("message", msg.body)
    session.message_accept(RangedSet(msg.id))
    session.message_cancel(destination="consumer_tag")

    #retry deletion on empty queue:
    session.queue_delete(queue="delete-me-2", if_empty=True)

    #check that it has gone by declaring passively:
    try:
        session.queue_declare(queue="delete-me-2", passive=True)
        self.fail("Queue has not been deleted")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
def test_delete_ifunused(self):
    """
    Test that if_unused field of queue_delete is honoured
    """
    session = self.session

    #create a queue and register a consumer:
    session.queue_declare(queue="delete-me-3")
    session.queue_declare(queue="delete-me-3", passive=True)
    session.message_subscribe(destination="consumer_tag", queue="delete-me-3")

    #need new session now:
    session2 = self.conn.session("replacement", 2)

    #try to delete, but only if unused:
    try:
        session2.queue_delete(queue="delete-me-3", if_unused=True)
        self.fail("Expected delete if_unused to fail for queue with existing consumer")
    except SessionException, e:
        self.assertEquals(406, e.args[0].error_code)

    session.message_cancel(destination="consumer_tag")
    session.queue_delete(queue="delete-me-3", if_unused=True)
    #check that it has gone by declaring passively:
    try:
        session.queue_declare(queue="delete-me-3", passive=True)
        self.fail("Queue has not been deleted")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
+
def test_autodelete_shared(self):
    """
    Test auto-deletion (of non-exclusive queues)
    """
    session = self.session
    session2 =self.conn.session("other", 1)

    session.queue_declare(queue="auto-delete-me", auto_delete=True)

    #consume from both sessions
    tag = "my-tag"
    session.message_subscribe(queue="auto-delete-me", destination=tag)
    session2.message_subscribe(queue="auto-delete-me", destination=tag)

    #implicit cancel
    session2.close()

    #check it is still there (one consumer remains)
    session.queue_declare(queue="auto-delete-me", passive=True)

    #explicit cancel => queue is now unused again:
    session.message_cancel(destination=tag)

    #NOTE: this assumes there is no timeout in use

    #check that it has gone by declaring it passively
    try:
        session.queue_declare(queue="auto-delete-me", passive=True)
        self.fail("Expected queue to have been deleted")
    except SessionException, e:
        self.assertEquals(404, e.args[0].error_code)
+
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/stats.py b/qpid/tests/src/py/qpid_tests/broker_0_10/stats.py
new file mode 100644
index 0000000000..4f3931b78b
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/stats.py
@@ -0,0 +1,519 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import Base
+from time import sleep
+from qpidtoollibs.broker import BrokerAgent
+
+#
+# Tests the Broker's statistics reporting
+#
+
+class BrokerStatsTests(Base):
+ """
+ Tests of the broker's statistics
+ """
+
+ def assertEqual(self, left, right, text=None):
+ # Replacement for the unittest helper: print the mismatching values
+ # (and the optional context text) before failing.
+ if not left == right:
+ print "assertEqual failure: %r != %r" % (left, right)
+ if text:
+ print " %r" % text
+ # assert None always fails, aborting the test
+ assert None
+
+ def failUnless(self, value, text=None):
+ # Passes when value is truthy; otherwise prints the optional text and fails.
+ if value:
+ return
+ print "failUnless failure",
+ if text:
+ print ": %r" % text
+ else:
+ print
+ # assert None always fails, aborting the test
+ assert None
+
+ def fail(self, text=None):
+ # Unconditional failure, optionally printing a reason first.
+ if text:
+ print "Fail: %r" % text
+ assert None
+
+ def setup_connection(self):
+ # Establish a fresh connection to the broker under test.
+ return Connection.establish(self.broker, **self.connection_options())
+
+ def setup_session(self, tx=False):
+ # Create a session on the shared connection; tx=True makes it transactional.
+ return self.conn.session(transactional=tx)
+
+ def setup_access(self):
+ # QMF agent used to query broker/queue/exchange statistics objects.
+ return BrokerAgent(self.conn)
+
+ def test_exchange_stats(self):
+ # Verify per-exchange message/byte receive, drop and route counters.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ agent.addExchange("direct", "stats-test-exchange")
+ try:
+ sess = self.setup_session()
+ tx_a = sess.sender("stats-test-exchange/a")
+ tx_b = sess.sender("stats-test-exchange/b")
+ # only key 'a' gets a subscriber/binding, so 'b' traffic is dropped
+ rx_a = sess.receiver("stats-test-exchange/a")
+
+ exchange = agent.getExchange("stats-test-exchange")
+ self.failUnless(exchange, "expected a valid exchange object")
+ self.assertEqual(exchange.msgReceives, 0, "msgReceives")
+ self.assertEqual(exchange.msgDrops, 0, "msgDrops")
+ self.assertEqual(exchange.msgRoutes, 0, "msgRoutes")
+ self.assertEqual(exchange.byteReceives, 0, "byteReceives")
+ self.assertEqual(exchange.byteDrops, 0, "byteDrops")
+ self.assertEqual(exchange.byteRoutes, 0, "byteRoutes")
+
+ # bodies of 10/20/30/40 bytes: 100 total, 40 routed (to 'a'), 60 dropped
+ tx_a.send("0123456789")
+ tx_b.send("01234567890123456789")
+ tx_a.send("012345678901234567890123456789")
+ tx_b.send("0123456789012345678901234567890123456789")
+
+ overhead = 63 #overhead added to message from headers
+ exchange.update()
+ self.assertEqual(exchange.msgReceives, 4, "msgReceives")
+ self.assertEqual(exchange.msgDrops, 2, "msgDrops")
+ self.assertEqual(exchange.msgRoutes, 2, "msgRoutes")
+ self.assertEqual(exchange.byteReceives, 100+(4*overhead), "byteReceives")
+ self.assertEqual(exchange.byteDrops, 60+(2*overhead), "byteDrops")
+ self.assertEqual(exchange.byteRoutes, 40+(2*overhead), "byteRoutes")
+ finally:
+ agent.delExchange("stats-test-exchange")
+
+ def test_enqueues_dequeues(self):
+ # Verify queue and broker-wide enqueue/dequeue/depth counters,
+ # plus the 'abandoned' count when a queue is deleted with messages on it.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("enqueue_test;{create:always,delete:always}")
+ rx = sess.receiver("enqueue_test")
+
+ queue = agent.getQueue("enqueue_test")
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.msgTotalEnqueues, 0, "msgTotalEnqueues")
+ self.assertEqual(queue.byteTotalEnqueues, 0, "byteTotalEnqueues")
+ self.assertEqual(queue.msgTotalDequeues, 0, "msgTotalDequeues")
+ self.assertEqual(queue.byteTotalDequeues, 0, "byteTotalDequeues")
+ self.assertEqual(queue.msgDepth, 0, "msgDepth")
+ self.assertEqual(queue.byteDepth, 0, "byteDepth")
+
+ # bodies of 10/20/30/40 bytes => 100 payload bytes in total
+ tx.send("0123456789")
+ tx.send("01234567890123456789")
+ tx.send("012345678901234567890123456789")
+ tx.send("0123456789012345678901234567890123456789")
+ overhead = 38 #overhead added to message from headers
+
+ queue.update()
+ self.assertEqual(queue.msgTotalEnqueues, 4, "msgTotalEnqueues")
+ self.assertEqual(queue.byteTotalEnqueues, 100+(4*overhead), "byteTotalEnqueues")
+ self.assertEqual(queue.msgTotalDequeues, 0, "msgTotalDequeues")
+ self.assertEqual(queue.byteTotalDequeues, 0, "byteTotalDequeues")
+ self.assertEqual(queue.msgDepth, 4, "msgDepth")
+ self.assertEqual(queue.byteDepth, 100+(4*overhead), "byteDepth")
+
+ # broker-wide totals include other queues, so only lower bounds are checked
+ now_broker = agent.getBroker()
+ self.failUnless((now_broker.msgTotalEnqueues - start_broker.msgTotalEnqueues) >= 4, "broker msgTotalEnqueues")
+ self.failUnless((now_broker.byteTotalEnqueues - start_broker.byteTotalEnqueues) >= 100, "broker byteTotalEnqueues")
+
+ # consume the two smallest messages (10 and 20 byte bodies)
+ m = rx.fetch()
+ m = rx.fetch()
+ sess.acknowledge()
+
+ queue.update()
+ self.assertEqual(queue.msgTotalEnqueues, 4, "msgTotalEnqueues")
+ self.assertEqual(queue.byteTotalEnqueues, 100+(4*overhead), "byteTotalEnqueues")
+ self.assertEqual(queue.msgTotalDequeues, 2, "msgTotalDequeues")
+ self.assertEqual(queue.byteTotalDequeues, 30+(2*overhead), "byteTotalDequeues")
+ self.assertEqual(queue.msgDepth, 2, "msgDepth")
+ self.assertEqual(queue.byteDepth, 70+(2*overhead), "byteDepth")
+
+ now_broker = agent.getBroker()
+ self.failUnless((now_broker.msgTotalDequeues - start_broker.msgTotalDequeues) >= 2, "broker msgTotalDequeues")
+ self.failUnless((now_broker.byteTotalDequeues - start_broker.byteTotalDequeues) >= 30, "broker byteTotalDequeues")
+
+ # closing the session deletes the queue (delete:always); the 2 messages
+ # still on it are counted as abandoned
+ sess.close()
+
+ now_broker = agent.getBroker()
+ self.assertEqual(now_broker.abandoned - start_broker.abandoned, 2, "expect 2 abandoned messages")
+ self.assertEqual(now_broker.msgDepth, start_broker.msgDepth, "expect broker message depth to be unchanged")
+ self.assertEqual(now_broker.byteDepth, start_broker.byteDepth, "expect broker byte depth to be unchanged")
+
+
+ def test_transactional_enqueues_dequeues(self):
+ # Verify that transactional enqueue/dequeue counters only move on commit,
+ # on both the queue object and the broker-wide object.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session(True)
+ tx = sess.sender("tx_enqueue_test;{create:always,delete:always}")
+
+ # four 10-byte bodies sent inside an uncommitted transaction
+ tx.send("0123456789")
+ tx.send("0123456789")
+ tx.send("0123456789")
+ tx.send("0123456789")
+ overhead = 41 #overhead added to message from headers
+
+ # nothing should be visible on the queue before the commit
+ queue = agent.getQueue("tx_enqueue_test")
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.msgTotalEnqueues, 0, "msgTotalEnqueues pre-tx-commit")
+ self.assertEqual(queue.byteTotalEnqueues, 0, "byteTotalEnqueues pre-tx-commit")
+ self.assertEqual(queue.msgTxnEnqueues, 0, "msgTxnEnqueues pre-tx-commit")
+ self.assertEqual(queue.byteTxnEnqueues, 0, "byteTxnEnqueues pre-tx-commit")
+ self.assertEqual(queue.msgTotalDequeues, 0, "msgTotalDequeues pre-tx-commit")
+ self.assertEqual(queue.byteTotalDequeues, 0, "byteTotalDequeues pre-tx-commit")
+ self.assertEqual(queue.msgTxnDequeues, 0, "msgTxnDequeues pre-tx-commit")
+ self.assertEqual(queue.byteTxnDequeues, 0, "byteTxnDequeues pre-tx-commit")
+
+ sess.commit()
+ queue.update()
+ self.assertEqual(queue.msgTotalEnqueues, 4, "msgTotalEnqueues post-tx-commit")
+ self.assertEqual(queue.byteTotalEnqueues, 40+(4*overhead), "byteTotalEnqueues post-tx-commit")
+ self.assertEqual(queue.msgTxnEnqueues, 4, "msgTxnEnqueues post-tx-commit")
+ self.assertEqual(queue.byteTxnEnqueues, 40+(4*overhead), "byteTxnEnqueues post-tx-commit")
+ self.assertEqual(queue.msgTotalDequeues, 0, "msgTotalDequeues post-tx-commit")
+ self.assertEqual(queue.byteTotalDequeues, 0, "byteTotalDequeues post-tx-commit")
+ self.assertEqual(queue.msgTxnDequeues, 0, "msgTxnDequeues post-tx-commit")
+ self.assertEqual(queue.byteTxnDequeues, 0, "byteTxnDequeues post-tx-commit")
+
+ # second transactional session consumes all four messages
+ sess2 = self.setup_session(True)
+ rx = sess2.receiver("tx_enqueue_test")
+
+ m = rx.fetch()
+ m = rx.fetch()
+ m = rx.fetch()
+ m = rx.fetch()
+
+ # dequeues must not register until the receive transaction commits
+ queue.update()
+ self.assertEqual(queue.msgTotalEnqueues, 4, "msgTotalEnqueues pre-rx-commit")
+ self.assertEqual(queue.byteTotalEnqueues, 40+(4*overhead), "byteTotalEnqueues pre-rx-commit")
+ self.assertEqual(queue.msgTxnEnqueues, 4, "msgTxnEnqueues pre-rx-commit")
+ self.assertEqual(queue.byteTxnEnqueues, 40+(4*overhead), "byteTxnEnqueues pre-rx-commit")
+ self.assertEqual(queue.msgTotalDequeues, 0, "msgTotalDequeues pre-rx-commit")
+ self.assertEqual(queue.byteTotalDequeues, 0, "byteTotalDequeues pre-rx-commit")
+ self.assertEqual(queue.msgTxnDequeues, 0, "msgTxnDequeues pre-rx-commit")
+ self.assertEqual(queue.byteTxnDequeues, 0, "byteTxnDequeues pre-rx-commit")
+
+ sess2.acknowledge()
+ sess2.commit()
+
+ queue.update()
+ self.assertEqual(queue.msgTotalEnqueues, 4, "msgTotalEnqueues post-rx-commit")
+ self.assertEqual(queue.byteTotalEnqueues, 40+(4*overhead), "byteTotalEnqueues post-rx-commit")
+ self.assertEqual(queue.msgTxnEnqueues, 4, "msgTxnEnqueues post-rx-commit")
+ self.assertEqual(queue.byteTxnEnqueues, 40+(4*overhead), "byteTxnEnqueues post-rx-commit")
+ self.assertEqual(queue.msgTotalDequeues, 4, "msgTotalDequeues post-rx-commit")
+ self.assertEqual(queue.byteTotalDequeues, 40+(4*overhead), "byteTotalDequeues post-rx-commit")
+ self.assertEqual(queue.msgTxnDequeues, 4, "msgTxnDequeues post-rx-commit")
+ self.assertEqual(queue.byteTxnDequeues, 40+(4*overhead), "byteTxnDequeues post-rx-commit")
+
+ sess.close()
+ sess2.close()
+
+ now_broker = agent.getBroker()
+ self.assertEqual(now_broker.msgTxnEnqueues - start_broker.msgTxnEnqueues, 4, "broker msgTxnEnqueues")
+ self.assertEqual(now_broker.byteTxnEnqueues - start_broker.byteTxnEnqueues, 40+(4*overhead), "broker byteTxnEnqueues")
+ self.assertEqual(now_broker.msgTxnDequeues - start_broker.msgTxnDequeues, 4, "broker msgTxnDequeues")
+ self.assertEqual(now_broker.byteTxnDequeues - start_broker.byteTxnDequeues, 40+(4*overhead), "broker byteTxnDequeues")
+
+
+ def test_discards_no_route(self):
+ # Sending to a topic key with no matching binding must bump the
+ # broker's discardsNoRoute counter.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("amq.topic/non.existing.key")
+ tx.send("NO_ROUTE")
+ tx.send("NO_ROUTE")
+ tx.send("NO_ROUTE")
+ tx.send("NO_ROUTE")
+ tx.send("NO_ROUTE")
+
+ now_broker = agent.getBroker()
+
+ # lower bound only: other traffic may also have failed to route
+ self.failUnless((now_broker.discardsNoRoute - start_broker.discardsNoRoute) >= 5, "Expect at least 5 no-routes")
+
+ sess.close()
+
+
+ def test_abandoned_alt(self):
+ # Messages left on a deleted queue that has an alternate exchange
+ # should count as abandonedViaAlt, not as plain abandoned.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("abandon_alt;{create:always,delete:always,node:{x-declare:{alternate-exchange:'amq.fanout'}}}")
+ rx = sess.receiver("abandon_alt")
+ rx.capacity = 2
+
+ tx.send("ABANDON_ALT")
+ tx.send("ABANDON_ALT")
+ tx.send("ABANDON_ALT")
+ tx.send("ABANDON_ALT")
+ tx.send("ABANDON_ALT")
+
+ # fetch one message but never acknowledge; closing the session below
+ # deletes the queue and routes all 5 messages via the alternate exchange
+ rx.fetch()
+
+ sess.close()
+ now_broker = agent.getBroker()
+ self.assertEqual(now_broker.abandonedViaAlt - start_broker.abandonedViaAlt, 5, "Expect 5 abandonedViaAlt")
+ self.assertEqual(now_broker.abandoned - start_broker.abandoned, 0, "Expect 0 abandoned")
+
+
+ def test_discards_ttl(self):
+ # Expired (TTL) messages must show up in discardsTtl on both the
+ # queue and the broker, and count as dequeues.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("discards_ttl;{create:always,delete:always}")
+ msg = Message("TTL")
+ msg.ttl = 1
+
+ tx.send(msg)
+ tx.send(msg)
+ tx.send(msg)
+ tx.send(msg)
+ tx.send(msg)
+ tx.send(msg)
+
+ # wait past the 1-second TTL so all six messages expire
+ sleep(2)
+
+ # a fetch attempt forces the broker to process the expired messages;
+ # the empty-queue timeout error is expected and ignored
+ rx = sess.receiver("discards_ttl")
+ try:
+ rx.fetch(0)
+ except:
+ pass
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("discards_ttl")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.discardsTtl, 6, "expect 6 TTL discards on queue")
+ self.assertEqual(now_broker.discardsTtl - start_broker.discardsTtl, 6, "expect 6 TTL discards on broker")
+ self.assertEqual(queue.msgTotalDequeues, 6, "expect 6 total dequeues on queue")
+
+ sess.close()
+
+
+ def test_discards_limit_overflow(self):
+ # Exceeding a queue's qpid.max_count must reject the overflowing send
+ # and bump discardsOverflow on queue and broker.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ # flow_stop_count:0 disables producer flow control so the limit is hit
+ tx = sess.sender("discards_limit;{create:always,node:{x-declare:{arguments:{'qpid.max_count':3,'qpid.flow_stop_count':0}}}}")
+ tx.send("LIMIT")
+ tx.send("LIMIT")
+ tx.send("LIMIT")
+ try:
+ tx.send("LIMIT")
+ self.fail("expected to fail sending 4th message")
+ except:
+ pass
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("discards_limit")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.discardsOverflow, 1, "expect 1 overflow discard on queue")
+ self.assertEqual(now_broker.discardsOverflow - start_broker.discardsOverflow, 1, "expect 1 overflow discard on broker")
+
+ ##
+ ## Shut down and restart the connection to clear the error condition.
+ ##
+ try:
+ self.conn.close(timeout=.1)
+ except:
+ pass
+ self.conn = self.setup_connection()
+
+ ##
+ ## Re-create the session to delete the queue.
+ ##
+ sess = self.setup_session()
+ tx = sess.sender("discards_limit;{create:always,delete:always}")
+ sess.close()
+
+
+ def test_discards_ring_overflow(self):
+ # A ring queue over capacity silently drops the oldest messages;
+ # those drops must appear in discardsRing and as dequeues.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("discards_ring;{create:always,delete:always,node:{x-declare:{arguments:{'qpid.max_count':3,'qpid.flow_stop_count':0,'qpid.policy_type':ring}}}}")
+
+ # 5 sends into a 3-deep ring => the 2 oldest are displaced
+ tx.send("RING")
+ tx.send("RING")
+ tx.send("RING")
+ tx.send("RING")
+ tx.send("RING")
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("discards_ring")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.discardsRing, 2, "expect 2 ring discards on queue")
+ self.assertEqual(now_broker.discardsRing - start_broker.discardsRing, 2, "expect 2 ring discards on broker")
+ self.assertEqual(queue.msgTotalDequeues, 2, "expect 2 total dequeues on queue")
+
+ sess.close()
+
+
+ def test_discards_lvq_replace(self):
+ # In a last-value queue, a newer message with the same key replaces
+ # the older one; each replacement counts in discardsLvq.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("discards_lvq;{create:always,delete:always,node:{x-declare:{arguments:{'qpid.max_count':3,'qpid.flow_stop_count':0,'qpid.last_value_queue_key':key}}}}")
+ msgA = Message("LVQ_A")
+ msgA.properties['key'] = 'AAA'
+ msgB = Message("LVQ_B")
+ msgB.properties['key'] = 'BBB'
+
+ # A,B,A,A,B: two replacements for key AAA, one for key BBB => 3 discards
+ tx.send(msgA)
+ tx.send(msgB)
+ tx.send(msgA)
+ tx.send(msgA)
+ tx.send(msgB)
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("discards_lvq")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.discardsLvq, 3, "expect 3 lvq discards on queue")
+ self.assertEqual(now_broker.discardsLvq - start_broker.discardsLvq, 3, "expect 3 lvq discards on broker")
+ self.assertEqual(queue.msgTotalDequeues, 3, "expect 3 total dequeues on queue")
+
+ sess.close()
+
+
+ def test_discards_reject(self):
+ # Messages rejected by the subscriber must be counted in
+ # discardsSubscriber on the queue and the broker.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("discards_reject;{create:always,delete:always}")
+ tx.send("REJECT")
+ tx.send("REJECT")
+ tx.send("REJECT")
+
+ rx = sess.receiver("discards_reject")
+ # first message accepted normally, remaining two explicitly rejected
+ m = rx.fetch()
+ sess.acknowledge()
+ m1 = rx.fetch()
+ m2 = rx.fetch()
+ sess.acknowledge(m1, Disposition(REJECTED))
+ sess.acknowledge(m2, Disposition(REJECTED))
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("discards_reject")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.discardsSubscriber, 2, "expect 2 reject discards on queue")
+ self.assertEqual(now_broker.discardsSubscriber - start_broker.discardsSubscriber, 2, "expect 2 reject discards on broker")
+ self.assertEqual(queue.msgTotalDequeues, 3, "expect 3 total dequeues on queue")
+
+ sess.close()
+
+
+ def test_message_release(self):
+ # Released messages go back on the queue: acquires/releases increase
+ # but total dequeues must stay at zero.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("message_release;{create:always,delete:always}")
+ tx.send("RELEASE")
+ tx.send("RELEASE")
+ tx.send("RELEASE")
+ tx.send("RELEASE")
+ tx.send("RELEASE")
+
+ rx = sess.receiver("message_release")
+ m1 = rx.fetch()
+ m2 = rx.fetch()
+ sess.acknowledge(m1, Disposition(RELEASED))
+ sess.acknowledge(m2, Disposition(RELEASED))
+
+ now_broker = agent.getBroker()
+ queue = agent.getQueue("message_release")
+
+ self.failUnless(queue, "expected a valid queue object")
+ self.assertEqual(queue.acquires, 2, "expect 2 acquires on queue")
+ # broker-wide counters are lower-bounded as other queues also contribute
+ self.failUnless(now_broker.acquires - start_broker.acquires >= 2, "expect at least 2 acquires on broker")
+ self.assertEqual(queue.msgTotalDequeues, 0, "expect 0 total dequeues on queue")
+
+ self.assertEqual(queue.releases, 2, "expect 2 releases on queue")
+ self.failUnless(now_broker.releases - start_broker.releases >= 2, "expect at least 2 releases on broker")
+
+ sess.close()
+
+
+ def test_discards_purge(self):
+ # Purging messages via the management agent must be reflected in
+ # discardsPurge and in the queue's dequeue totals.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("discards_purge;{create:always,delete:always}")
+ tx.send("PURGE")
+ tx.send("PURGE")
+ tx.send("PURGE")
+ tx.send("PURGE")
+ tx.send("PURGE")
+
+ queue = agent.getQueue("discards_purge")
+ self.failUnless(queue, "expected a valid queue object")
+
+ # purge 3 of the 5 queued messages through the management interface
+ queue.purge(3)
+ queue.update()
+
+ now_broker = agent.getBroker()
+ self.assertEqual(queue.discardsPurge, 3, "expect 3 purge discards on queue")
+ self.assertEqual(now_broker.discardsPurge - start_broker.discardsPurge, 3, "expect 3 purge discards on broker")
+ self.assertEqual(queue.msgTotalDequeues, 3, "expect 3 total dequeues on queue")
+
+ sess.close()
+
+
+ def test_reroutes(self):
+ # Rerouting messages to another exchange via the management agent
+ # must be reflected in the reroutes counters and dequeue totals.
+ agent = self.setup_access()
+ start_broker = agent.getBroker()
+
+ sess = self.setup_session()
+ tx = sess.sender("reroute;{create:always,delete:always}")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+ tx.send("REROUTE")
+
+ queue = agent.getQueue("reroute")
+ self.failUnless(queue, "expected a valid queue object")
+
+ # reroute 5 of the 8 messages to amq.fanout
+ queue.reroute(5, False, 'amq.fanout')
+ queue.update()
+
+ now_broker = agent.getBroker()
+ self.assertEqual(queue.reroutes, 5, "expect 5 reroutes on queue")
+ self.assertEqual(now_broker.reroutes - start_broker.reroutes, 5, "expect 5 reroutes on broker")
+ self.assertEqual(queue.msgTotalDequeues, 5, "expect 5 total dequeues on queue")
+
+ sess.close()
+
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/threshold.py b/qpid/tests/src/py/qpid_tests/broker_0_10/threshold.py
new file mode 100644
index 0000000000..fa172c66d2
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/threshold.py
@@ -0,0 +1,212 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.messaging import *
+from qpid.tests.messaging import Base
+import math
+
+class ThresholdTests (Base):
+ """
+ Test queue threshold events are sent and received correctly
+ """
+
+ def setup_connection(self):
+ # Establish a fresh connection to the broker under test.
+ return Connection.establish(self.broker, **self.connection_options())
+
+ def setup_session(self):
+ # Create a plain (non-transactional) session on the shared connection.
+ return self.conn.session()
+
+ def enqueue(self, snd, count):
+ for i in range(count):
+ m = Message("msg-%d" % i)
+ snd.send(m)
+
+ def dequeue(self, rcv, count):
+ for i in range(count):
+ m = rcv.fetch(timeout=1)
+ self.ssn.acknowledge()
+
+ def check_events(self, rcv, count):
+ for i in range(count):
+ m = rcv.fetch(timeout=0)
+ try:
+ m = rcv.fetch(timeout=0)
+ assert False
+ except:
+ pass
+
+ def do_threshold_test(self, args, messages, drain_count, bw_compat=None):
+ astr = ''
+ first = True
+ for key, value in args.items():
+ if first:
+ first = None
+ else:
+ astr += ','
+ astr += "'%s':%s" % (key, value)
+ rcvUp = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdCrossedUpward.#")
+ rcvDn = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdCrossedDownward.#")
+ rcvBw = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdExceeded.#")
+ snd = self.ssn.sender("ttq; {create:always, node: {x-declare:{auto_delete:True,exclusive:True,arguments:{%s}}}}" % astr)
+ rcv = self.ssn.receiver("ttq")
+ overhead = 29 #additional bytes in broker's view of message size from headers etc
+ size = 0
+ count = 0
+ for m in messages:
+ snd.send(m)
+ count = count + 1
+ size = size + len(m.content) + overhead
+ event = rcvUp.fetch(timeout=1)
+ schema = event.content[0]["_schema_id"]
+ assert schema["_class_name"] == "queueThresholdCrossedUpward"
+ values = event.content[0]["_values"]
+ assert values["qName"] == "ttq"
+ assert values["msgDepth"] == count, "msgDepth %s, expected %s" % (values["msgDepth"], count)
+ assert values["byteDepth"] == size, "byteDepth %s, expected %s" % (values["byteDepth"], size)
+ if bw_compat:
+ event = rcvBw.fetch(timeout=0)
+
+ try:
+ event = rcvUp.fetch(timeout=0)
+ assert False
+ except:
+ pass
+
+ if drain_count > 0:
+ for i in range(drain_count):
+ m = rcv.fetch(timeout=1)
+ self.ssn.acknowledge()
+ count -= 1
+ size -= (len(m.content) + overhead)
+ event = rcvDn.fetch(timeout=1)
+ schema = event.content[0]["_schema_id"]
+ assert schema["_class_name"] == "queueThresholdCrossedDownward"
+ values = event.content[0]["_values"]
+ assert values["qName"] == "ttq"
+ assert values["msgDepth"] == count, "msgDepth %s, expected %s" % (values["msgDepth"], count)
+ assert values["byteDepth"] == size, "byteDepth %s, expected %s" % (values["byteDepth"], size)
+ try:
+ event = rcvUp.fetch(timeout=0)
+ assert False
+ except:
+ pass
+
+ def test_alert_count(self):
+ # cross upward at 5 queued messages, back downward at 3 (drain 2 of 5)
+ a = {'qpid.alert_count':5, 'qpid.alert_count_down':3}
+ self.do_threshold_test(a, [Message("msg-%s" % i) for i in range(5)], 2)
+
+ def test_alert_size(self):
+ # byte-based thresholds: upward at 150 bytes, downward at 120
+ a = {'qpid.alert_size_up':150,'qpid.alert_size_down':120}
+ self.do_threshold_test(a, [Message("msg-%s" % i) for i in range(5)], 2)
+
+ def test_alert_count_alias(self):
+ # legacy x-qpid-maximum-message-count alias; bw_compat=True also
+ # expects the old queueThresholdExceeded event
+ a = {'x-qpid-maximum-message-count':10}
+ self.do_threshold_test(a, [Message("msg-%s" % i) for i in range(10)], 0, True)
+
+ def test_alert_size_alias(self):
+ # legacy x-qpid-maximum-message-size alias with backward-compat event
+ a = {'x-qpid-maximum-message-size':100}
+ self.do_threshold_test(a, [Message("msg-%s" % i) for i in range(3)], 0, True)
+
+ def test_alert_on_alert_queue(self):
+ # The event-subscription queue itself has an alert threshold, so two
+ # upward events are expected; one of them must be for "ttq".
+ rcv = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdCrossedUpward.#; {link:{x-declare:{arguments:{'qpid.alert_count':1}}}}")
+ snd = self.ssn.sender("ttq; {create:always, node: {x-declare:{auto_delete:True,exclusive:True,arguments:{'qpid.alert_count':1}}}}")
+ snd.send(Message("my-message"))
+ queues = []
+ for i in range(2):
+ event = rcv.fetch(timeout=1)
+ schema = event.content[0]["_schema_id"]
+ assert schema["_class_name"] == "queueThresholdCrossedUpward"
+ values = event.content[0]["_values"]
+ queues.append(values["qName"])
+ assert "ttq" in queues, "expected event for ttq (%s)" % (queues)
+
+ def test_hysteresis(self):
+ # With distinct up (10) and down (5) thresholds, an upward event fires
+ # only on crossing 10 from below, and a downward event only on crossing
+ # 5 from above; oscillation between the thresholds emits nothing.
+ astr = "'qpid.alert_count_up':10,'qpid.alert_count_down':5"
+ rcvUp = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdCrossedUpward.#")
+ rcvDn = self.ssn.receiver("qmf.default.topic/agent.ind.event.org_apache_qpid_broker.queueThresholdCrossedDownward.#")
+ snd = self.ssn.sender("thq; {create:always, node: {x-declare:{auto_delete:True,exclusive:True,arguments:{%s}}}}" % astr)
+ rcv = self.ssn.receiver("thq")
+
+ rcvUp.capacity = 5
+ rcvDn.capacity = 5
+ rcv.capacity = 5
+
+ self.enqueue(snd, 8) # depth = 8
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 6) # depth = 2
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.enqueue(snd, 8) # depth = 10
+ self.check_events(rcvUp, 1)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 1) # depth = 9
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ # re-crossing 10 without first dropping below 5 must NOT re-fire
+ self.enqueue(snd, 1) # depth = 10
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.enqueue(snd, 10) # depth = 20
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 5) # depth = 15
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 12) # depth = 3
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 1)
+
+ self.dequeue(rcv, 1) # depth = 2
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.enqueue(snd, 6) # depth = 8
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ # upward threshold re-armed by the earlier drop below 5
+ self.enqueue(snd, 6) # depth = 14
+ self.check_events(rcvUp, 1)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 9) # depth = 5
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 1)
+
+ self.enqueue(snd, 1) # depth = 6
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 1) # depth = 5
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+ self.dequeue(rcv, 5) # depth = 0
+ self.check_events(rcvUp, 0)
+ self.check_events(rcvDn, 0)
+
+
+
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_10/tx.py b/qpid/tests/src/py/qpid_tests/broker_0_10/tx.py
new file mode 100644
index 0000000000..8cdc539a08
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_10/tx.py
@@ -0,0 +1,265 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+
+class TxTests(TestBase010):
+ """
+ Tests for 'methods' on the amqp tx 'class'
+ """
+
+ def test_commit(self):
+ """
+ Test that commited publishes are delivered and commited acks are not re-delivered
+ """
+ session = self.session
+
+ #declare queues and create subscribers in the checking session
+ #to ensure that the queues are not auto-deleted too early:
+ self.declare_queues(["tx-commit-a", "tx-commit-b", "tx-commit-c"])
+ session.message_subscribe(queue="tx-commit-a", destination="qa")
+ session.message_subscribe(queue="tx-commit-b", destination="qb")
+ session.message_subscribe(queue="tx-commit-c", destination="qc")
+
+ #use a separate session for actual work
+ session2 = self.conn.session("worker", 2)
+ self.perform_txn_work(session2, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+ session2.tx_commit()
+ session2.close()
+
+ session.tx_select()
+
+ # grant credit so the checking subscriptions actually receive messages
+ self.enable_flow("qa")
+ queue_a = session.incoming("qa")
+
+ self.enable_flow("qb")
+ queue_b = session.incoming("qb")
+
+ self.enable_flow("qc")
+ queue_c = session.incoming("qc")
+
+ #check results
+ # the committed transaction re-published in reverse queue order, so the
+ # TxMessages arrive on c (1-4), b (6) and a (7)
+ for i in range(1, 5):
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("TxMessage %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("TxMessage 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("TxMessage 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ # the committed acks must not be re-delivered on any queue
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def test_auto_rollback(self):
+ """
+ Test that a session closed with an open transaction is effectively rolled back
+ """
+ session = self.session
+ self.declare_queues(["tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c"])
+ session.message_subscribe(queue="tx-autorollback-a", destination="qa")
+ session.message_subscribe(queue="tx-autorollback-b", destination="qb")
+ session.message_subscribe(queue="tx-autorollback-c", destination="qc")
+
+ session2 = self.conn.session("worker", 2)
+ queue_a, queue_b, queue_c, ignore = self.perform_txn_work(session2, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+ # nothing from the open transaction may be visible yet
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ # closing with the transaction still open implicitly rolls it back
+ session2.close()
+
+ session.tx_select()
+
+ self.enable_flow("qa")
+ queue_a = session.incoming("qa")
+
+ self.enable_flow("qb")
+ queue_b = session.incoming("qb")
+
+ self.enable_flow("qc")
+ queue_c = session.incoming("qc")
+
+ #check results
+ # the rolled-back acks mean the original messages are delivered again
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ # the rolled-back publishes must not appear on any queue
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def test_rollback(self):
+ """
+ Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+ """
+ session = self.session
+ queue_a, queue_b, queue_c, consumed = self.perform_txn_work(session, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+ # nothing from the open transaction may be visible yet
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ session.tx_rollback()
+
+ #need to release messages to get them redelivered now:
+ session.message_release(consumed)
+
+ #check results
+ # the released (formerly acked) messages are delivered again
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ session.message_accept(RangedSet(msg.id))
+
+ # the rolled-back publishes must not appear on any queue
+ for q in [queue_a, queue_b, queue_c]:
+ try:
+ extra = q.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.body)
+ except Empty: None
+
+ #cleanup
+ session.tx_commit()
+
+ def perform_txn_work(self, session, name_a, name_b, name_c):
+ """
+ Utility method that does some setup and some work under a transaction. Used for testing both
+ commit and rollback
+ """
+ #setup:
+ self.declare_queues([name_a, name_b, name_c])
+
+ key = "my_key_" + name_b
+ topic = "my_topic_" + name_c
+
+ # route b via amq.direct and c via amq.topic; a is fed directly
+ session.exchange_bind(queue=name_b, exchange="amq.direct", binding_key=key)
+ session.exchange_bind(queue=name_c, exchange="amq.topic", binding_key=topic)
+
+ # publish the pre-transaction messages 1-4 (to a), 6 (to b) and 7 (to c)
+ dp = session.delivery_properties(routing_key=name_a)
+ for i in range(1, 5):
+ mp = session.message_properties(message_id="msg%d" % i)
+ session.message_transfer(message=Message(dp, mp, "Message %d" % i))
+
+ dp = session.delivery_properties(routing_key=key)
+ mp = session.message_properties(message_id="msg6")
+ session.message_transfer(destination="amq.direct", message=Message(dp, mp, "Message 6"))
+
+ dp = session.delivery_properties(routing_key=topic)
+ mp = session.message_properties(message_id="msg7")
+ session.message_transfer(destination="amq.topic", message=Message(dp, mp, "Message 7"))
+
+ # everything below here happens inside the transaction
+ session.tx_select()
+
+ #consume and ack messages
+ acked = RangedSet()
+ self.subscribe(session, queue=name_a, destination="sub_a")
+ queue_a = session.incoming("sub_a")
+ for i in range(1, 5):
+ msg = queue_a.get(timeout=1)
+ acked.add(msg.id)
+ self.assertEqual("Message %d" % i, msg.body)
+
+ self.subscribe(session, queue=name_b, destination="sub_b")
+ queue_b = session.incoming("sub_b")
+ msg = queue_b.get(timeout=1)
+ self.assertEqual("Message 6", msg.body)
+ acked.add(msg.id)
+
+ sub_c = self.subscribe(session, queue=name_c, destination="sub_c")
+ queue_c = session.incoming("sub_c")
+ msg = queue_c.get(timeout=1)
+ self.assertEqual("Message 7", msg.body)
+ acked.add(msg.id)
+
+ session.message_accept(acked)
+
+ dp = session.delivery_properties(routing_key=topic)
+ #publish messages
+ # note the routing is reversed: TxMessage 1-4 go to c, 6 to b, 7 to a
+ for i in range(1, 5):
+ mp = session.message_properties(message_id="tx-msg%d" % i)
+ session.message_transfer(destination="amq.topic", message=Message(dp, mp, "TxMessage %d" % i))
+
+ dp = session.delivery_properties(routing_key=key)
+ mp = session.message_properties(message_id="tx-msg6")
+ session.message_transfer(destination="amq.direct", message=Message(dp, mp, "TxMessage 6"))
+
+ dp = session.delivery_properties(routing_key=name_a)
+ mp = session.message_properties(message_id="tx-msg7")
+ session.message_transfer(message=Message(dp, mp, "TxMessage 7"))
+ return queue_a, queue_b, queue_c, acked
+
+ def declare_queues(self, names, session=None):
+ session = session or self.session
+ for n in names:
+ session.queue_declare(queue=n, auto_delete=True)
+
+ def subscribe(self, session=None, **keys):
+ session = session or self.session
+ consumer_tag = keys["destination"]
+ session.message_subscribe(**keys)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=consumer_tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+ def enable_flow(self, tag, session=None):
+ session = session or self.session
+ session.message_flow(destination=tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+ session.message_flow(destination=tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+ def complete(self, session, msg):
+ # Mark msg as processed and notify the broker of the completed set.
+ session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+ session.channel.session_completed(session.receiver._completed)
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/__init__.py b/qpid/tests/src/py/qpid_tests/broker_0_8/__init__.py
new file mode 100644
index 0000000000..526f2452f8
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/__init__.py
@@ -0,0 +1,22 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import basic, broker, example, exchange, queue, testlib, tx
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/basic.py b/qpid/tests/src/py/qpid_tests/broker_0_8/basic.py
new file mode 100644
index 0000000000..13f4252ffb
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/basic.py
@@ -0,0 +1,441 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+
+class BasicTests(TestBase):
+ """Tests for 'methods' on the amqp basic 'class'"""
+
+ def test_consume_no_local(self):
+ """
+ Test that the no_local flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare two queues:
+ channel.queue_declare(queue="test-queue-1a", exclusive=True)
+ channel.queue_declare(queue="test-queue-1b", exclusive=True)
+ #establish two consumers one of which excludes delivery of locally sent messages
+ channel.basic_consume(consumer_tag="local_included", queue="test-queue-1a")
+ channel.basic_consume(consumer_tag="local_excluded", queue="test-queue-1b", no_local=True)
+
+ #send a message
+ channel.basic_publish(routing_key="test-queue-1a", content=Content("consume_no_local"))
+ channel.basic_publish(routing_key="test-queue-1b", content=Content("consume_no_local"))
+
+ #check the queues of the two consumers
+ excluded = self.client.queue("local_excluded")
+ included = self.client.queue("local_included")
+ msg = included.get(timeout=1)
+ self.assertEqual("consume_no_local", msg.content.body)
+ try:
+ excluded.get(timeout=1)
+ self.fail("Received locally published message though no_local=true")
+ except Empty: None
+
+
+ def test_consume_exclusive(self):
+ """
+ Test that the exclusive flag is honoured in the consume method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-2", exclusive=True)
+
+ #check that an exclusive consumer prevents other consumer being created:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2", exclusive=True)
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2")
+ self.fail("Expected consume request to fail due to previous exclusive consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ #open new channel and cleanup last consumer:
+ channel = self.client.channel(2)
+ channel.channel_open()
+
+ #check that an exclusive consumer cannot be created if a consumer already exists:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-2")
+ try:
+ channel.basic_consume(consumer_tag="second", queue="test-queue-2", exclusive=True)
+ self.fail("Expected exclusive consume request to fail due to previous consumer")
+ except Closed, e:
+ self.assertChannelException(403, e.args[0])
+
+ def test_reconnect_to_durable_subscription(self):
+ try:
+ publisherchannel = self.channel
+ my_id = "my_id"
+ consumer_connection_properties_with_instance = {"instance": my_id}
+ queue_for_subscription = "queue_for_subscription_%s" % my_id
+ topic_name = "my_topic_name"
+ test_message = self.uniqueString()
+
+ durable_subscription_client = self.connect(client_properties=consumer_connection_properties_with_instance)
+ consumerchannel = durable_subscription_client.channel(1)
+ consumerchannel.channel_open()
+
+ self._declare_and_bind_exclusive_queue_on_topic_exchange(consumerchannel, queue_for_subscription, topic_name)
+
+ # disconnect
+ durable_subscription_client.close()
+
+ # send message to topic
+ publisherchannel.basic_publish(routing_key=topic_name, exchange="amq.topic", content=Content(test_message))
+
+ # reconnect and consume message
+ durable_subscription_client = self.connect(client_properties=consumer_connection_properties_with_instance)
+ consumerchannel = durable_subscription_client.channel(1)
+ consumerchannel.channel_open()
+
+ self._declare_and_bind_exclusive_queue_on_topic_exchange(consumerchannel, queue_for_subscription, topic_name)
+
+ # Create consumer and consume the message that was sent whilst subscriber was disconnected. By convention we
+ # declare the consumer as exclusive to forbid concurrent access.
+ subscription = consumerchannel.basic_consume(queue=queue_for_subscription, exclusive=True)
+ queue = durable_subscription_client.queue(subscription.consumer_tag)
+
+ # consume and verify message content
+ msg = queue.get(timeout=1)
+ self.assertEqual(test_message, msg.content.body)
+ consumerchannel.basic_ack(delivery_tag=msg.delivery_tag)
+ finally:
+ consumerchannel.queue_delete(queue=queue_for_subscription)
+ durable_subscription_client.close()
+
+ def _declare_and_bind_exclusive_queue_on_topic_exchange(self, channel, queue, topic_name):
+ channel.queue_declare(queue=queue, exclusive=True, auto_delete=False, durable=True)
+ channel.queue_bind(exchange="amq.topic", queue=queue, routing_key=topic_name)
+
+ def test_consume_queue_errors(self):
+ """
+ Test error conditions associated with the queue field of the consume method:
+ """
+ channel = self.channel
+ try:
+ #queue specified but doesn't exist:
+ channel.basic_consume(queue="invalid-queue")
+ self.fail("Expected failure when consuming from non-existent queue")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ channel = self.client.channel(2)
+ channel.channel_open()
+ try:
+ #queue not specified and none previously declared for channel:
+ channel.basic_consume(queue="")
+ self.fail("Expected failure when consuming from unspecified queue")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_consume_unique_consumers(self):
+ """
+ Ensure unique consumer tags are enforced
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-3", exclusive=True)
+
+ #check that attempts to use duplicate tags are detected and prevented:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ try:
+ channel.basic_consume(consumer_tag="first", queue="test-queue-3")
+ self.fail("Expected consume request to fail due to non-unique tag")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def test_cancel(self):
+ """
+ Test compliance of the basic.cancel method
+ """
+ channel = self.channel
+ #setup, declare a queue:
+ channel.queue_declare(queue="test-queue-4", exclusive=True)
+ channel.basic_consume(consumer_tag="my-consumer", queue="test-queue-4")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("One"))
+
+ myqueue = self.client.queue("my-consumer")
+ msg = myqueue.get(timeout=1)
+ self.assertEqual("One", msg.content.body)
+
+ #cancel should stop messages being delivered
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_publish(routing_key="test-queue-4", content=Content("Two"))
+ try:
+ msg = myqueue.get(timeout=1)
+ self.fail("Got message after cancellation: " + msg)
+ except Empty: None
+
+ #cancellation of non-existent consumers should be handled without error
+ channel.basic_cancel(consumer_tag="my-consumer")
+ channel.basic_cancel(consumer_tag="this-never-existed")
+
+
+ def test_ack(self):
+ """
+ Test basic ack/recover behaviour
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-ack-queue", exclusive=True)
+
+ reply = channel.basic_consume(queue="test-ack-queue", no_ack=False)
+ queue = self.client.queue(reply.consumer_tag)
+
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("One"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-ack-queue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_recover(requeue=False)
+
+ msg3b = queue.get(timeout=1)
+ msg5b = queue.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message: " + extra.content.body)
+ except Empty: None
+
+ def test_recover_requeue(self):
+ """
+ Test requeuing on recovery
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-requeue", exclusive=True)
+
+ subscription = channel.basic_consume(queue="test-requeue", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ channel.basic_publish(routing_key="test-requeue", content=Content("One"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Two"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Three"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Four"))
+ channel.basic_publish(routing_key="test-requeue", content=Content("Five"))
+
+ msg1 = queue.get(timeout=1)
+ msg2 = queue.get(timeout=1)
+ msg3 = queue.get(timeout=1)
+ msg4 = queue.get(timeout=1)
+ msg5 = queue.get(timeout=1)
+
+ self.assertEqual("One", msg1.content.body)
+ self.assertEqual("Two", msg2.content.body)
+ self.assertEqual("Three", msg3.content.body)
+ self.assertEqual("Four", msg4.content.body)
+ self.assertEqual("Five", msg5.content.body)
+
+ channel.basic_ack(delivery_tag=msg2.delivery_tag, multiple=True) #One & Two
+ channel.basic_ack(delivery_tag=msg4.delivery_tag, multiple=False) #Four
+
+ channel.basic_cancel(consumer_tag=subscription.consumer_tag)
+
+ channel.basic_recover(requeue=True)
+
+ subscription2 = channel.basic_consume(queue="test-requeue")
+ queue2 = self.client.queue(subscription2.consumer_tag)
+
+ msg3b = queue2.get(timeout=1)
+ msg5b = queue2.get(timeout=1)
+
+ self.assertEqual("Three", msg3b.content.body)
+ self.assertEqual("Five", msg5b.content.body)
+
+ self.assertEqual(True, msg3b.redelivered)
+ self.assertEqual(True, msg5b.redelivered)
+
+ try:
+ extra = queue2.get(timeout=1)
+ self.fail("Got unexpected message in second queue: " + extra.content.body)
+ except Empty: None
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+ def test_qos_prefetch_count(self):
+ """
+ Test that the prefetch count specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-count", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-count", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 5:
+ channel.basic_qos(prefetch_count=5)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-count", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered:
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+
+
+ def test_qos_prefetch_size(self):
+ """
+ Test that the prefetch size specified is honoured
+ """
+ #setup: declare queue and subscribe
+ channel = self.channel
+ channel.queue_declare(queue="test-prefetch-size", exclusive=True)
+ subscription = channel.basic_consume(queue="test-prefetch-size", no_ack=False)
+ queue = self.client.queue(subscription.consumer_tag)
+
+ #set prefetch to 50 bytes (each message is 9 or 10 bytes):
+ channel.basic_qos(prefetch_size=50)
+
+ #publish 10 messages:
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content("Message %d" % i))
+
+ #only 5 messages should have been delivered (i.e. 45 bytes worth):
+ for i in range(1, 6):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 6th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #ack messages and check that the next set arrive ok:
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ for i in range(6, 11):
+ msg = queue.get(timeout=1)
+ self.assertEqual("Message %d" % i, msg.content.body)
+
+ channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+ try:
+ extra = queue.get(timeout=1)
+ self.fail("Got unexpected 11th message in original queue: " + extra.content.body)
+ except Empty: None
+
+ #make sure that a single oversized message still gets delivered
+ large = "abcdefghijklmnopqrstuvwxyz"
+ large = large + "-" + large;
+ channel.basic_publish(routing_key="test-prefetch-size", content=Content(large))
+ msg = queue.get(timeout=1)
+ self.assertEqual(large, msg.content.body)
+
+ def test_get(self):
+ """
+ Test basic_get method
+ """
+ channel = self.channel
+ channel.queue_declare(queue="test-get", exclusive=True)
+
+ #publish some messages (no_ack=True)
+ for i in range(1, 11):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ #use basic_get to read back the messages, and check that we get an empty at the end
+ for i in range(1, 11):
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #repeat for no_ack=False
+ for i in range(11, 21):
+ channel.basic_publish(routing_key="test-get", content=Content("Message %d" % i))
+
+ for i in range(11, 21):
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ if(i == 13):
+ channel.basic_ack(delivery_tag=reply.delivery_tag, multiple=True)
+ if(i in [15, 17, 19]):
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ #recover(requeue=True)
+ channel.basic_recover(requeue=True)
+
+ #get the unacked messages again (14, 16, 18, 20)
+ for i in [14, 16, 18, 20]:
+ reply = channel.basic_get(no_ack=False)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_ok")
+ self.assertEqual("Message %d" % i, reply.content.body)
+ channel.basic_ack(delivery_tag=reply.delivery_tag)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
+
+ channel.basic_recover(requeue=True)
+
+ reply = channel.basic_get(no_ack=True)
+ self.assertEqual(reply.method.klass.name, "basic")
+ self.assertEqual(reply.method.name, "get_empty")
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/broker.py b/qpid/tests/src/py/qpid_tests/broker_0_8/broker.py
new file mode 100644
index 0000000000..7f3fe7530e
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/broker.py
@@ -0,0 +1,120 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+
+class BrokerTests(TestBase):
+ """Tests for basic Broker functionality"""
+
+ def test_ack_and_no_ack(self):
+ """
+ First, this test tries to receive a message with a no-ack
+ consumer. Second, this test tries to explicitly receive and
+ acknowledge a message with an acknowledging consumer.
+ """
+ ch = self.channel
+ self.queue_declare(ch, queue = "myqueue")
+
+ # No ack consumer
+ ctag = ch.basic_consume(queue = "myqueue", no_ack = True).consumer_tag
+ body = "test no-ack"
+ ch.basic_publish(routing_key = "myqueue", content = Content(body))
+ msg = self.client.queue(ctag).get(timeout = 5)
+ self.assert_(msg.content.body == body)
+
+ # Acknowledging consumer
+ self.queue_declare(ch, queue = "otherqueue")
+ ctag = ch.basic_consume(queue = "otherqueue", no_ack = False).consumer_tag
+ body = "test ack"
+ ch.basic_publish(routing_key = "otherqueue", content = Content(body))
+ msg = self.client.queue(ctag).get(timeout = 5)
+ ch.basic_ack(delivery_tag = msg.delivery_tag)
+ self.assert_(msg.content.body == body)
+
+ def test_basic_delivery_immediate(self):
+ """
+ Test basic message delivery where consume is issued before publish
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ reply = channel.basic_consume(queue="test-queue", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+
+ body = "Immediate Delivery"
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body), immediate=True)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.content.body == body)
+
+ # TODO: Ensure we fail if immediate=True and there's no consumer.
+
+
+ def test_basic_delivery_queued(self):
+ """
+ Test basic message delivery where publish is issued before consume
+ (i.e. requires queueing of the message)
+ """
+ channel = self.channel
+ self.exchange_declare(channel, exchange="test-exchange", type="direct")
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+ body = "Queued Delivery"
+ channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body))
+ reply = channel.basic_consume(queue="test-queue", no_ack=True)
+ queue = self.client.queue(reply.consumer_tag)
+ msg = queue.get(timeout=5)
+ self.assert_(msg.content.body == body)
+
+ def test_invalid_channel(self):
+ channel = self.client.channel(200)
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for invalid channel")
+ except Closed, e:
+ self.assertConnectionException(504, e.args[0])
+
+ def test_closed_channel(self):
+ channel = self.client.channel(200)
+ channel.channel_open()
+ channel.channel_close()
+ try:
+ channel.queue_declare(exclusive=True)
+ self.fail("Expected error on queue_declare for closed channel")
+ except Closed, e:
+ self.assertConnectionException(504, e.args[0])
+
+ def test_channel_flow(self):
+ channel = self.channel
+ channel.queue_declare(queue="flow_test_queue", exclusive=True)
+ ctag = channel.basic_consume(queue="flow_test_queue", no_ack=True).consumer_tag
+ incoming = self.client.queue(ctag)
+
+ channel.channel_flow(active=False)
+ channel.basic_publish(routing_key="flow_test_queue", content=Content("abcdefghijklmnopqrstuvwxyz"))
+ try:
+ incoming.get(timeout=1)
+ self.fail("Received message when flow turned off.")
+ except Empty: None
+
+ channel.channel_flow(active=True)
+ msg = incoming.get(timeout=1)
+ self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.content.body)
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/example.py b/qpid/tests/src/py/qpid_tests/broker_0_8/example.py
new file mode 100644
index 0000000000..d82bad1f61
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/example.py
@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.content import Content
+from qpid.testlib import TestBase
+
+class ExampleTest (TestBase):
+ """
+ An example Qpid test, illustrating the unittest framework and the
+ python Qpid client. The test class must inherit TestCase. The
+ test code uses the Qpid client to interact with a qpid broker and
+ verify it behaves as expected.
+ """
+
+ def test_example(self):
+ """
+ An example test. Note that test functions must start with 'test_'
+ to be recognized by the test framework.
+ """
+
+ # By inheriting TestBase, self.client is automatically connected
+ # and self.channel is automatically opened as channel(1)
+ # Other channel methods mimic the protocol.
+ channel = self.channel
+
+ # Now we can send regular commands. If you want to see what the method
+ # arguments mean or what other commands are available, you can use the
+ # python builtin help() method. For example:
+ #help(chan)
+ #help(chan.exchange_declare)
+
+ # If you want to browse the available protocol methods without being
+ # connected to a live server you can use the amqp-doc utility:
+ #
+ # Usage amqp-doc [<options>] <spec> [<pattern_1> ... <pattern_n>]
+ #
+ # Options:
+ # -e, --regexp use regex instead of glob when matching
+
+ # Now that we know what commands are available we can use them to
+ # interact with the server.
+
+ # Here we use ordinal arguments.
+ self.exchange_declare(channel, 0, "test", "direct")
+
+ # Here we use keyword arguments.
+ self.queue_declare(channel, queue="test-queue")
+ channel.queue_bind(queue="test-queue", exchange="test", routing_key="key")
+
+ # Call Channel.basic_consume to register as a consumer.
+ # All the protocol methods return a message object. The message object
+ # has fields corresponding to the reply method fields, plus a content
+ # field that is filled if the reply includes content. In this case the
+ # interesting field is the consumer_tag.
+ reply = channel.basic_consume(queue="test-queue")
+
+ # We can use the Client.queue(...) method to access the queue
+ # corresponding to our consumer_tag.
+ queue = self.client.queue(reply.consumer_tag)
+
+ # Now lets publish a message and see if our consumer gets it. To do
+ # this we need to import the Content class.
+ body = "Hello World!"
+ channel.basic_publish(exchange="test",
+ routing_key="key",
+ content=Content(body))
+
+ # Now we'll wait for the message to arrive. We can use the timeout
+ # argument in case the server hangs. By default queue.get() will wait
+ # until a message arrives or the connection to the server dies.
+ msg = queue.get(timeout=10)
+
+ # And check that we got the right response with assertEqual
+ self.assertEqual(body, msg.content.body)
+
+ # Now acknowledge the message.
+ channel.basic_ack(msg.delivery_tag, True)
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/exchange.py b/qpid/tests/src/py/qpid_tests/broker_0_8/exchange.py
new file mode 100644
index 0000000000..8d610a79dd
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/exchange.py
@@ -0,0 +1,349 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+"""
+Tests for exchange behaviour.
+
+Test classes ending in 'RuleTests' are derived from rules in amqp.xml.
+"""
+
+import Queue, logging
+from qpid.testlib import TestBase
+from qpid.content import Content
+from qpid.client import Closed
+
+
+class StandardExchangeVerifier:
+    """Verifies standard exchange behavior.
+
+    Used as base class for classes that test standard exchanges.
+    Relies on TestBase helpers (queue_declare, consume, assertPublishConsume,
+    assertPublishGet, assertEmpty) being available on the mixed-in test class.
+    """
+
+    def verifyDirectExchange(self, ex):
+        """Verify that ex behaves like a direct exchange."""
+        self.queue_declare(queue="q")
+        self.channel.queue_bind(queue="q", exchange=ex, routing_key="k")
+        # Exact routing-key match must deliver...
+        self.assertPublishConsume(exchange=ex, queue="q", routing_key="k")
+        try:
+            # ...while publishing with a different key ("kk") must not.
+            self.assertPublishConsume(exchange=ex, queue="q", routing_key="kk")
+            self.fail("Expected Empty exception")
+        except Queue.Empty: None # Expected
+
+    def verifyFanOutExchange(self, ex):
+        """Verify that ex behaves like a fanout exchange."""
+        self.queue_declare(queue="q")
+        self.channel.queue_bind(queue="q", exchange=ex)
+        self.queue_declare(queue="p")
+        self.channel.queue_bind(queue="p", exchange=ex)
+        # Every bound queue must receive the publish, no routing key involved.
+        for qname in ["q", "p"]: self.assertPublishGet(self.consume(qname), ex)
+
+    def verifyTopicExchange(self, ex):
+        """Verify that ex behaves like a topic exchange"""
+        self.queue_declare(queue="a")
+        # Pattern "a.#.b.*": '#' matches zero or more words, '*' exactly one,
+        # as demonstrated by the matching keys asserted below.
+        self.channel.queue_bind(queue="a", exchange=ex, routing_key="a.#.b.*")
+        q = self.consume("a")
+        self.assertPublishGet(q, ex, "a.b.x")
+        self.assertPublishGet(q, ex, "a.x.b.x")
+        self.assertPublishGet(q, ex, "a.x.x.b.x")
+        # Shouldn't match
+        self.channel.basic_publish(exchange=ex, routing_key="a.b")
+        self.channel.basic_publish(exchange=ex, routing_key="a.b.x.y")
+        self.channel.basic_publish(exchange=ex, routing_key="x.a.b.x")
+        # NOTE(review): duplicate of the first non-matching publish above.
+        self.channel.basic_publish(exchange=ex, routing_key="a.b")
+        self.assert_(q.empty())
+
+    def verifyHeadersExchange(self, ex):
+        """Verify that ex is a headers exchange"""
+        self.queue_declare(queue="q")
+        # x-match=all: every listed header must be present with an equal value.
+        self.channel.queue_bind(queue="q", exchange=ex, arguments={ "x-match":"all", "name":"fred" , "age":3} )
+        q = self.consume("q")
+        headers = {"name":"fred", "age":3}
+        self.assertPublishGet(q, exchange=ex, properties={'headers':headers})
+        self.channel.basic_publish(exchange=ex) # No headers, won't deliver
+        self.assertEmpty(q);
+
+
+class RecommendedTypesRuleTests(TestBase, StandardExchangeVerifier):
+    """
+    The server SHOULD implement these standard exchange types: topic, headers.
+
+    Client attempts to declare an exchange with each of these standard types.
+    """
+    # The leading positional 0 in exchange_declare is presumably the access
+    # ticket field of the 0-8 method -- TODO confirm against TestBase.
+
+    def testDirect(self):
+        """Declare and test a direct exchange"""
+        self.exchange_declare(0, exchange="d", type="direct")
+        self.verifyDirectExchange("d")
+
+    def testFanout(self):
+        """Declare and test a fanout exchange"""
+        self.exchange_declare(0, exchange="f", type="fanout")
+        self.verifyFanOutExchange("f")
+
+    def testTopic(self):
+        """Declare and test a topic exchange"""
+        self.exchange_declare(0, exchange="t", type="topic")
+        self.verifyTopicExchange("t")
+
+    def testHeaders(self):
+        """Declare and test a headers exchange"""
+        self.exchange_declare(0, exchange="h", type="headers")
+        self.verifyHeadersExchange("h")
+
+
+class RequiredInstancesRuleTests(TestBase, StandardExchangeVerifier):
+    """
+    The server MUST, in each virtual host, pre-declare an exchange instance
+    for each standard exchange type that it implements, where the name of the
+    exchange instance is amq. followed by the exchange type name.
+
+    Client creates a temporary queue and attempts to bind to each required
+    exchange instance (amq.fanout, amq.direct, and amq.topic, amq.match if
+    those types are defined).
+    """
+    # Each test exercises one predeclared exchange via the shared verifier.
+    def testAmqDirect(self): self.verifyDirectExchange("amq.direct")
+
+    def testAmqFanOut(self): self.verifyFanOutExchange("amq.fanout")
+
+    def testAmqTopic(self): self.verifyTopicExchange("amq.topic")
+
+    def testAmqMatch(self): self.verifyHeadersExchange("amq.match")
+
+class DefaultExchangeRuleTests(TestBase, StandardExchangeVerifier):
+    """
+    The server MUST predeclare a direct exchange to act as the default exchange
+    for content Publish methods and for default queue bindings.
+
+    Client checks that the default exchange is active by specifying a queue
+    binding with no exchange name, and publishing a message with a suitable
+    routing key but without specifying the exchange name, then ensuring that
+    the message arrives in the queue correctly.
+    """
+    def testDefaultExchange(self):
+        # Test automatic binding by queue name.
+        self.queue_declare(queue="d")
+        self.assertPublishConsume(queue="d", routing_key="d")
+
+    def testDefaultExchangeExplicitBind(self):
+        # Declare the queue, then verify that the default exchange (empty
+        # exchange name) can also be bound to explicitly, like any direct
+        # exchange.
+        self.queue_declare(queue="d")
+        # Test explicit bind to default queue
+        self.verifyDirectExchange("")
+
+
+# TODO aconway 2006-09-27: Fill in empty tests:
+
+class DefaultAccessRuleTests(TestBase):
+    """
+    The server MUST NOT allow clients to access the default exchange except
+    by specifying an empty exchange name in the Queue.Bind and content Publish
+    methods.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+class ExtensionsRuleTests(TestBase):
+    """
+    The server MAY implement other exchange types as wanted.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeclareMethodMinimumRuleTests(TestBase):
+    """
+    The server SHOULD support a minimum of 16 exchanges per virtual host and
+    ideally, impose no limit except as defined by available resources.
+
+    The client creates as many exchanges as it can until the server reports
+    an error; the number of exchanges successfully created must be at least
+    sixteen.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeclareMethodTicketFieldValidityRuleTests(TestBase):
+    """
+    The client MUST provide a valid access ticket giving "active" access to
+    the realm in which the exchange exists or will be created, or "passive"
+    access if the if-exists flag is set.
+
+    Client creates access ticket with wrong access rights and attempts to use
+    in this method.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeclareMethodExchangeFieldReservedRuleTests(TestBase):
+    """
+    Exchange names starting with "amq." are reserved for predeclared and
+    standardised exchanges. The client MUST NOT attempt to create an exchange
+    starting with "amq.".
+    """
+    # TODO: rule not yet covered by an executable test here; see
+    # MiscellaneousErrorsTests.testReservedExchangeNameDisallowed below.
+
+
+class DeclareMethodTypeFieldTypedRuleTests(TestBase):
+    """
+    Exchanges cannot be redeclared with different types. The client MUST not
+    attempt to redeclare an existing exchange with a different type than used
+    in the original Exchange.Declare method.
+    """
+    # TODO: rule not yet covered by an executable test here; see
+    # MiscellaneousErrorsTests.testDifferentDeclaredType below.
+
+
+class DeclareMethodTypeFieldSupportRuleTests(TestBase):
+    """
+    The client MUST NOT attempt to create an exchange with a type that the
+    server does not support.
+    """
+    # TODO: rule not yet covered by an executable test here; see
+    # MiscellaneousErrorsTests.testTypeNotKnown below.
+
+
+class DeclareMethodPassiveFieldNotFoundRuleTests(TestBase):
+    """
+    If set, and the exchange does not already exist, the server MUST raise a
+    channel exception with reply code 404 (not found).
+    """
+    def test(self):
+        """Passive declare of an unknown exchange must fail with channel 404."""
+        try:
+            self.channel.exchange_declare(exchange="humpty_dumpty", passive=True)
+            self.fail("Expected 404 for passive declaration of unknown exchange.")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+
+class DeclareMethodDurableFieldSupportRuleTests(TestBase):
+    """
+    The server MUST support both durable and transient exchanges.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeclareMethodDurableFieldStickyRuleTests(TestBase):
+    """
+    The server MUST ignore the durable field if the exchange already exists.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeclareMethodAutoDeleteFieldStickyRuleTests(TestBase):
+    """
+    The server MUST ignore the auto-delete field if the exchange already
+    exists.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeleteMethodTicketFieldValidityRuleTests(TestBase):
+    """
+    The client MUST provide a valid access ticket giving "active" access
+    rights to the exchange's access realm.
+
+    Client creates access ticket with wrong access rights and attempts to use
+    in this method.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class DeleteMethodExchangeFieldExistsRuleTests(TestBase):
+    """
+    The client MUST NOT attempt to delete an exchange that does not exist.
+    """
+    # TODO: rule not yet covered by an executable test.
+
+
+class HeadersExchangeTests(TestBase):
+    """
+    Tests for headers exchange functionality.
+    """
+    def setUp(self):
+        # Declare a queue and start a consumer on it; each test binds "q"
+        # to amq.match with its own matching arguments.
+        TestBase.setUp(self)
+        self.queue_declare(queue="q")
+        self.q = self.consume("q")
+
+    def myAssertPublishGet(self, headers):
+        """Publish to amq.match with the given headers and assert delivery."""
+        self.assertPublishGet(self.q, exchange="amq.match", properties={'headers':headers})
+
+    def myBasicPublish(self, headers):
+        """Publish to amq.match with the given headers, without asserting delivery."""
+        self.channel.basic_publish(exchange="amq.match", content=Content("foobar", properties={'headers':headers}))
+
+    def testMatchAll(self):
+        # x-match=all: every bound header must be present and equal.
+        self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'all', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred", "age":3, "extra":"ignoreme"})
+
+        # None of these should match
+        self.myBasicPublish({})
+        self.myBasicPublish({"name":"barney"})
+        self.myBasicPublish({"name":10})
+        self.myBasicPublish({"name":"fred", "age":2})
+        self.assertEmpty(self.q)
+
+    def testMatchAny(self):
+        # x-match=any: at least one bound header must be present and equal.
+        self.channel.queue_bind(queue="q", exchange="amq.match", arguments={ 'x-match':'any', "name":"fred", "age":3})
+        self.myAssertPublishGet({"name":"fred"})
+        self.myAssertPublishGet({"name":"fred", "ignoreme":10})
+        self.myAssertPublishGet({"ignoreme":10, "age":3})
+
+        # Won't match
+        self.myBasicPublish({})
+        self.myBasicPublish({"irrelevant":0})
+        self.assertEmpty(self.q)
+
+
+class MiscellaneousErrorsTests(TestBase):
+ """
+ Test some miscellaneous error conditions
+ """
+ def testTypeNotKnown(self):
+ try:
+ self.channel.exchange_declare(exchange="test_type_not_known_exchange", type="invalid_type")
+ self.fail("Expected 503 for declaration of unknown exchange type.")
+ except Closed, e:
+ self.assertConnectionException(503, e.args[0])
+
+ def testDifferentDeclaredType(self):
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="direct")
+ try:
+ self.channel.exchange_declare(exchange="test_different_declared_type_exchange", type="topic")
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+ #cleanup
+ other = self.connect()
+ c2 = other.channel(1)
+ c2.channel_open()
+ c2.exchange_delete(exchange="test_different_declared_type_exchange")
+
+ def testReservedExchangeRedeclaredSameNameAndType(self):
+ self.channel.exchange_declare(exchange="amq.direct", type="direct", passive=True)
+ self.channel.exchange_declare(exchange="amq.direct", type="direct", passive=False)
+
+ def testReservedExchangeNameRedeclaredDifferentType(self):
+ try:
+ self.channel.exchange_declare(exchange="amq.direct", type="topic", passive=False)
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
+ def testReservedExchangeNameDisallowed(self):
+ try:
+ self.channel.exchange_declare(exchange="amq.myexch", type="direct", passive=False)
+ self.fail("Expected 530 for redeclaration of exchange with different type.")
+ except Closed, e:
+ self.assertConnectionException(530, e.args[0])
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/queue.py b/qpid/tests/src/py/qpid_tests/broker_0_8/queue.py
new file mode 100644
index 0000000000..b7a41736ab
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/queue.py
@@ -0,0 +1,255 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+
+class QueueTests(TestBase):
+    """Tests for 'methods' on the amqp queue 'class'"""
+    # NOTE(review): several calls below pass flag fields as the string "True"
+    # (e.g. exclusive="True"); a non-empty string is truthy so the flag is
+    # effectively set, but booleans would be clearer -- TODO confirm the
+    # client library does not distinguish.
+
+    def test_purge(self):
+        """
+        Test that the purge method removes messages from the queue
+        """
+        channel = self.channel
+        #setup, declare a queue and add some messages to it:
+        channel.exchange_declare(exchange="test-exchange", type="direct")
+        channel.queue_declare(queue="test-queue", exclusive=True)
+        channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
+        channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("one"))
+        channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("two"))
+        channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("three"))
+
+        #check that the queue now reports 3 messages:
+        reply = channel.queue_declare(queue="test-queue")
+        self.assertEqual(3, reply.message_count)
+
+        #now do the purge, then test that three messages are purged and the count drops to 0
+        reply = channel.queue_purge(queue="test-queue");
+        self.assertEqual(3, reply.message_count)
+        reply = channel.queue_declare(queue="test-queue")
+        self.assertEqual(0, reply.message_count)
+
+        #send a further message and consume it, ensuring that the other messages are really gone
+        channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content("four"))
+        reply = channel.basic_consume(queue="test-queue", no_ack=True)
+        queue = self.client.queue(reply.consumer_tag)
+        msg = queue.get(timeout=1)
+        self.assertEqual("four", msg.content.body)
+
+        #check error conditions (use new channels, as each failure closes its channel):
+        channel = self.client.channel(2)
+        channel.channel_open()
+        try:
+            #queue specified but doesn't exist:
+            channel.queue_purge(queue="invalid-queue")
+            self.fail("Expected failure when purging non-existent queue")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+        channel = self.client.channel(3)
+        channel.channel_open()
+        try:
+            #queue not specified and none previously declared for channel:
+            channel.queue_purge()
+            self.fail("Expected failure when purging unspecified queue")
+        except Closed, e:
+            self.assertConnectionException(530, e.args[0])
+
+        #cleanup
+        other = self.connect()
+        channel = other.channel(1)
+        channel.channel_open()
+        channel.exchange_delete(exchange="test-exchange")
+
+    def test_declare_exclusive(self):
+        """
+        Test that the exclusive field is honoured in queue.declare
+        """
+        # TestBase.setUp has already opened channel(1)
+        c1 = self.channel
+        # Here we open a second separate connection:
+        other = self.connect()
+        c2 = other.channel(1)
+        c2.channel_open()
+
+        #declare an exclusive queue:
+        c1.queue_declare(queue="exclusive-queue", exclusive="True")
+        try:
+            #other connection should not be allowed to declare this:
+            c2.queue_declare(queue="exclusive-queue", exclusive="True")
+            self.fail("Expected second exclusive queue_declare to raise a channel exception")
+        except Closed, e:
+            self.assertChannelException(405, e.args[0])
+
+
+    def test_declare_passive(self):
+        """
+        Test that the passive field is honoured in queue.declare
+        """
+        channel = self.channel
+        #declare a queue, then check passive declare of it succeeds:
+        channel.queue_declare(queue="passive-queue-1", exclusive="True")
+        channel.queue_declare(queue="passive-queue-1", passive="True")
+        try:
+            #passive declare of a queue that was never created must fail:
+            channel.queue_declare(queue="passive-queue-2", passive="True")
+            self.fail("Expected passive declaration of non-existant queue to raise a channel exception")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+
+    def test_bind(self):
+        """
+        Test various permutations of the queue.bind method
+        """
+        channel = self.channel
+        channel.queue_declare(queue="queue-1", exclusive="True")
+
+        #straightforward case, both exchange & queue exist so no errors expected:
+        channel.queue_bind(queue="queue-1", exchange="amq.direct", routing_key="key1")
+
+        #bind the default queue for the channel (i.e. last one declared):
+        channel.queue_bind(exchange="amq.direct", routing_key="key2")
+
+        #use the queue name where neither routing key nor queue are specified:
+        channel.queue_bind(exchange="amq.direct")
+
+        #try and bind to non-existent exchange
+        try:
+            channel.queue_bind(queue="queue-1", exchange="an-invalid-exchange", routing_key="key1")
+            self.fail("Expected bind to non-existant exchange to fail")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+        #need to reopen a channel (the failure above closed the old one):
+        channel = self.client.channel(2)
+        channel.channel_open()
+
+        #try and bind non-existent queue:
+        try:
+            channel.queue_bind(queue="queue-2", exchange="amq.direct", routing_key="key1")
+            self.fail("Expected bind of non-existant queue to fail")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+
+    def test_delete_simple(self):
+        """
+        Test basic queue deletion
+        """
+        channel = self.channel
+
+        #straight-forward case:
+        channel.queue_declare(queue="delete-me")
+        channel.basic_publish(routing_key="delete-me", content=Content("a"))
+        channel.basic_publish(routing_key="delete-me", content=Content("b"))
+        channel.basic_publish(routing_key="delete-me", content=Content("c"))
+        reply = channel.queue_delete(queue="delete-me")
+        self.assertEqual(3, reply.message_count)
+        #check that it has gone by declaring passively
+        try:
+            channel.queue_declare(queue="delete-me", passive="True")
+            self.fail("Queue has not been deleted")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+        #check attempted deletion of non-existent queue is handled correctly:
+        channel = self.client.channel(2)
+        channel.channel_open()
+        try:
+            channel.queue_delete(queue="i-dont-exist", if_empty="True")
+            self.fail("Expected delete of non-existant queue to fail")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+
+
+    def test_delete_ifempty(self):
+        """
+        Test that if_empty field of queue_delete is honoured
+        """
+        channel = self.channel
+
+        #create a queue and add a message to it (use default binding):
+        channel.queue_declare(queue="delete-me-2")
+        channel.queue_declare(queue="delete-me-2", passive="True")
+        channel.basic_publish(routing_key="delete-me-2", content=Content("message"))
+
+        #try to delete, but only if empty:
+        try:
+            channel.queue_delete(queue="delete-me-2", if_empty="True")
+            self.fail("Expected delete if_empty to fail for non-empty queue")
+        except Closed, e:
+            self.assertChannelException(406, e.args[0])
+
+        #need new channel now:
+        channel = self.client.channel(2)
+        channel.channel_open()
+
+        #empty queue by consuming its single message:
+        reply = channel.basic_consume(queue="delete-me-2", no_ack=True)
+        queue = self.client.queue(reply.consumer_tag)
+        msg = queue.get(timeout=1)
+        self.assertEqual("message", msg.content.body)
+        channel.basic_cancel(consumer_tag=reply.consumer_tag)
+
+        #retry deletion on empty queue:
+        channel.queue_delete(queue="delete-me-2", if_empty="True")
+
+        #check that it has gone by declaring passively:
+        try:
+            channel.queue_declare(queue="delete-me-2", passive="True")
+            self.fail("Queue has not been deleted")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+    def test_delete_ifunused(self):
+        """
+        Test that if_unused field of queue_delete is honoured
+        """
+        channel = self.channel
+
+        #create a queue and register a consumer:
+        channel.queue_declare(queue="delete-me-3")
+        channel.queue_declare(queue="delete-me-3", passive="True")
+        reply = channel.basic_consume(queue="delete-me-3", no_ack=True)
+
+        #need new channel now:
+        channel2 = self.client.channel(2)
+        channel2.channel_open()
+        #try to delete, but only if unused:
+        try:
+            channel2.queue_delete(queue="delete-me-3", if_unused="True")
+            self.fail("Expected delete if_unused to fail for queue with existing consumer")
+        except Closed, e:
+            self.assertChannelException(406, e.args[0])
+
+
+        channel.basic_cancel(consumer_tag=reply.consumer_tag)
+        channel.queue_delete(queue="delete-me-3", if_unused="True")
+        #check that it has gone by declaring passively:
+        try:
+            channel.queue_declare(queue="delete-me-3", passive="True")
+            self.fail("Queue has not been deleted")
+        except Closed, e:
+            self.assertChannelException(404, e.args[0])
+
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py b/qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py
new file mode 100644
index 0000000000..76f7e964a2
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/testlib.py
@@ -0,0 +1,66 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# Tests for the testlib itself.
+#
+
+from qpid.content import Content
+from qpid.testlib import TestBase
+from Queue import Empty
+
+import sys
+from traceback import *
+
+def mytrace(frame, event, arg):
+    # Debugging trace hook (sys.settrace signature): dumps the stack at each
+    # event. Not referenced by the tests in this file -- presumably kept as a
+    # manual debugging aid.
+    print_stack(frame);
+    print "===="
+    return mytrace
+
+class TestBaseTest(TestBase):
+    """Verify TestBase functions work as expected"""
+
+    def testAssertEmptyPass(self):
+        """Test assert empty works"""
+        self.queue_declare(queue="empty")
+        q = self.consume("empty")
+        self.assertEmpty(q)
+        # A direct get on the same queue must also time out, confirming
+        # assertEmpty did not consume anything.
+        try:
+            q.get(timeout=1)
+            self.fail("Queue is not empty.")
+        except Empty: None # Ignore
+
+    def testAssertEmptyFail(self):
+        """assertEmpty must raise when the queue holds a message"""
+        self.queue_declare(queue="full")
+        q = self.consume("full")
+        self.channel.basic_publish(routing_key="full")
+        try:
+            self.assertEmpty(q);
+            self.fail("assertEmpty did not assert on non-empty queue")
+        except AssertionError: None # Ignore
+
+    def testMessageProperties(self):
+        """Verify properties are passed with message"""
+        props={"headers":{"x":1, "y":2}}
+        self.queue_declare(queue="q")
+        q = self.consume("q")
+        self.assertPublishGet(q, routing_key="q", properties=props)
+
+
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_8/tx.py b/qpid/tests/src/py/qpid_tests/broker_0_8/tx.py
new file mode 100644
index 0000000000..9faddb1110
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_8/tx.py
@@ -0,0 +1,209 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+
+class TxTests(TestBase):
+    """
+    Tests for 'methods' on the amqp tx 'class'
+    """
+
+    def test_commit(self):
+        """
+        Test that committed publishes are delivered and committed acks are not re-delivered
+        """
+        channel = self.channel
+        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+        channel.tx_commit()
+
+        #check results: the transactional publishes must now be visible
+        for i in range(1, 5):
+            msg = queue_c.get(timeout=1)
+            self.assertEqual("TxMessage %d" % i, msg.content.body)
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("TxMessage 6", msg.content.body)
+
+        msg = queue_a.get(timeout=1)
+        self.assertEqual("TxMessage 7", msg.content.body)
+
+        #the committed acks must not cause any redelivery
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.content.body)
+            except Empty: None
+
+        #cleanup
+        channel.basic_ack(delivery_tag=0, multiple=True)
+        channel.tx_commit()
+
+    def test_auto_rollback(self):
+        """
+        Test that a channel closed with an open transaction is effectively rolled back
+        """
+        channel = self.channel
+        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+        #nothing must be delivered while the transaction is open
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.content.body)
+            except Empty: None
+
+        channel.tx_rollback()
+
+        #check results: the original (pre-transaction) messages are redelivered
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.content.body)
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.content.body)
+
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.content.body)
+
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.content.body)
+            except Empty: None
+
+        #cleanup
+        channel.basic_ack(delivery_tag=0, multiple=True)
+        channel.tx_commit()
+
+    def test_rollback(self):
+        """
+        Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+        """
+        channel = self.channel
+        queue_a, queue_b, queue_c = self.perform_txn_work(channel, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+        #nothing must be delivered while the transaction is open
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.content.body)
+            except Empty: None
+
+        channel.tx_rollback()
+
+        #check results: the rolled-back acks cause the original messages to reappear
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.content.body)
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.content.body)
+
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.content.body)
+
+        #the rolled-back publishes (TxMessage *) must never appear
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.content.body)
+            except Empty: None
+
+        #cleanup
+        channel.basic_ack(delivery_tag=0, multiple=True)
+        channel.tx_commit()
+
+    def perform_txn_work(self, channel, name_a, name_b, name_c):
+        """
+        Utility method that does some setup and some work under a transaction. Used for testing both
+        commit and rollback
+        """
+        #setup (note: these publishes happen before tx_select, so they are
+        #not part of the transaction):
+        channel.queue_declare(queue=name_a, exclusive=True)
+        channel.queue_declare(queue=name_b, exclusive=True)
+        channel.queue_declare(queue=name_c, exclusive=True)
+
+        key = "my_key_" + name_b
+        topic = "my_topic_" + name_c
+
+        channel.queue_bind(queue=name_b, exchange="amq.direct", routing_key=key)
+        channel.queue_bind(queue=name_c, exchange="amq.topic", routing_key=topic)
+
+        for i in range(1, 5):
+            channel.basic_publish(routing_key=name_a, content=Content("Message %d" % i))
+
+        channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("Message 6"))
+        channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("Message 7"))
+
+        #everything from here on is inside the transaction:
+        channel.tx_select()
+
+        #consume and ack messages
+        sub_a = channel.basic_consume(queue=name_a, no_ack=False)
+        queue_a = self.client.queue(sub_a.consumer_tag)
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.content.body)
+        channel.basic_ack(delivery_tag=msg.delivery_tag, multiple=True)
+
+        sub_b = channel.basic_consume(queue=name_b, no_ack=False)
+        queue_b = self.client.queue(sub_b.consumer_tag)
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.content.body)
+        channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+        sub_c = channel.basic_consume(queue=name_c, no_ack=False)
+        queue_c = self.client.queue(sub_c.consumer_tag)
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.content.body)
+        channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+        #publish messages (uncommitted until the caller commits or rolls back)
+        for i in range(1, 5):
+            channel.basic_publish(routing_key=topic, exchange="amq.topic", content=Content("TxMessage %d" % i))
+
+        channel.basic_publish(routing_key=key, exchange="amq.direct", content=Content("TxMessage 6"))
+        channel.basic_publish(routing_key=name_a, content=Content("TxMessage 7"))
+
+        return queue_a, queue_b, queue_c
+
+    def test_commit_overlapping_acks(self):
+        """
+        Test that logically 'overlapping' acks do not cause errors on commit
+        """
+        channel = self.channel
+        channel.queue_declare(queue="commit-overlapping", exclusive=True)
+        for i in range(1, 10):
+            channel.basic_publish(routing_key="commit-overlapping", content=Content("Message %d" % i))
+
+
+        channel.tx_select()
+
+        sub = channel.basic_consume(queue="commit-overlapping", no_ack=False)
+        queue = self.client.queue(sub.consumer_tag)
+        for i in range(1, 10):
+            msg = queue.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.content.body)
+            # NOTE(review): range(1, 10) yields 1..9, so the 10 in this list
+            # is never reached -- presumably 9 was intended; TODO confirm.
+            if i in [3, 6, 10]:
+                channel.basic_ack(delivery_tag=msg.delivery_tag)
+
+        channel.tx_commit()
+
+        #check all have been acked:
+        try:
+            extra = queue.get(timeout=1)
+            self.fail("Got unexpected message: " + extra.content.body)
+        except Empty: None
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_9/__init__.py b/qpid/tests/src/py/qpid_tests/broker_0_9/__init__.py
new file mode 100644
index 0000000000..72e69a51b9
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_9/__init__.py
@@ -0,0 +1,22 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+import query, queue, messageheader, echo
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_9/echo.py b/qpid/tests/src/py/qpid_tests/broker_0_9/echo.py
new file mode 100644
index 0000000000..a883568e35
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_9/echo.py
@@ -0,0 +1,159 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.testlib import TestBase
+from qpid.content import Content
+import qpid.client
+
+
+
class EchoTests(TestBase):
    """Verify that messages can be sent and received retaining fidelity.

    Each test publishes inside an AMQP transaction (tx.select / tx.commit),
    consumes with explicit acks, and compares the received body with what
    was sent.
    """

    def test_small_message(self):
        # Round-trip a short unique string through queue "q" within a
        # transaction and check the body arrives unchanged.
        channel = self.channel

        self.queue_declare(queue="q")

        channel.tx_select()
        consumer = self.consume("q", no_ack=False)

        body = self.uniqueString()
        channel.basic_publish(
            content=Content(body),
            routing_key="q")
        channel.tx_commit()

        msg = consumer.get(timeout=1)
        channel.basic_ack(delivery_tag=msg.delivery_tag)
        channel.tx_commit()
        self.assertEqual(body, msg.content.body)

    def test_large_message(self):
        # Same round trip as test_small_message but with a long random
        # payload, exercising bodies much larger than a typical frame.
        channel = self.channel

        self.queue_declare(queue="q")

        channel.tx_select()
        consumer = self.consume("q", no_ack=False)

        # NOTE(review): the Python client does not currently split oversized
        # messages across multiple frames, so randomLongString() is assumed to
        # fit the negotiated frame size (historically the Java Broker's
        # default maximum) — confirm against the client's framing support.
        body = self.randomLongString()
        channel.basic_publish(
            content=Content(body),
            routing_key="q")
        channel.tx_commit()

        msg = consumer.get(timeout=1)
        channel.basic_ack(delivery_tag=msg.delivery_tag)
        channel.tx_commit()
        # Compare lengths first so a truncation failure reads clearly.
        self.assertEqual(len(body), len(msg.content.body))
        self.assertEqual(body, msg.content.body)


    def test_large_message_received_in_many_content_frames(self):
        # Publish a long message on the default connection, then consume it
        # over a second connection negotiated with a small frame_max so the
        # broker must chunk the content across many frames.
        channel = self.channel

        queue_name = "q"
        self.queue_declare(queue=queue_name)

        channel.tx_select()

        body = self.randomLongString()
        channel.basic_publish(
            content=Content(body),
            routing_key=queue_name)
        channel.tx_commit()

        consuming_client = None
        try:
            # Create a second connection with minimum framesize. The Broker will then be forced to chunk
            # the content in order to send it to us.
            consuming_client = qpid.client.Client(self.config.broker.host, self.config.broker.port)
            tune_params = { "channel_max" : 256, "frame_max" : 4096 }
            consuming_client.start(username = self.config.broker.user, password = self.config.broker.password, tune_params = tune_params)

            consuming_channel = consuming_client.channel(1)
            consuming_channel.channel_open()
            consuming_channel.tx_select()

            consumer_reply = consuming_channel.basic_consume(queue=queue_name, no_ack=False)
            consumer = consuming_client.queue(consumer_reply.consumer_tag)
            msg = consumer.get(timeout=1)
            consuming_channel.basic_ack(delivery_tag=msg.delivery_tag)
            consuming_channel.tx_commit()

            self.assertEqual(len(body), len(msg.content.body))
            self.assertEqual(body, msg.content.body)
        finally:
            # Always tear down the second connection, even on failure.
            if consuming_client:
                consuming_client.close()

    def test_commit_ok_possibly_interleaved_with_message_delivery(self):
        """This test exposes a defect on the Java Broker (QPID-6094). The Java Broker
        can contravene the AMQP spec by sending other frames between the message header/frames.
        As this is a long standing defect in the Java Broker, QPID-6082 changed
        the Python client to allow it to tolerate such illegal interleaving.
        """
        channel = self.channel

        queue_name = "q"
        self.queue_declare(queue=queue_name)

        count = 25
        channel.basic_qos(prefetch_count=count)

        channel.tx_select()

        # Fill the queue with `count` large messages in one transaction.
        bodies = []
        for i in range(count):
            body = self.randomLongString()
            bodies.append(body)
            channel.basic_publish(
                content=Content(bodies[i]),
                routing_key=queue_name)
        channel.tx_commit()

        # Start consuming. Prefetch will mean the Broker will start to send us
        # all the messages accumulating them in the client.
        consumer = self.consume("q", no_ack=False)

        # Get and ack/commit the first message
        msg = consumer.get(timeout=1)
        channel.basic_ack(delivery_tag=msg.delivery_tag)
        channel.tx_commit()
        # In the problematic case, the Broker interleaves our commit-ok response amongst the content
        # frames of message. QPID-6082 means the Python client now tolerates this
        # problem and all messages should arrive correctly.

        expectedBody = bodies[0]
        self.assertEqual(len(expectedBody), len(msg.content.body))
        self.assertEqual(expectedBody, msg.content.body)

        # The remaining prefetched messages must still arrive intact and in order.
        for i in range(1, len(bodies)):
            msg = consumer.get(timeout=5)

            expectedBody = bodies[i]
            self.assertEqual(len(expectedBody), len(msg.content.body))
            self.assertEqual(expectedBody, msg.content.body)
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_9/messageheader.py b/qpid/tests/src/py/qpid_tests/broker_0_9/messageheader.py
new file mode 100644
index 0000000000..3d64adfcf0
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_9/messageheader.py
@@ -0,0 +1,61 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.testlib import TestBase
+
class MessageHeaderTests(TestBase):
    """Verify that application headers of various types survive a broker round trip."""

    def _roundtrip(self, headers):
        # Declare the test queue, attach a consumer, then publish a message
        # whose properties carry the given headers and assert that it is
        # received with those headers intact.
        self.queue_declare(queue="q")
        incoming = self.consume("q")
        self.assertPublishGet(incoming, routing_key="q",
                              properties={"headers": headers})

    def test_message_with_integer_header(self):
        # Plain integers, including zero.
        self._roundtrip({"one": 1, "zero": 0})

    def test_message_with_string_header(self):
        # Ordinary and empty string values.
        self._roundtrip({"mystr": "hello world", "myempty": ""})

    def test_message_with_boolean_header(self):
        # The AMQP boolean type is not officially supported until 0-91, but
        # the 0-8/9 Java client uses its field-value typecode. If
        # QPID_CODEC_DISABLE_0_91_BOOLEAN is set this still passes, as the
        # booleans are coerced into integers.
        self._roundtrip({"trueHeader": True, "falseHeader": False})

    def test_message_with_negatives_longints_floats_and_None(self):
        # Extremes of the signed 32- and 64-bit integer ranges, the None
        # (void) value, and doubles at the limits of IEEE-754 range.
        self._roundtrip({"myIntMin": -2147483648,
                         "myIntMax": 2147483647,
                         "myLongMax": 9223372036854775807,
                         "myLongMin": -9223372036854775808,
                         "myNullString": None,
                         "myDouble1.1": 1.1,
                         "myDoubleMin": 4.9E-324,
                         "myDoubleMax": 1.7976931348623157E308})
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_9/query.py b/qpid/tests/src/py/qpid_tests/broker_0_9/query.py
new file mode 100644
index 0000000000..cb66d079e5
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_9/query.py
@@ -0,0 +1,224 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+
class QueryTests(TestBase):
    """Tests for various query methods introduced in 0-10 and available in 0-9 for preview.

    Covers exchange_query and binding_query against the standard direct,
    topic, fanout and headers exchanges.
    """

    def test_exchange_query(self):
        """
        Test that the exchange_query method works as expected
        """
        channel = self.channel
        #check returned type for the standard exchanges
        self.assertEqual("direct", channel.exchange_query(name="amq.direct").type)
        self.assertEqual("topic", channel.exchange_query(name="amq.topic").type)
        self.assertEqual("fanout", channel.exchange_query(name="amq.fanout").type)
        self.assertEqual("headers", channel.exchange_query(name="amq.match").type)
        # the empty name denotes the default exchange, which is direct
        self.assertEqual("direct", channel.exchange_query(name="").type)
        #declare an exchange
        channel.exchange_declare(exchange="my-test-exchange", type= "direct", durable=False)
        #check that the result of a query is as expected
        response = channel.exchange_query(name="my-test-exchange")
        self.assertEqual("direct", response.type)
        self.assertEqual(False, response.durable)
        self.assertEqual(False, response.not_found)
        #delete the exchange
        channel.exchange_delete(exchange="my-test-exchange")
        #check that the query now reports not-found
        self.assertEqual(True, channel.exchange_query(name="my-test-exchange").not_found)

    def test_binding_query_direct(self):
        """
        Test that the binding_query method works as expected with the direct exchange
        """
        self.binding_query_with_key("amq.direct")

    def test_binding_query_topic(self):
        """
        Test that the binding_query method works as expected with the topic exchange
        """
        self.binding_query_with_key("amq.topic")

    def binding_query_with_key(self, exchange_name):
        # Shared body for the key-based exchanges: binds one queue with one
        # key, then probes every combination of matched/unmatched queue and
        # routing key and checks the query's result flags.
        channel = self.channel
        #setup: create two queues
        channel.queue_declare(queue="used-queue", exclusive=True)
        channel.queue_declare(queue="unused-queue", exclusive=True)

        channel.queue_bind(exchange=exchange_name, queue="used-queue", routing_key="used-key")

        # test detection of any binding to specific queue
        response = channel.binding_query(exchange=exchange_name, queue="used-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)

        # test detection of specific binding to any queue
        response = channel.binding_query(exchange=exchange_name, routing_key="used-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.key_not_matched)

        # test detection of specific binding to specific queue
        response = channel.binding_query(exchange=exchange_name, queue="used-queue", routing_key="used-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)
        self.assertEqual(False, response.key_not_matched)

        # test unmatched queue, unspecified binding
        response = channel.binding_query(exchange=exchange_name, queue="unused-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)

        # test unspecified queue, unmatched binding
        response = channel.binding_query(exchange=exchange_name, routing_key="unused-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.key_not_matched)

        # test matched queue, unmatched binding
        response = channel.binding_query(exchange=exchange_name, queue="used-queue", routing_key="unused-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)
        self.assertEqual(True, response.key_not_matched)

        # test unmatched queue, matched binding
        response = channel.binding_query(exchange=exchange_name, queue="unused-queue", routing_key="used-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)
        self.assertEqual(False, response.key_not_matched)

        # test unmatched queue, unmatched binding
        response = channel.binding_query(exchange=exchange_name, queue="unused-queue", routing_key="unused-key")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)
        self.assertEqual(True, response.key_not_matched)

        #test exchange not found
        self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)

        #test queue not found
        self.assertEqual(True, channel.binding_query(exchange=exchange_name, queue="unknown-queue").queue_not_found)


    def test_binding_query_fanout(self):
        """
        Test that the binding_query method works as expected with fanout exchange
        """
        channel = self.channel
        #setup
        channel.queue_declare(queue="used-queue", exclusive=True)
        channel.queue_declare(queue="unused-queue", exclusive=True)
        channel.queue_bind(exchange="amq.fanout", queue="used-queue")

        # test detection of any binding to specific queue
        response = channel.binding_query(exchange="amq.fanout", queue="used-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)

        # test unmatched queue, unspecified binding
        response = channel.binding_query(exchange="amq.fanout", queue="unused-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)

        #test exchange not found
        self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)

        #test queue not found
        self.assertEqual(True, channel.binding_query(exchange="amq.fanout", queue="unknown-queue").queue_not_found)

    def test_binding_query_header(self):
        """
        Test that the binding_query method works as expected with headers exchanges
        """
        channel = self.channel
        #setup: bind one queue with a headers-match argument table
        channel.queue_declare(queue="used-queue", exclusive=True)
        channel.queue_declare(queue="unused-queue", exclusive=True)
        channel.queue_bind(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"} )

        # test detection of any binding to specific queue
        response = channel.binding_query(exchange="amq.match", queue="used-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)

        # test detection of specific binding to any queue
        response = channel.binding_query(exchange="amq.match", arguments={"x-match":"all", "a":"A"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.args_not_matched)

        # test detection of specific binding to specific queue
        response = channel.binding_query(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "a":"A"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)
        self.assertEqual(False, response.args_not_matched)

        # test unmatched queue, unspecified binding
        response = channel.binding_query(exchange="amq.match", queue="unused-queue")
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)

        # test unspecified queue, unmatched binding
        response = channel.binding_query(exchange="amq.match", arguments={"x-match":"all", "b":"B"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.args_not_matched)

        # test matched queue, unmatched binding
        response = channel.binding_query(exchange="amq.match", queue="used-queue", arguments={"x-match":"all", "b":"B"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(False, response.queue_not_matched)
        self.assertEqual(True, response.args_not_matched)

        # test unmatched queue, matched binding
        response = channel.binding_query(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "a":"A"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)
        self.assertEqual(False, response.args_not_matched)

        # test unmatched queue, unmatched binding
        response = channel.binding_query(exchange="amq.match", queue="unused-queue", arguments={"x-match":"all", "b":"B"})
        self.assertEqual(False, response.exchange_not_found)
        self.assertEqual(False, response.queue_not_found)
        self.assertEqual(True, response.queue_not_matched)
        self.assertEqual(True, response.args_not_matched)

        #test exchange not found
        self.assertEqual(True, channel.binding_query(exchange="unknown-exchange").exchange_not_found)

        #test queue not found
        self.assertEqual(True, channel.binding_query(exchange="amq.match", queue="unknown-queue").queue_not_found)
diff --git a/qpid/tests/src/py/qpid_tests/broker_0_9/queue.py b/qpid/tests/src/py/qpid_tests/broker_0_9/queue.py
new file mode 100644
index 0000000000..249850caf9
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_0_9/queue.py
@@ -0,0 +1,148 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+import time
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.content import Content
+from qpid.testlib import TestBase
+from qpid.exceptions import Timeout
+
+class QueueTests(TestBase):
+ """Tests for 'methods' on the amqp queue 'class'"""
+
+ def test_unbind_direct(self):
+ self.unbind_test(exchange="amq.direct", routing_key="key")
+
+ def test_unbind_topic(self):
+ self.unbind_test(exchange="amq.topic", routing_key="key")
+
+ def test_unbind_fanout(self):
+ self.unbind_test(exchange="amq.fanout")
+
+ def test_unbind_headers(self):
+ self.unbind_test(exchange="amq.match", args={ "x-match":"all", "a":"b"}, headers={"a":"b"})
+
+ def unbind_test(self, exchange, routing_key="", args=None, headers={}):
+ #bind two queues and consume from them
+ channel = self.channel
+
+ channel.queue_declare(queue="queue-1", exclusive="True")
+ channel.queue_declare(queue="queue-2", exclusive="True")
+
+ channel.basic_consume(queue="queue-1", consumer_tag="queue-1", no_ack=True)
+ channel.basic_consume(queue="queue-2", consumer_tag="queue-2", no_ack=True)
+
+ queue1 = self.client.queue("queue-1")
+ queue2 = self.client.queue("queue-2")
+
+ channel.queue_bind(exchange=exchange, queue="queue-1", routing_key=routing_key, arguments=args)
+ channel.queue_bind(exchange=exchange, queue="queue-2", routing_key=routing_key, arguments=args)
+
+ #send a message that will match both bindings
+ channel.basic_publish(exchange=exchange, routing_key=routing_key,
+ content=Content("one", properties={"headers": headers}))
+
+ #unbind first queue
+ channel.queue_unbind(exchange=exchange, queue="queue-1", routing_key=routing_key, arguments=args)
+
+ #send another message
+ channel.basic_publish(exchange=exchange, routing_key=routing_key,
+ content=Content("two", properties={"headers": headers}))
+
+ #check one queue has both messages and the other has only one
+ self.assertEquals("one", queue1.get(timeout=1).content.body)
+ try:
+ msg = queue1.get(timeout=1)
+ self.fail("Got extra message: %s" % msg.body)
+ except Empty: pass
+
+ self.assertEquals("one", queue2.get(timeout=1).content.body)
+ self.assertEquals("two", queue2.get(timeout=1).content.body)
+ try:
+ msg = queue2.get(timeout=1)
+ self.fail("Got extra message: " + msg)
+ except Empty: pass
+
+ def test_autodelete_shared(self):
+ """
+ Test auto-deletion (of non-exclusive queues)
+ """
+ channel = self.channel
+ other = self.connect()
+ channel2 = other.channel(1)
+ channel2.channel_open()
+
+ channel.queue_declare(queue="auto-delete-me", auto_delete=True)
+
+ #consume from both channels
+ reply = channel.basic_consume(queue="auto-delete-me", no_ack=True)
+ channel2.basic_consume(queue="auto-delete-me", no_ack=True)
+
+ #implicit cancel
+ channel2.channel_close()
+
+ #check it is still there
+ channel.queue_declare(queue="auto-delete-me", passive=True)
+
+ #explicit cancel => queue is now unused again:
+ channel.basic_cancel(consumer_tag=reply.consumer_tag)
+
+ #NOTE: this assumes there is no timeout in use
+
+ #check that it has gone be declaring passively
+ try:
+ channel.queue_declare(queue="auto-delete-me", passive=True)
+ self.fail("Expected queue to have been deleted")
+ except Closed, e:
+ self.assertChannelException(404, e.args[0])
+
+ def test_flow_control(self):
+ queue_name="flow-controled-queue"
+
+ connection = self.connect(channel_options={"qpid.flow_control_wait_failure" : 1})
+ channel = connection.channel(1)
+ channel.channel_open()
+ channel.queue_declare(queue=queue_name, arguments={"x-qpid-capacity" : 25, "x-qpid-flow-resume-capacity" : 15})
+
+ try:
+ for i in xrange(100):
+ channel.basic_publish(exchange="", routing_key=queue_name,
+ content=Content("This is a message with more than 25 bytes. This should trigger flow control."))
+ time.sleep(.1)
+ self.fail("Flow Control did not work")
+ except Timeout:
+ # this is expected
+ pass
+
+ consumer_reply = channel.basic_consume(queue=queue_name, consumer_tag="consumer", no_ack=True)
+ queue = self.client.queue(consumer_reply.consumer_tag)
+ while True:
+ try:
+ msg = queue.get(timeout=1)
+ except Empty:
+ break
+ channel.basic_cancel(consumer_tag=consumer_reply.consumer_tag)
+
+ try:
+ channel.basic_publish(exchange="", routing_key=queue_name,
+ content=Content("This should not block because we have just cleared the queue."))
+ except Timeout:
+ self.fail("Unexpected Timeout. Flow Control should not be in effect.")
+
+ connection.close()
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/__init__.py b/qpid/tests/src/py/qpid_tests/broker_1_0/__init__.py
new file mode 100644
index 0000000000..b14bb96dc8
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/__init__.py
@@ -0,0 +1,26 @@
+# Do not delete - marks this directory as a python package.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from general import *
+from legacy_exchanges import *
+from selector import *
+from translation import *
+from tx import *
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/general.py b/qpid/tests/src/py/qpid_tests/broker_1_0/general.py
new file mode 100644
index 0000000000..414d0b48f5
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/general.py
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import VersionTest
+
class GeneralTests (VersionTest):
    """
    Miscellaneous tests for core AMQP 1.0 messaging behaviour.
    """
    def test_request_response(self):
        # Requester side: a sender for requests, plus a receiver whose source
        # address will serve as the reply-to.
        request_sender = self.ssn.sender("#")
        response_receiver = self.ssn.receiver("#")

        request_sender.send(Message(reply_to=response_receiver.source, id="a1", content="request"))

        # Responder side: read the request back from the request node...
        request_receiver = self.ssn.receiver(request_sender.target)
        inbound_request = request_receiver.fetch(5)
        assert inbound_request.content == "request" and inbound_request.id == "a1", inbound_request

        # ...and answer on its reply-to address, correlating by message id.
        response_sender = self.ssn.sender(inbound_request.reply_to)
        response_sender.send(Message(correlation_id=inbound_request.id, content="response"))

        # Requester receives the correlated response.
        inbound_response = response_receiver.fetch(5)
        assert inbound_response.content == "response" and inbound_response.correlation_id == "a1", inbound_response

        self.ssn.acknowledge()


    def test_browse(self):
        sender = self.ssn.sender("#")
        browser = self.ssn.receiver("%s; {mode: browse}" % sender.target)

        outgoing = [Message(content=s, subject = s) for s in ['a','b','c','d']]
        for message in outgoing:
            sender.send(message)

        # Browsing yields every message without consuming it.
        for original in outgoing:
            browsed = browser.fetch(0)
            assert browsed.content == original.content
            try:
                count = browsed.properties.get('x-amqp-delivery-count')
                assert count == 0, (count)
            except KeyError: pass #default is 0
            self.ssn.acknowledge(browsed)
        browser.close()

        # A normal receiver still sees all the messages afterwards.
        consumer = self.ssn.receiver(sender.target)
        for original in outgoing:
            received = consumer.fetch(0)
            assert received.content == original.content
            self.ssn.acknowledge(received)
+
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/legacy_exchanges.py b/qpid/tests/src/py/qpid_tests/broker_1_0/legacy_exchanges.py
new file mode 100644
index 0000000000..eefa49e817
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/legacy_exchanges.py
@@ -0,0 +1,96 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import VersionTest
+
class LegacyExchangeTests (VersionTest):
    """
    Tests for the legacy (i.e. pre 1.0) AMQP exchanges and the filters
    defined for them and registered for AMQP 1.0.

    Fix: in test_direct, test_topic and test_headers the assert's diagnostic
    tuple had been attached to the acknowledge() call instead
    (`self.ssn.acknowledge(msg), (msg, expected)` — a dead tuple expression),
    leaving the preceding assert without failure context. The tuple now
    belongs to the assert, matching the first loop of each test.
    """
    def test_fanout(self):
        # Every message sent to amq.fanout is delivered regardless of subject.
        msgs = [Message(content=s, subject = s) for s in ['a','b','c','d']]

        snd = self.ssn.sender("amq.fanout")
        rcv = self.ssn.receiver("amq.fanout")

        for m in msgs: snd.send(m)

        for expected in msgs:
            msg = rcv.fetch(0)
            assert msg.content == expected.content, (msg, expected)
            self.ssn.acknowledge(msg)
        rcv.close()

    def test_direct(self):
        # Messages route to the receiver whose subject matches the key.
        msgs = [Message(content=c, subject=s) for s, c in [('a', 'one'), ('b', 'two'),('a', 'three'),('b', 'four')]]

        snd = self.ssn.sender("amq.direct")
        rcv_a = self.ssn.receiver("amq.direct/a")
        rcv_b = self.ssn.receiver("amq.direct/b")

        for m in msgs: snd.send(m)

        for expected in ['one', 'three']:
            msg = rcv_a.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)

        for expected in ['two', 'four']:
            msg = rcv_b.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)

    def test_topic(self):
        # Wildcard subjects: rcv_a matches red.*, rcv_b matches *.squirrel.
        msgs = [Message(content=s, subject=s) for s in ['red.dog', 'black.cat', 'red.squirrel', 'grey.squirrel']]

        snd = self.ssn.sender("amq.topic")
        rcv_a = self.ssn.receiver("amq.topic/red.*")
        rcv_b = self.ssn.receiver("amq.topic/*.squirrel")

        for m in msgs: snd.send(m)

        for expected in ['red.dog', 'red.squirrel']:
            msg = rcv_a.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)

        for expected in ['red.squirrel', 'grey.squirrel']:
            msg = rcv_b.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)

    def test_headers(self):
        # Headers-exchange bindings expressed through the legacy-amqp-headers
        # binding filter: rcv_a matches all red things, rcv_b matches
        # anything that is a squirrel or black.
        msgs = [Message(content="%s.%s" % (colour, creature), properties={'creature':creature,'colour':colour}) for colour, creature in [('red','dog'), ('black', 'cat'), ('red', 'squirrel'), ('grey', 'squirrel')]]

        snd = self.ssn.sender("amq.match")
        rcv_a = self.ssn.receiver("amq.match; {link:{filter:{descriptor:'apache.org:legacy-amqp-headers-binding:map',name:'red-things',value:{'colour':'red','x-match':'all'}}}}")
        rcv_b = self.ssn.receiver("amq.match; {link:{filter:{descriptor:'apache.org:legacy-amqp-headers-binding:map',name:'cats-and-squirrels',value:{'creature':'squirrel','colour':'black','x-match':'any'}}}}")
        for m in msgs: snd.send(m)

        for expected in ['red.dog', 'red.squirrel']:
            msg = rcv_a.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)

        for expected in ['black.cat', 'red.squirrel', 'grey.squirrel']:
            msg = rcv_b.fetch(0)
            assert msg.content == expected, (msg, expected)
            self.ssn.acknowledge(msg)
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/selector.py b/qpid/tests/src/py/qpid_tests/broker_1_0/selector.py
new file mode 100644
index 0000000000..ac2bbd8db3
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/selector.py
@@ -0,0 +1,73 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import VersionTest
+
+class SelectorTests (VersionTest):
+ """
+ Tests for the selector filter registered for AMQP 1.0 under the
+ apache namespace.
+ """
+ def basic_selection_test(self, node):
+ properties = [(1, 'red','dog'), (2, 'black', 'cat'), (3, 'red', 'squirrel'), (4, 'grey', 'squirrel')]
+ msgs = [Message(content="%s.%s" % (colour, creature), properties={'sequence':sequence,'colour':colour}) for sequence, colour, creature in properties]
+
+ snd = self.ssn.sender(node)
+ rcv = self.ssn.receiver("%s; {link:{selector:\"colour IN ('red', 'grey') AND (sequence > 3 OR sequence = 1)\"}}" % snd.target)
+
+ for m in msgs: snd.send(m)
+
+ for expected in ["red.dog", "grey.squirrel"]:
+ msg = rcv.fetch(0)
+ assert msg.content == expected
+ self.ssn.acknowledge(msg)
+
+ def test_topic(self):
+ self.basic_selection_test(self.config.defines.get("topic_name", "amq.fanout"))
+
+ def test_queue(self):
+ self.basic_selection_test("#")
+
+ def test_special_fields(self):
+ msgs = [Message(content=i, id=i, correlation_id=i, priority=p+1) for p, i in enumerate(['a', 'b', 'c', 'd'])]
+
+ snd = self.ssn.sender("#")
+ rcv_1 = self.ssn.receiver("%s; {link:{selector:\"amqp.message_id = 'c'\"}}" % snd.target)
+ rcv_2 = self.ssn.receiver("%s; {link:{selector:\"amqp.correlation_id = 'b'\"}}" % snd.target)
+ rcv_3 = self.ssn.receiver("%s; {link:{selector:\"amqp.priority = 1\"}}" % snd.target)
+
+ for m in msgs: snd.send(m)
+
+ msg = rcv_1.fetch(0)
+ assert msg.content == 'c', msg
+ self.ssn.acknowledge(msg)
+
+ msg = rcv_2.fetch(0)
+ assert msg.content == 'b', msg
+ self.ssn.acknowledge(msg)
+
+ msg = rcv_3.fetch(0)
+ assert msg.content == 'a', msg
+ self.ssn.acknowledge(msg)
+
+ rcv_4 = self.ssn.receiver(snd.target)
+ msg = rcv_4.fetch(0)
+ assert msg.content == 'd'
+ self.ssn.acknowledge(msg)
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/translation.py b/qpid/tests/src/py/qpid_tests/broker_1_0/translation.py
new file mode 100644
index 0000000000..a6394fb8c5
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/translation.py
@@ -0,0 +1,87 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+from qpid.tests.messaging.implementation import *
+from qpid.tests.messaging import VersionTest
+
+class TranslationTests (VersionTest):
+ """
+ Testing translation of messages between 1.0 and 0-10
+ """
+ def send_receive_messages(self, msgs, send_version, receive_version, address):
+ rcon = self.create_connection(receive_version, True)
+ rcv = rcon.session().receiver(address)
+
+ scon = self.create_connection(send_version, True)
+ snd = scon.session().sender(rcv.source)
+
+ for m in msgs: snd.send(m)
+
+ for expected in msgs:
+ msg = rcv.fetch()
+ assert msg.content == expected.content, (msg.content, expected.content)
+ assert msg.subject == expected.subject, (msg.subject, expected.subject)
+ self.ssn.acknowledge(msg)
+ scon.close()
+ rcon.close()
+
+ def send_receive(self, send_version, receive_version, address):
+ self.send_receive_messages([Message(content=s, subject = s) for s in ['a','b','c','d']], send_version, receive_version, address)
+
+ def send_receive_map(self, send_version, receive_version, address):
+ self.send_receive_messages([Message(content={'s':'abc','i':10})], send_version, receive_version, address)
+
+ def send_receive_list(self, send_version, receive_version, address):
+ self.send_receive_messages([Message(content=['a', 1, 'c'])], send_version, receive_version, address)
+
+ def test_translation_queue_1(self):
+ self.send_receive("amqp0-10", "amqp1.0", '#')
+
+ def test_translation_queue_2(self):
+ self.send_receive("amqp1.0", "amqp0-10", '#')
+
+ def test_translation_exchange_1(self):
+ self.send_receive("amqp0-10", "amqp1.0", 'amq.fanout')
+
+ def test_translation_exchange_2(self):
+ self.send_receive("amqp1.0", "amqp0-10", 'amq.fanout')
+
+ def test_send_receive_queue_1(self):
+ self.send_receive("amqp1.0", "amqp1.0", '#')
+
+ def test_send_receive_queue_2(self):
+ self.send_receive("amqp0-10", "amqp0-10", '#')
+
+ def test_send_receive_exchange_1(self):
+ self.send_receive("amqp1.0", "amqp1.0", 'amq.fanout')
+
+ def test_send_receive_exchange_2(self):
+ self.send_receive("amqp0-10", "amqp0-10", 'amq.fanout')
+
+ def test_translate_map_1(self):
+ self.send_receive_map("amqp0-10", "amqp1.0", '#')
+
+ def test_translate_map_2(self):
+ self.send_receive_map("amqp1.0", "amqp0-10", '#')
+
+ def test_translate_list_1(self):
+ self.send_receive_list("amqp0-10", "amqp1.0", '#')
+
+ def test_translate_list_2(self):
+ self.send_receive_list("amqp1.0", "amqp0-10", '#')
diff --git a/qpid/tests/src/py/qpid_tests/broker_1_0/tx.py b/qpid/tests/src/py/qpid_tests/broker_1_0/tx.py
new file mode 100644
index 0000000000..45817fc64f
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/broker_1_0/tx.py
@@ -0,0 +1,264 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+from qpid.client import Client, Closed
+from qpid.queue import Empty
+from qpid.datatypes import Message, RangedSet
+from qpid.testlib import TestBase010
+
+class TxTests(TestBase010):
+    """
+    Tests for 'methods' on the amqp tx 'class'
+    """
+
+    def test_commit(self):
+        """
+        Test that committed publishes are delivered and committed acks are not re-delivered
+        """
+        session = self.session
+
+        #declare queues and create subscribers in the checking session
+        #to ensure that the queues are not auto-deleted too early:
+        self.declare_queues(["tx-commit-a", "tx-commit-b", "tx-commit-c"])
+        session.message_subscribe(queue="tx-commit-a", destination="qa")
+        session.message_subscribe(queue="tx-commit-b", destination="qb")
+        session.message_subscribe(queue="tx-commit-c", destination="qc")
+
+        #use a separate session for actual work
+        session2 = self.conn.session("worker", 2)
+        self.perform_txn_work(session2, "tx-commit-a", "tx-commit-b", "tx-commit-c")
+        session2.tx_commit()
+        session2.close()
+
+        session.tx_select()
+
+        # grant credit so the checking subscriptions start delivering
+        self.enable_flow("qa")
+        queue_a = session.incoming("qa")
+
+        self.enable_flow("qb")
+        queue_b = session.incoming("qb")
+
+        self.enable_flow("qc")
+        queue_c = session.incoming("qc")
+
+        #check results: the txn work published TxMessage 1-4 to the topic
+        #(bound to queue c), TxMessage 6 via amq.direct (queue b) and
+        #TxMessage 7 directly to queue a
+        for i in range(1, 5):
+            msg = queue_c.get(timeout=1)
+            self.assertEqual("TxMessage %d" % i, msg.body)
+            session.message_accept(RangedSet(msg.id))
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("TxMessage 6", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        msg = queue_a.get(timeout=1)
+        self.assertEqual("TxMessage 7", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        # all queues must now be empty: committed acks are not re-delivered
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.body)
+            except Empty: None
+
+        #cleanup
+        session.tx_commit()
+
+    def test_auto_rollback(self):
+        """
+        Test that a session closed with an open transaction is effectively rolled back
+        """
+        session = self.session
+        self.declare_queues(["tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c"])
+        session.message_subscribe(queue="tx-autorollback-a", destination="qa")
+        session.message_subscribe(queue="tx-autorollback-b", destination="qb")
+        session.message_subscribe(queue="tx-autorollback-c", destination="qc")
+
+        session2 = self.conn.session("worker", 2)
+        queue_a, queue_b, queue_c, ignore = self.perform_txn_work(session2, "tx-autorollback-a", "tx-autorollback-b", "tx-autorollback-c")
+
+        # while the transaction is open, nothing must be visible
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.body)
+            except Empty: None
+
+        # closing with the txn still open must discard the uncommitted work
+        session2.close()
+
+        session.tx_select()
+
+        self.enable_flow("qa")
+        queue_a = session.incoming("qa")
+
+        self.enable_flow("qb")
+        queue_b = session.incoming("qb")
+
+        self.enable_flow("qc")
+        queue_c = session.incoming("qc")
+
+        #check results: the pre-txn "Message" publishes reappear because the
+        #acks performed inside the abandoned transaction were rolled back
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.body)
+            session.message_accept(RangedSet(msg.id))
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.body)
+            except Empty: None
+
+        #cleanup
+        session.tx_commit()
+
+    def test_rollback(self):
+        """
+        Test that rolled back publishes are not delivered and rolled back acks are re-delivered
+        """
+        session = self.session
+        queue_a, queue_b, queue_c, consumed = self.perform_txn_work(session, "tx-rollback-a", "tx-rollback-b", "tx-rollback-c")
+
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.body)
+            except Empty: None
+
+        session.tx_rollback()
+
+        #need to release messages to get them redelivered now:
+        session.message_release(consumed)
+
+        #check results
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            self.assertEqual("Message %d" % i, msg.body)
+            session.message_accept(RangedSet(msg.id))
+
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.body)
+        session.message_accept(RangedSet(msg.id))
+
+        for q in [queue_a, queue_b, queue_c]:
+            try:
+                extra = q.get(timeout=1)
+                self.fail("Got unexpected message: " + extra.body)
+            except Empty: None
+
+        #cleanup
+        session.tx_commit()
+
+    def perform_txn_work(self, session, name_a, name_b, name_c):
+        """
+        Utility method that does some setup and some work under a transaction. Used for testing both
+        commit and rollback.
+
+        Publishes "Message 1-7" outside any transaction (direct to queue a,
+        via amq.direct to b, via amq.topic to c), then under tx_select
+        consumes and accepts them and publishes "TxMessage 1-7" in the
+        reverse routing arrangement. Returns (queue_a, queue_b, queue_c,
+        acked) where acked is the RangedSet of consumed message ids.
+        """
+        #setup:
+        self.declare_queues([name_a, name_b, name_c])
+
+        key = "my_key_" + name_b
+        topic = "my_topic_" + name_c
+
+        session.exchange_bind(queue=name_b, exchange="amq.direct", binding_key=key)
+        session.exchange_bind(queue=name_c, exchange="amq.topic", binding_key=topic)
+
+        dp = session.delivery_properties(routing_key=name_a)
+        for i in range(1, 5):
+            mp = session.message_properties(message_id="msg%d" % i)
+            session.message_transfer(message=Message(dp, mp, "Message %d" % i))
+
+        dp = session.delivery_properties(routing_key=key)
+        mp = session.message_properties(message_id="msg6")
+        session.message_transfer(destination="amq.direct", message=Message(dp, mp, "Message 6"))
+
+        dp = session.delivery_properties(routing_key=topic)
+        mp = session.message_properties(message_id="msg7")
+        session.message_transfer(destination="amq.topic", message=Message(dp, mp, "Message 7"))
+
+        session.tx_select()
+
+        #consume and ack messages
+        acked = RangedSet()
+        self.subscribe(session, queue=name_a, destination="sub_a")
+        queue_a = session.incoming("sub_a")
+        for i in range(1, 5):
+            msg = queue_a.get(timeout=1)
+            acked.add(msg.id)
+            self.assertEqual("Message %d" % i, msg.body)
+
+        self.subscribe(session, queue=name_b, destination="sub_b")
+        queue_b = session.incoming("sub_b")
+        msg = queue_b.get(timeout=1)
+        self.assertEqual("Message 6", msg.body)
+        acked.add(msg.id)
+
+        # NOTE(review): subscribe() has no return statement, so sub_c is
+        # always None; the assignment appears to be vestigial.
+        sub_c = self.subscribe(session, queue=name_c, destination="sub_c")
+        queue_c = session.incoming("sub_c")
+        msg = queue_c.get(timeout=1)
+        self.assertEqual("Message 7", msg.body)
+        acked.add(msg.id)
+
+        session.message_accept(acked)
+
+        dp = session.delivery_properties(routing_key=topic)
+        #publish messages
+        for i in range(1, 5):
+            mp = session.message_properties(message_id="tx-msg%d" % i)
+            session.message_transfer(destination="amq.topic", message=Message(dp, mp, "TxMessage %d" % i))
+
+        dp = session.delivery_properties(routing_key=key)
+        mp = session.message_properties(message_id="tx-msg6")
+        session.message_transfer(destination="amq.direct", message=Message(dp, mp, "TxMessage 6"))
+
+        dp = session.delivery_properties(routing_key=name_a)
+        mp = session.message_properties(message_id="tx-msg7")
+        session.message_transfer(message=Message(dp, mp, "TxMessage 7"))
+        return queue_a, queue_b, queue_c, acked
+
+    def declare_queues(self, names, session=None):
+        """Declare each named queue as auto-delete on the given (or default) session."""
+        session = session or self.session
+        for n in names:
+            session.queue_declare(queue=n, auto_delete=True)
+
+    def subscribe(self, session=None, **keys):
+        """Subscribe with the given keyword arguments and grant effectively
+        unlimited message and byte credit to the destination."""
+        session = session or self.session
+        consumer_tag = keys["destination"]
+        session.message_subscribe(**keys)
+        session.message_flow(destination=consumer_tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+        session.message_flow(destination=consumer_tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+    def enable_flow(self, tag, session=None):
+        """Grant effectively unlimited message and byte credit to an existing subscription."""
+        session = session or self.session
+        session.message_flow(destination=tag, unit=session.credit_unit.message, value=0xFFFFFFFFL)
+        session.message_flow(destination=tag, unit=session.credit_unit.byte, value=0xFFFFFFFFL)
+
+    def complete(self, session, msg):
+        """Mark msg as completed and notify the broker of the completed set."""
+        session.receiver._completed.add(msg.id)#TODO: this may be done automatically
+        session.channel.session_completed(session.receiver._completed)
diff --git a/qpid/tests/src/py/qpid_tests/client/client-api-example-tests.py b/qpid/tests/src/py/qpid_tests/client/client-api-example-tests.py
new file mode 100755
index 0000000000..43e0aada46
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/client/client-api-example-tests.py
@@ -0,0 +1,338 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+"""
+ client-api-examples-interop.py
+
+"""
+
+"""
+
+** TODO Add XML Exchange tests
+
+"""
+
+import os
+import shlex
+import subprocess
+import unittest
+import uuid
+import re
+from time import sleep
+
+import logging
+
+logging.basicConfig(level=logging.DEBUG,
+                    format='%(asctime)s %(levelname)s %(message)s',
+                    filename='./client-api-example-tests.log',
+                    filemode='w')
+
+#######################################################################################
+#
+# !!! Configure your paths here !!!
+#
+#######################################################################################
+
+## If you set qpid_root on a source tree, from the default install for
+## this script, you're good to go. If running from elsewhere against a
+## source tree, set QPID_ROOT. If running from an installed system,
+## set QPID_CPP_EXAMPLES, QPID_PYTHON_EXAMPLES, QPID_PYTHON_TOOLS,
+## etc. to the directories below.
+
+qpid_root = os.getenv("QPID_ROOT", os.path.abspath("../../../../../../qpid"))
+logging.debug("Qpid Root: " + qpid_root)
+
+qpid_broker = os.getenv("QPID_BROKER", "localhost:5672")
+logging.debug("Qpid Broker: " + qpid_broker)
+
+########################################################################################
+#
+# If you are working from a source tree, setting the above paths is
+# sufficient.
+#
+# If your examples are installed somewhere else, you have to tell us
+# where examples in each language are kept
+#
+########################################################################################
+
+cpp_examples_path = os.getenv("QPID_CPP_EXAMPLES", qpid_root + "/cpp/examples/messaging/")
+
+python_examples_path = os.getenv("QPID_PYTHON_EXAMPLES", qpid_root + "/python/examples/api/")
+python_path = os.getenv("PYTHONPATH", qpid_root+"/python:" + qpid_root+"/extras/qmf/src/py")
+os.environ["PYTHONPATH"] = python_path
+logging.debug("PYTHONPATH: " + os.environ["PYTHONPATH"])
+
+python_tools_path = os.getenv("QPID_PYTHON_TOOLS", qpid_root + "/tools/src/py/")
+logging.debug("QPID_PYTHON_TOOLS: " + python_tools_path)
+
+java_qpid_home = os.getenv("QPID_HOME", qpid_root + "/java/build/lib/")
+os.environ["QPID_HOME"] = java_qpid_home
+logging.debug("Java's QPID_HOME: " + os.environ["QPID_HOME"])
+java_examples_path = os.getenv("QPID_JAVA_EXAMPLES", qpid_root + "/java/client/example/")
+# NOTE(review): this shells out at *import time* to enumerate every jar under
+# QPID_HOME and splice them into CLASSPATH; importing this module therefore
+# requires a working `find` on PATH.
+find = "find " + java_qpid_home + " -name '*.jar'"
+args = shlex.split(find)
+popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+out, err = popen.communicate()
+os.environ["CLASSPATH"] = java_examples_path + ":" + re.sub("\\n", ":", out)
+logging.debug("Java CLASSPATH = " + os.environ["CLASSPATH"])
+
+java_invoke = "java " + "-Dlog4j.configuration=log4j.conf "
+
+############################################################################################
+
+
+# Command tables: one {lang, command} entry per implementation of each
+# example client; the tests iterate over these to cross-test languages.
+drains = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "drain" },
+    {'lang': 'PYTHON', 'command': python_examples_path + "drain"},
+    {'lang': 'JAVA', 'command': java_invoke + "org.apache.qpid.example.Drain"}
+    ]
+
+spouts = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "spout" },
+    {'lang': 'PYTHON', 'command': python_examples_path + "spout"},
+    {'lang': 'JAVA', 'command': java_invoke + "org.apache.qpid.example.Spout"}
+    ]
+
+mapSenders = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "map_sender" },
+    {'lang': 'JAVA', 'command': java_invoke + "org.apache.qpid.example.MapSender"}
+    ]
+
+mapReceivers = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "map_receiver" },
+    {'lang': 'JAVA', 'command': java_invoke + "org.apache.qpid.example.MapReceiver"}
+    ]
+
+
+hellos = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "hello_world" },
+    {'lang': 'PYTHON', 'command': python_examples_path + "hello" },
+    {'lang': 'JAVA', 'command': java_invoke + "org.apache.qpid.example.Hello"}
+    ]
+
+wockyClients = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "client" },
+    ]
+
+wockyServers = [
+    {'lang': 'CPP', 'command': cpp_examples_path + "server" },
+    ]
+
+
+# Delays (seconds) used to let subscribers attach before a send.
+shortWait = 0.5
+longWait = 3 # use sparingly!
+
+class TestDrainSpout(unittest.TestCase):
+
+ # setUp / tearDown
+
+ def setUp(self):
+ logging.debug('----------------------------')
+ logging.debug('START: ' + self.tcaseName())
+
+ def tearDown(self):
+ pass
+
+ #############################################################################
+ #
+ # Lemmas
+ #
+ #############################################################################
+
+ def tcaseName(self):
+ return re.split('[.]', self.id())[-1]
+
+ # Python utilities
+
+ def qpid_config(self, args):
+ commandS = python_tools_path + "qpid-config" + ' ' + args
+ args = shlex.split(commandS)
+ logging.debug("qpid_config(): " + commandS)
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out, err = popen.communicate()
+ logging.debug("qpid-config() - out=" + str(out) + ", err=" + str(err))
+
+ # Send / receive methods in various languages
+
+ def send(self, spout=spouts[0], content="", destination="amq.topic", create=1, wait=0):
+ if wait:
+ sleep(wait)
+
+ createS = ";{create:always}" if create else ""
+ addressS = "'" + destination + createS + "'"
+ brokerS = "-b " + qpid_broker
+ if spout['lang']=='CPP':
+ contentS = " ".join(['--content',"'"+content+"'"]) if content else ""
+ commandS = " ".join([spout['command'], brokerS, contentS, addressS])
+ elif spout['lang']=='PYTHON':
+ commandS = " ".join([spout['command'], brokerS, addressS, content])
+ elif spout['lang']=='JAVA':
+ brokerS = "-b guest:guest@" + qpid_broker
+ commandS = " ".join([spout['command'], brokerS, "--content="+"'"+content+"'", addressS])
+ else:
+ raise "Ain't no such language ...."
+ logging.debug("send(): " + commandS)
+ args = shlex.split(commandS)
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out, err = popen.communicate()
+ logging.debug("send() - out=" + str(out) + ", err=" + str(err))
+
+
+ def receive(self, drain=drains[0], destination="amq.topic", delete=1):
+ deleteS = ";{delete:always}" if delete else ""
+ addressS = "'" + destination + deleteS + "'"
+ brokerS = "-b " + qpid_broker
+ optionS = "-c 1 -t 30"
+ if drain['lang']=='CPP':
+ commandS = " ".join([drain['command'], optionS, brokerS, optionS, addressS])
+ elif drain['lang']=='PYTHON':
+ commandS = " ".join([drain['command'], brokerS, optionS, addressS])
+ elif drain['lang']=='JAVA':
+ brokerS = "-b guest:guest@" + qpid_broker
+ commandS = " ".join([drain['command'], brokerS, optionS, addressS])
+ else:
+ raise "Ain't no such language ...."
+ logging.debug("receive() " + commandS)
+ args = shlex.split(commandS)
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out, err = popen.communicate()
+ logging.debug("receive() - out=" + str(out) + ", err=" + str(err))
+ return out
+
+ def subscribe(self, drain=drains[0], destination="amq.topic", create=0):
+ optionS = "-t 30 -c 1"
+ brokerS = "-b " + qpid_broker
+ if drain['lang']=='CPP':
+ commandS = " ".join([drain['command'], brokerS, optionS, destination])
+ elif drain['lang']=='PYTHON':
+ commandS = " ".join([drain['command'], brokerS, optionS, destination])
+ elif drain['lang']=='JAVA':
+ logging.debug("Java working directory: ")
+ brokerS = "-b guest:guest@" + qpid_broker
+ commandS = " ".join([drain['command'], brokerS, optionS, destination])
+ else:
+ logging.debug("subscribe() - no such language!")
+ raise "Ain't no such language ...."
+ logging.debug("subscribe() " + commandS)
+ args = shlex.split(commandS)
+ return subprocess.Popen(args, stdout=subprocess.PIPE)
+
+ def listen(self, popen):
+ out,err = popen.communicate()
+ logging.debug("listen(): out=" + str(out) + ", err=" + str(err))
+ return out
+
+ #############################################################################
+ #
+ # Tests
+ #
+ #############################################################################
+
+ # Hello world!
+
+ def test_hello_world(self):
+ for hello_world in hellos:
+ args = shlex.split(hello_world['command'])
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out = popen.communicate()[0]
+ logging.debug(out)
+ self.assertTrue(out.find("world!") > 0)
+
+ def test_jabberwocky(self):
+ for i, s in enumerate(wockyServers):
+ for j, c in enumerate(wockyClients):
+ args = shlex.split(s['command'])
+ server = subprocess.Popen(args, stdout=subprocess.PIPE)
+ args = shlex.split(c['command'])
+ client = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out = client.communicate()[0]
+ logging.debug(out)
+ self.assertTrue(out.find("BRILLIG") >= 0)
+ server.terminate()
+
+ def test_maps(self):
+ for s in mapSenders:
+ for r in mapReceivers:
+ args = shlex.split(s['command'])
+ sender = subprocess.Popen(args, stdout=subprocess.PIPE)
+ args = shlex.split(r['command'])
+ receiver = subprocess.Popen(args, stdout=subprocess.PIPE)
+ out = receiver.communicate()[0]
+ logging.debug(out)
+ sender.terminate()
+
+ def test_queues(self):
+ for i, s in enumerate(spouts):
+ for j, d in enumerate(drains):
+ content = self.tcaseName() + ": " + s['lang'] + str(i) + " => " + d['lang'] + str(j)
+ self.send(s, content=content, destination="hello_world", create=1)
+ out = self.receive(d, destination="hello_world", delete=1)
+ self.assertTrue(out.find(content) >= 0)
+
+ def test_direct_exchange(self):
+ for i, s in enumerate(spouts):
+ for j, d in enumerate(drains):
+ content = self.tcaseName() + ": " + s['lang'] + str(i) + " => " + d['lang'] + str(j)
+ popen1 = self.subscribe(d, destination="amq.direct/subject")
+ popen2 = self.subscribe(d, destination="amq.direct/subject")
+ self.send(s, content=content, destination="amq.direct/subject", create=0, wait=2)
+ out1 = self.listen(popen1)
+ out2 = self.listen(popen2)
+ self.assertTrue(out1.find(self.tcaseName()) >= 0)
+ self.assertTrue(out2.find(self.tcaseName()) >= 0)
+
+ def test_fanout_exchange(self):
+ for i, s in enumerate(spouts):
+ for j, d in enumerate(drains):
+ content = self.tcaseName() + ": " + s['lang'] + str(i) + " => " + d['lang'] + str(j)
+ popen1 = self.subscribe(d, destination="amq.fanout")
+ popen2 = self.subscribe(d, destination="amq.fanout")
+ self.send(s, content=content, destination="amq.fanout", create=0, wait=2)
+ out1 = self.listen(popen1)
+ out2 = self.listen(popen2)
+ self.assertTrue(out1.find(self.tcaseName()) >= 0)
+ self.assertTrue(out2.find(self.tcaseName()) >= 0)
+
+
+ def test_topic_exchange(self):
+ for i, s in enumerate(spouts):
+ for j, d in enumerate(drains):
+ content = self.tcaseName() + ": " + s['lang'] + str(i) + " => " + d['lang'] + str(j)
+ popen1 = self.subscribe(d, destination="amq.topic" + "/" + s['lang'] + "." + d['lang'])
+ popen2 = self.subscribe(d, destination="amq.topic" + "/" + "*" + "." + d['lang'])
+ popen3 = self.subscribe(d, destination="amq.topic" + "/" + s['lang'] + "." + "*")
+ popen4 = self.subscribe(d, destination="amq.topic" + "/" + "#" + "." + d['lang'])
+ self.send(s, content=content, destination="amq.topic"+ "/" + s['lang'] + "." + d['lang'], create=0, wait=4)
+ out1 = self.listen(popen1)
+ out2 = self.listen(popen2)
+ out3 = self.listen(popen3)
+ out4 = self.listen(popen4)
+ logging.debug("out1:"+out1)
+ logging.debug("out2:"+out2)
+ logging.debug("out3:"+out3)
+ logging.debug("out4:"+out4)
+ self.assertTrue(out1.find(self.tcaseName()) >= 0)
+ self.assertTrue(out2.find(self.tcaseName()) >= 0)
+ self.assertTrue(out3.find(self.tcaseName()) >= 0)
+ self.assertTrue(out4.find(self.tcaseName()) >= 0)
+
+
+# Allow running this interop suite directly from the command line.
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/qpid/tests/src/py/qpid_tests/client/log4j.conf b/qpid/tests/src/py/qpid_tests/client/log4j.conf
new file mode 100644
index 0000000000..2c7bd74e29
--- /dev/null
+++ b/qpid/tests/src/py/qpid_tests/client/log4j.conf
@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+log4j.logger.org.apache.qpid=WARN, console
+log4j.additivity.org.apache.qpid=false
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.Threshold=all
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%t %d %p [%c{4}] %m%n