path: root/qpid/python/qpid/driver.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
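"""
Driver for qpid.messaging: a background thread keeps the API-level
Connection/Session/Sender/Receiver objects in sync with an AMQP 0-10
connection (attaching sessions, issuing credit, transferring messages,
and handling acks, commits, and aborts).
"""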

import compat, connection, socket, sys, time
from concurrency import synchronized
from datatypes import RangedSet, Message as Message010
from exceptions import Timeout
from logging import getLogger
from messaging import get_codec, ConnectError, Message, Pattern, UNLIMITED
from ops import delivery_mode
from session import Client, INCOMPLETE, SessionDetached
from threading import Condition, Thread
from util import connect

log = getLogger("qpid.messaging")

def parse_addr(address):
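  """Split an address of the form "name/subject" into (name, subject).

  The subject is None when no "/" is present; with illustrative values,
  parse_addr("amq.topic/news") yields ("amq.topic", "news") and
  parse_addr("my-queue") yields ("my-queue", None).
  """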
  parts = address.split("/", 1)
  if len(parts) == 1:
    return parts[0], None
  else:
    return parts[0], parts[1]

def reply_to2addr(reply_to):
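  """Turn an AMQP 0-10 reply-to struct back into an address string, the
  inverse of parse_addr: just the exchange, just the routing key, or
  "exchange/routing_key" when both are present.
  """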
  if reply_to.routing_key is None:
    return reply_to.exchange
  elif reply_to.exchange in (None, ""):
    return reply_to.routing_key
  else:
    return "%s/%s" % (reply_to.exchange, reply_to.routing_key)

class Attachment:

  def __init__(self, target):
    self.target = target

DURABLE_DEFAULT=True

FILTER_DEFAULTS = {
  "topic": Pattern("*")
  }

def delegate(handler, session):
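  """Build a session delegate class that routes incoming message
  transfers for the given API session back into the driver through
  handler._message_transfer.
  """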
  class Delegate(Client):

    def message_transfer(self, cmd):
      return handler._message_transfer(session, cmd)
  return Delegate

class Driver:

  def __init__(self, connection):
    self.connection = connection
    self._lock = self.connection._lock
    self._wakeup_cond = Condition()
    self._socket = None
    self._conn = None
    self._connected = False
    self._attachments = {}
    self._modcount = self.connection._modcount
    self.thread = Thread(target=self.run)
    self.thread.setDaemon(True)
    # XXX: need to figure out how to join on this thread

  def wakeup(self):
    self._wakeup_cond.acquire()
    try:
      self._wakeup_cond.notifyAll()
    finally:
      self._wakeup_cond.release()

  def start(self):
    self.thread.start()

  def run(self):
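    """Driver thread main loop: wait (up to 10 seconds) for a wakeup or
    for the connection's modification count to change, then dispatch.
    """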
    while True:
      self._wakeup_cond.acquire()
      try:
        if self.connection._modcount <= self._modcount:
          self._wakeup_cond.wait(10)
      finally:
        self._wakeup_cond.release()
      self.dispatch(self.connection._modcount)

  @synchronized
  def dispatch(self, modcount):
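    """Reconcile the API-level connection state with the wire: connect or
    disconnect as requested, then attach and process each session.  When
    reconnect is enabled, errors matching the recoverable list trigger a
    reset and a delayed retry; any other error is recorded on the
    connection.
    """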
    try:
      if self._conn is None and self.connection._connected:
        self.connect()
      elif self._conn is not None and not self.connection._connected:
        self.disconnect()

      if self._conn is not None:
        for ssn in self.connection.sessions.values():
          self.attach(ssn)
          self.process(ssn)

      exi = None
    except:
      exi = sys.exc_info()

    if exi:
      msg = compat.format_exc()
      recoverable = ["aborted", "Connection refused", "SessionDetached", "Connection reset by peer",
                     "Bad file descriptor", "start timed out", "Broken pipe"]
      for r in recoverable:
        if self.connection.reconnect and r in msg:
          print "waiting to retry"
          self.reset()
          time.sleep(3)
          print "retrying..."
          return
      else:
        self.connection.error = (msg,)

    self._modcount = modcount
    self.connection._waiter.notifyAll()

  def connect(self):
    if self._conn is not None:
      return
    try:
      self._socket = connect(self.connection.host, self.connection.port)
    except socket.error, e:
      raise ConnectError(e)
    self._conn = connection.Connection(self._socket)
    try:
      self._conn.start(timeout=10)
      self._connected = True
    except connection.VersionError, e:
      raise ConnectError(e)
    except Timeout:
      print "start timed out"
      raise ConnectError("start timed out")

  def disconnect(self):
    self._conn.close()
    self.reset()

  def reset(self):
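    """Discard per-connection protocol state after a disconnect: drop the
    attachments and transfer ids and rewind receiver credit so that
    dispatch can rebuild everything on reconnect.
    """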
    self._conn = None
    self._connected = False
    self._attachments.clear()
    for ssn in self.connection.sessions.values():
      for m in ssn.acked + ssn.unacked + ssn.incoming:
        m._transfer_id = None
      for rcv in ssn.receivers:
        rcv.impending = rcv.received

  def connected(self):
    return self._conn is not None

  def attach(self, ssn):
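    """Ensure the API session has a protocol session attached (issuing
    tx-select for transactional sessions), link its senders and
    receivers, and detach once the API session is closing.
    """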
    _ssn = self._attachments.get(ssn)
    if _ssn is None:
      _ssn = self._conn.session(ssn.name, delegate=delegate(self, ssn))
      _ssn.auto_sync = False
      _ssn.invoke_lock = self._lock
      _ssn.lock = self._lock
      _ssn.condition = self.connection._condition
      if ssn.transactional:
        # XXX: adding an attribute to qpid.session.Session
        _ssn.acked = []
        _ssn.tx_select()
      self._attachments[ssn] = _ssn

    for snd in ssn.senders:
      self.link_out(snd)
    for rcv in ssn.receivers:
      self.link_in(rcv)

    if ssn.closing:
      _ssn.close()
      del self._attachments[ssn]
      ssn.closed = True

  def _exchange_query(self, ssn, address):
    # XXX: auto sync hack is to avoid deadlock on future
    result = ssn.exchange_query(name=address, sync=True)
    ssn.sync()
    return result.get()

  def link_out(self, snd):
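    """Resolve a sender's target address.  If the exchange query reports
    it as not found, declare a queue of that name and route through the
    default exchange; otherwise treat it as an exchange and use the
    parsed subject as the routing key.  Returns the attachment, or None
    once the sender is closed.
    """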
    _ssn = self._attachments[snd.session]
    _snd = self._attachments.get(snd)
    if _snd is None:
      _snd = Attachment(snd)
      node, _snd._subject = parse_addr(snd.target)
      result = self._exchange_query(_ssn, node)
      if result.not_found:
        # XXX: should check 'create' option
        _ssn.queue_declare(queue=node, durable=DURABLE_DEFAULT, sync=True)
        _ssn.sync()
        _snd._exchange = ""
        _snd._routing_key = node
      else:
        _snd._exchange = node
        _snd._routing_key = _snd._subject
      self._attachments[snd] = _snd

    if snd.closed:
      del self._attachments[snd]
      return None
    else:
      return _snd

  def link_in(self, rcv):
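    """Resolve a receiver's source address.  If it is not a known
    exchange, declare and subscribe to a queue of that name; otherwise
    declare a private exclusive auto-delete queue, bind it using the
    receiver's filter (or the per-type default), and subscribe to that.
    Returns the attachment, or None once the receiver is closed.
    """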
    _ssn = self._attachments[rcv.session]
    _rcv = self._attachments.get(rcv)
    if _rcv is None:
      _rcv = Attachment(rcv)
      result = self._exchange_query(_ssn, rcv.source)
      if result.not_found:
        _rcv._queue = rcv.source
        # XXX: should check 'create' option
        _ssn.queue_declare(queue=_rcv._queue, durable=DURABLE_DEFAULT)
      else:
        _rcv._queue = "%s.%s" % (rcv.session.name, rcv.destination)
        _ssn.queue_declare(queue=_rcv._queue, durable=DURABLE_DEFAULT, exclusive=True, auto_delete=True)
        if rcv.filter is None:
          f = FILTER_DEFAULTS[result.type]
        else:
          f = rcv.filter
        f._bind(_ssn, rcv.source, _rcv._queue)
      _ssn.message_subscribe(queue=_rcv._queue, destination=rcv.destination)
      _ssn.message_set_flow_mode(rcv.destination, _ssn.flow_mode.credit, sync=True)
      self._attachments[rcv] = _rcv
      # XXX: need to kill syncs
      _ssn.sync()

    if rcv.closing:
      _ssn.message_cancel(rcv.destination, sync=True)
      # XXX: need to kill syncs
      _ssn.sync()
      del self._attachments[rcv]
      rcv.closed = True
      return None
    else:
      return _rcv

  def process(self, ssn):
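    """Push a session's pending work to the broker: send queued outgoing
    messages, grant credit to receivers, complete and accept acknowledged
    transfers, and carry out any pending commit or abort.
    """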
    if ssn.closing: return

    _ssn = self._attachments[ssn]

    while ssn.outgoing:
      msg = ssn.outgoing[0]
      snd = msg._sender
      self.send(snd, msg)
      ssn.outgoing.pop(0)

    for rcv in ssn.receivers:
      self.process_receiver(rcv)

    if ssn.acked:
      messages = ssn.acked[:]
      ids = RangedSet(*[m._transfer_id for m in messages if m._transfer_id is not None])
      for range in ids:
        _ssn.receiver._completed.add_range(range)
      ch = _ssn.channel
      if ch is None:
        raise SessionDetached()
      ch.session_completed(_ssn.receiver._completed)
      _ssn.message_accept(ids, sync=True)
      # XXX: really need to make this async so that we don't give up the lock
      _ssn.sync()

      # XXX: we're ignoring acks that get lost when disconnected
      for m in messages:
        ssn.acked.remove(m)
        if ssn.transactional:
          _ssn.acked.append(m)

    if ssn.committing:
      _ssn.tx_commit(sync=True)
      # XXX: need to kill syncs
      _ssn.sync()
      del _ssn.acked[:]
      ssn.committing = False
      ssn.committed = True
      ssn.aborting = False
      ssn.aborted = False

    if ssn.aborting:
      for rcv in ssn.receivers:
        _ssn.message_stop(rcv.destination)
      _ssn.sync()

      messages = _ssn.acked + ssn.unacked + ssn.incoming
      ids = RangedSet(*[m._transfer_id for m in messages])
      for range in ids:
        _ssn.receiver._completed.add_range(range)
      _ssn.channel.session_completed(_ssn.receiver._completed)
      _ssn.message_release(ids)
      _ssn.tx_rollback(sync=True)
      _ssn.sync()

      del ssn.incoming[:]
      del ssn.unacked[:]
      del _ssn.acked[:]

      for rcv in ssn.receivers:
        rcv.impending = rcv.received
        rcv.returned = rcv.received
        # XXX: do we need to update granted here as well?

      for rcv in ssn.receivers:
        self.process_receiver(rcv)

      ssn.aborting = False
      ssn.aborted = True
      ssn.committing = False
      ssn.committed = False

  def grant(self, rcv):
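    """Issue message credit so the broker may deliver up to rcv.granted
    messages: compute the outstanding delta and grant unlimited or
    incremental credit, or, when credit must shrink, flush or stop the
    subscription and re-grant from the current received count.
    """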
    _ssn = self._attachments[rcv.session]
    _rcv = self.link_in(rcv)

    if rcv.granted is UNLIMITED:
      if rcv.impending is UNLIMITED:
        delta = 0
      else:
        delta = UNLIMITED
    elif rcv.impending is UNLIMITED:
      delta = -1
    else:
      delta = max(rcv.granted, rcv.received) - rcv.impending

    if delta is UNLIMITED:
      _ssn.message_flow(rcv.destination, _ssn.credit_unit.byte, UNLIMITED.value)
      _ssn.message_flow(rcv.destination, _ssn.credit_unit.message, UNLIMITED.value)
      rcv.impending = UNLIMITED
    elif delta > 0:
      _ssn.message_flow(rcv.destination, _ssn.credit_unit.byte, UNLIMITED.value)
      _ssn.message_flow(rcv.destination, _ssn.credit_unit.message, delta)
      rcv.impending += delta
    elif delta < 0:
      if rcv.drain:
        _ssn.message_flush(rcv.destination, sync=True)
      else:
        _ssn.message_stop(rcv.destination, sync=True)
      # XXX: need to kill syncs
      _ssn.sync()
      rcv.impending = rcv.received
      self.grant(rcv)

  def process_receiver(self, rcv):
    if rcv.closed: return
    self.grant(rcv)

  def send(self, snd, msg):
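    """Translate an API message into an AMQP 0-10 transfer: pick the
    routing key, build the delivery and message properties (subject and
    "to" travel in the application headers), encode the content with the
    codec for its content type, transfer it synchronously, and count the
    ack on the sender.
    """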
    _ssn = self._attachments[snd.session]
    _snd = self.link_out(snd)

    # XXX: what if subject is specified for a normal queue?
    if _snd._routing_key is None:
      rk = msg.subject
    else:
      rk = _snd._routing_key
    # XXX: do we need to query to figure out how to create the reply-to interoperably?
    if msg.reply_to:
      rt = _ssn.reply_to(*parse_addr(msg.reply_to))
    else:
      rt = None
    dp = _ssn.delivery_properties(routing_key=rk)
    mp = _ssn.message_properties(message_id=msg.id,
                                 user_id=msg.user_id,
                                 reply_to=rt,
                                 correlation_id=msg.correlation_id,
                                 content_type=msg.content_type,
                                 application_headers=msg.properties)
    if msg.subject is not None:
      if mp.application_headers is None:
        mp.application_headers = {}
      mp.application_headers["subject"] = msg.subject
    if msg.to is not None:
      if mp.application_headers is None:
        mp.application_headers = {}
      mp.application_headers["to"] = msg.to
    if msg.durable:
      dp.delivery_mode = delivery_mode.persistent
    enc, dec = get_codec(msg.content_type)
    body = enc(msg.content)
    _ssn.message_transfer(destination=_snd._exchange,
                          message=Message010(dp, mp, body),
                          sync=True)
    log.debug("SENT [%s] %s", snd.session, msg)
    # XXX: really need to make this async so that we don't give up the lock
    _ssn.sync()
    # XXX: should we log the ack somehow too?
    snd.acked += 1

  @synchronized
  def _message_transfer(self, ssn, cmd):
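    """Delegate callback for an incoming transfer: decode the 0-10
    message, record the receiver it arrived on, append it to the
    session's incoming queue, and return INCOMPLETE so that completion is
    deferred until the message is acknowledged.
    """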
    m = Message010(cmd.payload)
    m.headers = cmd.headers
    m.id = cmd.id
    msg = self._decode(m)
    rcv = ssn.receivers[int(cmd.destination)]
    msg._receiver = rcv
    if rcv.impending is not UNLIMITED:
      assert rcv.received < rcv.impending
    rcv.received += 1
    log.debug("RECV [%s] %s", ssn, msg)
    ssn.incoming.append(msg)
    self.connection._waiter.notifyAll()
    return INCOMPLETE

  def _decode(self, message):
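    """Convert a low-level 0-10 message into an API-level Message,
    recovering the content via its content-type codec along with the
    subject, to, reply-to address, and other standard properties.
    """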
    dp = message.get("delivery_properties")
    mp = message.get("message_properties")
    ap = mp.application_headers
    enc, dec = get_codec(mp.content_type)
    content = dec(message.body)
    msg = Message(content)
    msg.id = mp.message_id
    if ap is not None:
      msg.to = ap.get("to")
      msg.subject = ap.get("subject")
    msg.user_id = mp.user_id
    if mp.reply_to is not None:
      msg.reply_to = reply_to2addr(mp.reply_to)
    msg.correlation_id = mp.correlation_id
    msg.durable = dp.delivery_mode == delivery_mode.persistent
    msg.properties = mp.application_headers
    msg.content_type = mp.content_type
    msg._transfer_id = message.id
    return msg