Diffstat (limited to 'examples/gui/fs-gui.py')
-rwxr-xr-x  examples/gui/fs-gui.py | 844
 1 file changed, 511 insertions(+), 333 deletions(-)
diff --git a/examples/gui/fs-gui.py b/examples/gui/fs-gui.py
index e29f02b8..c16f58cc 100755
--- a/examples/gui/fs-gui.py
+++ b/examples/gui/fs-gui.py
@@ -25,37 +25,20 @@ import socket
import threading
import weakref
-import signal
-
-try:
- import pygtk
- pygtk.require("2.0")
-
- import gtk, gobject, gtk.gdk
- import gobject
-except ImportError, e:
- raise SystemExit("PyGTK couldn't be found ! (%s)" % (e[0]))
-
-try:
- import pygst
- pygst.require('0.10')
-
- import gst
-except ImportError, e:
- raise SystemExit("Gst-Python couldn't be found! (%s)" % (e[0]))
-try:
- import farstream
-except:
- try:
- sys.path.append(os.path.join(os.path.dirname(__file__),
- '..', '..', 'python', '.libs'))
- import farstream
- except ImportError, e:
- raise SystemExit("Farstream couldn't be found! (%s)" % (e[0]))
+import gc
+import signal
+import gi
+gi.require_version('Farstream', '0.2')
+gi.require_version('Gst', '1.0')
+gi.require_version('GstVideo', '1.0')
+gi.require_version('Gtk', '3.0')
+gi.require_version('Gdk', '3.0')
+from gi.repository import GLib, GObject, Gtk, Gdk, GdkX11
+from gi.repository import Farstream, Gst, GstVideo
-from fs_gui_net import FsUIClient, FsUIListener, FsUIServer
+from fs_gui_net import FsUIClient, FsUIListener, FsUIServer
CAMERA=False
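
The PyGTK/pygst imports give way to GObject Introspection; gi.require_version() has to run before the corresponding gi.repository import. A minimal standalone sketch of the new bootstrap (assuming the GStreamer 1.0 and Farstream 0.2 typelibs are installed):

    import sys
    import gi

    # require_version() must be called before importing from gi.repository,
    # otherwise gi may pick a different (or no) typelib version.
    gi.require_version('Gst', '1.0')
    gi.require_version('Farstream', '0.2')
    from gi.repository import GLib, Gst, Farstream

    # fs-gui.py itself calls Gst.init(sys.argv) in its __main__ block.
    Gst.init(sys.argv)
    print(Gst.version_string())
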
@@ -67,46 +50,48 @@ SERVER=2
TRANSMITTER="nice"
+INITIAL_VOLUME=50.0
+
builderprefix = os.path.join(os.path.dirname(__file__),"fs-gui-")
-def make_video_sink(pipeline, xid, name, async=True):
+def make_video_sink(pipeline, xid, name, sync=True):
"Make a bin with a video sink in it, that will be displayed on xid."
- bin = gst.Bin("videosink_%d" % xid)
- sink = gst.element_factory_make("ximagesink", name)
- sink.set_property("sync", async)
- sink.set_property("async", async)
- bin.add(sink)
- colorspace = gst.element_factory_make("ffmpegcolorspace")
- bin.add(colorspace)
- videoscale = gst.element_factory_make("videoscale")
- bin.add(videoscale)
- videoscale.link(colorspace)
- colorspace.link(sink)
- bin.add_pad(gst.GhostPad("sink", videoscale.get_pad("sink")))
- sink.set_data("xid", xid)
+ bin = Gst.parse_bin_from_description("videoconvert ! videoscale ! videoconvert ! xvimagesink", True)
+ sink = bin.get_by_interface(GstVideo.VideoOverlay)
+ assert sink
+ bin.set_name("videosink_%d" % xid)
+ sink.set_window_handle(xid)
+ sink.props.sync = sync
return bin
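
make_video_sink() now builds the sink with Gst.parse_bin_from_description() (the trailing True ghosts the unlinked sink pad) and finds the overlay element through the GstVideo.VideoOverlay interface instead of storing an "xid" on the element. A reduced sketch of that pattern, with xvimagesink assumed to be available:

    import gi
    gi.require_version('Gst', '1.0')
    gi.require_version('GstVideo', '1.0')
    from gi.repository import Gst, GstVideo

    Gst.init(None)

    # Parse a sink bin; True ghosts the unlinked videoconvert sink pad.
    sinkbin = Gst.parse_bin_from_description(
        "videoconvert ! videoscale ! videoconvert ! xvimagesink", True)

    # Find the element inside the bin that implements VideoOverlay.
    overlay = sinkbin.get_by_interface(GstVideo.VideoOverlay)
    assert overlay is not None

    # With a real X11 window id (e.g. widget.get_window().get_xid()):
    # overlay.set_window_handle(xid)
    # overlay.props.sync = False   # the preview sink is created unsynchronized
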
-class FsUIPipeline:
+class FsUIPipeline (GObject.Object):
"Object to wrap the GstPipeline"
+ level = GObject.Property(type=float)
+
def int_handler(self, sig, frame):
try:
- gst.DEBUG_BIN_TO_DOT_FILE(self.pipeline, 0, "pipelinedump")
+ Gst.DEBUG_BIN_TO_DOT_FILE(self.pipeline, 0, "pipelinedump")
except:
pass
sys.exit(2)
+
+ def g_int_handler(self):
+ self.int_handler(None, None)
def __init__(self, elementname="fsrtpconference"):
- self.pipeline = gst.Pipeline()
+ GObject.GObject.__init__(self)
+ self.pipeline = Gst.Pipeline()
+ GLib.unix_signal_add_full(0, signal.SIGINT, self.g_int_handler, None,)
signal.signal(signal.SIGINT, self.int_handler)
- self.notifier = farstream.ElementAddedNotifier()
- self.notifier.connect("element-added", self.element_added_cb)
+ #self.pipeline.get_bus().set_sync_handler(self.sync_handler, None)
+ self.pipeline.get_bus().add_watch(0, self.async_handler, None)
+ self.conf = Gst.ElementFactory.make(elementname, None)
+ self.notifier = Farstream.ElementAddedNotifier()
self.notifier.add(self.pipeline)
- self.pipeline.get_bus().set_sync_handler(self.sync_handler)
- self.pipeline.get_bus().add_watch(self.async_handler)
- self.conf = gst.element_factory_make(elementname)
+ self.notifier.set_default_properties(self.conf)
self.pipeline.add(self.conf)
if VIDEO:
@@ -115,71 +100,96 @@ class FsUIPipeline:
if AUDIO:
self.audiosource = FsUIAudioSource(self.pipeline)
self.audiosession = FsUISession(self.conf, self.audiosource)
- self.adder = None
- self.pipeline.set_state(gst.STATE_PLAYING)
+ #self.adder = None
+ self.pipeline.set_state(Gst.State.PLAYING)
def __del__(self):
- self.pipeline.set_state(gst.STATE_NULL)
+ self.pipeline.set_state(Gst.State.NULL)
+
+ def set_mute(self, muted):
+ if AUDIO:
+ self.audiosource.volume.props.mute = muted
+
+ def set_volume(self, volume):
+ if AUDIO:
+ self.audiosource.volume.props.volume = volume / 100
- def sync_handler(self, bus, message):
+ def sync_handler(self, bus, message, data):
"Message handler to get the prepare-xwindow-id event"
- if message.type == gst.MESSAGE_ELEMENT and \
- message.structure.has_name("prepare-xwindow-id"):
+ if message.type == Gst.MessageType.ELEMENT and \
+ message.get_structure().has_name("prepare-xwindow-id"):
xid = None
element = message.src
# We stored the XID on the element or its parent on the expose event
# Now lets look it up
while not xid and element:
- xid = element.get_data("xid")
+ xid = element.xid
element = element.get_parent()
if xid:
- message.src.set_xwindow_id(xid)
- return gst.BUS_DROP
- return gst.BUS_PASS
+ message.src.set_window_handle(xid)
+ return Gst.BusSyncReply.DROP
+ return Gst.BusSyncReply.PASS
- def async_handler(self, bus, message):
+ def async_handler(self, bus, message, Data):
"Async handler to print messages"
- if message.type == gst.MESSAGE_ERROR:
- print message.src.get_name(), ": ", message.parse_error()
- elif message.type == gst.MESSAGE_WARNING:
- print message.src.get_name(), ": ", message.parse_warning()
- elif message.type == gst.MESSAGE_ELEMENT:
- if message.structure.has_name("dtmf-event"):
- print "dtmf-event: %d" % message.structure["number"]
- elif message.structure.has_name("farstream-local-candidates-prepared"):
- message.structure["stream"].uistream.local_candidates_prepared()
-
- elif message.structure.has_name("farstream-new-local-candidate"):
- message.structure["stream"].uistream.new_local_candidate(
- message.structure["candidate"])
- elif message.structure.has_name("farstream-codecs-changed"):
- print message.src.get_name(), ": ", message.structure.get_name()
- message.structure["session"].uisession.codecs_changed()
- if AUDIO and message.structure["session"] == self.audiosession.fssession:
+ if message.type == Gst.MessageType.ERROR:
+ print "ERROR: ", message.src.get_name(), ": ", message.parse_error()
+ sys.exit(1)
+ elif message.type == Gst.MessageType.WARNING:
+ print "WARNING: ", message.src.get_name(), ": ", message.parse_warning()
+ elif message.type == Gst.MessageType.LATENCY:
+ self.pipeline.recalculate_latency()
+ elif message.type == Gst.MessageType.ELEMENT:
+ if message.get_structure().has_name("dtmf-event"):
+ print "dtmf-event: %d" % message.get_structure().get_int("number")
+ elif message.get_structure().has_name("farstream-local-candidates-prepared"):
+ message.get_structure().get_value("stream").uistream.local_candidates_prepared()
+
+ elif message.get_structure().has_name("farstream-new-local-candidate"):
+ message.get_structure().get_value("stream").uistream.new_local_candidate(
+ message.get_structure().get_value("candidate"))
+ elif message.get_structure().has_name("farstream-codecs-changed"):
+ print message.src.get_name(), ": ", message.get_structure().get_name()
+ message.get_structure().get_value("session").uisession.codecs_changed()
+ if AUDIO and message.get_structure().get_value("session") == self.audiosession.fssession:
self.codecs_changed_audio()
- if VIDEO and message.structure["session"] == self.videosession.fssession:
+ if VIDEO and message.get_structure().get_value("session") == self.videosession.fssession:
self.codecs_changed_video()
- elif message.structure.has_name("farstream-send-codec-changed"):
- print message.src.get_name(), ": ", message.structure.get_name()
- print "send codec changed: " + message.structure["codec"].to_string()
- if AUDIO and message.structure["session"] == self.audiosession.fssession:
+ elif message.get_structure().has_name("farstream-send-codec-changed"):
+ print message.src.get_name(), ": ", message.get_structure().get_name()
+ print "send codec changed: " + message.get_structure().get_value("codec").to_string()
+ if AUDIO and message.get_structure().get_value("session") == self.audiosession.fssession:
self.codecs_changed_audio()
- if VIDEO and message.structure["session"] == self.videosession.fssession:
+ if VIDEO and message.get_structure().get_value("session") == self.videosession.fssession:
self.codecs_changed_video()
- elif message.structure.has_name("farstream-recv-codecs-changed"):
- print message.src.get_name(), ": ", message.structure.get_name()
- message.structure["stream"].uistream.recv_codecs_changed( \
- message.structure["codecs"])
+ elif message.get_structure().has_name("farstream-recv-codecs-changed"):
+ print message.src.get_name(), ": ", message.get_structure().get_name()
+ message.get_structure().get_value("stream").uistream.recv_codecs_changed( \
+ message.get_structure().get_value("codecs"))
- elif message.structure.has_name("farstream-error"):
- print "Async error ("+ str(message.structure["error-no"]) +"): " + message.structure["error-msg"]
+ elif message.get_structure().has_name("farstream-error"):
+ print "Async error ("+ str(message.get_structure().get_int("error-no")) +"): " + message.get_structure().get_string("error-msg")
+ elif message.get_structure().has_name("level"):
+ decay = message.get_structure().get_value("decay")
+ rms = message.get_structure().get_value("rms")
+ avg_rms = float(sum(rms))/len(rms) if len(rms) > 0 else float('nan')
+ avg_linear = pow (10, avg_rms / 20) * 100
+ if message.src == self.audiosource.level:
+ self.props.level = avg_linear
+ else:
+ for stream in self.audiosession.streams:
+ for sink in stream.sinks:
+ level = sink.get_by_name("l")
+ if level and level is message.src:
+ stream.participant.props.level = avg_linear
+ break
else:
- print message.src.get_name(), ": ", message.structure.get_name()
- elif message.type != gst.MESSAGE_STATE_CHANGED \
- and message.type != gst.MESSAGE_ASYNC_DONE:
+ print message.src.get_name(), ": ", message.get_structure().get_name()
+ elif message.type != Gst.MessageType.STATE_CHANGED \
+ and message.type != Gst.MessageType.ASYNC_DONE \
+ and message.type != Gst.MessageType.QOS:
print message.type
-
return True
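
Bus messages lose the dictionary-style access of 0.10: fields come out of Gst.Structure via get_value()/get_int(), and the watch is installed with an explicit priority. The level branch averages the per-channel RMS values (in dB) and converts them to a 0-100 linear scale with 10^(dB/20) * 100. A self-contained sketch of the same handling on a plain test pipeline:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import GLib, Gst

    Gst.init(None)

    def on_message(bus, message, loop):
        if message.type == Gst.MessageType.ERROR:
            err, debug = message.parse_error()
            print("ERROR: %s: %s" % (message.src.get_name(), err.message))
            loop.quit()
        elif message.type == Gst.MessageType.ELEMENT:
            s = message.get_structure()
            if s is not None and s.has_name("level"):
                rms = s.get_value("rms")     # per-channel RMS values, in dB
                if len(rms) > 0:
                    avg_db = sum(rms) / len(rms)
                    # dB -> linear amplitude on a 0..100 scale, as in fs-gui.py
                    print("level: %.1f" % (10 ** (avg_db / 20.0) * 100))
        elif message.type == Gst.MessageType.EOS:
            loop.quit()
        return True                          # keep the watch installed

    pipeline = Gst.parse_launch(
        "audiotestsrc num-buffers=200 ! level message=true ! fakesink")
    loop = GLib.MainLoop()
    pipeline.get_bus().add_watch(GLib.PRIORITY_DEFAULT, on_message, loop)
    pipeline.set_state(Gst.State.PLAYING)
    loop.run()
    pipeline.set_state(Gst.State.NULL)
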
def make_video_preview(self, xid, newsize_callback):
@@ -188,84 +198,88 @@ class FsUIPipeline:
"previewvideosink", False)
self.pipeline.add(self.previewsink)
#Add a probe to wait for the first buffer to find the image size
- self.havesize = self.previewsink.get_pad("sink").add_buffer_probe(self.have_size,
- newsize_callback)
+ self.havesize = self.previewsink.get_static_pad("sink").add_probe(Gst.PadProbeType.EVENT_DOWNSTREAM, self.have_size, newsize_callback)
- self.previewsink.set_state(gst.STATE_PLAYING)
+ self.previewsink.set_state(Gst.State.PLAYING)
self.videosource.tee.link(self.previewsink)
- self.pipeline.set_state(gst.STATE_PLAYING)
return self.previewsink
- def have_size(self, pad, buffer, callback):
+ def have_size(self, pad, info, callback):
"Callback on the first buffer to know the drawingarea size"
- x = buffer.caps[0]["width"]
- y = buffer.caps[0]["height"]
+ if (info.get_event().type != Gst.EventType.CAPS):
+ return Gst.PadProbeReturn.OK
+
+ caps = info.get_event().parse_caps()
+ x = caps.get_structure(0).get_int("width")[1]
+ y = caps.get_structure(0).get_int("height")[1]
callback(x,y)
- self.previewsink.get_pad("sink").remove_buffer_probe(self.havesize)
- return True
+ return Gst.PadProbeReturn.OK
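
have_size() changes from a buffer probe reading buffer.caps to a downstream-event probe that waits for the CAPS event; in PyGObject, Gst.Structure.get_int() returns a (success, value) pair, hence the [1] indexing above. A standalone, one-shot version of the probe on a test source:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)

    def have_size(pad, info, user_data):
        event = info.get_event()
        if event.type != Gst.EventType.CAPS:
            return Gst.PadProbeReturn.OK
        structure = event.parse_caps().get_structure(0)
        ok_w, width = structure.get_int("width")
        ok_h, height = structure.get_int("height")
        print("negotiated size: %dx%d" % (width, height))
        return Gst.PadProbeReturn.REMOVE     # drop the probe after the first CAPS

    pipeline = Gst.parse_launch("videotestsrc num-buffers=30 ! fakesink name=sink")
    sinkpad = pipeline.get_by_name("sink").get_static_pad("sink")
    sinkpad.add_probe(Gst.PadProbeType.EVENT_DOWNSTREAM, have_size, None)
    pipeline.set_state(Gst.State.PLAYING)
    pipeline.get_bus().timed_pop_filtered(
        Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS | Gst.MessageType.ERROR)
    pipeline.set_state(Gst.State.NULL)
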
- def link_audio_sink(self, pad):
+ def link_audio_sink(self, pad, sinkcount):
"Link the audio sink to the pad"
- print >>sys.stderr, "LINKING AUDIO SINK"
- if not self.adder:
- audiosink = gst.element_factory_make("alsasink")
- audiosink.set_property("buffer-time", 50000)
- self.pipeline.add(audiosink)
-
- try:
- self.adder = gst.element_factory_make("liveadder")
- except gst.ElementNotFoundError:
- audiosink.set_state(gst.STATE_PLAYING)
- pad.link(audiosink.get_pad("sink"))
- return
- self.pipeline.add(self.adder)
- audiosink.set_state(gst.STATE_PLAYING)
- self.adder.link(audiosink)
- self.adder.set_state(gst.STATE_PLAYING)
- convert1 = gst.element_factory_make("audioconvert")
- self.pipeline.add(convert1)
- resample = gst.element_factory_make("audioresample")
- self.pipeline.add(resample)
- convert2 = gst.element_factory_make("audioconvert")
- self.pipeline.add(convert2)
- convert1.link(resample)
- resample.link(convert2)
- convert2.link(self.adder)
- pad.link(convert1.get_pad("sink"))
- convert2.set_state(gst.STATE_PLAYING)
- resample.set_state(gst.STATE_PLAYING)
- convert1.set_state(gst.STATE_PLAYING)
-
- def element_added_cb(self, notifier, bin, element):
- if not element.get_factory():
- return
- if element.get_factory().get_name() == "x264enc":
- element.set_property("byte-stream", True)
- element.set_property("bitrate", 128)
- element.set_property("speed-preset", 2)
- element.set_property("profile", "baseline")
- element.set_property("tune", "zerolatency")
- elif element.get_factory().get_name() == "rtpbin":
- element.set_property("latency", 100)
-
+ # print >>sys.stderr, "LINKING AUDIO SINK"
+ # if not self.adder:
+ # audiosink = Gst.ElementFactory.make("alsasink", None)
+ # audiosink.set_property("buffer-time", 50000)
+ # self.pipeline.add(audiosink)
+
+ # try:
+ # self.adder = Gst.ElementFactory.make("liveadder", None)
+ # except Gst.ElementNotFoundError:
+ # audiosink.set_state(Gst.State.PLAYING)
+ # pad.link(audiosink.get_static_pad("sink"))
+ # return
+ # self.pipeline.add(self.adder)
+ # audiosink.set_state(Gst.State.PLAYING)
+ # self.adder.link(audiosink)
+ # self.adder.set_state(Gst.State.PLAYING)
+ # convert1 = Gst.ElementFactory.make("audioconvert", None)
+ # self.pipeline.add(convert1)
+ # resample = Gst.ElementFactory.make("audioresample", None)
+ # self.pipeline.add(resample)
+ # convert2 = Gst.ElementFactory.make("audioconvert", None)
+ # self.pipeline.add(convert2)
+ # convert1.link(resample)
+ # resample.link(convert2)
+ # convert2.link(self.adder)
+ # pad.link(convert1.get_static_pad("sink"))
+ # convert2.set_state(Gst.State.PLAYING)
+ # resample.set_state(Gst.State.PLAYING)
+ # convert1.set_state(Gst.State.PLAYING)
+
+ sink = Gst.parse_bin_from_description("level name=l ! pulsesink name=v", True)
+ sink.set_name("audiosink_" + str(sinkcount));
+ print sink
+ print sink.get_name()
+ self.pipeline.add(sink)
+ sink.set_state(Gst.State.PLAYING)
+ if pad.link(sink.get_static_pad("sink")) != Gst.PadLinkReturn.OK:
+ print("LINK FAILED")
+ self.pipeline.post_message(Gst.Message.new_latency(sink))
+ return sink
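
link_audio_sink() now parses a per-stream level + pulsesink bin and checks the result of pad.link(), which returns a Gst.PadLinkReturn in 1.0 rather than raising. A reduced sketch (audiotestsrc standing in for the Farstream source pad, autoaudiosink instead of pulsesink so it runs without PulseAudio):

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)

    pipeline = Gst.Pipeline()
    src = Gst.ElementFactory.make("audiotestsrc", None)
    pipeline.add(src)

    # "l" is a level element that can post level messages, "v" is the sink.
    sink = Gst.parse_bin_from_description("level name=l ! autoaudiosink name=v", True)
    pipeline.add(sink)
    sink.set_state(Gst.State.PLAYING)

    if src.get_static_pad("src").link(sink.get_static_pad("sink")) != Gst.PadLinkReturn.OK:
        raise RuntimeError("link failed")
    pipeline.set_state(Gst.State.PLAYING)
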
class FsUISource:
"An abstract generic class for media sources"
def __init__(self, pipeline):
self.pipeline = pipeline
- self.tee = gst.element_factory_make("tee")
+ self.tee = Gst.ElementFactory.make("tee", None)
pipeline.add(self.tee)
- self.tee.set_state(gst.STATE_PLAYING)
+ self.fakesink = Gst.ElementFactory.make("fakesink", None)
+ self.fakesink.props.async = False
+ self.fakesink.props.sync = False
+ self.fakesink.set_state(Gst.State.PLAYING)
+ self.tee.set_state(Gst.State.PLAYING)
+ pipeline.add(self.fakesink)
+ self.tee.link(self.fakesink)
self.source = self.make_source()
pipeline.add(self.source)
self.source.link(self.tee)
- self.playcount = 0
def __del__(self):
- self.source.set_state(gst.STATE_NULL)
- self.tee.set_state(gst.STATE_NULL)
+ self.source.set_state(Gst.State.NULL)
+ self.tee.set_state(Gst.State.NULL)
self.pipeline.remove(self.source)
self.pipeline.remove(self.tee)
@@ -279,57 +293,49 @@ class FsUISource:
"Returns the FsMediaType of the source."
raise NotImplementedError()
- def get_src_pad(self, name="src%d"):
+ def get_src_pad(self, name="src_%u"):
"Gets a source pad from the source"
- queue = gst.element_factory_make("queue")
- queue.set_property("leaky", 2)
- queue.set_property("max-size-time", 50*gst.MSECOND)
requestpad = self.tee.get_request_pad(name)
- self.pipeline.add(queue)
- requestpad.link(queue.get_static_pad("sink"))
- pad = queue.get_static_pad("src")
- pad.set_data("requestpad", requestpad)
- pad.set_data("queue", queue)
- return pad
+ assert requestpad
+ return requestpad
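
get_src_pad() no longer wraps the tee pad in a leaky queue; it simply requests a pad from the tee, whose request-pad template is named "src_%u" in 1.0 (it was "src%d" in 0.10). For example:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)

    tee = Gst.ElementFactory.make("tee", None)

    # Request a source pad using the 1.0 template name.
    pad = tee.get_request_pad("src_%u")
    assert pad is not None
    print(pad.get_name())            # e.g. "src_0"

    # put_src_pad() hands the pad back so the tee can release it.
    tee.release_request_pad(pad)
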
def put_src_pad(self, pad):
"Puts the source pad from the source"
- self.pipeline.remove(pad.get_data("queue"))
- self.tee.release_request_pad(pad.get_data("requestpad"))
+ self.tee.release_request_pad(pad)
class FsUIVideoSource(FsUISource):
"A Video source"
def get_type(self):
- return farstream.MEDIA_TYPE_VIDEO
+ return Farstream.MediaType.VIDEO
def make_source(self):
- bin = gst.Bin()
+ bin = Gst.Bin()
if CAMERA:
- source = gst.element_factory_make("v4l2src")
+ source = Gst.ElementFactory.make("v4l2src", None)
source.set_property("device", CAMERA)
bin.add(source)
else:
- source = gst.element_factory_make("videotestsrc")
+ source = Gst.ElementFactory.make("videotestsrc", None)
source.set_property("is-live", 1)
bin.add(source)
- overlay = gst.element_factory_make("timeoverlay")
+ overlay = Gst.ElementFactory.make("timeoverlay", None)
overlay.set_property("font-desc", "Sans 32")
bin.add(overlay)
source.link(overlay)
source=overlay
- filter = gst.element_factory_make("capsfilter")
- filter.set_property("caps", gst.Caps("video/x-raw-yuv , width=[300,500] , height=[200,500], framerate=[20/1,30/1]"))
+ filter = Gst.ElementFactory.make("capsfilter", None)
+ filter.set_property("caps", Gst.Caps.from_string("video/x-raw , width=[300,500] , height=[200,500], framerate=[20/1,30/1]"))
bin.add(filter)
source.link(filter)
- videoscale = gst.element_factory_make("videoscale")
+ videoscale = Gst.ElementFactory.make("videoscale", None)
bin.add(videoscale)
filter.link(videoscale)
- bin.add_pad(gst.GhostPad("src", videoscale.get_pad("src")))
+ bin.add_pad(Gst.GhostPad.new("src", videoscale.get_static_pad("src")))
return bin
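
In the video source, raw caps are now named video/x-raw (not video/x-raw-yuv), caps are built with Gst.Caps.from_string(), and ghost pads with Gst.GhostPad.new(). The same construction in isolation:

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)

    srcbin = Gst.Bin()
    source = Gst.ElementFactory.make("videotestsrc", None)
    source.set_property("is-live", True)
    srcbin.add(source)

    capsfilter = Gst.ElementFactory.make("capsfilter", None)
    capsfilter.set_property("caps", Gst.Caps.from_string(
        "video/x-raw, width=[300,500], height=[200,500], framerate=[20/1,30/1]"))
    srcbin.add(capsfilter)
    source.link(capsfilter)

    # Expose the capsfilter's src pad on the bin through a ghost pad.
    srcbin.add_pad(Gst.GhostPad.new("src", capsfilter.get_static_pad("src")))
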
@@ -338,17 +344,15 @@ class FsUIAudioSource(FsUISource):
"An audio source"
def get_type(self):
- return farstream.MEDIA_TYPE_AUDIO
-
+ return Farstream.MediaType.AUDIO
+
def make_source(self):
- source = gst.element_factory_make("audiotestsrc")
- source.set_property("is-live", True)
- source.set_property("wave", 5)
+ AUDIOSOURCE = "audiotestsrc is-live=1 wave=1 ! volume name=v"
+ #AUDIOSOURCE = "pulsesrc name=v"
+ source = Gst.parse_bin_from_description(AUDIOSOURCE + " volume=" + str(INITIAL_VOLUME/100) + " ! level message=1 name=l", True)
+ self.volume = source.get_by_name("v")
+ self.level = source.get_by_name("l")
return source
- #return gst.element_factory_make("alsasrc")
- #return gst.element_factory_make("gconfaudiosrc")
-
-
class FsUISession:
"This is one session (audio or video depending on the source)"
@@ -356,86 +360,57 @@ class FsUISession:
def __init__(self, conference, source):
self.conference = conference
self.source = source
- self.streams = []
+ self.streams = weakref.WeakSet()
self.fssession = conference.new_session(source.get_type())
self.fssession.uisession = self
- if source.get_type() == farstream.MEDIA_TYPE_VIDEO:
- # We prefer H263-1998 because we know it works
- # We don't know if the others do work
- # We know H264 doesn't work for now or anything else
- # that needs to send config data
- self.fssession.set_codec_preferences( [ \
- farstream.Codec(farstream.CODEC_ID_ANY,
- "THEORA",
- farstream.MEDIA_TYPE_VIDEO,
- 90000),
- farstream.Codec(farstream.CODEC_ID_ANY,
- "H264",
- farstream.MEDIA_TYPE_VIDEO,
- 0),
- farstream.Codec(farstream.CODEC_ID_ANY,
- "H263-1998",
- farstream.MEDIA_TYPE_VIDEO,
- 0),
- farstream.Codec(farstream.CODEC_ID_ANY,
- "H263",
- farstream.MEDIA_TYPE_VIDEO,
- 0)
- ])
- elif source.get_type() == farstream.MEDIA_TYPE_AUDIO:
- self.fssession.set_codec_preferences( [ \
- farstream.Codec(farstream.CODEC_ID_ANY,
- "PCMA",
- farstream.MEDIA_TYPE_AUDIO,
- 0),
- farstream.Codec(farstream.CODEC_ID_ANY,
- "PCMU",
- farstream.MEDIA_TYPE_AUDIO,
- 0),
- # The gst speexenc element breaks timestamps
- farstream.Codec(farstream.CODEC_ID_DISABLE,
- "SPEEX",
- farstream.MEDIA_TYPE_AUDIO,
- 16000),
- # Sadly, vorbis is not currently compatible with live streaming :-(
- farstream.Codec(farstream.CODEC_ID_DISABLE,
- "VORBIS",
- farstream.MEDIA_TYPE_AUDIO,
- 0),
- ])
+ self.fssession.set_codec_preferences(
+ Farstream.utils_get_default_codec_preferences(conference))
+
+ self.queue = Gst.ElementFactory.make("queue", None)
+ self.source.pipeline.add(self.queue)
self.sourcepad = self.source.get_src_pad()
- self.sourcepad.link(self.fssession.get_property("sink-pad"))
+ self.queue.get_static_pad("src").link(self.fssession.get_property("sink-pad"))
+ self.queue.set_state(Gst.State.PLAYING)
+ self.sourcepad.link(self.queue.get_static_pad("sink"))
def __del__(self):
self.sourcepad(unlink)
self.source.put_src_pad(self.sourcepad)
- def __stream_finalized(self, s):
- self.streams.remove(s)
+ self.source.pipeline.remove(self.queue)
+ self.queue.set_state(Gst.State.NULL)
def new_stream(self, id, participant):
"Creates a new stream for a specific participant"
- transmitter_params = {}
+ transmitter_params = []
# If its video, we start at port 9078, to make it more easy
# to differentiate it in a tcpdump log
- if self.source.get_type() == farstream.MEDIA_TYPE_VIDEO and \
+ if self.source.get_type() == Farstream.MediaType.VIDEO and \
TRANSMITTER == "rawudp":
- cand = farstream.Candidate()
- cand.component_id = farstream.COMPONENT_RTP
- cand.port = 9078
- transmitter_params["preferred-local-candidates"] = [cand]
+ cand = Farstream.Candidate.new("",Farstream.ComponentType.RTP,
+ Farstream.CandidateType.HOST,
+ Farstream.NetworkProtocol.UDP,
+ "", 9078)
+ param = GObject.Parameter()
+ param.name = "preferred-local-candidates"
+ param.cand.set_boxed(cand)
+ transmitter_params.append(params)
+ print transmitter_params
+ print participant.fsparticipant
realstream = self.fssession.new_stream(participant.fsparticipant,
- farstream.DIRECTION_BOTH)
+ Farstream.StreamDirection.BOTH)
+ print TRANSMITTER
+ print transmitter_params
realstream.set_transmitter(TRANSMITTER, transmitter_params)
stream = FsUIStream(id, self, participant, realstream)
- self.streams.append(weakref.ref(stream, self.__stream_finalized))
+ self.streams.add(stream)
return stream
def dtmf_start(self, event):
if (event == "*"):
- event = farstream.DTMF_EVENT_STAR
+ event = Farstream.DTMFEvent.STAR
elif (event == "#"):
- event = farstream.DTMF_EVENT_POUND
+ event = Farstream.DTMFEvent.POUND
else:
event = int(event)
self.fssession.start_telephony_event(event, 2)
@@ -446,19 +421,15 @@ class FsUISession:
def codecs_changed(self):
"Callback from FsSession"
for s in self.streams:
- try:
- s().codecs_changed()
- except AttributeError:
- pass
+ s.codecs_changed()
def send_stream_codecs(self, codecs, sourcestream):
for s in self.streams:
- stream = s()
- if stream and stream is not sourcestream:
- stream.connect.send_codecs(stream.participant.id,
- sourcestream.id,
- codecs,
- sourcestream.participant.id)
+ if s is not sourcestream:
+ s.connect.send_codecs(s.participant.id,
+ sourcestream.id,
+ codecs,
+ sourcestream.participant.id)
class FsUIStream:
"One participant in one session"
@@ -475,6 +446,8 @@ class FsUIStream:
self.last_codecs = None
self.last_stream_codecs = None
self.candidates = []
+ self.sinks = []
+ self.sinkcount = 0
def local_candidates_prepared(self):
"Callback from FsStream"
@@ -490,10 +463,19 @@ class FsUIStream:
self.connect.send_candidate(self.participant.id, self.id, candidate)
def __src_pad_added(self, stream, pad, codec):
"Callback from FsStream"
- if self.session.source.get_type() == farstream.MEDIA_TYPE_VIDEO:
+ if self.session.source.get_type() == Farstream.MediaType.VIDEO:
self.participant.link_video_sink(pad)
else:
- self.participant.pipeline.link_audio_sink(pad)
+ sink = self.participant.pipeline.link_audio_sink(pad,
+ self.sinkcount)
+ self.sinkcount += 1
+ v = sink.get_by_name("v")
+ self.participant.volume_adjustment.connect("value-changed",
+ lambda adj, nonevalue:
+ v.set_property("volume",float(adj.props.value) / 100), None)
+ self.participant.mute_button.bind_property("active", v, "mute",
+ GObject.BindingFlags.SYNC_CREATE)
+ self.sinks.append(sink)
def candidate(self, candidate):
"Callback for the network object."
@@ -512,8 +494,8 @@ class FsUIStream:
for c in codecs:
print "Got remote codec from %s/%s %s" % \
(self.participant.id, self.id, c.to_string())
- oldcodecs = self.fsstream.get_property("remote-codecs")
- if oldcodecs == codecs:
+ oldcodecs = self.fsstream.props.remote_codecs
+ if Farstream.codec_list_are_equal(oldcodecs,codecs):
return
try:
self.fsstream.set_remote_codecs(codecs)
@@ -525,33 +507,43 @@ class FsUIStream:
def send_local_codecs(self):
"Callback for the network object."
+ print "send_local_codecs"
self.send_codecs = True
self.check_send_local_codecs()
def codecs_changed(self):
+ print "codecs changed"
self.check_send_local_codecs()
self.send_stream_codecs()
def check_send_local_codecs(self):
"Internal function to send our local codecs when they're ready"
+ print "check_send_local_codecs"
if not self.send_codecs:
+ print "Not ready to send the codecs"
return
- codecs = self.session.fssession.get_property("codecs")
+ codecs = self.session.fssession.props.codecs
if not codecs:
print "Codecs are not ready"
return
- if (codecs == self.last_codecs):
+ if not self.send_codecs and \
+ not self.session.fssession.codecs_need_resend(self.last_codecs,
+ codecs):
+ print "Codecs have not changed"
return
self.last_codecs = codecs
print "sending local codecs"
+ for c in codecs:
+ print "Sending local codec from %s %s" % \
+ (self.id, c.to_string())
self.connect.send_codecs(self.participant.id, self.id, codecs)
def send_stream_codecs(self):
if not self.connect.is_server:
return
- if not self.session.fssession.get_property("codecs"):
+ if not self.session.fssession.props.codecs:
return
- codecs = self.fsstream.get_property("negotiated-codecs")
+ codecs = self.fsstream.props.negotiated_codecs
if codecs:
self.session.send_stream_codecs(codecs, self)
@@ -564,7 +556,7 @@ class FsUIStream:
def send_codecs_to(self, participant):
- codecs = self.fsstream.get_property("negotiated-codecs")
+ codecs = self.fsstream.props.negotiated_codecs
print "sending stream %s codecs from %s to %s" % \
(self.id, self.participant.id, participant.id)
if codecs:
@@ -572,10 +564,13 @@ class FsUIStream:
self.participant.id)
-class FsUIParticipant:
+class FsUIParticipant (GObject.Object):
"Wraps one FsParticipant, is one user remote contact"
+
+ level = GObject.Property(type=float)
def __init__(self, connect, id, pipeline, mainui):
+ GObject.Object.__init__(self)
self.connect = connect
self.id = id
self.pipeline = pipeline
@@ -586,13 +581,13 @@ class FsUIParticipant:
self.make_widget()
self.streams = {}
if VIDEO:
- self.streams[int(farstream.MEDIA_TYPE_VIDEO)] = \
+ self.streams[int(Farstream.MediaType.VIDEO)] = \
pipeline.videosession.new_stream(
- int(farstream.MEDIA_TYPE_VIDEO), self)
+ int(Farstream.MediaType.VIDEO), self)
if AUDIO:
- self.streams[int(farstream.MEDIA_TYPE_AUDIO)] = \
+ self.streams[int(Farstream.MediaType.AUDIO)] = \
pipeline.audiosession.new_stream(
- int(farstream.MEDIA_TYPE_AUDIO), self)
+ int(Farstream.MediaType.AUDIO), self)
def candidate(self, media, candidate):
"Callback for the network object."
@@ -610,97 +605,262 @@ class FsUIParticipant:
def make_widget(self):
"Make the widget of the participant's video stream."
- gtk.gdk.threads_enter()
- self.builder = gtk.Builder()
+ Gdk.threads_enter()
+ self.builder = Gtk.Builder()
self.builder.add_from_file(builderprefix + "user-frame.ui")
self.userframe = self.builder.get_object("user_frame")
#self.builder.get_object("frame_label").set_text(self.cname)
self.builder.connect_signals(self)
- self.label = gtk.Label()
+ self.label = Gtk.Label()
self.label.set_alignment(0,0)
self.label.show()
self.mainui.hbox_add(self.userframe, self.label)
- gtk.gdk.threads_leave()
+ volume_scale = self.builder.get_object("volume_scale")
+ self.volume_adjustment = self.builder.get_object("volume_adjustment")
+ volume_scale.props.adjustment = self.volume_adjustment
+ self.mute_button = self.builder.get_object("mute_button")
+ self.mute_button.bind_property("active", volume_scale, "sensitive",
+ GObject.BindingFlags.SYNC_CREATE |
+ GObject.BindingFlags.INVERT_BOOLEAN)
+
+ def candidate(self, candidate):
+ "Callback for the network object."
+ self.candidates.append(candidate)
+ def candidates_done(self):
+ "Callback for the network object."
+ if TRANSMITTER == "rawudp":
+ self.fsstream.force_remote_candidates(self.candidates)
+ else:
+ self.fsstream.add_remote_candidates(self.candidates)
+ self.candidates = []
+ def codecs(self, codecs):
+ "Callback for the network object. Set the codecs"
+
+ print "Remote codecs"
+ for c in codecs:
+ print "Got remote codec from %s/%s %s" % \
+ (self.participant.id, self.id, c.to_string())
+ oldcodecs = self.fsstream.props.remote_codecs
+ if Farstream.codec_list_are_equal(oldcodecs,codecs):
+ return
+ try:
+ self.fsstream.set_remote_codecs(codecs)
+ except AttributeError:
+ print "Tried to set codecs with 0 codec"
+ self.send_local_codecs()
+ self.send_stream_codecs()
+
+
+ def send_local_codecs(self):
+ "Callback for the network object."
+ print "send_local_codecs"
+ self.send_codecs = True
+ self.check_send_local_codecs()
+
+ def codecs_changed(self):
+ print "codecs changed"
+ self.check_send_local_codecs()
+ self.send_stream_codecs()
+
+ def check_send_local_codecs(self):
+ "Internal function to send our local codecs when they're ready"
+ print "check_send_local_codecs"
+ if not self.send_codecs:
+ print "Not ready to send the codecs"
+ return
+ codecs = self.session.fssession.props.codecs
+ if not codecs:
+ print "Codecs are not ready"
+ return
+ if not self.send_codecs and \
+ not self.session.fssession.codecs_need_resend(self.last_codecs,
+ codecs):
+ print "Codecs have not changed"
+ return
+ self.last_codecs = codecs
+ print "sending local codecs"
+ for c in codecs:
+ print "Sending local codec from %s %s" % \
+ (self.id, c.to_string())
+ self.connect.send_codecs(self.participant.id, self.id, codecs)
+
+ def send_stream_codecs(self):
+ if not self.connect.is_server:
+ return
+ if not self.session.fssession.props.codecs:
+ return
+ codecs = self.fsstream.props.negotiated_codecs
+ if codecs:
+ self.session.send_stream_codecs(codecs, self)
+
+ def recv_codecs_changed(self, codecs):
+ self.participant.recv_codecs_changed()
+
+
+ def __remove_from_send_codecs_to(self, participant):
+ self.send_codecs_to.remote(participant)
+
+
+ def send_codecs_to(self, participant):
+ codecs = self.fsstream.props.negotiated_codecs
+ print "sending stream %s codecs from %s to %s" % \
+ (self.id, self.participant.id, participant.id)
+ if codecs:
+ participant.connect.send_codecs(participant.id, self.id, codecs,
+ self.participant.id)
+
+
+class FsUIParticipant (GObject.Object):
+ "Wraps one FsParticipant, is one user remote contact"
+
+ level = GObject.Property(type=float)
+
+ def __init__(self, connect, id, pipeline, mainui):
+ GObject.Object.__init__(self)
+ self.connect = connect
+ self.id = id
+ self.pipeline = pipeline
+ self.mainui = mainui
+ self.fsparticipant = pipeline.conf.new_participant()
+ self.outcv = threading.Condition()
+ self.funnel = None
+ self.make_widget()
+ self.streams = {}
+ print "NEW PARTICIPANT: ", self
+ if VIDEO:
+ self.streams[int(Farstream.MediaType.VIDEO)] = \
+ pipeline.videosession.new_stream(
+ int(Farstream.MediaType.VIDEO), self)
+ if AUDIO:
+ self.streams[int(Farstream.MediaType.AUDIO)] = \
+ pipeline.audiosession.new_stream(
+ int(Farstream.MediaType.AUDIO), self)
+
+ def candidate(self, media, candidate):
+ "Callback for the network object."
+ self.streams[media].candidate(candidate)
+ def candidates_done(self, media):
+ "Callback for the network object."
+ self.streams[media].candidates_done()
+ def codecs(self, media, codecs):
+ "Callback for the network object."
+ self.streams[media].codecs(codecs)
+ def send_local_codecs(self):
+ "Callback for the network object."
+ for id in self.streams:
+ self.streams[id].send_local_codecs()
- def exposed(self, widget, *args):
- """From the exposed signal, used to create the video sink
+ def make_widget(self):
+ "Make the widget of the participant's video stream."
+ Gdk.threads_enter()
+ self.builder = Gtk.Builder()
+ self.builder.add_from_file(builderprefix + "user-frame.ui")
+ self.userframe = self.builder.get_object("user_frame")
+ #self.builder.get_object("frame_label").set_text(self.cname)
+ self.builder.connect_signals(self)
+ self.label = Gtk.Label()
+ self.label.set_alignment(0,0)
+ self.label.show()
+ self.mainui.hbox_add(self.userframe, self.label)
+ volume_scale = self.builder.get_object("volume_scale")
+ self.volume_adjustment = self.builder.get_object("volume_adjustment")
+ volume_scale.props.adjustment = self.volume_adjustment
+ self.mute_button = self.builder.get_object("mute_button")
+ self.mute_button.bind_property("active", volume_scale, "sensitive",
+ GObject.BindingFlags.SYNC_CREATE |
+ GObject.BindingFlags.INVERT_BOOLEAN)
+ self.bind_property("level", volume_scale, "fill-level", 0)
+ Gdk.threads_leave()
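
The volume and mute widgets are wired up with GObject property bindings: "level" is a float property declared on the class, the mute button's "active" is bound to the scale's sensitivity with SYNC_CREATE | INVERT_BOOLEAN, and the participant's level feeds the scale's fill-level. A reduced sketch of the binding mechanism itself, independent of Gtk (Meter/Display are hypothetical stand-ins):

    from gi.repository import GObject

    class Meter(GObject.Object):
        # Property declared on the class, as FsUIParticipant does with "level".
        level = GObject.Property(type=float)

    class Display(GObject.Object):
        fill = GObject.Property(type=float)

    meter = Meter()
    display = Display()

    # SYNC_CREATE copies the current value right away, then keeps them in sync.
    meter.bind_property("level", display, "fill",
                        GObject.BindingFlags.SYNC_CREATE)

    meter.props.level = 42.0
    print(display.props.fill)   # -> 42.0
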
+
+
+ def realize(self, widget, *args):
+ """From the realize signal, used to create the video sink
The video sink will be created here, but will only be linked when the
pad arrives and link_video_sink() is called.
"""
+
if not VIDEO:
return
try:
- self.videosink.get_by_interface(gst.interfaces.XOverlay).expose()
+ self.videosink.get_by_interface(GstVideo.VideoOverlay).expose()
except AttributeError:
try:
self.outcv.acquire()
- self.videosink = make_video_sink(self.pipeline.pipeline,
- widget.window.xid,
- "uservideosink")
- self.pipeline.pipeline.add(self.videosink)
- self.funnel = gst.element_factory_make("funnel")
- self.pipeline.pipeline.add(self.funnel)
- self.funnel.link(self.videosink)
- self.havesize = self.videosink.get_pad("sink").add_buffer_probe(self.have_size)
-
- self.videosink.set_state(gst.STATE_PLAYING)
- self.funnel.set_state(gst.STATE_PLAYING)
+ self.xid = widget.get_window().get_xid()
self.outcv.notifyAll()
finally:
self.outcv.release()
- def have_size(self, pad, buffer):
+ def have_size(self, pad, info, nonedata):
"Callback on the first buffer to know the drawingarea size"
- x = buffer.caps[0]["width"]
- y = buffer.caps[0]["height"]
- gtk.gdk.threads_enter()
+ if (info.get_event().type != Gst.EventType.CAPS):
+ return Gst.PadProbeReturn.OK
+
+ caps = info.get_event().parse_caps()
+ x = caps.get_structure(0).get_int("width")[1]
+ y = caps.get_structure(0).get_int("height")[1]
+ Gdk.threads_enter()
self.builder.get_object("user_drawingarea").set_size_request(x,y)
- gtk.gdk.threads_leave()
- self.videosink.get_pad("sink").remove_buffer_probe(self.havesize)
- del self.havesize
- return True
-
-
+ Gdk.threads_leave()
+ return Gst.PadProbeReturn.OK
def link_video_sink(self, pad):
"""Link the video sink
- Wait for the funnnel for the video sink to be created, when it has been
- created, link it.
+ Wait for the xid to appear, when it is known, create the video sink
"""
+
+ print "LINK VIDEO SINK"
try:
self.outcv.acquire()
- while self.funnel is None:
+ while not hasattr(self, 'xid'):
self.outcv.wait()
- print >>sys.stderr, "LINKING VIDEO SINK"
- pad.link(self.funnel.get_pad("sink%d"))
finally:
self.outcv.release()
+ if self.xid is None:
+ return
+
+ if not hasattr(self, 'videosink'):
+ self.videosink = make_video_sink(self.pipeline.pipeline, self.xid,
+ "uservideosink")
+ self.pipeline.pipeline.add(self.videosink)
+ self.funnel = Gst.ElementFactory.make("funnel", None)
+ self.pipeline.pipeline.add(self.funnel)
+ self.funnel.link(self.videosink)
+ self.havesize = self.videosink.get_static_pad("sink").add_probe(Gst.PadProbeType.EVENT_DOWNSTREAM, self.have_size, None)
+
+ self.videosink.set_state(Gst.State.PLAYING)
+ self.funnel.set_state(Gst.State.PLAYING)
+
+ print >>sys.stderr, "LINKING VIDEO SINK"
+ pad.link(self.funnel.get_request_pad("sink_%u"))
def destroy(self):
if VIDEO:
try:
- self.videosink.get_pad("sink").disconnect_handler(self.havesize)
+ self.videosink.get_static_pad("sink").disconnect_handler(self.havesize)
pass
except AttributeError:
pass
- self.builder.get_object("user_drawingarea").disconnect_by_func(self.exposed)
+ self.streams = {}
+ self.builder.get_object("user_drawingarea").disconnect_by_func(self.realize)
self.streams = {}
self.outcv.acquire()
+ del self.xid
self.videosink.set_locked_state(True)
self.funnel.set_locked_state(True)
- self.videosink.set_state(gst.STATE_NULL)
- self.funnel.set_state(gst.STATE_NULL)
+ self.videosink.set_state(Gst.State.NULL)
+ self.funnel.set_state(Gst.State.NULL)
self.pipeline.pipeline.remove(self.videosink)
self.pipeline.pipeline.remove(self.funnel)
del self.videosink
del self.funnel
self.outcv.release()
- gtk.gdk.threads_enter()
+ Gdk.threads_enter()
self.userframe.destroy()
self.label.destroy()
- gtk.gdk.threads_leave()
+ Gdk.threads_leave()
def error(self):
"Callback for the network object."
@@ -712,7 +872,7 @@ class FsUIParticipant:
def recv_codecs_changed(self):
codecs = {}
for s in self.streams:
- codec = self.streams[s].fsstream.get_property("current-recv-codecs")
+ codec = self.streams[s].fsstream.props.current_recv_codecs
mediatype = self.streams[s].session.fssession.get_property("media-type")
if len(codec):
if mediatype in codecs:
@@ -741,21 +901,32 @@ class FsMainUI:
self.pipeline = FsUIPipeline()
self.pipeline.codecs_changed_audio = self.reset_audio_codecs
self.pipeline.codecs_changed_video = self.reset_video_codecs
- self.builder = gtk.Builder()
+ self.builder = Gtk.Builder()
self.builder.add_from_file(builderprefix + "main-window.ui")
self.builder.connect_signals(self)
self.mainwindow = self.builder.get_object("main_window")
self.audio_combobox = self.builder.get_object("audio_combobox")
self.video_combobox = self.builder.get_object("video_combobox")
- liststore = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_PYOBJECT)
+ volume_scale = self.builder.get_object("volume_scale")
+ self.volume_adjustment = Gtk.Adjustment(lower=0, upper=100, value=100)
+ volume_scale.props.adjustment = self.volume_adjustment
+ mute_button = self.builder.get_object("mute_button")
+ mute_button.bind_property("active", volume_scale, "sensitive",
+ GObject.BindingFlags.SYNC_CREATE |
+ GObject.BindingFlags.INVERT_BOOLEAN)
+ self.pipeline.bind_property("level", volume_scale, "fill-level",
+ GObject.BindingFlags.SYNC_CREATE)
+ self.volume_adjustment.connect("value-changed", lambda adj, nonevalue:
+ self.pipeline.set_volume(adj.props.value), None)
+ liststore = Gtk.ListStore(str, object)
self.audio_combobox.set_model(liststore)
- cell = gtk.CellRendererText()
+ cell = Gtk.CellRendererText()
self.audio_combobox.pack_start(cell, True)
self.audio_combobox.add_attribute(cell, 'text', 0)
self.reset_audio_codecs()
- liststore = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_PYOBJECT)
+ liststore = Gtk.ListStore(str, object)
self.video_combobox.set_model(liststore)
- cell = gtk.CellRendererText()
+ cell = Gtk.CellRendererText()
self.video_combobox.pack_start(cell, True)
self.video_combobox.add_attribute(cell, 'text', 0)
self.reset_video_codecs()
@@ -778,8 +949,9 @@ class FsMainUI:
def reset_codecs(self, combobox, fssession):
liststore = combobox.get_model()
current = fssession.get_property("current-send-codec")
- liststore.clear()
- for c in fssession.get_property("codecs-without-config"):
+ if liststore:
+ liststore.clear()
+ for c in fssession.props.codecs_without_config:
str = ("%s: %s/%s %s" % (c.id,
c.media_type.value_nick,
c.encoding_name,
@@ -813,22 +985,25 @@ class FsMainUI:
def video_combobox_changed_cb(self, combobox):
self.combobox_changed_cb(combobox, self.pipeline.videosession.fssession)
-
- def exposed(self, widget, *args):
- "Callback from the exposed event of the widget to make the preview sink"
+ def toggled_mute(self, button):
+ self.pipeline.set_mute(button.props.active)
+
+
+ def realize(self, widget, *args):
+ "Callback from the realize event of the widget to make the preview sink"
if not VIDEO:
return
try:
- self.preview.get_by_interface(gst.interfaces.XOverlay).expose()
+ self.preview.get_by_interface(GstVideo.VideoOverlay).expose()
except AttributeError:
- self.preview = self.pipeline.make_video_preview(widget.window.xid,
+ self.preview = self.pipeline.make_video_preview(widget.get_window().get_xid(),
self.newsize)
def newsize (self, x, y):
self.builder.get_object("preview_drawingarea").set_size_request(x,y)
def shutdown(self, widget=None):
- gtk.main_quit()
+ Gtk.main_quit()
def hbox_add(self, widget, label):
table = self.builder.get_object("users_table")
@@ -840,22 +1015,22 @@ class FsMainUI:
self.mainwindow.destroy()
def fatal_error(self, errormsg):
- gtk.gdk.threads_enter()
- dialog = gtk.MessageDialog(self.mainwindow,
- gtk.DIALOG_MODAL,
- gtk.MESSAGE_ERROR,
- gtk.BUTTONS_OK)
+ Gdk.threads_enter()
+ dialog = Gtk.MessageDialog(self.mainwindow,
+ Gtk.DialogFlags.MODAL,
+ Gtk.MessageType.ERROR,
+ Gtk.ButtonsType.OK)
dialog.set_markup(errormsg);
dialog.run()
dialog.destroy()
- gtk.main_quit()
- gtk.gdk.threads_leave()
+ Gtk.main_quit()
+ Gdk.threads_leave()
def show_dtmf(self, button):
try:
self.dtmf_builder.present()
except AttributeError:
- self.dtmf_builder = gtk.Builder()
+ self.dtmf_builder = Gtk.Builder()
self.dtmf_builder.add_from_file(builderprefix + "dtmf.ui")
self.dtmf_builder.connect_signals(self)
@@ -878,7 +1053,7 @@ class FsUIStartup:
"Displays the startup window and then creates the FsMainUI"
def __init__(self):
- self.builder = gtk.Builder()
+ self.builder = Gtk.Builder()
self.builder.add_from_file(builderprefix + "startup.ui")
self.dialog = self.builder.get_object("neworconnect_dialog")
self.builder.get_object("spinbutton_adjustment").set_value(9893)
@@ -896,10 +1071,10 @@ class FsUIStartup:
del self.dialog
del self.builder
except socket.error, e:
- dialog = gtk.MessageDialog(self.dialog,
- gtk.DIALOG_MODAL,
- gtk.MESSAGE_ERROR,
- gtk.BUTTONS_OK)
+ dialog = Gtk.MessageDialog(self.dialog,
+ Gtk.DialogFlags.MODAL,
+ Gtk.MessageType.ERROR,
+ Gtk.ButtonsType.OK)
dialog.set_markup("<b>Could not connect to %s %d</b>" % (ip,port))
dialog.format_secondary_markup(e[1])
dialog.run()
@@ -914,7 +1089,7 @@ class FsUIStartup:
def quit(self, widget):
if not self.acted:
- gtk.main_quit()
+ Gtk.main_quit()
@@ -925,7 +1100,10 @@ if __name__ == "__main__":
else:
CAMERA = None
- gobject.threads_init()
- gtk.gdk.threads_init()
+ GLib.threads_init()
+ Gdk.threads_init()
+ Gst.init(sys.argv)
startup = FsUIStartup()
- gtk.main()
+ Gtk.main()
+ del startup
+ gc.collect()
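
Start-up now initializes GLib/Gdk threading and GStreamer explicitly before the UI is built. A minimal skeleton of the same sequence (GLib.threads_init() is a no-op on modern GLib; Gdk.threads_init() enables the Gdk lock used by the threads_enter()/threads_leave() calls elsewhere in the script):

    import sys
    import gi
    gi.require_version('Gtk', '3.0')
    gi.require_version('Gst', '1.0')
    from gi.repository import Gdk, GLib, Gst, Gtk

    GLib.threads_init()     # kept for older GLib versions
    Gdk.threads_init()      # needed for Gdk.threads_enter()/threads_leave()
    Gst.init(sys.argv)      # lets GStreamer strip its own command-line options

    window = Gtk.Window(title="fs-gui skeleton")
    window.connect("destroy", Gtk.main_quit)
    window.show_all()
    Gtk.main()
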