author      Lionel Landwerlin <llandwerlin@gmail.com>    2014-05-26 23:39:55 +0100
committer   Lionel Landwerlin <llandwerlin@gmail.com>    2014-05-28 08:27:54 +0100
commit      61f1a5a00c500e212976a4f7f82076f21ebcdd7f (patch)
tree        354822533fee75a43b2797772efc3b51f589ea4b
parent      a4b6dd885961948f5e4bd98e9c0f86de851f95da (diff)
download    clutter-gst-61f1a5a00c500e212976a4f7f82076f21ebcdd7f.tar.gz

Drop Cogl-Gst dependency and reimport video sink
-rw-r--r--   clutter-gst.pc.in                           |    2
-rw-r--r--   clutter-gst/Makefile.am                     |   10
-rw-r--r--   clutter-gst/clutter-gst-auto-video-sink.c   |   24
-rw-r--r--   clutter-gst/clutter-gst-camera-manager.c    |    9
-rw-r--r--   clutter-gst/clutter-gst-camera.c            |   23
-rw-r--r--   clutter-gst/clutter-gst-content.c           |   59
-rw-r--r--   clutter-gst/clutter-gst-content.h           |    9
-rw-r--r--   clutter-gst/clutter-gst-playback.c          |  146
-rw-r--r--   clutter-gst/clutter-gst-player.c            |   26
-rw-r--r--   clutter-gst/clutter-gst-player.h            |   26
-rw-r--r--   clutter-gst/clutter-gst-plugin.c            |    9
-rw-r--r--   clutter-gst/clutter-gst-private.h           |   15
-rw-r--r--   clutter-gst/clutter-gst-types.c             |   22
-rw-r--r--   clutter-gst/clutter-gst-util.c              |   24
-rw-r--r--   clutter-gst/clutter-gst-video-sink.c        | 2317
-rw-r--r--   clutter-gst/clutter-gst-video-sink.h        |  121
-rw-r--r--   clutter-gst/clutter-gst.h                   |   25
-rw-r--r--   configure.ac                                |    1
-rw-r--r--   doc/reference/Makefile.am                   |    1
-rw-r--r--   examples/video-content.c                    |    3
-rw-r--r--   tests/test-alpha.c                          |    1
21 files changed, 2680 insertions, 193 deletions
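
The bulk of the diff below is a mechanical migration from the external cogl-gst types (CoglGstVideoSink, cogl_gst_video_sink_*) to in-tree equivalents (ClutterGstVideoSink, clutter_gst_video_sink_*), plus a constructor that no longer needs an explicit CoglContext. A minimal sketch of what the migration looks like from a caller's side (hypothetical application code, not part of the commit):

#include <clutter-gst/clutter-gst.h>

static ClutterGstVideoSink *
create_sink (void)
{
  /* Before this commit (cogl-gst API):
   *   sink = cogl_gst_video_sink_new (clutter_gst_get_cogl_context ());
   * After it, the sink is created without an explicit CoglContext: */
  return clutter_gst_video_sink_new ();
}
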
diff --git a/clutter-gst.pc.in b/clutter-gst.pc.in
index c76fb25..61f2b13 100644
--- a/clutter-gst.pc.in
+++ b/clutter-gst.pc.in
@@ -8,5 +8,5 @@ Description: Clutter GStreamer integration
Version: @VERSION@
Libs: -L${libdir} -lclutter-gst-@CLUTTER_GST_MAJORMINOR@
Cflags: -I${includedir}/clutter-gst-@CLUTTER_GST_API_VERSION@
-Requires: clutter-@CLUTTER_API_VERSION@ >= 1.10.0 cogl-gst-2.0-experimental gstreamer-1.0 gstreamer-base-1.0 gstreamer-plugins-base-1.0
+Requires: clutter-@CLUTTER_API_VERSION@ >= 1.10.0 cogl-2.0-experimental gstreamer-1.0 gstreamer-base-1.0 gstreamer-plugins-base-1.0
Requires.private: gio-2.0
diff --git a/clutter-gst/Makefile.am b/clutter-gst/Makefile.am
index 99aa350..cb78414 100644
--- a/clutter-gst/Makefile.am
+++ b/clutter-gst/Makefile.am
@@ -33,6 +33,7 @@ source_h = \
$(srcdir)/clutter-gst-aspectratio.h \
$(srcdir)/clutter-gst-crop.h \
$(srcdir)/clutter-gst-content.h \
+ $(srcdir)/clutter-gst-video-sink.h \
$(NULL)
source_priv_h = \
@@ -54,6 +55,7 @@ source_c = \
$(srcdir)/clutter-gst-aspectratio.c \
$(srcdir)/clutter-gst-crop.c \
$(srcdir)/clutter-gst-content.c \
+ $(srcdir)/clutter-gst-video-sink.c \
$(glib_enum_c) \
$(NULL)
@@ -116,7 +118,9 @@ libgstclutter_@CLUTTER_GST_MAJORMINOR@_la_SOURCES = \
plugin_LTLIBRARIES = libgstclutter-@CLUTTER_GST_MAJORMINOR@.la
-libgstclutter_@CLUTTER_GST_MAJORMINOR@_la_LIBADD = $(PLUGIN_LIBS) $(HW_LIBS) \
+libgstclutter_@CLUTTER_GST_MAJORMINOR@_la_LIBADD = \
+ $(PLUGIN_LIBS) \
+ $(HW_LIBS) \
libclutter-gst-@CLUTTER_GST_MAJORMINOR@.la
libgstclutter_@CLUTTER_GST_MAJORMINOR@_la_LDFLAGS = \
$(GL_LDFLAGS) \
@@ -141,7 +145,7 @@ ClutterGst-@CLUTTER_GST_API_VERSION@.gir: $(INTROSPECTION_SCANNER) libclutter-gs
--add-include-path=$(srcdir) --add-include=path=. \
--c-include="clutter-gst/clutter-gst.h" \
--include=GObject-2.0 \
- --include=Cogl-1.0 \
+ --include=Cogl-2.0 \
--include=Clutter-1.0 \
--include=GdkPixbuf-2.0 \
--add-include-path=`$(PKG_CONFIG) --variable=girdir gstreamer-1.0` \
@@ -159,7 +163,7 @@ ClutterGst-@CLUTTER_GST_API_VERSION@.gir: $(INTROSPECTION_SCANNER) libclutter-gs
--libtool="${LIBTOOL}" \
--output $@ \
--pkg gobject-2.0 \
- --pkg cogl-1.0 \
+ --pkg cogl-2.0-experimental \
--pkg clutter-1.0 \
--pkg gdk-pixbuf-2.0 \
--pkg gstreamer-1.0 \
diff --git a/clutter-gst/clutter-gst-auto-video-sink.c b/clutter-gst/clutter-gst-auto-video-sink.c
index 63aea78..a993bd6 100644
--- a/clutter-gst/clutter-gst-auto-video-sink.c
+++ b/clutter-gst/clutter-gst-auto-video-sink.c
@@ -59,11 +59,11 @@ G_DEFINE_TYPE (ClutterGstAutoVideoSink,
clutter_gst_auto_video_sink,
GST_TYPE_BIN)
-static GstStaticPadTemplate sink_template =
- GST_STATIC_PAD_TEMPLATE ("sink",
- GST_PAD_SINK,
- GST_PAD_ALWAYS,
- GST_STATIC_CAPS_ANY);
+/* static GstStaticPadTemplate sink_template = */
+/* GST_STATIC_PAD_TEMPLATE ("sink", */
+/* GST_PAD_SINK, */
+/* GST_PAD_ALWAYS, */
+/* GST_STATIC_CAPS_ANY); */
static ClutterInitError _clutter_initialized = CLUTTER_INIT_ERROR_UNKNOWN;
@@ -108,17 +108,19 @@ clutter_gst_auto_video_sink_class_init (ClutterGstAutoVideoSinkClass *klass)
CLUTTER_GST_TYPE_CONTENT,
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
- GstElement *cogl_sink = clutter_gst_create_video_sink ();
+ GstElement *clutter_sink = clutter_gst_create_video_sink ();
gst_element_class_add_pad_template (eklass,
- gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (cogl_sink), "sink"));
+ gst_element_class_get_pad_template (GST_ELEMENT_GET_CLASS (clutter_sink),
+ "sink"));
/* gst_static_pad_template_get (&sink_template)); */
- gst_element_class_set_static_metadata (eklass, "Clutter Auto video sink",
+ gst_element_class_set_static_metadata (eklass,
+ "Clutter Auto Video Sink",
"Sink/Video",
- "Video sink using a Clutter as output",
+ "Video sink using the Clutter scene graph as output",
"Lionel Landwerlin <lionel.g.landwerlin@linux.intel.com>");
- g_object_unref (cogl_sink);
+ g_object_unref (clutter_sink);
}
static void
@@ -259,7 +261,7 @@ clutter_gst_auto_video_sink_change_state (GstElement *element,
clutter_actor_show (stage);
}
clutter_gst_content_set_sink (CLUTTER_GST_CONTENT (sink->content),
- COGL_GST_VIDEO_SINK (sink->kid));
+ CLUTTER_GST_VIDEO_SINK (sink->kid));
break;
default:
break;
diff --git a/clutter-gst/clutter-gst-camera-manager.c b/clutter-gst/clutter-gst-camera-manager.c
index 5b6d741..915942c 100644
--- a/clutter-gst/clutter-gst-camera-manager.c
+++ b/clutter-gst/clutter-gst-camera-manager.c
@@ -197,10 +197,6 @@ add_device (ClutterGstCameraManager *self,
factory = gst_element_get_factory (videosrc);
- if (!priv->camera_devices)
- priv->camera_devices =
- g_ptr_array_new_with_free_func ((GDestroyNotify) g_object_unref);
-
device = g_object_new (CLUTTER_GST_TYPE_CAMERA_DEVICE,
"element-factory", factory,
"node", device_node,
@@ -221,7 +217,7 @@ remove_device (ClutterGstCameraManager *self,
const gchar *device_name)
{
ClutterGstCameraManagerPrivate *priv = self->priv;
- gint i;
+ guint i;
for (i = 0; i < priv->camera_devices->len; i++)
{
@@ -334,6 +330,9 @@ clutter_gst_camera_manager_init (ClutterGstCameraManager *self)
{
self->priv = GST_CAMERA_MANAGER_PRIVATE (self);
+ self->priv->camera_devices =
+ g_ptr_array_new_with_free_func ((GDestroyNotify) g_object_unref);
+
probe_camera_devices (self);
}
diff --git a/clutter-gst/clutter-gst-camera.c b/clutter-gst/clutter-gst-camera.c
index e35d250..4d6982a 100644
--- a/clutter-gst/clutter-gst-camera.c
+++ b/clutter-gst/clutter-gst-camera.c
@@ -67,7 +67,7 @@ struct _ClutterGstCameraPrivate
GstBus *bus;
GstElement *camerabin;
GstElement *camera_source;
- CoglGstVideoSink *video_sink;
+ ClutterGstVideoSink *video_sink;
/* video filter */
GstElement *video_filter_bin;
@@ -136,7 +136,7 @@ clutter_gst_camera_get_pipeline (ClutterGstPlayer *player)
return priv->camerabin;
}
-static CoglGstVideoSink *
+static ClutterGstVideoSink *
clutter_gst_camera_get_video_sink (ClutterGstPlayer *player)
{
ClutterGstCameraPrivate *priv = CLUTTER_GST_CAMERA (player)->priv;
@@ -729,25 +729,25 @@ setup_camera_source (ClutterGstCamera *self)
}
static void
-_new_frame_from_pipeline (CoglGstVideoSink *sink, ClutterGstCamera *self)
+_new_frame_from_pipeline (ClutterGstVideoSink *sink, ClutterGstCamera *self)
{
ClutterGstCameraPrivate *priv = self->priv;
clutter_gst_player_update_frame (CLUTTER_GST_PLAYER (self),
&priv->current_frame,
- cogl_gst_video_sink_get_pipeline (sink));
+ clutter_gst_video_sink_get_frame (sink));
}
static void
-_ready_from_pipeline (CoglGstVideoSink *sink, ClutterGstCamera *self)
+_ready_from_pipeline (ClutterGstVideoSink *sink, ClutterGstCamera *self)
{
g_signal_emit_by_name (self, "ready");
}
static void
-_pixel_aspect_ratio_changed (CoglGstVideoSink *sink,
- GParamSpec *spec,
- ClutterGstCamera *self)
+_pixel_aspect_ratio_changed (ClutterGstVideoSink *sink,
+ GParamSpec *spec,
+ ClutterGstCamera *self)
{
clutter_gst_frame_update_pixel_aspect_ratio (self->priv->current_frame, sink);
}
@@ -759,6 +759,8 @@ setup_pipeline (ClutterGstCamera *self)
const GPtrArray *camera_devices =
clutter_gst_camera_manager_get_camera_devices (clutter_gst_camera_manager_get_default ());
+
+
priv->camerabin = gst_element_factory_make ("camerabin", "camerabin");
if (G_UNLIKELY (!priv->camerabin))
{
@@ -778,7 +780,8 @@ setup_pipeline (ClutterGstCamera *self)
return FALSE;
}
- if (!clutter_gst_camera_set_camera_device (self,
+ if (camera_devices->len > 0 &&
+ !clutter_gst_camera_set_camera_device (self,
g_ptr_array_index (camera_devices, 0)))
{
g_critical ("Unable to select capture device");
@@ -787,7 +790,7 @@ setup_pipeline (ClutterGstCamera *self)
return FALSE;
}
- priv->video_sink = cogl_gst_video_sink_new (clutter_gst_get_cogl_context ());
+ priv->video_sink = clutter_gst_video_sink_new ();
g_signal_connect (priv->video_sink, "new-frame",
G_CALLBACK (_new_frame_from_pipeline), self);
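
As the hunks above show, handlers for the sink's "new-frame" signal now receive the frame through clutter_gst_video_sink_get_frame() instead of pulling a CoglPipeline out of the sink. A minimal sketch of an application-side handler under the new signature (names are illustrative):

#include <clutter-gst/clutter-gst.h>

/* Hypothetical callback for the sink's "new-frame" signal. */
static void
on_sink_new_frame (ClutterGstVideoSink *sink,
                   gpointer             user_data)
{
  /* The sink now hands out a ClutterGstFrame rather than a CoglPipeline. */
  ClutterGstFrame *frame = clutter_gst_video_sink_get_frame (sink);

  g_print ("new frame: %dx%d\n",
           (int) frame->resolution.width,
           (int) frame->resolution.height);
}
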
diff --git a/clutter-gst/clutter-gst-content.c b/clutter-gst/clutter-gst-content.c
index a280cb6..e23a31c 100644
--- a/clutter-gst/clutter-gst-content.c
+++ b/clutter-gst/clutter-gst-content.c
@@ -55,7 +55,7 @@ G_DEFINE_TYPE_WITH_CODE (ClutterGstContent,
struct _ClutterGstContentPrivate
{
- CoglGstVideoSink *sink;
+ ClutterGstVideoSink *sink;
ClutterGstPlayer *player;
ClutterGstFrame *current_frame;
};
@@ -84,14 +84,13 @@ static guint signals[LAST_SIGNAL];
static void
update_frame (ClutterGstContent *self,
- CoglPipeline *pipeline)
+ ClutterGstFrame *new_frame)
{
ClutterGstContentPrivate *priv = self->priv;
- ClutterGstFrame *old_frame, *new_frame;
+ ClutterGstFrame *old_frame;
old_frame = priv->current_frame;
- new_frame = clutter_gst_frame_new (pipeline);
- priv->current_frame = new_frame;
+ priv->current_frame = g_boxed_copy (CLUTTER_GST_TYPE_FRAME, new_frame);
if (old_frame)
{
@@ -113,26 +112,26 @@ update_frame (ClutterGstContent *self,
}
static void
-_new_frame_from_pipeline (CoglGstVideoSink *sink,
- ClutterGstContent *self)
+_new_frame_from_pipeline (ClutterGstVideoSink *sink,
+ ClutterGstContent *self)
{
- update_frame (self, cogl_gst_video_sink_get_pipeline (sink));
+ update_frame (self, clutter_gst_video_sink_get_frame (sink));
clutter_content_invalidate (CLUTTER_CONTENT (self));
}
static void
-_pixel_aspect_ratio_changed (CoglGstVideoSink *sink,
- GParamSpec *pspec,
- ClutterGstContent *self)
+_pixel_aspect_ratio_changed (ClutterGstVideoSink *sink,
+ GParamSpec *pspec,
+ ClutterGstContent *self)
{
clutter_gst_frame_update_pixel_aspect_ratio (self->priv->current_frame,
sink);
}
-static void content_set_sink (ClutterGstContent *self,
- CoglGstVideoSink *sink,
- gboolean set_from_player);
+static void content_set_sink (ClutterGstContent *self,
+ ClutterGstVideoSink *sink,
+ gboolean set_from_player);
static void
content_set_player (ClutterGstContent *self,
@@ -158,9 +157,9 @@ content_set_player (ClutterGstContent *self,
}
static void
-content_set_sink (ClutterGstContent *self,
- CoglGstVideoSink *sink,
- gboolean set_from_player)
+content_set_sink (ClutterGstContent *self,
+ ClutterGstVideoSink *sink,
+ gboolean set_from_player)
{
ClutterGstContentPrivate *priv = self->priv;
@@ -187,11 +186,11 @@ content_set_sink (ClutterGstContent *self,
g_signal_connect (priv->sink, "notify::pixel-aspect-ratio",
G_CALLBACK (_pixel_aspect_ratio_changed), self);
- if (cogl_gst_video_sink_is_ready (priv->sink))
+ if (clutter_gst_video_sink_is_ready (priv->sink))
{
- CoglPipeline *pipeline = cogl_gst_video_sink_get_pipeline (priv->sink);
- if (pipeline)
- update_frame (self, pipeline);
+ ClutterGstFrame *frame = clutter_gst_video_sink_get_frame (priv->sink);
+ if (frame)
+ update_frame (self, frame);
}
}
@@ -364,7 +363,7 @@ clutter_gst_content_class_init (ClutterGstContentClass *klass)
g_param_spec_object ("video-sink",
"Cogl Video Sink",
"Cogl Video Sink",
- COGL_GST_TYPE_VIDEO_SINK,
+ CLUTTER_GST_TYPE_VIDEO_SINK,
G_PARAM_STATIC_STRINGS | G_PARAM_READWRITE);
g_object_class_install_properties (gobject_class, PROP_LAST, props);
@@ -397,7 +396,7 @@ clutter_gst_content_init (ClutterGstContent *self)
self->priv = priv = CLUTTER_GST_CONTENT_GET_PRIVATE (self);
content_set_sink (self,
- COGL_GST_VIDEO_SINK (clutter_gst_create_video_sink ()),
+ CLUTTER_GST_VIDEO_SINK (clutter_gst_create_video_sink ()),
FALSE);
}
@@ -422,7 +421,7 @@ clutter_gst_content_new (void)
* Since: 3.0
*/
ClutterContent *
-clutter_gst_content_new_with_sink (CoglGstVideoSink *sink)
+clutter_gst_content_new_with_sink (ClutterGstVideoSink *sink)
{
return g_object_new (CLUTTER_GST_TYPE_CONTENT,
"video-sink", sink,
@@ -449,11 +448,11 @@ clutter_gst_content_get_frame (ClutterGstContent *self)
* clutter_gst_content_get_sink:
* @self: A #ClutterGstContent
*
- * Returns: (transfer none): The #CoglGstVideoSink currently attached to @self.
+ * Returns: (transfer none): The #ClutterGstVideoSink currently attached to @self.
*
* Since: 3.0
*/
-CoglGstVideoSink *
+ClutterGstVideoSink *
clutter_gst_content_get_sink (ClutterGstContent *self)
{
g_return_val_if_fail (CLUTTER_GST_IS_CONTENT (self), NULL);
@@ -464,16 +463,16 @@ clutter_gst_content_get_sink (ClutterGstContent *self)
/**
* clutter_gst_content_set_sink:
* @self: A #ClutterGstContent
- * @sink: A #CoglGstVideoSink or %NULL
+ * @sink: A #ClutterGstVideoSink or %NULL
*
* Since: 3.0
*/
void
-clutter_gst_content_set_sink (ClutterGstContent *self,
- CoglGstVideoSink *sink)
+clutter_gst_content_set_sink (ClutterGstContent *self,
+ ClutterGstVideoSink *sink)
{
g_return_if_fail (CLUTTER_GST_IS_CONTENT (self));
- g_return_if_fail (sink == NULL || COGL_GST_IS_VIDEO_SINK (sink));
+ g_return_if_fail (sink == NULL || CLUTTER_GST_IS_VIDEO_SINK (sink));
content_set_sink (self, sink, FALSE);
}
diff --git a/clutter-gst/clutter-gst-content.h b/clutter-gst/clutter-gst-content.h
index 4b13f97..f3fb916 100644
--- a/clutter-gst/clutter-gst-content.h
+++ b/clutter-gst/clutter-gst-content.h
@@ -26,7 +26,6 @@
#include <glib-object.h>
-#include <cogl-gst/cogl-gst.h>
#include <clutter/clutter.h>
#include <clutter-gst/clutter-gst-player.h>
@@ -80,14 +79,14 @@ GType clutter_gst_content_get_type (void) G_GNUC_CONST;
ClutterContent * clutter_gst_content_new (void);
-ClutterContent * clutter_gst_content_new_with_sink (CoglGstVideoSink *sink);
+ClutterContent * clutter_gst_content_new_with_sink (ClutterGstVideoSink *sink);
ClutterGstFrame * clutter_gst_content_get_frame (ClutterGstContent *self);
-void clutter_gst_content_set_sink (ClutterGstContent *self,
- CoglGstVideoSink *sink);
+void clutter_gst_content_set_sink (ClutterGstContent *self,
+ ClutterGstVideoSink *sink);
-CoglGstVideoSink * clutter_gst_content_get_sink (ClutterGstContent *self);
+ClutterGstVideoSink * clutter_gst_content_get_sink (ClutterGstContent *self);
ClutterGstPlayer * clutter_gst_content_get_player (ClutterGstContent *self);
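
With the updated header above, clutter_gst_content_new_with_sink() and clutter_gst_content_set_sink() now deal in ClutterGstVideoSink. A minimal sketch of wiring a sink into a ClutterGstContent and painting it through an actor (hypothetical application code):

#include <clutter-gst/clutter-gst.h>

/* Hypothetical helper: wrap a sink in a ClutterGstContent and attach it
 * to an actor so the frames are painted by the scene graph. */
static void
attach_video_to_actor (ClutterActor *actor)
{
  ClutterGstVideoSink *sink =
    CLUTTER_GST_VIDEO_SINK (clutter_gst_create_video_sink ());
  ClutterContent *content = clutter_gst_content_new_with_sink (sink);

  clutter_actor_set_content (actor, content);
}
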
diff --git a/clutter-gst/clutter-gst-playback.c b/clutter-gst/clutter-gst-playback.c
index 54b6824..417e858 100644
--- a/clutter-gst/clutter-gst-playback.c
+++ b/clutter-gst/clutter-gst-playback.c
@@ -52,7 +52,6 @@
#include <string.h>
#include <gio/gio.h>
-#include <cogl-gst/cogl-gst.h>
#include <gst/video/video.h>
#include <gst/tag/tag.h>
#include <gst/audio/streamvolume.h>
@@ -123,7 +122,9 @@ struct _ClutterGstPlaybackPrivate
{
GstElement *pipeline;
GstBus *bus;
- CoglGstVideoSink *video_sink;
+ ClutterGstVideoSink *video_sink;
+ GArray *gst_pipe_sigs;
+ GArray *gst_bus_sigs;
ClutterGstFrame *current_frame;
@@ -1426,7 +1427,7 @@ clutter_gst_playback_get_pipeline (ClutterGstPlayer *self)
return priv->pipeline;
}
-static CoglGstVideoSink *
+static ClutterGstVideoSink *
clutter_gst_playback_get_video_sink (ClutterGstPlayer *self)
{
ClutterGstPlaybackPrivate *priv = CLUTTER_GST_PLAYBACK (self)->priv;
@@ -1655,6 +1656,7 @@ static void
clutter_gst_playback_dispose (GObject *object)
{
ClutterGstPlaybackPrivate *priv = CLUTTER_GST_PLAYBACK (object)->priv;
+ guint i;
if (priv->tick_timeout_id)
{
@@ -1670,12 +1672,18 @@ clutter_gst_playback_dispose (GObject *object)
if (priv->bus)
{
+ for (i = 0; i < priv->gst_bus_sigs->len; i++)
+ g_signal_handler_disconnect (priv->bus,
+ g_array_index (priv->gst_bus_sigs, gulong, i));
gst_bus_remove_signal_watch (priv->bus);
priv->bus = NULL;
}
if (priv->pipeline)
{
+ for (i = 0; i < priv->gst_pipe_sigs->len; i++)
+ g_signal_handler_disconnect (priv->pipeline,
+ g_array_index (priv->gst_pipe_sigs, gulong, i));
gst_element_set_state (priv->pipeline, GST_STATE_NULL);
g_clear_object (&priv->pipeline);
}
@@ -1940,25 +1948,25 @@ clutter_gst_playback_class_init (ClutterGstPlaybackClass *klass)
}
static void
-_new_frame_from_pipeline (CoglGstVideoSink *sink, ClutterGstPlayback *self)
+_new_frame_from_pipeline (ClutterGstVideoSink *sink, ClutterGstPlayback *self)
{
ClutterGstPlaybackPrivate *priv = self->priv;
clutter_gst_player_update_frame (CLUTTER_GST_PLAYER (self),
&priv->current_frame,
- cogl_gst_video_sink_get_pipeline (sink));
+ clutter_gst_video_sink_get_frame (sink));
}
static void
-_ready_from_pipeline (CoglGstVideoSink *sink, ClutterGstPlayback *self)
+_ready_from_pipeline (ClutterGstVideoSink *sink, ClutterGstPlayback *self)
{
g_signal_emit_by_name (self, "ready");
}
static void
-_pixel_aspect_ratio_changed (CoglGstVideoSink *sink,
- GParamSpec *spec,
- ClutterGstPlayback *self)
+_pixel_aspect_ratio_changed (ClutterGstVideoSink *sink,
+ GParamSpec *spec,
+ ClutterGstPlayback *self)
{
clutter_gst_frame_update_pixel_aspect_ratio (self->priv->current_frame, sink);
}
@@ -1976,7 +1984,7 @@ get_pipeline (ClutterGstPlayback *self)
return NULL;
}
- priv->video_sink = cogl_gst_video_sink_new (clutter_gst_get_cogl_context ());
+ priv->video_sink = clutter_gst_video_sink_new ();
g_signal_connect (priv->video_sink, "new-frame",
G_CALLBACK (_new_frame_from_pipeline), self);
@@ -1993,6 +2001,18 @@ get_pipeline (ClutterGstPlayback *self)
return pipeline;
}
+#define connect_signal_custom(store, object, signal, callback, data) \
+ do { \
+ gulong s = g_signal_connect (object, signal, callback, data); \
+ g_array_append_val (store, s); \
+ } while (0)
+
+#define connect_object_custom(store, object, signal, callback, data, flags) \
+ do { \
+ gulong s = g_signal_connect_object (object, signal, callback, data, flags); \
+ g_array_append_val (store, s); \
+ } while (0)
+
static void
clutter_gst_playback_init (ClutterGstPlayback *self)
{
@@ -2000,6 +2020,9 @@ clutter_gst_playback_init (ClutterGstPlayback *self)
self->priv = priv = GST_PLAYBACK_PRIVATE (self);
+ priv->gst_pipe_sigs = g_array_new (FALSE, FALSE, sizeof (gulong));
+ priv->gst_bus_sigs = g_array_new (FALSE, FALSE, sizeof (gulong));
+
priv->is_idle = TRUE;
priv->in_seek = FALSE;
priv->is_changing_uri = FALSE;
@@ -2010,8 +2033,9 @@ clutter_gst_playback_init (ClutterGstPlayback *self)
priv->current_frame = clutter_gst_create_blank_frame (NULL);
- g_signal_connect (priv->pipeline, "notify::source",
- G_CALLBACK (on_source_changed), self);
+ connect_signal_custom (priv->gst_pipe_sigs, priv->pipeline,
+ "notify::source",
+ G_CALLBACK (on_source_changed), self);
/* We default to not playing until someone calls set_playing(TRUE) */
priv->target_state = GST_STATE_PAUSED;
@@ -2023,48 +2047,62 @@ clutter_gst_playback_init (ClutterGstPlayback *self)
gst_bus_add_signal_watch (priv->bus);
- g_signal_connect_object (priv->bus, "message::error",
- G_CALLBACK (bus_message_error_cb),
- self, 0);
- g_signal_connect_object (priv->bus, "message::eos",
- G_CALLBACK (bus_message_eos_cb),
- self, 0);
- g_signal_connect_object (priv->bus, "message::buffering",
- G_CALLBACK (bus_message_buffering_cb),
- self, 0);
- g_signal_connect_object (priv->bus, "message::duration-changed",
- G_CALLBACK (bus_message_duration_changed_cb),
- self, 0);
- g_signal_connect_object (priv->bus, "message::state-changed",
- G_CALLBACK (bus_message_state_change_cb),
- self, 0);
- g_signal_connect_object (priv->bus, "message::async-done",
- G_CALLBACK (bus_message_async_done_cb),
- self, 0);
-
- g_signal_connect (priv->pipeline, "notify::volume",
- G_CALLBACK (on_volume_changed),
- self);
-
- g_signal_connect (priv->pipeline, "audio-changed",
- G_CALLBACK (on_audio_changed),
- self);
- g_signal_connect (priv->pipeline, "audio-tags-changed",
- G_CALLBACK (on_audio_tags_changed),
- self);
- g_signal_connect (priv->pipeline, "notify::current-audio",
- G_CALLBACK (on_current_audio_changed),
- self);
-
- g_signal_connect (priv->pipeline, "text-changed",
- G_CALLBACK (on_text_changed),
- self);
- g_signal_connect (priv->pipeline, "text-tags-changed",
- G_CALLBACK (on_text_tags_changed),
- self);
- g_signal_connect (priv->pipeline, "notify::current-text",
- G_CALLBACK (on_current_text_changed),
- self);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::error",
+ G_CALLBACK (bus_message_error_cb),
+ self, 0);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::eos",
+ G_CALLBACK (bus_message_eos_cb),
+ self, 0);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::buffering",
+ G_CALLBACK (bus_message_buffering_cb),
+ self, 0);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::duration-changed",
+ G_CALLBACK (bus_message_duration_changed_cb),
+ self, 0);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::state-changed",
+ G_CALLBACK (bus_message_state_change_cb),
+ self, 0);
+ connect_object_custom (priv->gst_bus_sigs,
+ priv->bus, "message::async-done",
+ G_CALLBACK (bus_message_async_done_cb),
+ self, 0);
+
+
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "notify::volume",
+ G_CALLBACK (on_volume_changed),
+ self);
+
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "audio-changed",
+ G_CALLBACK (on_audio_changed),
+ self);
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "audio-tags-changed",
+ G_CALLBACK (on_audio_tags_changed),
+ self);
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "notify::current-audio",
+ G_CALLBACK (on_current_audio_changed),
+ self);
+
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "text-changed",
+ G_CALLBACK (on_text_changed),
+ self);
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "text-tags-changed",
+ G_CALLBACK (on_text_tags_changed),
+ self);
+ connect_signal_custom (priv->gst_pipe_sigs,
+ priv->pipeline, "notify::current-text",
+ G_CALLBACK (on_current_text_changed),
+ self);
#if defined(CLUTTER_WINDOWING_X11) && defined (HAVE_HW_DECODER_SUPPORT)
if (clutter_check_windowing_backend (CLUTTER_WINDOWING_X11))
diff --git a/clutter-gst/clutter-gst-player.c b/clutter-gst/clutter-gst-player.c
index 0d8ef2b..cca31bc 100644
--- a/clutter-gst/clutter-gst-player.c
+++ b/clutter-gst/clutter-gst-player.c
@@ -202,9 +202,9 @@ clutter_gst_player_default_init (ClutterGstPlayerIface *iface)
* clutter_gst_player_get_frame:
* @self: a #ClutterGstPlayer
*
- * Retrieves the #CoglHandle of the last frame produced by @self.
+ * Retrieves the #ClutterGstFrame of the last frame produced by @self.
*
- * Return value: (transfer none): the #CoglHandle of the last frame.
+ * Return value: (transfer none): the #ClutterGstFrame of the last frame.
*
* Since: 3.0
*/
@@ -213,7 +213,7 @@ clutter_gst_player_get_frame (ClutterGstPlayer *self)
{
ClutterGstPlayerIface *iface;
- g_return_val_if_fail (CLUTTER_GST_IS_PLAYER (self), COGL_INVALID_HANDLE);
+ g_return_val_if_fail (CLUTTER_GST_IS_PLAYER (self), NULL);
iface = CLUTTER_GST_PLAYER_GET_INTERFACE (self);
@@ -247,13 +247,13 @@ clutter_gst_player_get_pipeline (ClutterGstPlayer *self)
* clutter_gst_player_get_video_sink:
* @self: a #ClutterGstPlayer
*
- * Retrieves the #CoglGstVideoSink used by the @self.
+ * Retrieves the #ClutterGstVideoSink used by the @self.
*
- * Return value: (transfer none): the #CoglGstVideoSink element used by the player
+ * Return value: (transfer none): the #ClutterGstVideoSink element used by the player
*
* Since: 3.0
*/
-CoglGstVideoSink *
+ClutterGstVideoSink *
clutter_gst_player_get_video_sink (ClutterGstPlayer *self)
{
ClutterGstPlayerIface *iface;
@@ -387,19 +387,17 @@ clutter_gst_player_get_idle (ClutterGstPlayer *self)
void
clutter_gst_player_update_frame (ClutterGstPlayer *player,
ClutterGstFrame **frame,
- CoglPipeline *pipeline)
+ ClutterGstFrame *new_frame)
{
ClutterGstFrame *old_frame = *frame;
- ClutterGstFrame *new_frame = clutter_gst_frame_new (pipeline);
-
- *frame = new_frame;
- new_frame->resolution.par_n = old_frame->resolution.par_n;
- new_frame->resolution.par_d = old_frame->resolution.par_d;
+ *frame = g_boxed_copy (CLUTTER_GST_TYPE_FRAME, new_frame);
if (old_frame == NULL ||
new_frame->resolution.width != old_frame->resolution.width ||
- new_frame->resolution.height != old_frame->resolution.height)
+ new_frame->resolution.height != old_frame->resolution.height ||
+ new_frame->resolution.par_n != old_frame->resolution.par_n ||
+ new_frame->resolution.par_d != old_frame->resolution.par_d)
{
g_signal_emit (player, signals[SIZE_CHANGE], 0,
new_frame->resolution.width,
@@ -414,7 +412,7 @@ clutter_gst_player_update_frame (ClutterGstPlayer *player,
void
clutter_gst_frame_update_pixel_aspect_ratio (ClutterGstFrame *frame,
- CoglGstVideoSink *sink)
+ ClutterGstVideoSink *sink)
{
GValue value = G_VALUE_INIT;
diff --git a/clutter-gst/clutter-gst-player.h b/clutter-gst/clutter-gst-player.h
index 7855c97..0f953c3 100644
--- a/clutter-gst/clutter-gst-player.h
+++ b/clutter-gst/clutter-gst-player.h
@@ -40,9 +40,9 @@
#include <glib-object.h>
#include <gst/gst.h>
-#include <cogl-gst/cogl-gst.h>
#include <clutter-gst/clutter-gst-types.h>
+#include <clutter-gst/clutter-gst-video-sink.h>
G_BEGIN_DECLS
@@ -78,7 +78,7 @@ typedef struct _ClutterGstPlayerIfacePrivate ClutterGstPlayerIfacePrivate;
* ClutterGstPlayerIface:
* @get_frame: virtual function; returns the current visible frame
* @get_pipeline: virtual function; returns the current GStreamer pipeline
- * @get_video_sink: virtual function; returns the current Cogl video sink
+ * @get_video_sink: virtual function; returns the current ClutterGst video sink
* @get_idle: virtual function; returns whether the player is currently in idle state
* @get_audio_volume: virtual function; returns the current audio volume
* @set_audio_volume: virtual function; sets the audio volume
@@ -102,19 +102,19 @@ struct _ClutterGstPlayerIface
ClutterGstPlayerIfacePrivate *priv;
/*< public >*/
- ClutterGstFrame * (* get_frame) (ClutterGstPlayer *self);
- GstElement * (* get_pipeline) (ClutterGstPlayer *self);
- CoglGstVideoSink *(* get_video_sink) (ClutterGstPlayer *self);
+ ClutterGstFrame *(* get_frame) (ClutterGstPlayer *self);
+ GstElement * (* get_pipeline) (ClutterGstPlayer *self);
+ ClutterGstVideoSink *(* get_video_sink) (ClutterGstPlayer *self);
- gboolean (* get_idle) (ClutterGstPlayer *self);
+ gboolean (* get_idle) (ClutterGstPlayer *self);
- gdouble (* get_audio_volume) (ClutterGstPlayer *self);
- void (* set_audio_volume) (ClutterGstPlayer *self,
- gdouble volume);
+ gdouble (* get_audio_volume) (ClutterGstPlayer *self);
+ void (* set_audio_volume) (ClutterGstPlayer *self,
+ gdouble volume);
- gboolean (* get_playing) (ClutterGstPlayer *self);
- void (* set_playing) (ClutterGstPlayer *self,
- gboolean playing);
+ gboolean (* get_playing) (ClutterGstPlayer *self);
+ void (* set_playing) (ClutterGstPlayer *self,
+ gboolean playing);
/*< private >*/
gpointer _padding_vfuncs[16];
@@ -140,7 +140,7 @@ ClutterGstFrame * clutter_gst_player_get_frame (ClutterGstPlayer
GstElement * clutter_gst_player_get_pipeline (ClutterGstPlayer *self);
-CoglGstVideoSink * clutter_gst_player_get_video_sink (ClutterGstPlayer *self);
+ClutterGstVideoSink * clutter_gst_player_get_video_sink (ClutterGstPlayer *self);
gboolean clutter_gst_player_get_idle (ClutterGstPlayer *self);
diff --git a/clutter-gst/clutter-gst-plugin.c b/clutter-gst/clutter-gst-plugin.c
index a4e622d..2b348d5 100644
--- a/clutter-gst/clutter-gst-plugin.c
+++ b/clutter-gst/clutter-gst-plugin.c
@@ -36,8 +36,10 @@
#endif
#include "clutter-gst-auto-video-sink.h"
+#include "clutter-gst-video-sink.h"
GST_DEBUG_CATEGORY (clutter_gst_auto_video_sink_debug);
+GST_DEBUG_CATEGORY (clutter_gst_video_sink_debug);
/* entry point to initialize the plug-in
* initialize the plug-in itself
@@ -66,6 +68,13 @@ plugin_init (GstPlugin *plugin)
if (!ret)
return FALSE;
+ ret = gst_element_register (plugin,
+ "cluttervideosink",
+ GST_RANK_NONE,
+ CLUTTER_GST_TYPE_VIDEO_SINK);
+ if (!ret)
+ return FALSE;
+
return TRUE;
}
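
Since the plugin above registers the sink as an ordinary GStreamer element named "cluttervideosink", it can also be instantiated through the element factory and handed to playbin. A minimal sketch, assuming the plugin is installed and registered:

#include <gst/gst.h>

/* Hypothetical: let playbin render through the newly registered element. */
static GstElement *
make_playbin_with_clutter_sink (const gchar *uri)
{
  GstElement *playbin = gst_element_factory_make ("playbin", NULL);
  GstElement *sink = gst_element_factory_make ("cluttervideosink", NULL);

  g_object_set (playbin, "uri", uri, "video-sink", sink, NULL);

  return playbin;
}
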
diff --git a/clutter-gst/clutter-gst-private.h b/clutter-gst/clutter-gst-private.h
index 6483329..25dcf5c 100644
--- a/clutter-gst/clutter-gst-private.h
+++ b/clutter-gst/clutter-gst-private.h
@@ -27,9 +27,9 @@
#define __CLUTTER_GST_PRIVATE_H__
#include <glib.h>
-#include "clutter-gst.h"
+#include <gst/video/video.h>
-#include <cogl-gst/cogl-gst.h>
+#include "clutter-gst.h"
G_BEGIN_DECLS
@@ -66,16 +66,19 @@ _internal_plugin_init (GstPlugin *plugin);
CoglContext *clutter_gst_get_cogl_context (void);
-ClutterGstFrame *clutter_gst_frame_new (CoglPipeline *pipeline);
+ClutterGstFrame *clutter_gst_frame_new (void);
ClutterGstFrame *clutter_gst_create_blank_frame (const ClutterColor *color);
void clutter_gst_player_update_frame (ClutterGstPlayer *player,
ClutterGstFrame **frame,
- CoglPipeline *pipeline);
+ ClutterGstFrame *new_frame);
+
+void clutter_gst_frame_update_pixel_aspect_ratio (ClutterGstFrame *frame,
+ ClutterGstVideoSink *sink);
-void clutter_gst_frame_update_pixel_aspect_ratio (ClutterGstFrame *frame,
- CoglGstVideoSink *sink);
+void clutter_gst_video_resolution_from_video_info (ClutterGstVideoResolution *resolution,
+ GstVideoInfo *info);
G_END_DECLS
diff --git a/clutter-gst/clutter-gst-types.c b/clutter-gst/clutter-gst-types.c
index ebab80e..6818ad5 100644
--- a/clutter-gst/clutter-gst-types.c
+++ b/clutter-gst/clutter-gst-types.c
@@ -28,20 +28,9 @@
#include "clutter-gst-types.h"
ClutterGstFrame *
-clutter_gst_frame_new (CoglPipeline *pipeline)
+clutter_gst_frame_new (void)
{
- ClutterGstFrame *frame = g_slice_new0 (ClutterGstFrame);
- CoglTexture *texture;
-
- frame->pipeline = cogl_object_ref (pipeline);
- texture = cogl_pipeline_get_layer_texture (pipeline, 0);
-
- frame->resolution.width = cogl_texture_get_width (texture);
- frame->resolution.height = cogl_texture_get_height (texture);
- frame->resolution.par_n = 1;
- frame->resolution.par_d = 1;
-
- return frame;
+ return g_slice_new0 (ClutterGstFrame);
}
static gpointer
@@ -52,7 +41,7 @@ clutter_gst_frame_copy (gpointer data)
ClutterGstFrame *frame = g_slice_dup (ClutterGstFrame, data);
if (frame->pipeline != COGL_INVALID_HANDLE)
- frame->pipeline = cogl_handle_ref (frame->pipeline);
+ frame->pipeline = cogl_object_ref (frame->pipeline);
return frame;
}
@@ -68,7 +57,10 @@ clutter_gst_frame_free (gpointer data)
ClutterGstFrame *frame = (ClutterGstFrame *) data;
if (frame->pipeline != COGL_INVALID_HANDLE)
- cogl_handle_unref (frame->pipeline);
+ {
+ cogl_object_unref (frame->pipeline);
+ frame->pipeline = COGL_INVALID_HANDLE;
+ }
g_slice_free (ClutterGstFrame, frame);
}
}
diff --git a/clutter-gst/clutter-gst-util.c b/clutter-gst/clutter-gst-util.c
index 2f1ed8b..fb25abb 100644
--- a/clutter-gst/clutter-gst-util.c
+++ b/clutter-gst/clutter-gst-util.c
@@ -242,7 +242,7 @@ clutter_gst_init_with_args (int *argc,
/**
* clutter_gst_create_video_sink:
*
- * Creates a new #CoglGstVideoSink initialized with Clutter's Cogl context.
+ * Creates a new #ClutterGstVideoSink initialized with Clutter's Cogl context.
*
* Return value: (transfer full): the newly created #CoglGstVideoSink.
*
@@ -251,7 +251,7 @@ clutter_gst_init_with_args (int *argc,
GstElement *
clutter_gst_create_video_sink (void)
{
- return GST_ELEMENT (cogl_gst_video_sink_new (clutter_gst_get_cogl_context ()));
+ return GST_ELEMENT (clutter_gst_video_sink_new ());
}
/**/
@@ -266,8 +266,7 @@ ClutterGstFrame *
clutter_gst_create_blank_frame (const ClutterColor *color)
{
CoglTexture *texture;
- CoglPipeline *pipeline;
- ClutterGstFrame *frame;
+ ClutterGstFrame *frame = clutter_gst_frame_new ();
ClutterColor black_color = { 0x0, 0x0, 0x0, 0xff };
const guint8 *color_ptr = color != NULL ?
(const guint8 *) color : (const guint8 *) &black_color;
@@ -277,13 +276,20 @@ clutter_gst_create_blank_frame (const ClutterColor *color)
COGL_PIXEL_FORMAT_RGBA_8888,
1,
color_ptr);
- pipeline = cogl_pipeline_new (clutter_gst_get_cogl_context ());
- cogl_pipeline_set_layer_texture (pipeline, 0, texture);
+ frame->pipeline = cogl_pipeline_new (clutter_gst_get_cogl_context ());
+ cogl_pipeline_set_layer_texture (frame->pipeline, 0, texture);
- frame = clutter_gst_frame_new (pipeline);
-
- cogl_object_unref (pipeline);
cogl_object_unref (texture);
return frame;
}
+
+void
+clutter_gst_video_resolution_from_video_info (ClutterGstVideoResolution *resolution,
+ GstVideoInfo *info)
+{
+ resolution->width = info->width;
+ resolution->height = info->height;
+ resolution->par_n = info->par_n;
+ resolution->par_d = info->par_d;
+}
diff --git a/clutter-gst/clutter-gst-video-sink.c b/clutter-gst/clutter-gst-video-sink.c
new file mode 100644
index 0000000..d9f427d
--- /dev/null
+++ b/clutter-gst/clutter-gst-video-sink.c
@@ -0,0 +1,2317 @@
+
+/*
+ * Clutter-GStreamer.
+ *
+ * GStreamer integration library for Clutter.
+ *
+ * clutter-gst-video-sink.c - Gstreamer Video Sink that renders to a
+ * Cogl Pipeline.
+ *
+ * Authored by Jonathan Matthew <jonathan@kaolin.wh9.net>,
+ * Chris Lord <chris@openedhand.com>
+ * Damien Lespiau <damien.lespiau@intel.com>
+ * Matthew Allum <mallum@openedhand.com>
+ * Plamena Manolova <plamena.n.manolova@intel.com>
+ * Lionel Landwerlin <lionel.g.landwerlin@linux.intel.com>
+ *
+ * Copyright (C) 2007, 2008 OpenedHand
+ * Copyright (C) 2009, 2010, 2013, 2014 Intel Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+/**
+ * SECTION:clutter-gst-video-sink
+ * @short_description: A video sink for integrating a GStreamer
+ * pipeline with a Cogl pipeline.
+ *
+ * #ClutterGstVideoSink is a subclass of #GstBaseSink which can be used to
+ * create a #CoglPipeline for rendering the frames of the video.
+ *
+ * To create a basic video player, an application can create a
+ * #GstPipeline as normal using gst_pipeline_new() and set the
+ * sink on it to one created with clutter_gst_video_sink_new(). The
+ * application can then listen for the #ClutterGstVideoSink::new-frame
+ * signal which will be emitted whenever there are new textures ready
+ * for rendering. For simple rendering, the application can just call
+ * clutter_gst_video_sink_get_pipeline() in the signal handler and use
+ * the returned pipeline to paint the new frame.
+ *
+ * An application is also free to do more advanced rendering by
+ * customizing the pipeline. In that case it should listen for the
+ * #ClutterGstVideoSink::pipeline-ready signal which will be emitted as
+ * soon as the sink has determined enough information about the video
+ * to know how it should be rendered. In the handler for this signal,
+ * the application can either make modifications to a copy of the
+ * pipeline returned by clutter_gst_video_sink_get_pipeline() or it can
+ * create its own pipeline from scratch and ask the sink to configure
+ * it with clutter_gst_video_sink_setup_pipeline(). If a custom pipeline
+ * is created using one of these methods then the application should
+ * call clutter_gst_video_sink_attach_frame() on the pipeline before
+ * rendering in order to update the textures on the pipeline's layers.
+ *
+ * If the %COGL_FEATURE_ID_GLSL feature is available then the pipeline
+ * used by the sink will have a shader snippet with a function in it
+ * called clutter_gst_sample_video0 which takes a single vec2 argument.
+ * This can be used by custom snippets set by the application to
+ * sample from the video. The vec2 argument represents the normalised
+ * coordinates within the video. The function returns a vec4
+ * containing a pre-multiplied RGBA color of the pixel within the
+ * video.
+ *
+ * Since: 3.0
+ */
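/* Editor's sketch, not part of this commit: the basic-player flow the
 * section comment above describes, in application code. It assumes
 * playbin is available and that the handler simply repaints using the
 * sink's ready-made pipeline. */
#include <clutter-gst/clutter-gst.h>

static void
on_new_frame (ClutterGstVideoSink *sink,
              gpointer             user_data)
{
  /* For simple rendering, paint the frame with the pipeline the sink
   * keeps up to date. */
  CoglPipeline *pipeline = clutter_gst_video_sink_get_pipeline (sink);

  /* ... draw with the pipeline, or invalidate a ClutterContent ... */
  (void) pipeline;
}

static GstElement *
build_player (const gchar *uri)
{
  GstElement *playbin = gst_element_factory_make ("playbin", NULL);
  ClutterGstVideoSink *sink = clutter_gst_video_sink_new ();

  g_signal_connect (sink, "new-frame", G_CALLBACK (on_new_frame), NULL);
  g_object_set (playbin, "uri", uri, "video-sink", sink, NULL);

  return playbin;
}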
+
+#include "config.h"
+
+#include <gst/gst.h>
+#include <gst/gstvalue.h>
+#include <gst/video/video.h>
+#include <gst/riff/riff-ids.h>
+#include <string.h>
+#include <math.h>
+
+#include "clutter-gst-video-sink.h"
+#include "clutter-gst-private.h"
+
+#define CLUTTER_GST_DEFAULT_PRIORITY G_PRIORITY_HIGH_IDLE
+
+#define BASE_SINK_CAPS "{ AYUV," \
+ "YV12," \
+ "I420," \
+ "RGBA," \
+ "BGRA," \
+ "RGB," \
+ "BGR," \
+ "NV12 }"
+
+static const char clutter_gst_video_sink_caps_str[] =
+ GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ BASE_SINK_CAPS);
+
+ static GstStaticPadTemplate sinktemplate_all =
+ GST_STATIC_PAD_TEMPLATE ("sink",
+ GST_PAD_SINK,
+ GST_PAD_ALWAYS,
+ GST_STATIC_CAPS (clutter_gst_video_sink_caps_str));
+
+static void color_balance_iface_init (GstColorBalanceInterface *iface);
+
+G_DEFINE_TYPE_WITH_CODE (ClutterGstVideoSink,
+ clutter_gst_video_sink,
+ GST_TYPE_BASE_SINK,
+ G_IMPLEMENT_INTERFACE (GST_TYPE_COLOR_BALANCE, color_balance_iface_init))
+
+enum
+{
+ PROP_0,
+ PROP_UPDATE_PRIORITY
+};
+
+enum
+ {
+ PIPELINE_READY,
+ NEW_FRAME,
+
+ LAST_SIGNAL
+ };
+
+static guint video_sink_signals[LAST_SIGNAL] = { 0, };
+
+typedef enum
+{
+ CLUTTER_GST_NOFORMAT,
+ CLUTTER_GST_RGB32,
+ CLUTTER_GST_RGB24,
+ CLUTTER_GST_AYUV,
+ CLUTTER_GST_YV12,
+ CLUTTER_GST_SURFACE,
+ CLUTTER_GST_I420,
+ CLUTTER_GST_NV12
+} ClutterGstVideoFormat;
+
+typedef enum
+{
+ CLUTTER_GST_RENDERER_NEEDS_GLSL = (1 << 0),
+ CLUTTER_GST_RENDERER_NEEDS_TEXTURE_RG = (1 << 1)
+} ClutterGstRendererFlag;
+
+/* We want to cache the snippets instead of recreating a new one every
+ * time we initialise a pipeline so that if we end up recreating the
+ * same pipeline again then Cogl will be able to use the pipeline
+ * cache to avoid linking a redundant identical shader program */
+typedef struct
+{
+ CoglSnippet *vertex_snippet;
+ CoglSnippet *fragment_snippet;
+ CoglSnippet *default_sample_snippet;
+ int start_position;
+} SnippetCacheEntry;
+
+typedef struct
+{
+ GQueue entries;
+} SnippetCache;
+
+typedef struct _ClutterGstSource
+{
+ GSource source;
+ ClutterGstVideoSink *sink;
+ GMutex buffer_lock;
+ GstBuffer *buffer;
+ gboolean has_new_caps;
+} ClutterGstSource;
+
+typedef void (ClutterGstRendererPaint) (ClutterGstVideoSink *);
+typedef void (ClutterGstRendererPostPaint) (ClutterGstVideoSink *);
+
+typedef struct _ClutterGstRenderer
+{
+ const char *name;
+ ClutterGstVideoFormat format;
+ guint flags;
+ GstStaticCaps caps;
+ guint n_layers;
+ void (*setup_pipeline) (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline);
+ gboolean (*upload) (ClutterGstVideoSink *sink,
+ GstBuffer *buffer);
+ void (*shutdown) (ClutterGstVideoSink *sink);
+} ClutterGstRenderer;
+
+struct _ClutterGstVideoSinkPrivate
+{
+ CoglContext *ctx;
+ CoglPipeline *template_pipeline;
+ CoglPipeline *pipeline;
+ ClutterGstFrame *clt_frame;
+
+ CoglTexture *frame[3];
+ gboolean frame_dirty;
+ gboolean had_upload_once;
+
+ ClutterGstVideoFormat format;
+ gboolean bgr;
+
+ ClutterGstSource *source;
+ GSList *renderers;
+ GstCaps *caps;
+ ClutterGstRenderer *renderer;
+ GstFlowReturn flow_return;
+ int custom_start;
+ int video_start;
+ gboolean default_sample;
+ GstVideoInfo info;
+
+ gdouble brightness;
+ gdouble contrast;
+ gdouble hue;
+ gdouble saturation;
+ gboolean balance_dirty;
+
+ guint8 *tabley;
+ guint8 *tableu;
+ guint8 *tablev;
+};
+
+/* Snippet cache */
+
+static SnippetCacheEntry *
+get_layer_cache_entry (ClutterGstVideoSink *sink,
+ SnippetCache *cache)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ GList *l;
+
+ for (l = cache->entries.head; l; l = l->next)
+ {
+ SnippetCacheEntry *entry = l->data;
+
+ if (entry->start_position == priv->video_start)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static SnippetCacheEntry *
+add_layer_cache_entry (ClutterGstVideoSink *sink,
+ SnippetCache *cache,
+ const char *decl)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ SnippetCacheEntry *entry = g_slice_new (SnippetCacheEntry);
+ char *default_source;
+
+ entry->start_position = priv->video_start;
+
+ entry->vertex_snippet =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_VERTEX_GLOBALS,
+ decl,
+ NULL /* post */);
+ entry->fragment_snippet =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_FRAGMENT_GLOBALS,
+ decl,
+ NULL /* post */);
+
+ default_source =
+ g_strdup_printf (" cogl_layer *= clutter_gst_sample_video%i "
+ "(cogl_tex_coord%i_in.st);\n",
+ priv->video_start,
+ priv->video_start);
+ entry->default_sample_snippet =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_LAYER_FRAGMENT,
+ NULL, /* declarations */
+ default_source);
+ g_free (default_source);
+
+ g_queue_push_head (&cache->entries, entry);
+
+ return entry;
+}
+
+static SnippetCacheEntry *
+get_global_cache_entry (SnippetCache *cache, int param)
+{
+ GList *l;
+
+ for (l = cache->entries.head; l; l = l->next)
+ {
+ SnippetCacheEntry *entry = l->data;
+
+ if (entry->start_position == param)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static SnippetCacheEntry *
+add_global_cache_entry (SnippetCache *cache,
+ const char *decl,
+ int param)
+{
+ SnippetCacheEntry *entry = g_slice_new (SnippetCacheEntry);
+
+ entry->start_position = param;
+
+ entry->vertex_snippet =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_VERTEX_GLOBALS,
+ decl,
+ NULL /* post */);
+ entry->fragment_snippet =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_FRAGMENT_GLOBALS,
+ decl,
+ NULL /* post */);
+
+ g_queue_push_head (&cache->entries, entry);
+
+ return entry;
+}
+
+static void
+setup_pipeline_from_cache_entry (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline,
+ SnippetCacheEntry *cache_entry,
+ int n_layers)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ if (cache_entry)
+ {
+ int i;
+
+ /* The global sampling function gets added to both the fragment
+ * and vertex stages. The hope is that the GLSL compiler will
+ * easily remove the dead code if it's not actually used */
+ cogl_pipeline_add_snippet (pipeline, cache_entry->vertex_snippet);
+ cogl_pipeline_add_snippet (pipeline, cache_entry->fragment_snippet);
+
+ /* Set all of the layers to just directly copy from the previous
+ * layer so that it won't redundantly generate code to sample
+ * the intermediate textures */
+ for (i = 0; i < n_layers; i++) {
+ cogl_pipeline_set_layer_combine (pipeline,
+ priv->video_start + i,
+ "RGBA=REPLACE(PREVIOUS)",
+ NULL);
+ }
+
+ if (priv->default_sample) {
+ cogl_pipeline_add_layer_snippet (pipeline,
+ priv->video_start + n_layers - 1,
+ cache_entry->default_sample_snippet);
+ }
+ }
+
+ priv->frame_dirty = TRUE;
+}
+
+/* Color balance */
+
+#define DEFAULT_BRIGHTNESS (0.0f)
+#define DEFAULT_CONTRAST (1.0f)
+#define DEFAULT_HUE (0.0f)
+#define DEFAULT_SATURATION (1.0f)
+
+static gboolean
+clutter_gst_video_sink_needs_color_balance_shader (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ return (priv->brightness != DEFAULT_BRIGHTNESS ||
+ priv->contrast != DEFAULT_CONTRAST ||
+ priv->hue != DEFAULT_HUE ||
+ priv->saturation != DEFAULT_SATURATION);
+}
+
+static void
+clutter_gst_video_sink_color_balance_update_tables (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ gint i, j;
+ gdouble y, u, v, hue_cos, hue_sin;
+
+ /* Y */
+ for (i = 0; i < 256; i++) {
+ y = 16 + ((i - 16) * priv->contrast + priv->brightness * 255);
+ if (y < 0)
+ y = 0;
+ else if (y > 255)
+ y = 255;
+ priv->tabley[i] = rint (y);
+ }
+
+ hue_cos = cos (G_PI * priv->hue);
+ hue_sin = sin (G_PI * priv->hue);
+
+ /* U/V lookup tables are 2D, since we need both U/V for each table
+ * separately. */
+ for (i = -128; i < 128; i++) {
+ for (j = -128; j < 128; j++) {
+ u = 128 + ((i * hue_cos + j * hue_sin) * priv->saturation);
+ v = 128 + ((-i * hue_sin + j * hue_cos) * priv->saturation);
+ if (u < 0)
+ u = 0;
+ else if (u > 255)
+ u = 255;
+ if (v < 0)
+ v = 0;
+ else if (v > 255)
+ v = 255;
+ priv->tableu[(i + 128) * 256 + j + 128] = rint (u);
+ priv->tablev[(i + 128) * 256 + j + 128] = rint (v);
+ }
+ }
+}
+
+static const GList *
+clutter_gst_video_sink_color_balance_list_channels (GstColorBalance *balance)
+{
+ static GList *channels = NULL;
+
+ if (channels == NULL) {
+ const gchar *str_channels[4] = { "HUE", "SATURATION",
+ "BRIGHTNESS", "CONTRAST"
+ };
+ guint i;
+
+ for (i = 0; i < G_N_ELEMENTS (str_channels); i++) {
+ GstColorBalanceChannel *channel;
+
+ channel = g_object_new (GST_TYPE_COLOR_BALANCE_CHANNEL, NULL);
+ channel->label = g_strdup (str_channels[i]);
+ channel->min_value = -1000;
+ channel->max_value = 1000;
+ channels = g_list_append (channels, channel);
+ }
+ }
+
+ return channels;
+}
+
+static gboolean
+clutter_gst_video_sink_get_variable (ClutterGstVideoSink *sink,
+ const gchar *variable,
+ gdouble *minp,
+ gdouble *maxp,
+ gdouble **valuep)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ gdouble min, max, *value;
+
+ if (!g_strcmp0 (variable, "BRIGHTNESS"))
+ {
+ min = -1.0;
+ max = 1.0;
+ value = &priv->brightness;
+ }
+ else if (!g_strcmp0 (variable, "CONTRAST"))
+ {
+ min = 0.0;
+ max = 2.0;
+ value = &priv->contrast;
+ }
+ else if (!g_strcmp0 (variable, "HUE"))
+ {
+ min = -1.0;
+ max = 1.0;
+ value = &priv->hue;
+ }
+ else if (!g_strcmp0 (variable, "SATURATION"))
+ {
+ min = 0.0;
+ max = 2.0;
+ value = &priv->saturation;
+ }
+ else
+ {
+ GST_WARNING_OBJECT (sink, "color balance parameter not supported %s",
+ variable);
+ return FALSE;
+ }
+
+ if (maxp)
+ *maxp = max;
+ if (minp)
+ *minp = min;
+ if (valuep)
+ *valuep = value;
+
+ return TRUE;
+}
+
+static void
+clutter_gst_video_sink_color_balance_set_value (GstColorBalance *balance,
+ GstColorBalanceChannel *channel,
+ gint value)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (balance);
+ gdouble *old_value, new_value, min, max;
+
+ if (!clutter_gst_video_sink_get_variable (sink, channel->label,
+ &min, &max, &old_value))
+ return;
+
+ new_value = (max - min) * ((gdouble) (value - channel->min_value) /
+ (gdouble) (channel->max_value - channel->min_value))
+ + min;
+
+ if (new_value != *old_value)
+ {
+ *old_value = new_value;
+ sink->priv->balance_dirty = TRUE;
+
+ gst_color_balance_value_changed (GST_COLOR_BALANCE (balance), channel,
+ gst_color_balance_get_value (GST_COLOR_BALANCE (balance), channel));
+ }
+}
+
+static gint
+clutter_gst_video_sink_color_balance_get_value (GstColorBalance *balance,
+ GstColorBalanceChannel *channel)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (balance);
+ gdouble *old_value, min, max;
+ gint value;
+
+ if (!clutter_gst_video_sink_get_variable (sink, channel->label,
+ &min, &max, &old_value))
+ return 0;
+
+ value = (gint) (((*old_value + min) / (max - min)) *
+ (channel->max_value - channel->min_value))
+ + channel->min_value;
+
+ return value;
+}
+
+static GstColorBalanceType
+clutter_gst_video_sink_color_balance_get_balance_type (GstColorBalance *balance)
+{
+ return GST_COLOR_BALANCE_HARDWARE;
+}
+
+static void
+color_balance_iface_init (GstColorBalanceInterface *iface)
+{
+ iface->list_channels = clutter_gst_video_sink_color_balance_list_channels;
+ iface->set_value = clutter_gst_video_sink_color_balance_set_value;
+ iface->get_value = clutter_gst_video_sink_color_balance_get_value;
+
+ iface->get_balance_type = clutter_gst_video_sink_color_balance_get_balance_type;
+}
+
+/**/
+
+static void
+clutter_gst_source_finalize (GSource *source)
+{
+ ClutterGstSource *gst_source = (ClutterGstSource *) source;
+
+ g_mutex_lock (&gst_source->buffer_lock);
+ if (gst_source->buffer)
+ gst_buffer_unref (gst_source->buffer);
+ gst_source->buffer = NULL;
+ g_mutex_unlock (&gst_source->buffer_lock);
+ g_mutex_clear (&gst_source->buffer_lock);
+}
+
+void
+clutter_gst_video_sink_attach_frame (ClutterGstVideoSink *sink,
+ CoglPipeline *pln)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ guint i;
+
+ for (i = 0; i < G_N_ELEMENTS (priv->frame); i++)
+ if (priv->frame[i] != NULL)
+ cogl_pipeline_set_layer_texture (pln, i + priv->video_start,
+ priv->frame[i]);
+}
+
+/* Color balance */
+
+static const gchar *no_color_balance_shader =
+ "#define clutter_gst_get_corrected_color_from_yuv(arg) (arg)\n"
+ "#define clutter_gst_get_corrected_color_from_rgb(arg) (arg)\n";
+
+static const gchar *color_balance_shader =
+ "vec3\n"
+ "clutter_gst_get_corrected_color_from_yuv (vec3 yuv)\n"
+ "{\n"
+ " vec2 ruv = vec2 (yuv[2] + 0.5, yuv[1] + 0.5);\n"
+ " return vec3 (texture2D (cogl_sampler%i, vec2 (yuv[0], 0)).a,\n"
+ " texture2D (cogl_sampler%i, ruv).a - 0.5,\n"
+ " texture2D (cogl_sampler%i, ruv).a - 0.5);\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_get_corrected_color_from_rgb (vec3 rgb)\n"
+ "{\n"
+ " vec3 yuv = clutter_gst_yuv_srgb_to_bt601 (rgb);\n"
+ " vec3 corrected_yuv = vec3 (texture2D (cogl_sampler%i, vec2 (yuv[0], 0)).a,\n"
+ " texture2D (cogl_sampler%i, vec2 (yuv[2], yuv[1])).a,\n"
+ " texture2D (cogl_sampler%i, vec2 (yuv[2], yuv[1])).a);\n"
+ " return clutter_gst_yuv_bt601_to_srgb (corrected_yuv);\n"
+ "}\n";
+
+static void
+clutter_gst_video_sink_setup_balance (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ static CoglSnippet *no_color_balance_snippet_vert = NULL,
+ *no_color_balance_snippet_frag = NULL;
+
+ GST_INFO_OBJECT (sink, "attaching correction b=%.3f/c=%.3f/h=%.3f/s=%.3f",
+ priv->brightness, priv->contrast,
+ priv->hue, priv->saturation);
+
+ if (clutter_gst_video_sink_needs_color_balance_shader (sink))
+ {
+ int i;
+ const guint8 *tables[3] = { priv->tabley, priv->tableu, priv->tablev };
+ const gint tables_sizes[3][2] = { { 256, 1 },
+ { 256, 256 },
+ { 256, 256 } };
+ SnippetCacheEntry *entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ gchar *source = g_strdup_printf (color_balance_shader,
+ priv->custom_start,
+ priv->custom_start + 1,
+ priv->custom_start + 2,
+ priv->custom_start,
+ priv->custom_start + 1,
+ priv->custom_start + 2);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ cogl_pipeline_add_snippet (pipeline, entry->vertex_snippet);
+ cogl_pipeline_add_snippet (pipeline, entry->fragment_snippet);
+
+ clutter_gst_video_sink_color_balance_update_tables (sink);
+
+ for (i = 0; i < 3; i++)
+ {
+ CoglTexture *lut_texture =
+ cogl_texture_2d_new_from_data (priv->ctx,
+ tables_sizes[i][0],
+ tables_sizes[i][1],
+ COGL_PIXEL_FORMAT_A_8,
+ tables_sizes[i][0],
+ tables[i],
+ NULL);
+
+ cogl_pipeline_set_layer_filters (pipeline,
+ priv->custom_start + i,
+ COGL_PIPELINE_FILTER_LINEAR,
+ COGL_PIPELINE_FILTER_LINEAR);
+ cogl_pipeline_set_layer_combine (pipeline,
+ priv->custom_start + i,
+ "RGBA=REPLACE(PREVIOUS)",
+ NULL);
+ cogl_pipeline_set_layer_texture (pipeline,
+ priv->custom_start + i,
+ lut_texture);
+
+ cogl_object_unref (lut_texture);
+ }
+
+ priv->video_start = priv->custom_start + 3;
+ }
+ else
+ {
+ if (G_UNLIKELY (no_color_balance_snippet_vert == NULL))
+ {
+ no_color_balance_snippet_vert =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_VERTEX_GLOBALS,
+ no_color_balance_shader,
+ NULL);
+ no_color_balance_snippet_frag =
+ cogl_snippet_new (COGL_SNIPPET_HOOK_FRAGMENT_GLOBALS,
+ no_color_balance_shader,
+ NULL);
+ }
+
+ cogl_pipeline_add_snippet (pipeline, no_color_balance_snippet_vert);
+ cogl_pipeline_add_snippet (pipeline, no_color_balance_snippet_frag);
+
+ priv->video_start = priv->custom_start;
+ }
+}
+
+/* YUV <-> RGB conversions */
+
+static const gchar *color_conversions_shaders =
+ "\n"
+ "/* These conversion functions take : */\n"
+ "/* Y = [0, 1] */\n"
+ "/* U = [-0.5, 0.5] */\n"
+ "/* V = [-0.5, 0.5] */\n"
+ "vec3\n"
+ "clutter_gst_yuv_bt601_to_srgb (vec3 yuv)\n"
+ "{\n"
+ " return mat3 (1.0, 1.0, 1.0,\n"
+ " 0.0, -0.344136, 1.772,\n"
+ " 1.402, -0.714136, 0.0 ) * yuv;\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_yuv_bt709_to_srgb (vec3 yuv)\n"
+ "{\n"
+ " return mat3 (1.0, 1.0, 1.0,\n"
+ " 0.0, -0.187324, 1.8556,\n"
+ " 1.5748, -0.468124, 0.0 ) * yuv;\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_yuv_bt2020_to_srgb (vec3 yuv)\n"
+ "{\n"
+ " return mat3 (1.0, 1.0, 1.0,\n"
+ " 0.0, 0.571353, 1.8814,\n"
+ " 1.4746, 0.164553, 0.0 ) * yuv;\n"
+ "}\n"
+ "/* Original transformation, still no idea where these values come from... */\n"
+ "vec3\n"
+ "clutter_gst_yuv_originalyuv_to_srgb (vec3 yuv)\n"
+ "{\n"
+ " return mat3 (1.0, 1.0, 1.0,\n"
+ " 0.0, -0.390625, 2.015625,\n"
+ " 1.59765625, -0.8125, 0.0 ) * yuv;\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_yuv_srgb_to_bt601 (vec3 rgb)\n"
+ "{\n"
+ " return mat3 (0.299, 0.5, -0.168736,\n"
+ " 0.587, -0.418688, -0.331264,\n"
+ " 0.114, -0.081312, 0.5 ) * rgb;\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_yuv_srgb_to_bt709 (vec3 rgb)\n"
+ "{\n"
+ " return mat3 (0.2126, -0.114626, 0.5,\n"
+ " 0.7152, -0.385428, -0.454153,\n"
+ " 0.0722, 0.5, 0.045847 ) * rgb;\n"
+ "}\n"
+ "\n"
+ "vec3\n"
+ "clutter_gst_yuv_srgb_to_bt2020 (vec3 rgb)\n"
+ "{\n"
+ " return mat3 (0.2627, -0.139630, 0.503380,\n"
+ " 0.6780, -0.360370, -0.462893,\n"
+ " 0.0593, 0.5, -0.040486 ) * rgb;\n"
+ "}\n"
+ "\n"
+ "#define clutter_gst_default_yuv_to_srgb(arg) clutter_gst_yuv_%s_to_srgb(arg)\n"
+ "\n";
+
+static const char *
+_gst_video_color_matrix_to_string (GstVideoColorMatrix matrix)
+{
+ switch (matrix)
+ {
+ case GST_VIDEO_COLOR_MATRIX_BT601:
+ return "bt601";
+ case GST_VIDEO_COLOR_MATRIX_BT709:
+ return "bt709";
+
+ default:
+ return "bt709";
+ }
+}
+
+static void
+clutter_gst_video_sink_setup_conversions (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ GstVideoColorMatrix matrix = priv->info.colorimetry.matrix;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry = get_global_cache_entry (&snippet_cache, matrix);
+
+ if (entry == NULL)
+ {
+ char *source = g_strdup_printf (color_conversions_shaders,
+ _gst_video_color_matrix_to_string (matrix));
+
+ entry = add_global_cache_entry (&snippet_cache, source, matrix);
+ g_free (source);
+ }
+
+ cogl_pipeline_add_snippet (pipeline, entry->vertex_snippet);
+ cogl_pipeline_add_snippet (pipeline, entry->fragment_snippet);
+}
+
+/**/
+
+static gboolean
+clutter_gst_source_prepare (GSource *source,
+ int *timeout)
+{
+ ClutterGstSource *gst_source = (ClutterGstSource *) source;
+
+ *timeout = -1;
+
+ return gst_source->buffer != NULL;
+}
+
+static gboolean
+clutter_gst_source_check (GSource *source)
+{
+ ClutterGstSource *gst_source = (ClutterGstSource *) source;
+
+ return (gst_source->buffer != NULL ||
+ gst_source->sink->priv->balance_dirty);
+}
+
+static void
+clutter_gst_video_sink_set_priority (ClutterGstVideoSink *sink,
+ int priority)
+{
+ if (sink->priv->source)
+ g_source_set_priority ((GSource *) sink->priv->source, priority);
+}
+
+static void
+dirty_default_pipeline (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ if (priv->pipeline)
+ {
+ cogl_object_unref (priv->pipeline);
+ priv->pipeline = NULL;
+ priv->had_upload_once = FALSE;
+ }
+}
+
+static int
+_clutter_gst_video_sink_get_video_layer (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ if (clutter_gst_video_sink_needs_color_balance_shader (sink))
+ return priv->custom_start + 3;
+ return priv->custom_start;
+}
+
+static void
+clear_frame_textures (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ guint i;
+
+ for (i = 0; i < G_N_ELEMENTS (priv->frame); i++)
+ {
+ if (priv->frame[i] == NULL)
+ break;
+ else
+ cogl_object_unref (priv->frame[i]);
+ }
+
+ memset (priv->frame, 0, sizeof (priv->frame));
+
+ priv->frame_dirty = TRUE;
+}
+
+/**/
+
+static void
+clutter_gst_dummy_shutdown (ClutterGstVideoSink *sink)
+{
+}
+
+/**/
+
+static inline gboolean
+is_pot (unsigned int number)
+{
+ /* Make sure there is only one bit set */
+ return (number & (number - 1)) == 0;
+}
+
+/* This first tries to upload the texture to a CoglTexture2D, but
+ * if that's not possible it falls back to a CoglTexture2DSliced.
+ *
+ * Auto-mipmapping of any uploaded texture is disabled
+ */
+static CoglTexture *
+video_texture_new_from_data (CoglContext *ctx,
+ int width,
+ int height,
+ CoglPixelFormat format,
+ int rowstride,
+ const uint8_t *data)
+{
+ CoglBitmap *bitmap;
+ CoglTexture *tex;
+ CoglError *internal_error = NULL;
+
+ bitmap = cogl_bitmap_new_for_data (ctx,
+ width, height,
+ format,
+ rowstride,
+ (uint8_t *) data);
+
+  if ((is_pot (cogl_bitmap_get_width (bitmap)) &&
+       is_pot (cogl_bitmap_get_height (bitmap))) ||
+      cogl_has_feature (ctx, COGL_FEATURE_ID_TEXTURE_NPOT_BASIC))
+    {
+      tex = cogl_texture_2d_new_from_bitmap (bitmap);
+
+      cogl_texture_set_premultiplied (tex, FALSE);
+
+      /* Cogl allocates texture storage lazily, so force the
+       * allocation here to find out whether the driver accepts this
+       * texture and fall back to a sliced texture if it does not */
+      if (!cogl_texture_allocate (tex, &internal_error))
+        {
+          cogl_error_free (internal_error);
+          internal_error = NULL;
+          cogl_object_unref (tex);
+          tex = NULL;
+        }
+    }
+  else
+    tex = NULL;
+
+  if (!tex)
+    {
+      /* Otherwise create a sliced texture */
+      tex = cogl_texture_2d_sliced_new_from_bitmap (bitmap,
+                                                    -1); /* no maximum waste */
+
+      cogl_texture_set_premultiplied (tex, FALSE);
+    }
+
+  cogl_object_unref (bitmap);
+
+ return tex;
+}
+
+static void
+clutter_gst_rgb24_glsl_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ char *source;
+
+ source =
+ g_strdup_printf ("vec4\n"
+ "clutter_gst_sample_video%i (vec2 UV)\n"
+ "{\n"
+ " vec4 color = texture2D (cogl_sampler%i, UV);\n"
+ " vec3 corrected = clutter_gst_get_corrected_color_from_rgb (color.rgb);\n"
+ " return vec4(corrected.rgb, color.a);\n"
+ "}\n",
+ priv->custom_start,
+ priv->custom_start);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ setup_pipeline_from_cache_entry (sink, pipeline, entry, 1);
+}
+
+static void
+clutter_gst_rgb24_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ setup_pipeline_from_cache_entry (sink, pipeline, NULL, 1);
+}
+
+static gboolean
+clutter_gst_rgb24_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ CoglPixelFormat format;
+ GstVideoFrame frame;
+
+ if (priv->bgr)
+ format = COGL_PIXEL_FORMAT_BGR_888;
+ else
+ format = COGL_PIXEL_FORMAT_RGB_888;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+ priv->frame[0] = video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static ClutterGstRenderer rgb24_glsl_renderer =
+ {
+ "RGB 24",
+ CLUTTER_GST_RGB24,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL,
+
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES (GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "{ RGB, BGR }")),
+ 1, /* n_layers */
+ clutter_gst_rgb24_glsl_setup_pipeline,
+ clutter_gst_rgb24_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static ClutterGstRenderer rgb24_renderer =
+ {
+ "RGB 24",
+ CLUTTER_GST_RGB24,
+ 0,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES (GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "{ RGB, BGR }")),
+ 1, /* n_layers */
+ clutter_gst_rgb24_setup_pipeline,
+ clutter_gst_rgb24_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static void
+clutter_gst_rgb32_glsl_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ char *source;
+
+ source =
+ g_strdup_printf ("vec4\n"
+ "clutter_gst_sample_video%i (vec2 UV)\n"
+ "{\n"
+ " vec4 color = texture2D (cogl_sampler%i, UV);\n"
+ " vec3 corrected = clutter_gst_get_corrected_color_from_rgb (color.rgb);\n"
+ /* Premultiply the color */
+ " corrected.rgb *= color.a;\n"
+ " return vec4(corrected.rgb, color.a);\n"
+ "}\n",
+ priv->custom_start,
+ priv->custom_start);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ setup_pipeline_from_cache_entry (sink, pipeline, entry, 1);
+}
+
+static void
+clutter_gst_rgb32_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ char *layer_combine;
+
+ setup_pipeline_from_cache_entry (sink, pipeline, NULL, 1);
+
+  /* Premultiply the texture using a special layer combine */
+ layer_combine = g_strdup_printf ("RGB=MODULATE(PREVIOUS, TEXTURE_%i[A])\n"
+ "A=REPLACE(PREVIOUS[A])",
+ priv->custom_start);
+ cogl_pipeline_set_layer_combine (pipeline,
+ priv->custom_start + 1,
+ layer_combine,
+ NULL);
+  g_free (layer_combine);
+}
+
+static gboolean
+clutter_gst_rgb32_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ CoglPixelFormat format;
+ GstVideoFrame frame;
+
+ if (priv->bgr)
+ format = COGL_PIXEL_FORMAT_BGRA_8888;
+ else
+ format = COGL_PIXEL_FORMAT_RGBA_8888;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+ priv->frame[0] = video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static ClutterGstRenderer rgb32_glsl_renderer =
+ {
+ "RGB 32",
+ CLUTTER_GST_RGB32,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "{ RGBA, BGRA }")),
+ 1, /* n_layers */
+ clutter_gst_rgb32_glsl_setup_pipeline,
+ clutter_gst_rgb32_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static ClutterGstRenderer rgb32_renderer =
+ {
+ "RGB 32",
+ CLUTTER_GST_RGB32,
+ 0,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "{ RGBA, BGRA }")),
+ 2, /* n_layers */
+ clutter_gst_rgb32_setup_pipeline,
+ clutter_gst_rgb32_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static gboolean
+clutter_gst_yv12_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ CoglPixelFormat format = COGL_PIXEL_FORMAT_A_8;
+ GstVideoFrame frame;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+ priv->frame[0] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+ priv->frame[2] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 1),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 1),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 1),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 1));
+
+ priv->frame[1] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 2),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 2),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 2),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 2));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static gboolean
+clutter_gst_i420_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ CoglPixelFormat format = COGL_PIXEL_FORMAT_A_8;
+ GstVideoFrame frame;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+ priv->frame[0] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+ priv->frame[1] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 1),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 1),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 1),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 1));
+
+ priv->frame[2] =
+ video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 2),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 2),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 2),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 2));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static void
+clutter_gst_yv12_glsl_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry;
+
+ entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ char *source;
+
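+      /* The planes are sampled through A_8 textures, so the luma and
+       * chroma values come out of the alpha channel. 0.0625 (16/256)
+       * removes the studio-swing offset, 1.1640625 (~255/219) expands
+       * the luma to the full range, and the chroma samples are
+       * re-centered around zero before the color correction. */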
+ source =
+ g_strdup_printf ("vec4\n"
+ "clutter_gst_sample_video%i (vec2 UV)\n"
+ "{\n"
+ " float y = 1.1640625 * (texture2D (cogl_sampler%i, UV).a - 0.0625);\n"
+ " float u = texture2D (cogl_sampler%i, UV).a - 0.5;\n"
+ " float v = texture2D (cogl_sampler%i, UV).a - 0.5;\n"
+ " vec3 corrected = clutter_gst_get_corrected_color_from_yuv (vec3 (y, u, v));\n"
+ " vec4 color;\n"
+ " color.rgb = clutter_gst_default_yuv_to_srgb (corrected);\n"
+ " color.a = 1.0;\n"
+ " return color;\n"
+ "}\n",
+ priv->video_start,
+ priv->video_start,
+ priv->video_start + 1,
+ priv->video_start + 2);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ setup_pipeline_from_cache_entry (sink, pipeline, entry, 3);
+}
+
+static ClutterGstRenderer yv12_glsl_renderer =
+ {
+ "YV12 glsl",
+ CLUTTER_GST_YV12,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "YV12")),
+ 3, /* n_layers */
+ clutter_gst_yv12_glsl_setup_pipeline,
+ clutter_gst_yv12_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static ClutterGstRenderer i420_glsl_renderer =
+ {
+ "I420 glsl",
+ CLUTTER_GST_I420,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "I420")),
+ 3, /* n_layers */
+ clutter_gst_yv12_glsl_setup_pipeline,
+ clutter_gst_i420_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static void
+clutter_gst_ayuv_glsl_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry;
+
+ entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ char *source;
+
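+      /* AYUV is packed so that, once uploaded as RGBA, the r channel
+       * holds alpha, g holds Y and b/a hold U/V. The luma is expanded
+       * from the 16..235 studio range and the chroma re-centered
+       * around zero before the color correction. */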
+ source
+ = g_strdup_printf ("vec4\n"
+ "clutter_gst_sample_video%i (vec2 UV)\n"
+ "{\n"
+ " vec4 color = texture2D (cogl_sampler%i, UV);\n"
+ " float y = 1.1640625 * (color.g - 0.0625);\n"
+ " float u = color.b - 0.5;\n"
+ " float v = color.a - 0.5;\n"
+ " vec3 corrected = clutter_gst_get_corrected_color_from_yuv (vec3 (y, u, v));\n"
+ " color.a = color.r;\n"
+ " color.rgb = clutter_gst_default_yuv_to_srgb (corrected);\n"
+ /* Premultiply the color */
+ " color.rgb *= color.a;\n"
+ " return color;\n"
+ "}\n",
+ priv->video_start,
+ priv->video_start);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ setup_pipeline_from_cache_entry (sink, pipeline, entry, 1);
+}
+
+static gboolean
+clutter_gst_ayuv_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ CoglPixelFormat format = COGL_PIXEL_FORMAT_RGBA_8888;
+ GstVideoFrame frame;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+ priv->frame[0] = video_texture_new_from_data (priv->ctx,
+ GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+ GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+ format,
+ GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+ GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static ClutterGstRenderer ayuv_glsl_renderer =
+ {
+ "AYUV glsl",
+ CLUTTER_GST_AYUV,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "AYUV")),
+ 1, /* n_layers */
+ clutter_gst_ayuv_glsl_setup_pipeline,
+ clutter_gst_ayuv_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static void
+clutter_gst_nv12_glsl_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ static SnippetCache snippet_cache;
+ SnippetCacheEntry *entry;
+
+ entry = get_layer_cache_entry (sink, &snippet_cache);
+
+ if (entry == NULL)
+ {
+ char *source;
+
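+      /* NV12 has a full-resolution Y plane (sampled through an A_8
+       * texture) and a half-resolution interleaved UV plane uploaded
+       * as an RG texture, hence the .rg swizzle for the chroma. */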
+ source =
+ g_strdup_printf ("vec4\n"
+ "clutter_gst_sample_video%i (vec2 UV)\n"
+ "{\n"
+ " vec4 color;\n"
+ " float y = 1.1640625 *\n"
+ " (texture2D (cogl_sampler%i, UV).a -\n"
+ " 0.0625);\n"
+ " vec2 uv = texture2D (cogl_sampler%i, UV).rg;\n"
+ " uv -= 0.5;\n"
+ " float u = uv.x;\n"
+ " float v = uv.y;\n"
+ " vec3 corrected = clutter_gst_get_corrected_color_from_yuv (vec3 (y, u, v));\n"
+ " color.rgb = clutter_gst_default_yuv_to_srgb (corrected);\n"
+ " color.a = 1.0;\n"
+ " return color;\n"
+ "}\n",
+ priv->custom_start,
+ priv->custom_start,
+ priv->custom_start + 1);
+
+ entry = add_layer_cache_entry (sink, &snippet_cache, source);
+ g_free (source);
+ }
+
+ setup_pipeline_from_cache_entry (sink, pipeline, entry, 2);
+}
+
+static gboolean
+clutter_gst_nv12_upload (ClutterGstVideoSink *sink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ GstVideoFrame frame;
+
+ if (!gst_video_frame_map (&frame, &priv->info, buffer, GST_MAP_READ))
+ goto map_fail;
+
+ clear_frame_textures (sink);
+
+  priv->frame[0] =
+    video_texture_new_from_data (priv->ctx,
+                                 GST_VIDEO_FRAME_COMP_WIDTH (&frame, 0),
+                                 GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 0),
+                                 COGL_PIXEL_FORMAT_A_8,
+                                 GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 0),
+                                 GST_VIDEO_FRAME_PLANE_DATA (&frame, 0));
+
+  priv->frame[1] =
+    video_texture_new_from_data (priv->ctx,
+                                 GST_VIDEO_FRAME_COMP_WIDTH (&frame, 1),
+                                 GST_VIDEO_FRAME_COMP_HEIGHT (&frame, 1),
+                                 COGL_PIXEL_FORMAT_RG_88,
+                                 GST_VIDEO_FRAME_PLANE_STRIDE (&frame, 1),
+                                 GST_VIDEO_FRAME_PLANE_DATA (&frame, 1));
+
+ gst_video_frame_unmap (&frame);
+
+ return TRUE;
+
+ map_fail:
+ {
+ GST_ERROR_OBJECT (sink, "Could not map incoming video frame");
+ return FALSE;
+ }
+}
+
+static ClutterGstRenderer nv12_glsl_renderer =
+ {
+ "NV12 glsl",
+ CLUTTER_GST_NV12,
+ CLUTTER_GST_RENDERER_NEEDS_GLSL | CLUTTER_GST_RENDERER_NEEDS_TEXTURE_RG,
+ GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES(GST_CAPS_FEATURE_MEMORY_SYSTEM_MEMORY,
+ "NV12")),
+ 2, /* n_layers */
+ clutter_gst_nv12_glsl_setup_pipeline,
+ clutter_gst_nv12_upload,
+ clutter_gst_dummy_shutdown,
+ };
+
+static GSList*
+clutter_gst_build_renderers_list (CoglContext *ctx)
+{
+ GSList *list = NULL;
+ ClutterGstRendererFlag flags = 0;
+ int i;
+ static ClutterGstRenderer *const renderers[] =
+ {
+ /* These are in increasing order of priority so that the
+ * priv->renderers will be in decreasing order. That way the GLSL
+ * renderers will be preferred if they are available */
+ &rgb24_renderer,
+ &rgb32_renderer,
+ &rgb24_glsl_renderer,
+ &rgb32_glsl_renderer,
+ &yv12_glsl_renderer,
+ &i420_glsl_renderer,
+ &ayuv_glsl_renderer,
+ &nv12_glsl_renderer,
+ NULL
+ };
+
+ if (cogl_has_feature (ctx, COGL_FEATURE_ID_GLSL))
+ flags |= CLUTTER_GST_RENDERER_NEEDS_GLSL;
+
+ if (cogl_has_feature (ctx, COGL_FEATURE_ID_TEXTURE_RG))
+ flags |= CLUTTER_GST_RENDERER_NEEDS_TEXTURE_RG;
+
+ for (i = 0; renderers[i]; i++)
+ if ((renderers[i]->flags & flags) == renderers[i]->flags)
+ list = g_slist_prepend (list, renderers[i]);
+
+ return list;
+}
+
+static void
+append_cap (gpointer data,
+ gpointer user_data)
+{
+ ClutterGstRenderer *renderer = (ClutterGstRenderer *) data;
+ GstCaps *caps = (GstCaps *) user_data;
+ GstCaps *writable_caps;
+ writable_caps =
+ gst_caps_make_writable (gst_static_caps_get (&renderer->caps));
+ gst_caps_append (caps, writable_caps);
+}
+
+static GstCaps *
+clutter_gst_build_caps (GSList *renderers)
+{
+ GstCaps *caps;
+
+ caps = gst_caps_new_empty ();
+
+ g_slist_foreach (renderers, append_cap, caps);
+
+ return caps;
+}
+
+static ClutterGstRenderer *
+clutter_gst_find_renderer_by_format (ClutterGstVideoSink *sink,
+ ClutterGstVideoFormat format)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ ClutterGstRenderer *renderer = NULL;
+ GSList *element;
+
+ /* The renderers list is in decreasing order of priority so we'll
+ * pick the first one that matches */
+ for (element = priv->renderers; element; element = g_slist_next (element))
+ {
+ ClutterGstRenderer *candidate = (ClutterGstRenderer *) element->data;
+ if (candidate->format == format)
+ {
+ renderer = candidate;
+ break;
+ }
+ }
+
+ return renderer;
+}
+
+static GstCaps *
+clutter_gst_video_sink_get_caps (GstBaseSink *bsink,
+ GstCaps *filter)
+{
+ ClutterGstVideoSink *sink;
+ sink = CLUTTER_GST_VIDEO_SINK (bsink);
+
+ if (sink->priv->caps == NULL)
+ return NULL;
+ else
+ return gst_caps_ref (sink->priv->caps);
+}
+
+static gboolean
+clutter_gst_video_sink_parse_caps (GstCaps *caps,
+ ClutterGstVideoSink *sink,
+ gboolean save)
+{
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ GstCaps *intersection;
+ GstVideoInfo vinfo;
+ ClutterGstVideoFormat format;
+ gboolean bgr = FALSE;
+ ClutterGstRenderer *renderer;
+
+ intersection = gst_caps_intersect (priv->caps, caps);
+ if (gst_caps_is_empty (intersection))
+ goto no_intersection;
+
+ gst_caps_unref (intersection);
+
+ if (!gst_video_info_from_caps (&vinfo, caps))
+ goto unknown_format;
+
+ switch (vinfo.finfo->format)
+ {
+ case GST_VIDEO_FORMAT_YV12:
+ format = CLUTTER_GST_YV12;
+ break;
+ case GST_VIDEO_FORMAT_I420:
+ format = CLUTTER_GST_I420;
+ break;
+ case GST_VIDEO_FORMAT_AYUV:
+ format = CLUTTER_GST_AYUV;
+ bgr = FALSE;
+ break;
+ case GST_VIDEO_FORMAT_NV12:
+ format = CLUTTER_GST_NV12;
+ break;
+ case GST_VIDEO_FORMAT_RGB:
+ format = CLUTTER_GST_RGB24;
+ bgr = FALSE;
+ break;
+ case GST_VIDEO_FORMAT_BGR:
+ format = CLUTTER_GST_RGB24;
+ bgr = TRUE;
+ break;
+ case GST_VIDEO_FORMAT_RGBA:
+ format = CLUTTER_GST_RGB32;
+ bgr = FALSE;
+ break;
+ case GST_VIDEO_FORMAT_BGRA:
+ format = CLUTTER_GST_RGB32;
+ bgr = TRUE;
+ break;
+ default:
+ goto unhandled_format;
+ }
+
+ renderer = clutter_gst_find_renderer_by_format (sink, format);
+
+ if (G_UNLIKELY (renderer == NULL))
+ goto no_suitable_renderer;
+
+ GST_INFO_OBJECT (sink, "found the %s renderer", renderer->name);
+
+ if (save)
+ {
+ priv->info = vinfo;
+
+ priv->format = format;
+ priv->bgr = bgr;
+
+ priv->renderer = renderer;
+ }
+
+ return TRUE;
+
+
+ no_intersection:
+ {
+    GST_WARNING_OBJECT (sink,
+                        "Incompatible caps; they don't intersect with %" GST_PTR_FORMAT,
+                        priv->caps);
+ return FALSE;
+ }
+
+ unknown_format:
+ {
+    GST_WARNING_OBJECT (sink, "Could not determine the format of the input caps");
+ return FALSE;
+ }
+
+ unhandled_format:
+ {
+ GST_ERROR_OBJECT (sink, "Provided caps aren't supported by clutter-gst");
+ return FALSE;
+ }
+
+ no_suitable_renderer:
+ {
+ GST_ERROR_OBJECT (sink, "could not find a suitable renderer");
+ return FALSE;
+ }
+}
+
+static gboolean
+clutter_gst_video_sink_set_caps (GstBaseSink *bsink,
+ GstCaps *caps)
+{
+ ClutterGstVideoSink *sink;
+ ClutterGstVideoSinkPrivate *priv;
+
+ sink = CLUTTER_GST_VIDEO_SINK (bsink);
+ priv = sink->priv;
+
+ if (!clutter_gst_video_sink_parse_caps (caps, sink, FALSE))
+ return FALSE;
+
+ g_mutex_lock (&priv->source->buffer_lock);
+ priv->source->has_new_caps = TRUE;
+ g_mutex_unlock (&priv->source->buffer_lock);
+
+ return TRUE;
+}
+
+static gboolean
+clutter_gst_source_dispatch (GSource *source,
+ GSourceFunc callback,
+ void *user_data)
+{
+  ClutterGstSource *gst_source = (ClutterGstSource *) source;
+ ClutterGstVideoSinkPrivate *priv = gst_source->sink->priv;
+ GstBuffer *buffer;
+ gboolean pipeline_ready = FALSE;
+
+ g_mutex_lock (&gst_source->buffer_lock);
+
+ if (G_UNLIKELY (gst_source->has_new_caps))
+ {
+ GstCaps *caps =
+ gst_pad_get_current_caps (GST_BASE_SINK_PAD ((GST_BASE_SINK
+ (gst_source->sink))));
+
+ if (!clutter_gst_video_sink_parse_caps (caps, gst_source->sink, TRUE))
+ goto negotiation_fail;
+
+ gst_source->has_new_caps = FALSE;
+
+ dirty_default_pipeline (gst_source->sink);
+
+ /* We are now in a state where we could generate the pipeline if
+ * the application requests it so we can emit the signal.
+ * However we'll actually generate the pipeline lazily only if
+ * the application actually asks for it. */
+ pipeline_ready = TRUE;
+ }
+
+ buffer = gst_source->buffer;
+ gst_source->buffer = NULL;
+
+ g_mutex_unlock (&gst_source->buffer_lock);
+
+ if (buffer)
+ {
+ if (!priv->renderer->upload (gst_source->sink, buffer))
+ goto fail_upload;
+
+ priv->had_upload_once = TRUE;
+
+ gst_buffer_unref (buffer);
+ }
+ else
+ GST_WARNING_OBJECT (gst_source->sink, "No buffers available for display");
+
+ if (G_UNLIKELY (pipeline_ready))
+ g_signal_emit (gst_source->sink,
+ video_sink_signals[PIPELINE_READY],
+ 0 /* detail */);
+ if (priv->had_upload_once)
+ g_signal_emit (gst_source->sink,
+ video_sink_signals[NEW_FRAME], 0,
+ NULL);
+
+ return TRUE;
+
+
+ negotiation_fail:
+ {
+ GST_WARNING_OBJECT (gst_source->sink,
+ "Failed to handle caps. Stopping GSource");
+ priv->flow_return = GST_FLOW_NOT_NEGOTIATED;
+ g_mutex_unlock (&gst_source->buffer_lock);
+
+ return FALSE;
+ }
+
+ fail_upload:
+ {
+ GST_WARNING_OBJECT (gst_source->sink, "Failed to upload buffer");
+ priv->flow_return = GST_FLOW_ERROR;
+ gst_buffer_unref (buffer);
+ return FALSE;
+ }
+}
+
+static GSourceFuncs gst_source_funcs =
+ {
+ clutter_gst_source_prepare,
+ clutter_gst_source_check,
+ clutter_gst_source_dispatch,
+ clutter_gst_source_finalize
+ };
+
+static ClutterGstSource *
+clutter_gst_source_new (ClutterGstVideoSink *sink)
+{
+ GSource *source;
+ ClutterGstSource *gst_source;
+
+ source = g_source_new (&gst_source_funcs, sizeof (ClutterGstSource));
+ gst_source = (ClutterGstSource *) source;
+
+ g_source_set_can_recurse (source, TRUE);
+ g_source_set_priority (source, CLUTTER_GST_DEFAULT_PRIORITY);
+
+ gst_source->sink = sink;
+ g_mutex_init (&gst_source->buffer_lock);
+ gst_source->buffer = NULL;
+
+ return gst_source;
+}
+
+static void
+clutter_gst_video_sink_init (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv;
+
+ sink->priv = priv = G_TYPE_INSTANCE_GET_PRIVATE (sink,
+ CLUTTER_GST_TYPE_VIDEO_SINK,
+ ClutterGstVideoSinkPrivate);
+ priv->custom_start = 0;
+ priv->default_sample = TRUE;
+
+ priv->brightness = DEFAULT_BRIGHTNESS;
+ priv->contrast = DEFAULT_CONTRAST;
+ priv->hue = DEFAULT_HUE;
+ priv->saturation = DEFAULT_SATURATION;
+
+ priv->tabley = g_new0 (guint8, 256);
+ priv->tableu = g_new0 (guint8, 256 * 256);
+ priv->tablev = g_new0 (guint8, 256 * 256);
+
+ priv->ctx = clutter_gst_get_cogl_context ();
+ priv->renderers = clutter_gst_build_renderers_list (priv->ctx);
+ priv->caps = clutter_gst_build_caps (priv->renderers);
+}
+
+static GstFlowReturn
+_clutter_gst_video_sink_render (GstBaseSink *bsink,
+ GstBuffer *buffer)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (bsink);
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+ ClutterGstSource *gst_source = priv->source;
+
+ g_mutex_lock (&gst_source->buffer_lock);
+
+ if (G_UNLIKELY (priv->flow_return != GST_FLOW_OK))
+ goto dispatch_flow_ret;
+
+ if (gst_source->buffer)
+ gst_buffer_unref (gst_source->buffer);
+
+ gst_source->buffer = gst_buffer_ref (buffer);
+ g_mutex_unlock (&gst_source->buffer_lock);
+
+ g_main_context_wakeup (NULL);
+
+ return GST_FLOW_OK;
+
+ dispatch_flow_ret:
+ {
+ g_mutex_unlock (&gst_source->buffer_lock);
+ return priv->flow_return;
+ }
+}
+
+static void
+clutter_gst_video_sink_dispose (GObject *object)
+{
+ ClutterGstVideoSink *self;
+ ClutterGstVideoSinkPrivate *priv;
+
+ self = CLUTTER_GST_VIDEO_SINK (object);
+ priv = self->priv;
+
+ clear_frame_textures (self);
+
+  if (priv->renderer)
+    {
+      priv->renderer->shutdown (self);
+      priv->renderer = NULL;
+    }
+
+ if (priv->pipeline)
+ {
+ cogl_object_unref (priv->pipeline);
+ priv->pipeline = NULL;
+ }
+
+ if (priv->clt_frame)
+ {
+ g_boxed_free (CLUTTER_GST_TYPE_FRAME, priv->clt_frame);
+ priv->clt_frame = NULL;
+ }
+
+ if (priv->caps)
+ {
+ gst_caps_unref (priv->caps);
+ priv->caps = NULL;
+ }
+
+ if (priv->tabley)
+ {
+ g_free (priv->tabley);
+ priv->tabley = NULL;
+ }
+
+ if (priv->tableu)
+ {
+ g_free (priv->tableu);
+ priv->tableu = NULL;
+ }
+
+ if (priv->tablev)
+ {
+ g_free (priv->tablev);
+ priv->tablev = NULL;
+ }
+
+ G_OBJECT_CLASS (clutter_gst_video_sink_parent_class)->dispose (object);
+}
+
+static void
+clutter_gst_video_sink_finalize (GObject *object)
+{
+ G_OBJECT_CLASS (clutter_gst_video_sink_parent_class)->finalize (object);
+}
+
+static gboolean
+clutter_gst_video_sink_start (GstBaseSink *base_sink)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (base_sink);
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ priv->source = clutter_gst_source_new (sink);
+ g_source_attach ((GSource *) priv->source, NULL);
+ priv->flow_return = GST_FLOW_OK;
+ return TRUE;
+}
+
+static void
+clutter_gst_video_sink_set_property (GObject *object,
+ unsigned int prop_id,
+ const GValue *value,
+ GParamSpec *pspec)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (object);
+
+ switch (prop_id)
+ {
+ case PROP_UPDATE_PRIORITY:
+ clutter_gst_video_sink_set_priority (sink, g_value_get_int (value));
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+clutter_gst_video_sink_get_property (GObject *object,
+ unsigned int prop_id,
+ GValue *value,
+ GParamSpec *pspec)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (object);
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ switch (prop_id)
+ {
+ case PROP_UPDATE_PRIORITY:
+ g_value_set_int (value, g_source_get_priority ((GSource *) priv->source));
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static gboolean
+clutter_gst_video_sink_stop (GstBaseSink *base_sink)
+{
+ ClutterGstVideoSink *sink = CLUTTER_GST_VIDEO_SINK (base_sink);
+ ClutterGstVideoSinkPrivate *priv = sink->priv;
+
+ if (priv->source)
+ {
+ GSource *source = (GSource *) priv->source;
+ g_source_destroy (source);
+ g_source_unref (source);
+ priv->source = NULL;
+ }
+
+ return TRUE;
+}
+
+static gboolean
+clutter_gst_video_sink_propose_allocation (GstBaseSink *base_sink, GstQuery *query)
+{
+ gboolean need_pool = FALSE;
+ GstCaps *caps = NULL;
+
+ gst_query_parse_allocation (query, &caps, &need_pool);
+
+ gst_query_add_allocation_meta (query,
+ GST_VIDEO_META_API_TYPE, NULL);
+
+ return TRUE;
+}
+
+static void
+clutter_gst_video_sink_class_init (ClutterGstVideoSinkClass *klass)
+{
+ GObjectClass *go_class = G_OBJECT_CLASS (klass);
+ GstBaseSinkClass *gb_class = GST_BASE_SINK_CLASS (klass);
+ GstElementClass *ge_class = GST_ELEMENT_CLASS (klass);
+ GstPadTemplate *pad_template;
+ GParamSpec *pspec;
+
+ g_type_class_add_private (klass, sizeof (ClutterGstVideoSinkPrivate));
+ go_class->set_property = clutter_gst_video_sink_set_property;
+ go_class->get_property = clutter_gst_video_sink_get_property;
+ go_class->dispose = clutter_gst_video_sink_dispose;
+ go_class->finalize = clutter_gst_video_sink_finalize;
+
+ pad_template = gst_static_pad_template_get (&sinktemplate_all);
+ gst_element_class_add_pad_template (ge_class, pad_template);
+
+ gst_element_class_set_metadata (ge_class,
+ "Clutter video sink", "Sink/Video",
+ "Sends video data from GStreamer to a "
+ "Cogl pipeline",
+ "Jonathan Matthew <jonathan@kaolin.wh9.net>, "
+                                  "Matthew Allum <mallum@o-hand.com>, "
+ "Chris Lord <chris@o-hand.com>, "
+ "Plamena Manolova "
+ "<plamena.n.manolova@intel.com>");
+
+ gb_class->render = _clutter_gst_video_sink_render;
+ gb_class->preroll = _clutter_gst_video_sink_render;
+ gb_class->start = clutter_gst_video_sink_start;
+ gb_class->stop = clutter_gst_video_sink_stop;
+ gb_class->set_caps = clutter_gst_video_sink_set_caps;
+ gb_class->get_caps = clutter_gst_video_sink_get_caps;
+ gb_class->propose_allocation = clutter_gst_video_sink_propose_allocation;
+
+ pspec = g_param_spec_int ("update-priority",
+ "Update Priority",
+                            "Priority of video updates in the main loop",
+ -G_MAXINT, G_MAXINT,
+ CLUTTER_GST_DEFAULT_PRIORITY,
+ CLUTTER_GST_PARAM_READWRITE);
+
+ g_object_class_install_property (go_class, PROP_UPDATE_PRIORITY, pspec);
+
+ /**
+ * ClutterGstVideoSink::pipeline-ready:
+ * @sink: the #ClutterGstVideoSink
+ *
+ * The sink will emit this signal as soon as it has gathered enough
+ * information from the video to configure a pipeline. If the
+   * application wants to do some customized rendering, it can set up its
+   * pipeline after this signal is emitted. The application's pipeline
+   * will typically either be a copy of the one returned by
+   * clutter_gst_video_sink_get_pipeline() or a completely custom
+   * pipeline set up using clutter_gst_video_sink_setup_pipeline().
+ *
+ * Note that it is an error to call either of those functions before
+ * this signal is emitted. The #ClutterGstVideoSink::new-frame signal
+ * will only be emitted after the pipeline is ready so the application
+ * could also create its pipeline in the handler for that.
+ *
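+   * A minimal sketch of a handler and its connection; the callback
+   * name and the custom_pipeline variable are illustrative application
+   * code, not part of this API:
+   *
+   * |[
+   * static void
+   * on_pipeline_ready (ClutterGstVideoSink *sink,
+   *                    gpointer             user_data)
+   * {
+   *   CoglPipeline **custom = user_data;
+   *
+   *   *custom =
+   *     cogl_pipeline_copy (clutter_gst_video_sink_get_pipeline (sink));
+   * }
+   *
+   * g_signal_connect (sink, "pipeline-ready",
+   *                   G_CALLBACK (on_pipeline_ready), &custom_pipeline);
+   * ]|
+   *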
+ * Since: 3.0
+ */
+ video_sink_signals[PIPELINE_READY] =
+ g_signal_new ("pipeline-ready",
+ CLUTTER_GST_TYPE_VIDEO_SINK,
+ G_SIGNAL_RUN_LAST,
+ G_STRUCT_OFFSET (ClutterGstVideoSinkClass, pipeline_ready),
+ NULL, /* accumulator */
+ NULL, /* accu_data */
+ g_cclosure_marshal_VOID__VOID,
+ G_TYPE_NONE,
+ 0 /* n_params */);
+
+ /**
+ * ClutterGstVideoSink::new-frame:
+ * @sink: the #ClutterGstVideoSink
+ *
+ * The sink will emit this signal whenever there are new textures
+ * available for a new frame of the video. After this signal is
+ * emitted, an application can call clutter_gst_video_sink_get_pipeline()
+ * to get a pipeline suitable for rendering the frame. If the
+ * application is using a custom pipeline it can alternatively call
+ * clutter_gst_video_sink_attach_frame() to attach the textures.
+ *
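+   * A typical handler simply schedules a repaint of whatever actor
+   * paints the video; the actor passed as user data is illustrative:
+   *
+   * |[
+   * static void
+   * on_new_frame (ClutterGstVideoSink *sink,
+   *               gpointer             user_data)
+   * {
+   *   ClutterActor *video_actor = user_data;
+   *
+   *   clutter_actor_queue_redraw (video_actor);
+   * }
+   * ]|
+   *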
+ * Since: 3.0
+ */
+ video_sink_signals[NEW_FRAME] =
+ g_signal_new ("new-frame",
+ CLUTTER_GST_TYPE_VIDEO_SINK,
+ G_SIGNAL_RUN_LAST,
+ G_STRUCT_OFFSET (ClutterGstVideoSinkClass, new_frame),
+ NULL, /* accumulator */
+ NULL, /* accu_data */
+ g_cclosure_marshal_VOID__VOID,
+ G_TYPE_NONE,
+ 0 /* n_params */);
+}
+
+/**
+ * clutter_gst_video_sink_new:
+ *
+ * Creates a new #ClutterGstVideoSink
+ *
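+ * The sink behaves like any other #GstElement, so it can, for
+ * example, be set as the video sink of a playbin element (a sketch,
+ * error handling omitted):
+ *
+ * |[
+ * GstElement *playbin = gst_element_factory_make ("playbin", NULL);
+ * ClutterGstVideoSink *sink = clutter_gst_video_sink_new ();
+ *
+ * g_object_set (playbin, "video-sink", GST_ELEMENT (sink), NULL);
+ * ]|
+ *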
+ * Return value: (transfer full): a new #ClutterGstVideoSink
+ * Since: 3.0
+ */
+ClutterGstVideoSink *
+clutter_gst_video_sink_new (void)
+{
+ ClutterGstVideoSink *sink = g_object_new (CLUTTER_GST_TYPE_VIDEO_SINK, NULL);
+
+ return sink;
+}
+
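+/* Retrieves the pixel aspect ratio (numerator/denominator) of the
+ * currently negotiated video; either return location may be NULL. */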
+void
+clutter_gst_video_sink_get_aspect (ClutterGstVideoSink *sink, gint *par_n, gint *par_d)
+{
+ GstVideoInfo *info;
+
+ g_return_if_fail (CLUTTER_GST_IS_VIDEO_SINK (sink));
+
+ info = &sink->priv->info;
+
+ if (par_n)
+ *par_n = info->par_n;
+ if (par_d)
+ *par_d = info->par_d;
+}
+
+/**
+ * clutter_gst_video_sink_is_ready:
+ * @sink: The #ClutterGstVideoSink
+ *
+ * Returns whether the pipeline is ready, i.e. whether
+ * clutter_gst_video_sink_get_pipeline() and
+ * clutter_gst_video_sink_setup_pipeline() can be called without
+ * causing an error.
+ *
+ * Note: Normally an application will wait for the
+ * #ClutterGstVideoSink::pipeline-ready signal instead of polling the
+ * ready status with this API, but this can be useful when a sink is
+ * passed between components that didn't have an opportunity to
+ * connect a signal handler.
+ *
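+ * A minimal guard before using the pipeline:
+ *
+ * |[
+ * if (clutter_gst_video_sink_is_ready (sink))
+ *   pipeline = clutter_gst_video_sink_get_pipeline (sink);
+ * ]|
+ *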
+ * Return value: %TRUE if the sink is ready, else %FALSE
+ * Since: 3.0
+ */
+gboolean
+clutter_gst_video_sink_is_ready (ClutterGstVideoSink *sink)
+{
+ g_return_val_if_fail (CLUTTER_GST_IS_VIDEO_SINK (sink), FALSE);
+
+ return !!sink->priv->renderer;
+}
+
+/**
+ * clutter_gst_video_sink_get_frame:
+ * @sink: The #ClutterGstVideoSink
+ *
+ * Returns a #ClutterGstFrame object suitable for rendering the current
+ * frame of the given video sink. An application is free to make a copy
+ * of the frame's pipeline and modify it for custom rendering.
+ *
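+ * For instance, in a paint callback (the framebuffer and geometry
+ * variables are illustrative):
+ *
+ * |[
+ * ClutterGstFrame *frame = clutter_gst_video_sink_get_frame (sink);
+ *
+ * if (frame != NULL)
+ *   cogl_framebuffer_draw_rectangle (framebuffer, frame->pipeline,
+ *                                    0, 0, width, height);
+ * ]|
+ *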
+ * Return value: (transfer none): a #ClutterGstFrame or %NULL if there
+ * isn't a frame to be displayed yet.
+ *
+ * Since: 3.0
+ */
+
+ClutterGstFrame *
+clutter_gst_video_sink_get_frame (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv;
+ CoglPipeline *pipeline;
+
+ g_return_val_if_fail (CLUTTER_GST_IS_VIDEO_SINK (sink), NULL);
+
+ priv = sink->priv;
+
+ pipeline = clutter_gst_video_sink_get_pipeline (sink);
+
+ if (pipeline == NULL)
+ return NULL;
+
+ if (priv->clt_frame != NULL && priv->clt_frame->pipeline != pipeline)
+ {
+ g_boxed_free (CLUTTER_GST_TYPE_FRAME, priv->clt_frame);
+ priv->clt_frame = NULL;
+ }
+
+ if (priv->clt_frame == NULL)
+ {
+ priv->clt_frame = clutter_gst_frame_new ();
+ priv->clt_frame->pipeline = cogl_object_ref (pipeline);
+ clutter_gst_video_resolution_from_video_info (&priv->clt_frame->resolution,
+ &priv->info);
+ }
+
+ return priv->clt_frame;
+}
+
+/**
+ * clutter_gst_video_sink_get_pipeline:
+ * @sink: The #ClutterGstVideoSink
+ *
+ * Returns a pipeline suitable for rendering the current frame of the
+ * given video sink. The pipeline will already have the textures for
+ * the frame attached. For simple rendering, an application will
+ * typically call this function immediately before it paints the
+ * video. It can then just paint a rectangle using the returned
+ * pipeline.
+ *
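+ * For example, in a paint callback (the framebuffer and geometry
+ * variables are illustrative):
+ *
+ * |[
+ * CoglPipeline *pipeline = clutter_gst_video_sink_get_pipeline (sink);
+ *
+ * if (pipeline != NULL)
+ *   cogl_framebuffer_draw_rectangle (framebuffer, pipeline,
+ *                                    0, 0, width, height);
+ * ]|
+ *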
+ * An application is free to make a copy of this
+ * pipeline and modify it for custom rendering.
+ *
+ * Note: it is considered an error to call this function before the
+ * #ClutterGstVideoSink::pipeline-ready signal is emitted.
+ *
+ * Return value: (transfer none): the pipeline for rendering the
+ * current frame
+ * Since: 3.0
+ */
+CoglPipeline *
+clutter_gst_video_sink_get_pipeline (ClutterGstVideoSink *sink)
+{
+ ClutterGstVideoSinkPrivate *priv;
+
+ g_return_val_if_fail (CLUTTER_GST_IS_VIDEO_SINK (sink), NULL);
+
+ if (!clutter_gst_video_sink_is_ready (sink))
+ return NULL;
+
+ priv = sink->priv;
+
+ if (priv->pipeline == NULL)
+ {
+ priv->pipeline = cogl_pipeline_new (priv->ctx);
+ clutter_gst_video_sink_setup_pipeline (sink, priv->pipeline);
+ clutter_gst_video_sink_attach_frame (sink, priv->pipeline);
+ priv->balance_dirty = FALSE;
+ }
+ else if (priv->balance_dirty)
+ {
+ cogl_object_unref (priv->pipeline);
+ priv->pipeline = cogl_pipeline_new (priv->ctx);
+
+ clutter_gst_video_sink_setup_pipeline (sink, priv->pipeline);
+ clutter_gst_video_sink_attach_frame (sink, priv->pipeline);
+ priv->balance_dirty = FALSE;
+ }
+ else if (priv->frame_dirty)
+ {
+ CoglPipeline *pipeline = cogl_pipeline_copy (priv->pipeline);
+ cogl_object_unref (priv->pipeline);
+ priv->pipeline = pipeline;
+
+ clutter_gst_video_sink_attach_frame (sink, pipeline);
+ }
+
+ priv->frame_dirty = FALSE;
+
+ return priv->pipeline;
+}
+
+/**
+ * clutter_gst_video_sink_setup_pipeline:
+ * @sink: The #ClutterGstVideoSink
+ * @pipeline: A #CoglPipeline
+ *
+ * Configures the given pipeline so that it will be able to render the
+ * video for the @sink. This should only be used if the application
+ * wants to perform some custom rendering using its own pipeline.
+ * Typically an application will call this in response to the
+ * #ClutterGstVideoSink::pipeline-ready signal.
+ *
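+ * A sketch of a custom setup, typically done from a
+ * #ClutterGstVideoSink::pipeline-ready handler (the Cogl context
+ * variable is illustrative):
+ *
+ * |[
+ * CoglPipeline *custom = cogl_pipeline_new (ctx);
+ *
+ * clutter_gst_video_sink_setup_pipeline (sink, custom);
+ * ]|
+ *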
+ * Note: it is considered an error to call this function before the
+ * #ClutterGstVideoSink::pipeline-ready signal is emitted.
+ *
+ * Since: 3.0
+ */
+void
+clutter_gst_video_sink_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline)
+{
+ ClutterGstVideoSinkPrivate *priv;
+
+ g_return_if_fail (CLUTTER_GST_IS_VIDEO_SINK (sink));
+ g_return_if_fail (pipeline != NULL);
+
+ priv = sink->priv;
+
+  if (priv->renderer)
+    {
+      /* Operate on the caller-supplied pipeline, not the sink's
+       * default one, so custom pipelines get configured too */
+      clutter_gst_video_sink_setup_conversions (sink, pipeline);
+      clutter_gst_video_sink_setup_balance (sink, pipeline);
+      priv->renderer->setup_pipeline (sink, pipeline);
+    }
+}
diff --git a/clutter-gst/clutter-gst-video-sink.h b/clutter-gst/clutter-gst-video-sink.h
new file mode 100644
index 0000000..505a6cd
--- /dev/null
+++ b/clutter-gst/clutter-gst-video-sink.h
@@ -0,0 +1,121 @@
+/*
+ * Clutter-GStreamer.
+ *
+ * GStreamer integration library for Clutter.
+ *
+ * clutter-gst-video-sink.h - GStreamer video sink that renders to a
+ * Cogl pipeline.
+ *
+ * Authored by Jonathan Matthew <jonathan@kaolin.wh9.net>,
+ * Chris Lord <chris@openedhand.com>
+ * Damien Lespiau <damien.lespiau@intel.com>
+ * Matthew Allum <mallum@openedhand.com>
+ * Plamena Manolova <plamena.n.manolova@intel.com>
+ * Lionel Landwerlin <lionel.g.landwerlin@linux.intel.com>
+ *
+ * Copyright (C) 2007, 2008 OpenedHand
+ * Copyright (C) 2009, 2010, 2013, 2014 Intel Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __CLUTTER_GST_VIDEO_SINK_H__
+#define __CLUTTER_GST_VIDEO_SINK_H__
+
+#include <cogl/cogl.h>
+#include <gst/base/gstbasesink.h>
+#include <clutter-gst/clutter-gst-types.h>
+
+G_BEGIN_DECLS
+
+#define CLUTTER_GST_TYPE_VIDEO_SINK clutter_gst_video_sink_get_type()
+
+#define CLUTTER_GST_VIDEO_SINK(obj) \
+ (G_TYPE_CHECK_INSTANCE_CAST ((obj), \
+ CLUTTER_GST_TYPE_VIDEO_SINK, ClutterGstVideoSink))
+
+#define CLUTTER_GST_VIDEO_SINK_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_CAST ((klass), \
+ CLUTTER_GST_TYPE_VIDEO_SINK, ClutterGstVideoSinkClass))
+
+#define CLUTTER_GST_IS_VIDEO_SINK(obj) \
+ (G_TYPE_CHECK_INSTANCE_TYPE ((obj), \
+ CLUTTER_GST_TYPE_VIDEO_SINK))
+
+#define CLUTTER_GST_IS_VIDEO_SINK_CLASS(klass) \
+ (G_TYPE_CHECK_CLASS_TYPE ((klass), \
+ CLUTTER_GST_TYPE_VIDEO_SINK))
+
+#define CLUTTER_GST_VIDEO_SINK_GET_CLASS(obj) \
+ (G_TYPE_INSTANCE_GET_CLASS ((obj), \
+ CLUTTER_GST_TYPE_VIDEO_SINK, ClutterGstVideoSinkClass))
+
+typedef struct _ClutterGstVideoSink ClutterGstVideoSink;
+typedef struct _ClutterGstVideoSinkClass ClutterGstVideoSinkClass;
+typedef struct _ClutterGstVideoSinkPrivate ClutterGstVideoSinkPrivate;
+
+/**
+ * ClutterGstVideoSink:
+ *
+ * The #ClutterGstVideoSink structure contains only private data and
+ * should be accessed using the provided API.
+ *
+ * Since: 3.0
+ */
+struct _ClutterGstVideoSink
+{
+ /*< private >*/
+ GstBaseSink parent;
+ ClutterGstVideoSinkPrivate *priv;
+};
+
+/**
+ * ClutterGstVideoSinkClass:
+ * @new_frame: handler for the #ClutterGstVideoSink::new-frame signal
+ * @pipeline_ready: handler for the #ClutterGstVideoSink::pipeline-ready signal
+ *
+ * Since: 3.0
+ */
+struct _ClutterGstVideoSinkClass
+{
+ /*< private >*/
+ GstBaseSinkClass parent_class;
+
+ /*< public >*/
+ void (* new_frame) (ClutterGstVideoSink *sink);
+ void (* pipeline_ready) (ClutterGstVideoSink *sink);
+
+ /*< private >*/
+ void *_padding_dummy[8];
+};
+
+
+GType clutter_gst_video_sink_get_type (void) G_GNUC_CONST;
+
+ClutterGstVideoSink * clutter_gst_video_sink_new (void);
+
+gboolean clutter_gst_video_sink_is_ready (ClutterGstVideoSink *sink);
+
+ClutterGstFrame * clutter_gst_video_sink_get_frame (ClutterGstVideoSink *sink);
+
+CoglPipeline * clutter_gst_video_sink_get_pipeline (ClutterGstVideoSink *sink);
+
+void clutter_gst_video_sink_setup_pipeline (ClutterGstVideoSink *sink,
+ CoglPipeline *pipeline);
+
+G_END_DECLS
+
+#endif
diff --git a/clutter-gst/clutter-gst.h b/clutter-gst/clutter-gst.h
index 70b4ccc..509322f 100644
--- a/clutter-gst/clutter-gst.h
+++ b/clutter-gst/clutter-gst.h
@@ -30,17 +30,18 @@
#define __CLUTTER_GST_H_INSIDE__
-#include "clutter-gst-types.h"
-#include "clutter-gst-enum-types.h"
-#include "clutter-gst-aspectratio.h"
-#include "clutter-gst-camera-device.h"
-#include "clutter-gst-camera-manager.h"
-#include "clutter-gst-camera.h"
-#include "clutter-gst-content.h"
-#include "clutter-gst-crop.h"
-#include "clutter-gst-playback.h"
-#include "clutter-gst-player.h"
-#include "clutter-gst-util.h"
-#include "clutter-gst-version.h"
+#include <clutter-gst/clutter-gst-types.h>
+#include <clutter-gst/clutter-gst-enum-types.h>
+#include <clutter-gst/clutter-gst-aspectratio.h>
+#include <clutter-gst/clutter-gst-camera-device.h>
+#include <clutter-gst/clutter-gst-camera-manager.h>
+#include <clutter-gst/clutter-gst-camera.h>
+#include <clutter-gst/clutter-gst-content.h>
+#include <clutter-gst/clutter-gst-crop.h>
+#include <clutter-gst/clutter-gst-playback.h>
+#include <clutter-gst/clutter-gst-player.h>
+#include <clutter-gst/clutter-gst-util.h>
+#include <clutter-gst/clutter-gst-version.h>
+#include <clutter-gst/clutter-gst-video-sink.h>
#endif /* __CLUTTER_GST_H__ */
diff --git a/configure.ac b/configure.ac
index 8da4fc1..7766cef 100644
--- a/configure.ac
+++ b/configure.ac
@@ -143,7 +143,6 @@ dnl ========================================================================
PKG_CHECK_MODULES(CLUTTER_GST,
[clutter-1.0 >= $CLUTTER_REQ_VERSION
cogl-2.0-experimental >= $COGL_REQ_VERSION
- cogl-gst-2.0-experimental >= $COGL_GST_REQ_VERSION
gio-2.0 >= $GLIB_REQ_VERSION])
dnl ========================================================================
diff --git a/doc/reference/Makefile.am b/doc/reference/Makefile.am
index c855b32..55b27a7 100644
--- a/doc/reference/Makefile.am
+++ b/doc/reference/Makefile.am
@@ -43,7 +43,6 @@ FIXXREF_OPTIONS= \
--extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/glib \
--extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/gobject \
--extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/cogl \
- --extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/cogl-gst \
--extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/gdk-pixbuf \
--extra-dir=$(GLIB_PREFIX)/share/gtk-doc/html/gstreamer-1.0 \
$(NULL)
diff --git a/examples/video-content.c b/examples/video-content.c
index c7368cc..aaf5740 100644
--- a/examples/video-content.c
+++ b/examples/video-content.c
@@ -22,7 +22,6 @@
*/
#include <stdlib.h>
-#include <cogl-gst/cogl-gst.h>
#include <clutter-gst/clutter-gst.h>
@@ -136,7 +135,7 @@ main (int argc, char *argv[])
{
ClutterActor *stage, *actor;
ClutterContent *video;
- CoglGstVideoSink *video_sink;
+ ClutterGstVideoSink *video_sink;
if (clutter_init (&argc, &argv) != CLUTTER_INIT_SUCCESS)
return EXIT_FAILURE;
diff --git a/tests/test-alpha.c b/tests/test-alpha.c
index f15e0e8..a73c7c8 100644
--- a/tests/test-alpha.c
+++ b/tests/test-alpha.c
@@ -32,7 +32,6 @@
#include <glib/gprintf.h>
#include <clutter-gst/clutter-gst.h>
-#include <cogl-gst/cogl-gst.h>
static gint opt_framerate = 30;
static gchar *opt_fourcc = "I420";