author     Stefan Sauer <ensonic@users.sf.net>    2012-07-08 19:19:38 +0200
committer  Stefan Sauer <ensonic@users.sf.net>    2012-07-09 19:59:53 +0200
commit     5e6abc449b16ee3b944251728c477347ea34351f (patch)
tree       f87e722af3c09f07595398b313150b6f52708335 /ext/libvisual
parent     85565952eef342c35043f83e8e738cf1b4b98779 (diff)
download   gstreamer-plugins-base-5e6abc449b16ee3b944251728c477347ea34351f.tar.gz
visual: port to baseaudiovisualizer
Add a copy of the base class until it is stable. Right now the extra effects of the base class are not supported, as the subclass overwrites the buffer instead of blending into it.
Diffstat (limited to 'ext/libvisual')
-rw-r--r--  ext/libvisual/Makefile.am               |    8
-rw-r--r--  ext/libvisual/gstbaseaudiovisualizer.c  | 1134
-rw-r--r--  ext/libvisual/gstbaseaudiovisualizer.h  |  125
-rw-r--r--  ext/libvisual/visual.c                  |  834
-rw-r--r--  ext/libvisual/visual.h                  |   34
5 files changed, 1400 insertions(+), 735 deletions(-)
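
For orientation, a minimal, hypothetical subclass of the new base class might look like the sketch below. It is not part of this commit; the GstMyScope / gst_my_scope_* names are invented for illustration, while GstBaseAudioVisualizer, the setup()/render() vmethods, and the struct fields come from the gstbaseaudiovisualizer.h/.c files added by this patch (visual.c is ported to the same pattern further down).

/* Hypothetical subclass sketch -- NOT part of this commit. */
#include <string.h>
#include "gstbaseaudiovisualizer.h"

typedef struct
{
  GstBaseAudioVisualizer parent;
} GstMyScope;

typedef struct
{
  GstBaseAudioVisualizerClass parent_class;
} GstMyScopeClass;

G_DEFINE_TYPE (GstMyScope, gst_my_scope, GST_TYPE_BASE_AUDIO_VISUALIZER);

static GstStaticPadTemplate src_template = GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC, GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE ("xRGB")));

static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK, GST_PAD_ALWAYS,
    GST_STATIC_CAPS ("audio/x-raw, "
        "format = (string) " GST_AUDIO_NE (S16) ", "
        "layout = (string) interleaved, "
        "channels = (int) 2, " "channel-mask = (bitmask) 0x3"));

/* called by the base class whenever the video format was (re)negotiated */
static gboolean
gst_my_scope_setup (GstBaseAudioVisualizer * scope)
{
  GST_DEBUG_OBJECT (scope, "video %dx%d @ %d/%d fps", scope->width,
      scope->height, scope->fps_n, scope->fps_d);
  /* a subclass may raise scope->req_spf here if it needs more samples per
   * frame than one frame duration provides */
  return TRUE;
}

/* called once per output frame, with the audio samples for this frame and a
 * writable xRGB video buffer */
static gboolean
gst_my_scope_render (GstBaseAudioVisualizer * scope, GstBuffer * audio,
    GstBuffer * video)
{
  GstMapInfo amap, vmap;

  gst_buffer_map (audio, &amap, GST_MAP_READ);
  gst_buffer_map (video, &vmap, GST_MAP_WRITE);
  /* draw into vmap.data (scope->bpf bytes); note that clearing the whole
   * frame, as done here and by the ported visual.c, is what currently
   * defeats the shader effects mentioned in the commit message */
  memset (vmap.data, 0, scope->bpf);
  gst_buffer_unmap (video, &vmap);
  gst_buffer_unmap (audio, &amap);
  return TRUE;
}

static void
gst_my_scope_class_init (GstMyScopeClass * klass)
{
  GstElementClass *element_class = (GstElementClass *) klass;
  GstBaseAudioVisualizerClass *scope_class =
      (GstBaseAudioVisualizerClass *) klass;

  /* the base class fetches these pad templates in its instance init */
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&src_template));
  gst_element_class_add_pad_template (element_class,
      gst_static_pad_template_get (&sink_template));
  gst_element_class_set_static_metadata (element_class, "My scope",
      "Visualization", "Illustrative audio visualizer", "nobody@example.com");

  scope_class->setup = GST_DEBUG_FUNCPTR (gst_my_scope_setup);
  scope_class->render = GST_DEBUG_FUNCPTR (gst_my_scope_render);
}

static void
gst_my_scope_init (GstMyScope * scope)
{
  /* nothing to do; pads and state are handled by the base class */
}
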
diff --git a/ext/libvisual/Makefile.am b/ext/libvisual/Makefile.am
index 799e50bb1..60b440558 100644
--- a/ext/libvisual/Makefile.am
+++ b/ext/libvisual/Makefile.am
@@ -1,12 +1,12 @@
plugin_LTLIBRARIES = libgstlibvisual.la
-libgstlibvisual_la_SOURCES = plugin.c visual.c
+libgstlibvisual_la_SOURCES = plugin.c visual.c gstbaseaudiovisualizer.c
libgstlibvisual_la_CFLAGS = $(GST_PLUGINS_BASE_CFLAGS) $(GST_BASE_CFLAGS) $(GST_CFLAGS) $(LIBVISUAL_CFLAGS)
libgstlibvisual_la_LIBADD = \
- $(top_builddir)/gst-libs/gst/audio/libgstaudio-$(GST_API_VERSION).la \
- $(top_builddir)/gst-libs/gst/video/libgstvideo-$(GST_API_VERSION).la \
+ $(top_builddir)/gst-libs/gst/audio/libgstaudio-$(GST_API_VERSION).la \
+ $(top_builddir)/gst-libs/gst/video/libgstvideo-$(GST_API_VERSION).la \
$(GST_BASE_LIBS) $(LIBVISUAL_LIBS)
libgstlibvisual_la_LDFLAGS = $(GST_PLUGIN_LDFLAGS)
libgstlibvisual_la_LIBTOOLFLAGS = --tag=disable-static
-noinst_HEADERS = visual.h
+noinst_HEADERS = visual.h gstbaseaudiovisualizer.h
diff --git a/ext/libvisual/gstbaseaudiovisualizer.c b/ext/libvisual/gstbaseaudiovisualizer.c
new file mode 100644
index 000000000..4ff53d8f3
--- /dev/null
+++ b/ext/libvisual/gstbaseaudiovisualizer.c
@@ -0,0 +1,1134 @@
+/* GStreamer
+ * Copyright (C) <2011> Stefan Kost <ensonic@users.sf.net>
+ *
+ * gstbaseaudiovisualizer.c: base class for audio visualisation elements
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/**
+ * SECTION:gstbaseaudiovisualizer
+ *
+ * A base class for scopes (visualizers). It takes care of re-fitting the
+ * audio-rate to video-rate and handles renegotiation (downstream video size
+ * changes).
+ *
+ * It also provides several background shading effects. These effects are
+ * applied to a previous picture before the render() implementation can draw a
+ * new frame.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+/* FIXME 0.11: suppress warnings for deprecated API such as GStaticRecMutex
+ * with newer GLib versions (>= 2.31.0) */
+#define GLIB_DISABLE_DEPRECATION_WARNINGS
+
+#include <string.h>
+
+#include "gstbaseaudiovisualizer.h"
+
+GST_DEBUG_CATEGORY_STATIC (base_audio_visualizer_debug);
+#define GST_CAT_DEFAULT (base_audio_visualizer_debug)
+
+#define DEFAULT_SHADER GST_BASE_AUDIO_VISUALIZER_SHADER_FADE
+#define DEFAULT_SHADE_AMOUNT 0x000a0a0a
+
+enum
+{
+ PROP_0,
+ PROP_SHADER,
+ PROP_SHADE_AMOUNT
+};
+
+static GstElementClass *parent_class = NULL;
+
+static void gst_base_audio_visualizer_class_init (GstBaseAudioVisualizerClass *
+ klass);
+static void gst_base_audio_visualizer_init (GstBaseAudioVisualizer * scope,
+ GstBaseAudioVisualizerClass * g_class);
+static void gst_base_audio_visualizer_set_property (GObject * object,
+ guint prop_id, const GValue * value, GParamSpec * pspec);
+static void gst_base_audio_visualizer_get_property (GObject * object,
+ guint prop_id, GValue * value, GParamSpec * pspec);
+static void gst_base_audio_visualizer_dispose (GObject * object);
+
+static gboolean gst_base_audio_visualizer_src_negotiate (GstBaseAudioVisualizer
+ * scope);
+static gboolean gst_base_audio_visualizer_src_setcaps (GstBaseAudioVisualizer *
+ scope, GstCaps * caps);
+static gboolean gst_base_audio_visualizer_sink_setcaps (GstBaseAudioVisualizer *
+ scope, GstCaps * caps);
+
+static GstFlowReturn gst_base_audio_visualizer_chain (GstPad * pad,
+ GstObject * parent, GstBuffer * buffer);
+
+static gboolean gst_base_audio_visualizer_src_event (GstPad * pad,
+ GstObject * parent, GstEvent * event);
+static gboolean gst_base_audio_visualizer_sink_event (GstPad * pad,
+ GstObject * parent, GstEvent * event);
+
+static gboolean gst_base_audio_visualizer_src_query (GstPad * pad,
+ GstObject * parent, GstQuery * query);
+static gboolean gst_base_audio_visualizer_sink_query (GstPad * pad,
+ GstObject * parent, GstQuery * query);
+
+static GstStateChangeReturn gst_base_audio_visualizer_change_state (GstElement *
+ element, GstStateChange transition);
+
+/* shading functions */
+
+#define GST_TYPE_BASE_AUDIO_VISUALIZER_SHADER (gst_base_audio_visualizer_shader_get_type())
+static GType
+gst_base_audio_visualizer_shader_get_type (void)
+{
+ static GType shader_type = 0;
+ static const GEnumValue shaders[] = {
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_NONE, "None", "none"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE, "Fade", "fade"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_UP, "Fade and move up",
+ "fade-and-move-up"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_DOWN, "Fade and move down",
+ "fade-and-move-down"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_LEFT, "Fade and move left",
+ "fade-and-move-left"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_RIGHT,
+ "Fade and move right",
+ "fade-and-move-right"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_OUT,
+ "Fade and move horizontally out", "fade-and-move-horiz-out"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_IN,
+ "Fade and move horizontally in", "fade-and-move-horiz-in"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_OUT,
+ "Fade and move vertically out", "fade-and-move-vert-out"},
+ {GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_IN,
+ "Fade and move vertically in", "fade-and-move-vert-in"},
+ {0, NULL, NULL},
+ };
+
+ if (G_UNLIKELY (shader_type == 0)) {
+ shader_type =
+ g_enum_register_static ("GstBaseAudioVisualizerShader", shaders);
+ }
+ return shader_type;
+}
+
+/* we only support GST_VIDEO_FORMAT_xRGB right now */
+#if G_BYTE_ORDER == G_LITTLE_ENDIAN
+
+#define SHADE1(_d, _s, _i, _r, _g, _b) \
+G_STMT_START { \
+ _d[_i] = (_s[_i] > _b) ? _s[_i] - _b : 0; \
+ _i++; \
+ _d[_i] = (_s[_i] > _g) ? _s[_i] - _g : 0; \
+ _i++; \
+ _d[_i] = (_s[_i] > _r) ? _s[_i] - _r : 0; \
+ _i++; \
+ _d[_i++] = 0; \
+} G_STMT_END
+
+#define SHADE2(_d, _s, _j, _i, _r, _g, _b) \
+G_STMT_START { \
+ _d[_j++] = (_s[_i] > _b) ? _s[_i] - _b : 0; \
+ _i++; \
+ _d[_j++] = (_s[_i] > _g) ? _s[_i] - _g : 0; \
+ _i++; \
+ _d[_j++] = (_s[_i] > _r) ? _s[_i] - _r : 0; \
+ _i++; \
+ _d[_j++] = 0; \
+ _i++; \
+} G_STMT_END
+
+#else
+
+#define SHADE1(_d, _s, _i, _r, _g, _b) \
+G_STMT_START { \
+ _d[_i++] = 0; \
+ _d[_i] = (_s[_i] > _r) ? _s[_i] - _r : 0; \
+ _i++; \
+ _d[_i] = (_s[_i] > _g) ? _s[_i] - _g : 0; \
+ _i++; \
+ _d[_i] = (_s[_i] > _b) ? _s[_i] - _b : 0; \
+ _i++; \
+} G_STMT_END
+
+#define SHADE2(_d, _s, _j, _i, _r, _g, _b) \
+G_STMT_START { \
+ _d[_j++] = 0; \
+ _i++; \
+ _d[_j++] = (_s[_i] > _r) ? _s[_i] - _r : 0; \
+ _i++; \
+ _d[_j++] = (_s[_i] > _g) ? _s[_i] - _g : 0; \
+ _i++; \
+ _d[_j++] = (_s[_i] > _b) ? _s[_i] - _b : 0; \
+ _i++; \
+} G_STMT_END
+
+#endif
+
+static void
+shader_fade (GstBaseAudioVisualizer * scope, const guint8 * s, guint8 * d)
+{
+ guint i, bpf = scope->bpf;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ for (i = 0; i < bpf;) {
+ SHADE1 (d, s, i, r, g, b);
+ }
+}
+
+static void
+shader_fade_and_move_up (GstBaseAudioVisualizer * scope, const guint8 * s,
+ guint8 * d)
+{
+ guint i, j, bpf = scope->bpf;
+ guint bpl = 4 * scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ for (j = 0, i = bpl; i < bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+}
+
+static void
+shader_fade_and_move_down (GstBaseAudioVisualizer * scope, const guint8 * s,
+ guint8 * d)
+{
+ guint i, j, bpf = scope->bpf;
+ guint bpl = 4 * scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ for (j = bpl, i = 0; j < bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+}
+
+static void
+shader_fade_and_move_left (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, k, bpf = scope->bpf;
+ guint w = scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move to the left */
+ for (j = 0, i = 4; i < bpf;) {
+ for (k = 0; k < w - 1; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ i += 4;
+ j += 4;
+ }
+}
+
+static void
+shader_fade_and_move_right (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, k, bpf = scope->bpf;
+ guint w = scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move to the right */
+ for (j = 4, i = 0; i < bpf;) {
+ for (k = 0; k < w - 1; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ i += 4;
+ j += 4;
+ }
+}
+
+static void
+shader_fade_and_move_horiz_out (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, bpf = scope->bpf / 2;
+ guint bpl = 4 * scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move upper half up */
+ for (j = 0, i = bpl; i < bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ /* move lower half down */
+ for (j = bpf + bpl, i = bpf; j < bpf + bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+}
+
+static void
+shader_fade_and_move_horiz_in (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, bpf = scope->bpf / 2;
+ guint bpl = 4 * scope->width;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move upper half down */
+ for (i = 0, j = bpl; i < bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ /* move lower half up */
+ for (i = bpf + bpl, j = bpf; i < bpf + bpf;) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+}
+
+static void
+shader_fade_and_move_vert_out (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, k, bpf = scope->bpf;
+ guint m = scope->width / 2;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move left half to the left */
+ for (j = 0, i = 4; i < bpf;) {
+ for (k = 0; k < m; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ j += 4 * m;
+ i += 4 * m;
+ }
+ /* move right half to the right */
+ for (j = 4 * (m + 1), i = 4 * m; j < bpf;) {
+ for (k = 0; k < m; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ j += 4 * m;
+ i += 4 * m;
+ }
+}
+
+static void
+shader_fade_and_move_vert_in (GstBaseAudioVisualizer * scope,
+ const guint8 * s, guint8 * d)
+{
+ guint i, j, k, bpf = scope->bpf;
+ guint m = scope->width / 2;
+ guint r = (scope->shade_amount >> 16) & 0xff;
+ guint g = (scope->shade_amount >> 8) & 0xff;
+ guint b = (scope->shade_amount >> 0) & 0xff;
+
+ /* move left half to the right */
+ for (j = 4, i = 0; j < bpf;) {
+ for (k = 0; k < m; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ j += 4 * m;
+ i += 4 * m;
+ }
+ /* move right half to the left */
+ for (j = 4 * m, i = 4 * (m + 1); i < bpf;) {
+ for (k = 0; k < m; k++) {
+ SHADE2 (d, s, j, i, r, g, b);
+ }
+ j += 4 * m;
+ i += 4 * m;
+ }
+}
+
+static void
+gst_base_audio_visualizer_change_shader (GstBaseAudioVisualizer * scope)
+{
+ switch (scope->shader_type) {
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_NONE:
+ scope->shader = NULL;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE:
+ scope->shader = shader_fade;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_UP:
+ scope->shader = shader_fade_and_move_up;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_DOWN:
+ scope->shader = shader_fade_and_move_down;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_LEFT:
+ scope->shader = shader_fade_and_move_left;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_RIGHT:
+ scope->shader = shader_fade_and_move_right;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_OUT:
+ scope->shader = shader_fade_and_move_horiz_out;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_IN:
+ scope->shader = shader_fade_and_move_horiz_in;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_OUT:
+ scope->shader = shader_fade_and_move_vert_out;
+ break;
+ case GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_IN:
+ scope->shader = shader_fade_and_move_vert_in;
+ break;
+ default:
+ GST_ERROR ("invalid shader function");
+ scope->shader = NULL;
+ break;
+ }
+}
+
+/* base class */
+
+GType
+gst_base_audio_visualizer_get_type (void)
+{
+ static volatile gsize base_audio_visualizer_type = 0;
+
+ if (g_once_init_enter (&base_audio_visualizer_type)) {
+ static const GTypeInfo base_audio_visualizer_info = {
+ sizeof (GstBaseAudioVisualizerClass),
+ NULL,
+ NULL,
+ (GClassInitFunc) gst_base_audio_visualizer_class_init,
+ NULL,
+ NULL,
+ sizeof (GstBaseAudioVisualizer),
+ 0,
+ (GInstanceInitFunc) gst_base_audio_visualizer_init,
+ };
+ GType _type;
+
+ _type = g_type_register_static (GST_TYPE_ELEMENT,
+ "GstBaseAudioVisualizer", &base_audio_visualizer_info,
+ G_TYPE_FLAG_ABSTRACT);
+ g_once_init_leave (&base_audio_visualizer_type, _type);
+ }
+ return (GType) base_audio_visualizer_type;
+}
+
+static void
+gst_base_audio_visualizer_class_init (GstBaseAudioVisualizerClass * klass)
+{
+ GObjectClass *gobject_class = (GObjectClass *) klass;
+ GstElementClass *element_class = (GstElementClass *) klass;
+
+ parent_class = g_type_class_peek_parent (klass);
+
+ GST_DEBUG_CATEGORY_INIT (base_audio_visualizer_debug, "baseaudiovisualizer",
+ 0, "scope audio visualisation base class");
+
+ gobject_class->set_property = gst_base_audio_visualizer_set_property;
+ gobject_class->get_property = gst_base_audio_visualizer_get_property;
+ gobject_class->dispose = gst_base_audio_visualizer_dispose;
+
+ element_class->change_state =
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_change_state);
+
+ g_object_class_install_property (gobject_class, PROP_SHADER,
+ g_param_spec_enum ("shader", "shader type",
+ "Shader function to apply on each frame",
+ GST_TYPE_BASE_AUDIO_VISUALIZER_SHADER, DEFAULT_SHADER,
+ G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_SHADE_AMOUNT,
+ g_param_spec_uint ("shade-amount", "shade amount",
+ "Shading color to use (big-endian ARGB)", 0, G_MAXUINT32,
+ DEFAULT_SHADE_AMOUNT,
+ G_PARAM_READWRITE | GST_PARAM_CONTROLLABLE | G_PARAM_STATIC_STRINGS));
+}
+
+static void
+gst_base_audio_visualizer_init (GstBaseAudioVisualizer * scope,
+ GstBaseAudioVisualizerClass * g_class)
+{
+ GstPadTemplate *pad_template;
+
+ /* create the sink and src pads */
+ pad_template =
+ gst_element_class_get_pad_template (GST_ELEMENT_CLASS (g_class), "sink");
+ g_return_if_fail (pad_template != NULL);
+ scope->sinkpad = gst_pad_new_from_template (pad_template, "sink");
+ gst_pad_set_chain_function (scope->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_chain));
+ gst_pad_set_event_function (scope->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_sink_event));
+ gst_pad_set_query_function (scope->sinkpad,
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_sink_query));
+ gst_element_add_pad (GST_ELEMENT (scope), scope->sinkpad);
+
+ pad_template =
+ gst_element_class_get_pad_template (GST_ELEMENT_CLASS (g_class), "src");
+ g_return_if_fail (pad_template != NULL);
+ scope->srcpad = gst_pad_new_from_template (pad_template, "src");
+ gst_pad_set_event_function (scope->srcpad,
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_src_event));
+ gst_pad_set_query_function (scope->srcpad,
+ GST_DEBUG_FUNCPTR (gst_base_audio_visualizer_src_query));
+ gst_element_add_pad (GST_ELEMENT (scope), scope->srcpad);
+
+ scope->adapter = gst_adapter_new ();
+ scope->inbuf = gst_buffer_new ();
+
+ /* properties */
+ scope->shader_type = DEFAULT_SHADER;
+ gst_base_audio_visualizer_change_shader (scope);
+ scope->shade_amount = DEFAULT_SHADE_AMOUNT;
+
+ /* reset the initial video state */
+ scope->width = 320;
+ scope->height = 200;
+ scope->fps_n = 25; /* desired frame rate */
+ scope->fps_d = 1;
+ scope->frame_duration = GST_CLOCK_TIME_NONE;
+
+ /* reset the initial state */
+ gst_audio_info_init (&scope->ainfo);
+ gst_video_info_init (&scope->vinfo);
+
+ g_mutex_init (&scope->config_lock);
+}
+
+static void
+gst_base_audio_visualizer_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstBaseAudioVisualizer *scope = GST_BASE_AUDIO_VISUALIZER (object);
+
+ switch (prop_id) {
+ case PROP_SHADER:
+ scope->shader_type = g_value_get_enum (value);
+ gst_base_audio_visualizer_change_shader (scope);
+ break;
+ case PROP_SHADE_AMOUNT:
+ scope->shade_amount = g_value_get_uint (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_base_audio_visualizer_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstBaseAudioVisualizer *scope = GST_BASE_AUDIO_VISUALIZER (object);
+
+ switch (prop_id) {
+ case PROP_SHADER:
+ g_value_set_enum (value, scope->shader_type);
+ break;
+ case PROP_SHADE_AMOUNT:
+ g_value_set_uint (value, scope->shade_amount);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_base_audio_visualizer_dispose (GObject * object)
+{
+ GstBaseAudioVisualizer *scope = GST_BASE_AUDIO_VISUALIZER (object);
+
+ if (scope->adapter) {
+ g_object_unref (scope->adapter);
+ scope->adapter = NULL;
+ }
+ if (scope->inbuf) {
+ gst_buffer_unref (scope->inbuf);
+ scope->inbuf = NULL;
+ }
+ if (scope->pixelbuf) {
+ g_free (scope->pixelbuf);
+ scope->pixelbuf = NULL;
+ }
+ if (scope->config_lock.p) {
+ g_mutex_clear (&scope->config_lock);
+ scope->config_lock.p = NULL;
+ }
+ G_OBJECT_CLASS (parent_class)->dispose (object);
+}
+
+static void
+gst_base_audio_visualizer_reset (GstBaseAudioVisualizer * scope)
+{
+ gst_adapter_clear (scope->adapter);
+ gst_segment_init (&scope->segment, GST_FORMAT_UNDEFINED);
+
+ GST_OBJECT_LOCK (scope);
+ scope->proportion = 1.0;
+ scope->earliest_time = -1;
+ GST_OBJECT_UNLOCK (scope);
+}
+
+static gboolean
+gst_base_audio_visualizer_sink_setcaps (GstBaseAudioVisualizer * scope,
+ GstCaps * caps)
+{
+ GstAudioInfo info;
+ gboolean res = TRUE;
+
+ if (!gst_audio_info_from_caps (&info, caps))
+ goto wrong_caps;
+
+ scope->ainfo = info;
+
+ GST_DEBUG_OBJECT (scope, "audio: channels %d, rate %d",
+ GST_AUDIO_INFO_CHANNELS (&info), GST_AUDIO_INFO_RATE (&info));
+
+done:
+ return res;
+
+ /* Errors */
+wrong_caps:
+ {
+ GST_WARNING_OBJECT (scope, "could not parse caps");
+ res = FALSE;
+ goto done;
+ }
+}
+
+static gboolean
+gst_base_audio_visualizer_src_setcaps (GstBaseAudioVisualizer * scope,
+ GstCaps * caps)
+{
+ GstVideoInfo info;
+ GstBaseAudioVisualizerClass *klass;
+ GstStructure *structure;
+ gboolean res;
+
+ if (!gst_video_info_from_caps (&info, caps))
+ goto wrong_caps;
+
+ structure = gst_caps_get_structure (caps, 0);
+ if (!gst_structure_get_int (structure, "width", &scope->width) ||
+ !gst_structure_get_int (structure, "height", &scope->height) ||
+ !gst_structure_get_fraction (structure, "framerate", &scope->fps_n,
+ &scope->fps_d))
+ goto wrong_caps;
+
+ klass = GST_BASE_AUDIO_VISUALIZER_CLASS (G_OBJECT_GET_CLASS (scope));
+
+ scope->vinfo = info;
+ scope->video_format = info.finfo->format;
+
+ scope->frame_duration = gst_util_uint64_scale_int (GST_SECOND,
+ scope->fps_d, scope->fps_n);
+ scope->spf = gst_util_uint64_scale_int (GST_AUDIO_INFO_RATE (&scope->ainfo),
+ scope->fps_d, scope->fps_n);
+ scope->req_spf = scope->spf;
+
+ scope->bpf = scope->width * scope->height * 4;
+
+ if (scope->pixelbuf)
+ g_free (scope->pixelbuf);
+ scope->pixelbuf = g_malloc0 (scope->bpf);
+
+ if (klass->setup)
+ res = klass->setup (scope);
+
+ GST_DEBUG_OBJECT (scope, "video: dimension %dx%d, framerate %d/%d",
+ scope->width, scope->height, scope->fps_n, scope->fps_d);
+ GST_DEBUG_OBJECT (scope, "blocks: spf %u, req_spf %u",
+ scope->spf, scope->req_spf);
+
+ res = gst_pad_set_caps (scope->srcpad, caps);
+
+ return res;
+
+ /* ERRORS */
+wrong_caps:
+ {
+ GST_DEBUG_OBJECT (scope, "error parsing caps");
+ return FALSE;
+ }
+}
+
+static gboolean
+gst_base_audio_visualizer_src_negotiate (GstBaseAudioVisualizer * scope)
+{
+ GstCaps *othercaps, *target;
+ GstStructure *structure;
+ GstCaps *templ;
+ GstQuery *query;
+ GstBufferPool *pool;
+ GstStructure *config;
+ guint size, min, max;
+
+ templ = gst_pad_get_pad_template_caps (scope->srcpad);
+
+ GST_DEBUG_OBJECT (scope, "performing negotiation");
+
+ /* see what the peer can do */
+ othercaps = gst_pad_peer_query_caps (scope->srcpad, NULL);
+ if (othercaps) {
+ target = gst_caps_intersect (othercaps, templ);
+ gst_caps_unref (othercaps);
+ gst_caps_unref (templ);
+
+ if (gst_caps_is_empty (target))
+ goto no_format;
+
+ target = gst_caps_truncate (target);
+ } else {
+ target = templ;
+ }
+
+ target = gst_caps_make_writable (target);
+ structure = gst_caps_get_structure (target, 0);
+ gst_structure_fixate_field_nearest_int (structure, "width", scope->width);
+ gst_structure_fixate_field_nearest_int (structure, "height", scope->height);
+ gst_structure_fixate_field_nearest_fraction (structure, "framerate",
+ scope->fps_n, scope->fps_d);
+
+ GST_DEBUG_OBJECT (scope, "final caps are %" GST_PTR_FORMAT, target);
+
+ gst_base_audio_visualizer_src_setcaps (scope, target);
+
+ /* try to get a bufferpool now */
+ /* find a pool for the negotiated caps now */
+ query = gst_query_new_allocation (target, TRUE);
+
+ if (!gst_pad_peer_query (scope->srcpad, query)) {
+ /* not a problem, we use the query defaults */
+ GST_DEBUG_OBJECT (scope, "allocation query failed");
+ }
+
+ if (gst_query_get_n_allocation_pools (query) > 0) {
+ /* we got configuration from our peer, parse them */
+ gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
+ } else {
+ pool = NULL;
+ size = scope->bpf;
+ min = max = 0;
+ }
+
+ if (pool == NULL) {
+ /* we did not get a pool, make one ourselves then */
+ pool = gst_buffer_pool_new ();
+ }
+
+ config = gst_buffer_pool_get_config (pool);
+ gst_buffer_pool_config_set_params (config, target, size, min, max);
+ gst_buffer_pool_set_config (pool, config);
+
+ if (scope->pool) {
+ gst_buffer_pool_set_active (scope->pool, FALSE);
+ gst_object_unref (scope->pool);
+ }
+ scope->pool = pool;
+
+ /* and activate */
+ gst_buffer_pool_set_active (pool, TRUE);
+
+ gst_caps_unref (target);
+
+ return TRUE;
+
+no_format:
+ {
+ gst_caps_unref (target);
+ return FALSE;
+ }
+}
+
+/* make sure we are negotiated */
+static GstFlowReturn
+gst_base_audio_visualizer_ensure_negotiated (GstBaseAudioVisualizer * scope)
+{
+ gboolean reconfigure;
+
+ reconfigure = gst_pad_check_reconfigure (scope->srcpad);
+
+ /* we don't know an output format yet, pick one */
+ if (reconfigure || !gst_pad_has_current_caps (scope->srcpad)) {
+ if (!gst_base_audio_visualizer_src_negotiate (scope))
+ return GST_FLOW_NOT_NEGOTIATED;
+ }
+ return GST_FLOW_OK;
+}
+
+static GstFlowReturn
+gst_base_audio_visualizer_chain (GstPad * pad, GstObject * parent,
+ GstBuffer * buffer)
+{
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstBaseAudioVisualizer *scope;
+ GstBaseAudioVisualizerClass *klass;
+ GstBuffer *inbuf;
+ guint64 dist, ts;
+ guint avail, sbpf;
+ gpointer adata;
+ gboolean (*render) (GstBaseAudioVisualizer * scope, GstBuffer * audio,
+ GstBuffer * video);
+ gint bps, channels, rate;
+
+ scope = GST_BASE_AUDIO_VISUALIZER (parent);
+ klass = GST_BASE_AUDIO_VISUALIZER_CLASS (G_OBJECT_GET_CLASS (scope));
+
+ render = klass->render;
+
+ GST_LOG_OBJECT (scope, "chainfunc called");
+
+ /* resync on DISCONT */
+ if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
+ gst_adapter_clear (scope->adapter);
+ }
+
+ /* Make sure we have an output format */
+ ret = gst_base_audio_visualizer_ensure_negotiated (scope);
+ if (ret != GST_FLOW_OK) {
+ gst_buffer_unref (buffer);
+ goto beach;
+ }
+ channels = GST_AUDIO_INFO_CHANNELS (&scope->ainfo);
+ rate = GST_AUDIO_INFO_RATE (&scope->ainfo);
+ bps = GST_AUDIO_INFO_BPS (&scope->ainfo);
+
+ if (bps == 0) {
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto beach;
+ }
+
+ gst_adapter_push (scope->adapter, buffer);
+
+ g_mutex_lock (&scope->config_lock);
+
+ /* this is what we want */
+ sbpf = scope->req_spf * channels * sizeof (gint16);
+
+ inbuf = scope->inbuf;
+ /* FIXME: the timestamp in the adapter would be different */
+ gst_buffer_copy_into (inbuf, buffer, GST_BUFFER_COPY_METADATA, 0, -1);
+
+ /* this is what we have */
+ avail = gst_adapter_available (scope->adapter);
+ GST_LOG_OBJECT (scope, "avail: %u, bpf: %u", avail, sbpf);
+ while (avail >= sbpf) {
+ GstBuffer *outbuf;
+ GstMapInfo map;
+
+ /* get timestamp of the current adapter content */
+ ts = gst_adapter_prev_timestamp (scope->adapter, &dist);
+ if (GST_CLOCK_TIME_IS_VALID (ts)) {
+ /* convert bytes to time */
+ dist /= bps;
+ ts += gst_util_uint64_scale_int (dist, GST_SECOND, rate);
+ }
+
+ if (GST_CLOCK_TIME_IS_VALID (ts)) {
+ gint64 qostime;
+ gboolean need_skip;
+
+ qostime =
+ gst_segment_to_running_time (&scope->segment, GST_FORMAT_TIME, ts) +
+ scope->frame_duration;
+
+ GST_OBJECT_LOCK (scope);
+ /* check for QoS, don't compute buffers that are known to be late */
+ need_skip = scope->earliest_time != -1 && qostime <= scope->earliest_time;
+ GST_OBJECT_UNLOCK (scope);
+
+ if (need_skip) {
+ GST_WARNING_OBJECT (scope,
+ "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (qostime), GST_TIME_ARGS (scope->earliest_time));
+ goto skip;
+ }
+ }
+
+ g_mutex_unlock (&scope->config_lock);
+ ret = gst_buffer_pool_acquire_buffer (scope->pool, &outbuf, NULL);
+ g_mutex_lock (&scope->config_lock);
+ /* recheck as the value could have changed */
+ sbpf = scope->req_spf * channels * sizeof (gint16);
+
+ /* no buffer allocated, we don't care why. */
+ if (ret != GST_FLOW_OK)
+ break;
+
+ /* sync controlled properties */
+ gst_object_sync_values (GST_OBJECT (scope), ts);
+
+ GST_BUFFER_TIMESTAMP (outbuf) = ts;
+ GST_BUFFER_DURATION (outbuf) = scope->frame_duration;
+
+ gst_buffer_map (outbuf, &map, GST_MAP_WRITE);
+ if (scope->shader) {
+ memcpy (map.data, scope->pixelbuf, scope->bpf);
+ } else {
+ memset (map.data, 0, scope->bpf);
+ }
+
+ /* this can fail as the data size we need could have changed */
+ if (!(adata = (gpointer) gst_adapter_map (scope->adapter, sbpf)))
+ break;
+
+ gst_buffer_replace_all_memory (inbuf,
+ gst_memory_new_wrapped (GST_MEMORY_FLAG_READONLY, adata, sbpf, 0,
+ sbpf, NULL, NULL));
+
+ /* call class->render() vmethod */
+ if (render) {
+ if (!render (scope, inbuf, outbuf)) {
+ ret = GST_FLOW_ERROR;
+ } else {
+ /* run various post processing (shading and geometric transformation) */
+ if (scope->shader) {
+ scope->shader (scope, map.data, scope->pixelbuf);
+ }
+ }
+ }
+
+ gst_buffer_unmap (outbuf, &map);
+ gst_buffer_resize (outbuf, 0, scope->bpf);
+
+ g_mutex_unlock (&scope->config_lock);
+ ret = gst_pad_push (scope->srcpad, outbuf);
+ outbuf = NULL;
+ g_mutex_lock (&scope->config_lock);
+
+ skip:
+ /* recheck as the value could have changed */
+ sbpf = scope->req_spf * channels * sizeof (gint16);
+ GST_LOG_OBJECT (scope, "avail: %u, bpf: %u", avail, sbpf);
+ /* we want to take less or more, depending on spf : req_spf */
+ if (avail - sbpf >= sbpf) {
+ gst_adapter_flush (scope->adapter, sbpf);
+ gst_adapter_unmap (scope->adapter);
+ } else if (avail >= sbpf) {
+ /* just flush a bit and stop */
+ gst_adapter_flush (scope->adapter, (avail - sbpf));
+ gst_adapter_unmap (scope->adapter);
+ break;
+ }
+ avail = gst_adapter_available (scope->adapter);
+
+ if (ret != GST_FLOW_OK)
+ break;
+ }
+
+ g_mutex_unlock (&scope->config_lock);
+
+beach:
+ return ret;
+}
+
+static gboolean
+gst_base_audio_visualizer_src_event (GstPad * pad, GstObject * parent,
+ GstEvent * event)
+{
+ gboolean res;
+ GstBaseAudioVisualizer *scope;
+
+ scope = GST_BASE_AUDIO_VISUALIZER (parent);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_QOS:
+ {
+ gdouble proportion;
+ GstClockTimeDiff diff;
+ GstClockTime timestamp;
+
+ gst_event_parse_qos (event, NULL, &proportion, &diff, &timestamp);
+
+ /* save stuff for the _chain() function */
+ GST_OBJECT_LOCK (scope);
+ scope->proportion = proportion;
+ if (diff >= 0)
+ /* we're late, this is a good estimate for next displayable
+ * frame (see part-qos.txt) */
+ scope->earliest_time = timestamp + 2 * diff + scope->frame_duration;
+ else
+ scope->earliest_time = timestamp + diff;
+ GST_OBJECT_UNLOCK (scope);
+
+ res = gst_pad_push_event (scope->sinkpad, event);
+ break;
+ }
+ case GST_EVENT_RECONFIGURE:
+ /* don't forward */
+ gst_event_unref (event);
+ res = TRUE;
+ break;
+ default:
+ res = gst_pad_push_event (scope->sinkpad, event);
+ break;
+ }
+
+ return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_sink_event (GstPad * pad, GstObject * parent,
+ GstEvent * event)
+{
+ gboolean res;
+ GstBaseAudioVisualizer *scope;
+
+ scope = GST_BASE_AUDIO_VISUALIZER (parent);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_CAPS:
+ {
+ GstCaps *caps;
+
+ gst_event_parse_caps (event, &caps);
+ res = gst_base_audio_visualizer_sink_setcaps (scope, caps);
+ break;
+ }
+ case GST_EVENT_FLUSH_START:
+ res = gst_pad_push_event (scope->srcpad, event);
+ break;
+ case GST_EVENT_FLUSH_STOP:
+ gst_base_audio_visualizer_reset (scope);
+ res = gst_pad_push_event (scope->srcpad, event);
+ break;
+ case GST_EVENT_SEGMENT:
+ {
+ /* the newsegment values are used to clip the input samples
+ * and to convert the incoming timestamps to running time so
+ * we can do QoS */
+ gst_event_copy_segment (event, &scope->segment);
+
+ res = gst_pad_push_event (scope->srcpad, event);
+ break;
+ }
+ default:
+ res = gst_pad_push_event (scope->srcpad, event);
+ break;
+ }
+
+ return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_src_query (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ gboolean res = FALSE;
+ GstBaseAudioVisualizer *scope;
+
+ scope = GST_BASE_AUDIO_VISUALIZER (parent);
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_LATENCY:
+ {
+ /* We need to send the query upstream and add the returned latency to our
+ * own */
+ GstClockTime min_latency, max_latency;
+ gboolean us_live;
+ GstClockTime our_latency;
+ guint max_samples;
+ gint rate = GST_AUDIO_INFO_RATE (&scope->ainfo);
+
+ if (rate == 0)
+ break;
+
+ if ((res = gst_pad_peer_query (scope->sinkpad, query))) {
+ gst_query_parse_latency (query, &us_live, &min_latency, &max_latency);
+
+ GST_DEBUG_OBJECT (scope, "Peer latency: min %"
+ GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+ /* the max samples we must buffer */
+ max_samples = MAX (scope->req_spf, scope->spf);
+ our_latency = gst_util_uint64_scale_int (max_samples, GST_SECOND, rate);
+
+ GST_DEBUG_OBJECT (scope, "Our latency: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (our_latency));
+
+ /* we add some latency but only if we need to buffer more than what
+ * upstream gives us */
+ min_latency += our_latency;
+ if (max_latency != -1)
+ max_latency += our_latency;
+
+ GST_DEBUG_OBJECT (scope, "Calculated total latency : min %"
+ GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
+
+ gst_query_set_latency (query, TRUE, min_latency, max_latency);
+ }
+ break;
+ }
+ default:
+ res = gst_pad_query_default (pad, parent, query);
+ break;
+ }
+
+ return res;
+}
+
+static gboolean
+gst_base_audio_visualizer_sink_query (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ gboolean res = FALSE;
+
+ switch (GST_QUERY_TYPE (query)) {
+ default:
+ res = gst_pad_query_default (pad, parent, query);
+ break;
+ }
+ return res;
+}
+
+static GstStateChangeReturn
+gst_base_audio_visualizer_change_state (GstElement * element,
+ GstStateChange transition)
+{
+ GstStateChangeReturn ret;
+ GstBaseAudioVisualizer *scope;
+
+ scope = GST_BASE_AUDIO_VISUALIZER (element);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ gst_base_audio_visualizer_reset (scope);
+ break;
+ default:
+ break;
+ }
+
+ ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ if (scope->pool) {
+ gst_buffer_pool_set_active (scope->pool, FALSE);
+ gst_object_replace ((GstObject **) & scope->pool, NULL);
+ }
+ break;
+ case GST_STATE_CHANGE_READY_TO_NULL:
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
diff --git a/ext/libvisual/gstbaseaudiovisualizer.h b/ext/libvisual/gstbaseaudiovisualizer.h
new file mode 100644
index 000000000..fe1fa5e78
--- /dev/null
+++ b/ext/libvisual/gstbaseaudiovisualizer.h
@@ -0,0 +1,125 @@
+/* GStreamer
+ * Copyright (C) <2011> Stefan Kost <ensonic@users.sf.net>
+ *
+ * gstbaseaudiovisualizer.h: base class for audio visualisation elements
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __GST_BASE_AUDIO_VISUALIZER_H__
+#define __GST_BASE_AUDIO_VISUALIZER_H__
+
+#include <gst/gst.h>
+#include <gst/base/gstbasetransform.h>
+
+#include <gst/video/video.h>
+#include <gst/audio/audio.h>
+#include <gst/base/gstadapter.h>
+
+G_BEGIN_DECLS
+#define GST_TYPE_BASE_AUDIO_VISUALIZER (gst_base_audio_visualizer_get_type())
+#define GST_BASE_AUDIO_VISUALIZER(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_BASE_AUDIO_VISUALIZER,GstBaseAudioVisualizer))
+#define GST_BASE_AUDIO_VISUALIZER_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_BASE_AUDIO_VISUALIZER,GstBaseAudioVisualizerClass))
+#define GST_IS_BASE_AUDIO_VISUALIZER(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_BASE_AUDIO_VISUALIZER))
+#define GST_IS_BASE_AUDIO_VISUALIZER_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_BASE_AUDIO_VISUALIZER))
+typedef struct _GstBaseAudioVisualizer GstBaseAudioVisualizer;
+typedef struct _GstBaseAudioVisualizerClass GstBaseAudioVisualizerClass;
+
+typedef void (*GstBaseAudioVisualizerShaderFunc)(GstBaseAudioVisualizer *scope, const guint8 *s, guint8 *d);
+
+/**
+ * GstBaseAudioVisualizerShader:
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_NONE: no shading
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE: plain fading
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_UP: fade and move up
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_DOWN: fade and move down
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_LEFT: fade and move left
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_RIGHT: fade and move right
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_OUT: fade and move horizontally out
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_IN: fade and move horizontally in
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_OUT: fade and move vertically out
+ * @GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_IN: fade and move vertically in
+ *
+ * Different types of supported background shading functions.
+ */
+typedef enum {
+ GST_BASE_AUDIO_VISUALIZER_SHADER_NONE,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_UP,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_DOWN,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_LEFT,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_RIGHT,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_OUT,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_HORIZ_IN,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_OUT,
+ GST_BASE_AUDIO_VISUALIZER_SHADER_FADE_AND_MOVE_VERT_IN
+} GstBaseAudioVisualizerShader;
+
+struct _GstBaseAudioVisualizer
+{
+ GstElement parent;
+
+ /* pads */
+ GstPad *srcpad, *sinkpad;
+
+ GstBufferPool *pool;
+ GstAdapter *adapter;
+ GstBuffer *inbuf;
+ guint8 *pixelbuf;
+
+ GstBaseAudioVisualizerShader shader_type;
+ GstBaseAudioVisualizerShaderFunc shader;
+ guint32 shade_amount;
+
+ guint spf; /* samples per video frame */
+ guint req_spf; /* min samples per frame wanted by the subclass */
+
+ /* video state */
+ GstVideoInfo vinfo;
+ GstVideoFormat video_format;
+ gint fps_n, fps_d;
+ gint width;
+ gint height;
+ guint64 frame_duration;
+ guint bpf; /* bytes per frame */
+
+ /* audio state */
+ GstAudioInfo ainfo;
+
+ /* configuration mutex */
+ GMutex config_lock;
+
+ /* QoS stuff *//* with LOCK */
+ gdouble proportion;
+ GstClockTime earliest_time;
+
+ GstSegment segment;
+};
+
+struct _GstBaseAudioVisualizerClass
+{
+ GstElementClass parent_class;
+
+ /* virtual function, called whenever the format changes */
+ gboolean (*setup) (GstBaseAudioVisualizer * scope);
+
+ /* virtual function for rendering a frame */
+ gboolean (*render) (GstBaseAudioVisualizer * scope, GstBuffer * audio, GstBuffer * video);
+};
+
+GType gst_base_audio_visualizer_get_type (void);
+
+G_END_DECLS
+#endif /* __GST_BASE_AUDIO_VISUALIZER_H__ */
diff --git a/ext/libvisual/visual.c b/ext/libvisual/visual.c
index e631db4c7..47e5a9303 100644
--- a/ext/libvisual/visual.c
+++ b/ext/libvisual/visual.c
@@ -30,11 +30,6 @@ GST_DEBUG_CATEGORY_EXTERN (libvisual_debug);
/* amount of samples before we can feed libvisual */
#define VISUAL_SAMPLES 512
-#define DEFAULT_WIDTH 320
-#define DEFAULT_HEIGHT 240
-#define DEFAULT_FPS_N 25
-#define DEFAULT_FPS_D 1
-
static GstStaticPadTemplate src_template = GST_STATIC_PAD_TEMPLATE ("src",
GST_PAD_SRC,
GST_PAD_ALWAYS,
@@ -53,6 +48,7 @@ static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
GST_STATIC_CAPS ("audio/x-raw, "
"format = (string) " GST_AUDIO_NE (S16) ", "
"layout = (string) interleaved, " "channels = (int) { 1, 2 }, "
+ "channel-mask = (bitmask) 0x3, "
#if defined(VISUAL_API_VERSION) && VISUAL_API_VERSION >= 4000 && VISUAL_API_VERSION < 5000
"rate = (int) { 8000, 11250, 22500, 32000, 44100, 48000, 96000 }"
#else
@@ -62,24 +58,12 @@ static GstStaticPadTemplate sink_template = GST_STATIC_PAD_TEMPLATE ("sink",
);
-
static void gst_visual_init (GstVisual * visual);
static void gst_visual_finalize (GObject * object);
-static GstStateChangeReturn gst_visual_change_state (GstElement * element,
- GstStateChange transition);
-static GstFlowReturn gst_visual_chain (GstPad * pad, GstObject * parent,
- GstBuffer * buffer);
-static gboolean gst_visual_sink_event (GstPad * pad, GstObject * parent,
- GstEvent * event);
-static gboolean gst_visual_src_event (GstPad * pad, GstObject * parent,
- GstEvent * event);
-
-static gboolean gst_visual_src_query (GstPad * pad, GstObject * parent,
- GstQuery * query);
-
-static gboolean gst_visual_sink_setcaps (GstPad * pad, GstCaps * caps);
-static GstCaps *gst_visual_getcaps (GstPad * pad, GstCaps * filter);
+static gboolean gst_visual_setup (GstBaseAudioVisualizer * bscope);
+static gboolean gst_visual_render (GstBaseAudioVisualizer * bscope,
+ GstBuffer * audio, GstBuffer * video);
static GstElementClass *parent_class = NULL;
@@ -101,7 +85,9 @@ gst_visual_get_type (void)
(GInstanceInitFunc) gst_visual_init,
};
- type = g_type_register_static (GST_TYPE_ELEMENT, "GstVisual", &info, 0);
+ type =
+ g_type_register_static (GST_TYPE_BASE_AUDIO_VISUALIZER, "GstVisual",
+ &info, 0);
}
return type;
}
@@ -109,14 +95,14 @@ gst_visual_get_type (void)
void
gst_visual_class_init (gpointer g_class, gpointer class_data)
{
- GstVisualClass *klass = GST_VISUAL_CLASS (g_class);
- GstElementClass *element = GST_ELEMENT_CLASS (g_class);
- GObjectClass *object = G_OBJECT_CLASS (g_class);
+ GObjectClass *gobject_class = (GObjectClass *) g_class;
+ GstElementClass *element_class = (GstElementClass *) g_class;
+ GstBaseAudioVisualizerClass *scope_class =
+ (GstBaseAudioVisualizerClass *) g_class;
+ GstVisualClass *klass = (GstVisualClass *) g_class;
klass->plugin = class_data;
- element->change_state = gst_visual_change_state;
-
if (class_data == NULL) {
parent_class = g_type_class_peek_parent (g_class);
} else {
@@ -124,36 +110,28 @@ gst_visual_class_init (gpointer g_class, gpointer class_data)
klass->plugin->info->name, klass->plugin->info->version);
/* FIXME: improve to only register what plugin supports? */
- gst_element_class_add_pad_template (element,
+ gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&src_template));
- gst_element_class_add_pad_template (element,
+ gst_element_class_add_pad_template (element_class,
gst_static_pad_template_get (&sink_template));
- gst_element_class_set_static_metadata (element,
+ gst_element_class_set_static_metadata (element_class,
longname, "Visualization",
klass->plugin->info->about, "Benjamin Otte <otte@gnome.org>");
g_free (longname);
}
- object->finalize = gst_visual_finalize;
+ gobject_class->finalize = gst_visual_finalize;
+
+ scope_class->setup = GST_DEBUG_FUNCPTR (gst_visual_setup);
+ scope_class->render = GST_DEBUG_FUNCPTR (gst_visual_render);
}
static void
gst_visual_init (GstVisual * visual)
{
- /* create the sink and src pads */
- visual->sinkpad = gst_pad_new_from_static_template (&sink_template, "sink");
- gst_pad_set_chain_function (visual->sinkpad, gst_visual_chain);
- gst_pad_set_event_function (visual->sinkpad, gst_visual_sink_event);
- gst_element_add_pad (GST_ELEMENT (visual), visual->sinkpad);
-
- visual->srcpad = gst_pad_new_from_static_template (&src_template, "src");
- gst_pad_set_event_function (visual->srcpad, gst_visual_src_event);
- gst_pad_set_query_function (visual->srcpad, gst_visual_src_query);
- gst_element_add_pad (GST_ELEMENT (visual), visual->srcpad);
-
- visual->adapter = gst_adapter_new ();
+ /* do nothing */
}
static void
@@ -178,716 +156,170 @@ gst_visual_finalize (GObject * object)
{
GstVisual *visual = GST_VISUAL (object);
- g_object_unref (visual->adapter);
- if (visual->pool)
- gst_object_unref (visual->pool);
gst_visual_clear_actors (visual);
GST_CALL_PARENT (G_OBJECT_CLASS, finalize, (object));
}
-static void
-gst_visual_reset (GstVisual * visual)
-{
- gst_adapter_clear (visual->adapter);
- gst_segment_init (&visual->segment, GST_FORMAT_UNDEFINED);
-
- GST_OBJECT_LOCK (visual);
- visual->proportion = 1.0;
- visual->earliest_time = -1;
- GST_OBJECT_UNLOCK (visual);
-}
-
-static GstCaps *
-gst_visual_getcaps (GstPad * pad, GstCaps * filter)
+static gboolean
+gst_visual_setup (GstBaseAudioVisualizer * bscope)
{
- GstCaps *ret;
- GstVisual *visual = GST_VISUAL (GST_PAD_PARENT (pad));
- int depths;
-
- if (!visual->actor) {
- ret = gst_pad_get_pad_template_caps (visual->srcpad);
- goto beach;
- }
-
- ret = gst_caps_new_empty ();
- depths = visual_actor_get_supported_depth (visual->actor);
- if (depths < 0) {
- /* FIXME: set an error */
- goto beach;
- }
- if (depths == VISUAL_VIDEO_DEPTH_GL) {
- /* We can't handle GL only plugins */
- goto beach;
- }
-
- GST_DEBUG_OBJECT (visual, "libvisual plugin supports depths %u (0x%04x)",
- depths, depths);
- /* if (depths & VISUAL_VIDEO_DEPTH_32BIT) Always supports 32bit output */
-#if G_BYTE_ORDER == G_BIG_ENDIAN
- gst_caps_append (ret, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("xRGB")));
-#else
- gst_caps_append (ret, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGRx")));
-#endif
+ GstVisual *visual = GST_VISUAL (bscope);
+ gint pitch, depth;
- if (depths & VISUAL_VIDEO_DEPTH_24BIT) {
-#if G_BYTE_ORDER == G_BIG_ENDIAN
- gst_caps_append (ret, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("RGB")));
-#else
- gst_caps_append (ret, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("BGR")));
-#endif
- }
- if (depths & VISUAL_VIDEO_DEPTH_16BIT) {
- gst_caps_append (ret, gst_caps_from_string (GST_VIDEO_CAPS_MAKE ("RGB16")));
- }
-
-beach:
-
- if (filter) {
- GstCaps *intersection;
+ gst_visual_clear_actors (visual);
- intersection =
- gst_caps_intersect_full (filter, ret, GST_CAPS_INTERSECT_FIRST);
- gst_caps_unref (ret);
- ret = intersection;
+ depth = bscope->vinfo.finfo->pixel_stride[0];
+ if (bscope->vinfo.finfo->bits >= 8) {
+ depth *= 8;
}
- GST_DEBUG_OBJECT (visual, "returning caps %" GST_PTR_FORMAT, ret);
+ visual->actor =
+ visual_actor_new (GST_VISUAL_GET_CLASS (visual)->plugin->info->plugname);
+ visual->video = visual_video_new ();
+ visual->audio = visual_audio_new ();
+ /* can't have a play without actors */
+ if (!visual->actor || !visual->video)
+ goto no_actors;
- return ret;
-}
+ if (visual_actor_realize (visual->actor) != 0)
+ goto no_realize;
-static gboolean
-gst_visual_src_setcaps (GstVisual * visual, GstCaps * caps)
-{
- gboolean res;
- GstStructure *structure;
- gint depth, pitch, rate;
- const gchar *fmt;
-
- structure = gst_caps_get_structure (caps, 0);
-
- GST_DEBUG_OBJECT (visual, "src pad got caps %" GST_PTR_FORMAT, caps);
-
- if (!gst_structure_get_int (structure, "width", &visual->width))
- goto error;
- if (!gst_structure_get_int (structure, "height", &visual->height))
- goto error;
- if (!(fmt = gst_structure_get_string (structure, "format")))
- goto error;
- if (!gst_structure_get_fraction (structure, "framerate", &visual->fps_n,
- &visual->fps_d))
- goto error;
-
- if (!strcmp (fmt, "BGR") || !strcmp (fmt, "RGB"))
- depth = 24;
- else if (!strcmp (fmt, "BGRx") || !strcmp (fmt, "xRGB"))
- depth = 32;
- else
- depth = 16;
+ visual_actor_set_video (visual->actor, visual->video);
visual_video_set_depth (visual->video,
visual_video_depth_enum_from_value (depth));
- visual_video_set_dimension (visual->video, visual->width, visual->height);
- pitch = GST_ROUND_UP_4 (visual->width * visual->video->bpp);
+ visual_video_set_dimension (visual->video, bscope->width, bscope->height);
+ pitch = GST_ROUND_UP_4 (bscope->width * visual->video->bpp);
visual_video_set_pitch (visual->video, pitch);
visual_actor_video_negotiate (visual->actor, 0, FALSE, FALSE);
- rate = GST_AUDIO_INFO_RATE (&visual->info);
-
- /* precalc some values */
- visual->outsize = visual->video->height * pitch;
- visual->spf = gst_util_uint64_scale_int (rate, visual->fps_d, visual->fps_n);
- visual->duration =
- gst_util_uint64_scale_int (GST_SECOND, visual->fps_d, visual->fps_n);
-
- res = gst_pad_set_caps (visual->srcpad, caps);
-
- return res;
-
- /* ERRORS */
-error:
- {
- GST_DEBUG_OBJECT (visual, "error parsing caps");
- return FALSE;
- }
-}
-
-static gboolean
-gst_visual_sink_setcaps (GstPad * pad, GstCaps * caps)
-{
- GstVisual *visual = GST_VISUAL (GST_PAD_PARENT (pad));
- GstAudioInfo info;
- gint rate;
-
- if (!gst_audio_info_from_caps (&info, caps))
- goto invalid_caps;
-
- visual->info = info;
-
- rate = GST_AUDIO_INFO_RATE (&info);
-
- /* this is how many samples we need to fill one frame at the requested
- * framerate. */
- if (visual->fps_n != 0) {
- visual->spf =
- gst_util_uint64_scale_int (rate, visual->fps_d, visual->fps_n);
- }
+ GST_DEBUG_OBJECT (visual, "WxH: %dx%d, bpp: %d, pitch: %d, depth: %d",
+ bscope->width, bscope->height, visual->video->bpp, pitch, depth);
return TRUE;
-
/* ERRORS */
-invalid_caps:
+no_actors:
{
- GST_ERROR_OBJECT (visual, "invalid caps received");
+ GST_ELEMENT_ERROR (visual, LIBRARY, INIT, (NULL),
+ ("could not create actors"));
+ gst_visual_clear_actors (visual);
return FALSE;
}
-}
-
-static gboolean
-gst_vis_src_negotiate (GstVisual * visual)
-{
- GstCaps *othercaps, *target;
- GstStructure *structure;
- GstCaps *caps;
- GstQuery *query;
- GstBufferPool *pool = NULL;
- GstStructure *config;
- guint size, min, max;
-
- caps = gst_pad_query_caps (visual->srcpad, NULL);
-
- /* see what the peer can do */
- othercaps = gst_pad_peer_query_caps (visual->srcpad, caps);
- if (othercaps) {
- target = othercaps;
- gst_caps_unref (caps);
-
- if (gst_caps_is_empty (target))
- goto no_format;
-
- target = gst_caps_truncate (target);
- } else {
- /* need a copy, we'll be modifying it when fixating */
- target = gst_caps_ref (caps);
- }
- GST_DEBUG_OBJECT (visual, "before fixate caps %" GST_PTR_FORMAT, target);
-
- target = gst_caps_make_writable (target);
- /* fixate in case something is not fixed. This does nothing if the value is
- * already fixed. For video we always try to fixate to something like
- * 320x240x25 by convention. */
- structure = gst_caps_get_structure (target, 0);
- gst_structure_fixate_field_nearest_int (structure, "width", DEFAULT_WIDTH);
- gst_structure_fixate_field_nearest_int (structure, "height", DEFAULT_HEIGHT);
- gst_structure_fixate_field_nearest_fraction (structure, "framerate",
- DEFAULT_FPS_N, DEFAULT_FPS_D);
- target = gst_caps_fixate (target);
-
- GST_DEBUG_OBJECT (visual, "after fixate caps %" GST_PTR_FORMAT, target);
-
- gst_visual_src_setcaps (visual, target);
-
- /* try to get a bufferpool now */
- /* find a pool for the negotiated caps now */
- query = gst_query_new_allocation (target, TRUE);
-
- if (!gst_pad_peer_query (visual->srcpad, query)) {
- /* not a problem, we deal with the defaults of the query */
- GST_DEBUG_OBJECT (visual, "allocation query failed");
- }
-
- if (gst_query_get_n_allocation_pools (query) > 0) {
- gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);
-
- size = MAX (size, visual->outsize);
- } else {
- pool = NULL;
- size = visual->outsize;
- min = max = 0;
- }
-
- if (pool == NULL) {
- /* no pool, just parameters, we can make our own */
- GST_DEBUG_OBJECT (visual, "no pool, making new pool");
- pool = gst_video_buffer_pool_new ();
- }
-
- /* and configure */
- config = gst_buffer_pool_get_config (pool);
- gst_buffer_pool_config_set_params (config, target, size, min, max);
- gst_buffer_pool_set_config (pool, config);
-
- if (visual->pool)
- gst_object_unref (visual->pool);
- visual->pool = pool;
-
- /* and activate */
- gst_buffer_pool_set_active (pool, TRUE);
-
- gst_caps_unref (target);
-
- return TRUE;
-
- /* ERRORS */
-no_format:
+no_realize:
{
- GST_ELEMENT_ERROR (visual, STREAM, FORMAT, (NULL),
- ("could not negotiate output format"));
- gst_caps_unref (target);
+ GST_ELEMENT_ERROR (visual, LIBRARY, INIT, (NULL),
+ ("could not realize actor"));
+ gst_visual_clear_actors (visual);
return FALSE;
}
}
static gboolean
-gst_visual_sink_event (GstPad * pad, GstObject * parent, GstEvent * event)
+gst_visual_render (GstBaseAudioVisualizer * bscope, GstBuffer * audio,
+ GstBuffer * video)
{
- GstVisual *visual;
- gboolean res;
-
- visual = GST_VISUAL (parent);
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_FLUSH_START:
- res = gst_pad_push_event (visual->srcpad, event);
- break;
- case GST_EVENT_FLUSH_STOP:
- /* reset QoS and adapter. */
- gst_visual_reset (visual);
- res = gst_pad_push_event (visual->srcpad, event);
- break;
- case GST_EVENT_CAPS:
- {
- GstCaps *caps;
-
- gst_event_parse_caps (event, &caps);
- res = gst_visual_sink_setcaps (pad, caps);
- gst_event_unref (event);
- break;
- }
- case GST_EVENT_SEGMENT:
- {
- /* the newsegment values are used to clip the input samples
- * and to convert the incomming timestamps to running time so
- * we can do QoS */
- gst_event_copy_segment (event, &visual->segment);
-
- /* and forward */
- res = gst_pad_push_event (visual->srcpad, event);
- break;
- }
- default:
- res = gst_pad_event_default (pad, parent, event);
- break;
- }
+ GstVisual *visual = GST_VISUAL (bscope);
+ GstMapInfo amap, vmap;
+ const guint16 *adata;
+ gint i, channels;
+ gboolean res = TRUE;
- return res;
-}
-
-static gboolean
-gst_visual_src_event (GstPad * pad, GstObject * parent, GstEvent * event)
-{
- GstVisual *visual;
- gboolean res;
-
- visual = GST_VISUAL (parent);
-
- switch (GST_EVENT_TYPE (event)) {
- case GST_EVENT_QOS:
- {
- gdouble proportion;
- GstClockTimeDiff diff;
- GstClockTime timestamp;
-
- gst_event_parse_qos (event, NULL, &proportion, &diff, &timestamp);
-
- /* save stuff for the _chain function */
- GST_OBJECT_LOCK (visual);
- visual->proportion = proportion;
- if (diff >= 0)
- /* we're late, this is a good estimate for next displayable
- * frame (see part-qos.txt) */
- visual->earliest_time = timestamp + 2 * diff + visual->duration;
- else
- visual->earliest_time = timestamp + diff;
-
- GST_OBJECT_UNLOCK (visual);
-
- res = gst_pad_push_event (visual->sinkpad, event);
- break;
- }
- case GST_EVENT_RECONFIGURE:
- /* dont't forward */
- gst_event_unref (event);
- res = TRUE;
- break;
- default:
- res = gst_pad_event_default (pad, parent, event);
- break;
- }
-
- return res;
-}
-
-static gboolean
-gst_visual_src_query (GstPad * pad, GstObject * parent, GstQuery * query)
-{
- gboolean res;
- GstVisual *visual;
-
- visual = GST_VISUAL (parent);
-
- switch (GST_QUERY_TYPE (query)) {
- case GST_QUERY_LATENCY:
- {
- /* We need to send the query upstream and add the returned latency to our
- * own */
- GstClockTime min_latency, max_latency;
- gboolean us_live;
- GstClockTime our_latency;
- guint max_samples;
-
- if ((res = gst_pad_peer_query (visual->sinkpad, query))) {
- gst_query_parse_latency (query, &us_live, &min_latency, &max_latency);
-
- GST_DEBUG_OBJECT (visual, "Peer latency: min %"
- GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
- GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
-
- /* the max samples we must buffer */
- max_samples = MAX (VISUAL_SAMPLES, visual->spf);
- our_latency =
- gst_util_uint64_scale_int (max_samples, GST_SECOND,
- GST_AUDIO_INFO_RATE (&visual->info));
-
- GST_DEBUG_OBJECT (visual, "Our latency: %" GST_TIME_FORMAT,
- GST_TIME_ARGS (our_latency));
-
- /* we add some latency but only if we need to buffer more than what
- * upstream gives us */
- min_latency += our_latency;
- if (max_latency != -1)
- max_latency += our_latency;
-
- GST_DEBUG_OBJECT (visual, "Calculated total latency : min %"
- GST_TIME_FORMAT " max %" GST_TIME_FORMAT,
- GST_TIME_ARGS (min_latency), GST_TIME_ARGS (max_latency));
-
- gst_query_set_latency (query, TRUE, min_latency, max_latency);
- }
- break;
- }
- case GST_QUERY_CAPS:
- {
- GstCaps *filter, *caps;
-
- gst_query_parse_caps (query, &filter);
- caps = gst_visual_getcaps (pad, filter);
- gst_query_set_caps_result (query, caps);
- gst_caps_unref (caps);
- res = TRUE;
- }
- default:
- res = gst_pad_query_default (pad, parent, query);
- break;
- }
-
- return res;
-}
-
-/* Make sure we are negotiated */
-static GstFlowReturn
-ensure_negotiated (GstVisual * visual)
-{
- gboolean reconfigure;
+ gst_buffer_map (audio, &amap, GST_MAP_READ);
+ gst_buffer_map (video, &vmap, GST_MAP_WRITE);
- reconfigure = gst_pad_check_reconfigure (visual->srcpad);
-
- /* we don't know an output format yet, pick one */
- if (reconfigure || !gst_pad_has_current_caps (visual->srcpad)) {
- if (!gst_vis_src_negotiate (visual))
- return GST_FLOW_NOT_NEGOTIATED;
- }
- return GST_FLOW_OK;
-}
-
-static GstFlowReturn
-gst_visual_chain (GstPad * pad, GstObject * parent, GstBuffer * buffer)
-{
- GstBuffer *outbuf = NULL;
- guint i;
- GstVisual *visual = GST_VISUAL (parent);
- GstFlowReturn ret = GST_FLOW_OK;
- guint avail;
- gint bpf, rate, channels;
-
- GST_DEBUG_OBJECT (visual, "chain function called");
-
- /* Make sure have an output format */
- ret = ensure_negotiated (visual);
- if (ret != GST_FLOW_OK) {
- gst_buffer_unref (buffer);
- goto beach;
- }
-
- /* resync on DISCONT */
- if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT)) {
- gst_adapter_clear (visual->adapter);
- }
-
- rate = GST_AUDIO_INFO_RATE (&visual->info);
- bpf = GST_AUDIO_INFO_BPF (&visual->info);
- channels = GST_AUDIO_INFO_CHANNELS (&visual->info);
-
- GST_DEBUG_OBJECT (visual,
- "Input buffer has %" G_GSIZE_FORMAT " samples, time=%" G_GUINT64_FORMAT,
- gst_buffer_get_size (buffer) / bpf, GST_BUFFER_TIMESTAMP (buffer));
-
- gst_adapter_push (visual->adapter, buffer);
-
- while (TRUE) {
- gboolean need_skip;
- const guint16 *data;
- guint64 dist, timestamp;
- GstMapInfo outmap;
-
- GST_DEBUG_OBJECT (visual, "processing buffer");
-
- avail = gst_adapter_available (visual->adapter);
- GST_DEBUG_OBJECT (visual, "avail now %u", avail);
-
- /* we need at least VISUAL_SAMPLES samples */
- if (avail < VISUAL_SAMPLES * bpf)
- break;
-
- /* we need at least enough samples to make one frame */
- if (avail < visual->spf * bpf)
- break;
-
- /* get timestamp of the current adapter byte */
- timestamp = gst_adapter_prev_timestamp (visual->adapter, &dist);
- if (GST_CLOCK_TIME_IS_VALID (timestamp)) {
- /* convert bytes to time */
- dist /= bpf;
- timestamp += gst_util_uint64_scale_int (dist, GST_SECOND, rate);
- }
-
- if (timestamp != -1) {
- gint64 qostime;
-
- /* QoS is done on running time */
- qostime = gst_segment_to_running_time (&visual->segment, GST_FORMAT_TIME,
- timestamp);
- qostime += visual->duration;
-
- GST_OBJECT_LOCK (visual);
- /* check for QoS, don't compute buffers that are known to be late */
- need_skip = visual->earliest_time != -1 &&
- qostime <= visual->earliest_time;
- GST_OBJECT_UNLOCK (visual);
-
- if (need_skip) {
- GST_WARNING_OBJECT (visual,
- "QoS: skip ts: %" GST_TIME_FORMAT ", earliest: %" GST_TIME_FORMAT,
- GST_TIME_ARGS (qostime), GST_TIME_ARGS (visual->earliest_time));
- goto skip;
- }
- }
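+  /* point the libvisual video object at the mapped output frame so the
+   * actor renders directly into our buffer */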
+ visual_video_set_buffer (visual->video, vmap.data);
- /* Read VISUAL_SAMPLES samples per channel */
- data =
- (const guint16 *) gst_adapter_map (visual->adapter,
- VISUAL_SAMPLES * bpf);
+ channels = GST_AUDIO_INFO_CHANNELS (&bscope->ainfo);
+ adata = (const guint16 *) amap.data;
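+  /* split the interleaved S16 samples into separate left/right buffers for
+   * libvisual; mono input is duplicated into both channels */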
#if defined(VISUAL_API_VERSION) && VISUAL_API_VERSION >= 4000 && VISUAL_API_VERSION < 5000
- {
- VisBuffer *lbuf, *rbuf;
- guint16 ldata[VISUAL_SAMPLES], rdata[VISUAL_SAMPLES];
- VisAudioSampleRateType vrate;
-
- lbuf = visual_buffer_new_with_buffer (ldata, sizeof (ldata), NULL);
- rbuf = visual_buffer_new_with_buffer (rdata, sizeof (rdata), NULL);
-
- if (channels == 2) {
- for (i = 0; i < VISUAL_SAMPLES; i++) {
- ldata[i] = *data++;
- rdata[i] = *data++;
- }
- } else {
- for (i = 0; i < VISUAL_SAMPLES; i++) {
- ldata[i] = *data;
- rdata[i] = *data++;
- }
- }
-
- switch (rate) {
- case 8000:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_8000;
- break;
- case 11250:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_11250;
- break;
- case 22500:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_22500;
- break;
- case 32000:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_32000;
- break;
- case 44100:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_44100;
- break;
- case 48000:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_48000;
- break;
- case 96000:
- vrate = VISUAL_AUDIO_SAMPLE_RATE_96000;
- break;
- default:
- visual_object_unref (VISUAL_OBJECT (lbuf));
- visual_object_unref (VISUAL_OBJECT (rbuf));
- GST_ERROR_OBJECT (visual, "unsupported rate %d", rate);
- ret = GST_FLOW_ERROR;
- goto beach;
- break;
- }
-
- visual_audio_samplepool_input_channel (visual->audio->samplepool,
- lbuf,
- vrate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
- (char *) VISUAL_AUDIO_CHANNEL_LEFT);
- visual_audio_samplepool_input_channel (visual->audio->samplepool, rbuf,
- vrate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
- (char *) VISUAL_AUDIO_CHANNEL_RIGHT);
+ {
+ VisBuffer *lbuf, *rbuf;
+ guint16 ldata[VISUAL_SAMPLES], rdata[VISUAL_SAMPLES];
+ VisAudioSampleRateType vrate;
- visual_object_unref (VISUAL_OBJECT (lbuf));
- visual_object_unref (VISUAL_OBJECT (rbuf));
+ lbuf = visual_buffer_new_with_buffer (ldata, sizeof (ldata), NULL);
+ rbuf = visual_buffer_new_with_buffer (rdata, sizeof (rdata), NULL);
- }
-#else
- if (visual->channels == 2) {
+ if (channels == 2) {
for (i = 0; i < VISUAL_SAMPLES; i++) {
- visual->audio->plugpcm[0][i] = *data++;
- visual->audio->plugpcm[1][i] = *data++;
+ ldata[i] = *adata++;
+ rdata[i] = *adata++;
}
} else {
for (i = 0; i < VISUAL_SAMPLES; i++) {
- visual->audio->plugpcm[0][i] = *data;
- visual->audio->plugpcm[1][i] = *data++;
+ ldata[i] = *adata;
+ rdata[i] = *adata++;
}
}
-#endif
- /* alloc a buffer if we don't have one yet, this happens
- * when we pushed a buffer in this while loop before */
- if (outbuf == NULL) {
- GST_DEBUG_OBJECT (visual, "allocating output buffer");
- ret = gst_buffer_pool_acquire_buffer (visual->pool, &outbuf, NULL);
- if (ret != GST_FLOW_OK) {
- gst_adapter_unmap (visual->adapter);
- goto beach;
- }
+ /* TODO(ensonic): move to setup */
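+    /* map the audio sample rate to the matching libvisual enum; rates
+     * without a VISUAL_AUDIO_SAMPLE_RATE_* value make the render fail */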
+ switch (bscope->ainfo.rate) {
+ case 8000:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_8000;
+ break;
+ case 11250:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_11250;
+ break;
+ case 22500:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_22500;
+ break;
+ case 32000:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_32000;
+ break;
+ case 44100:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_44100;
+ break;
+ case 48000:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_48000;
+ break;
+ case 96000:
+ vrate = VISUAL_AUDIO_SAMPLE_RATE_96000;
+ break;
+ default:
+ visual_object_unref (VISUAL_OBJECT (lbuf));
+ visual_object_unref (VISUAL_OBJECT (rbuf));
+ GST_ERROR_OBJECT (visual, "unsupported rate %d", bscope->ainfo.rate);
+ res = FALSE;
+ goto done;
}
- gst_buffer_map (outbuf, &outmap, GST_MAP_WRITE);
- visual_video_set_buffer (visual->video, outmap.data);
- visual_audio_analyze (visual->audio);
- visual_actor_run (visual->actor, visual->audio);
- visual_video_set_buffer (visual->video, NULL);
- gst_buffer_unmap (outbuf, &outmap);
- GST_DEBUG_OBJECT (visual, "rendered one frame");
-
- gst_adapter_unmap (visual->adapter);
-
- GST_BUFFER_TIMESTAMP (outbuf) = timestamp;
- GST_BUFFER_DURATION (outbuf) = visual->duration;
- ret = gst_pad_push (visual->srcpad, outbuf);
- outbuf = NULL;
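+    /* queue the per-channel sample buffers in libvisual's sample pool; they
+     * are consumed by the analysis step below */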
+ visual_audio_samplepool_input_channel (visual->audio->samplepool,
+ lbuf,
+ vrate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
+ (char *) VISUAL_AUDIO_CHANNEL_LEFT);
+ visual_audio_samplepool_input_channel (visual->audio->samplepool, rbuf,
+ vrate, VISUAL_AUDIO_SAMPLE_FORMAT_S16,
+ (char *) VISUAL_AUDIO_CHANNEL_RIGHT);
- skip:
- GST_DEBUG_OBJECT (visual, "finished frame, flushing %u samples from input",
- visual->spf);
+ visual_object_unref (VISUAL_OBJECT (lbuf));
+ visual_object_unref (VISUAL_OBJECT (rbuf));
- /* Flush out the number of samples per frame */
- gst_adapter_flush (visual->adapter, visual->spf * bpf);
-
- /* quit the loop if something was wrong */
- if (ret != GST_FLOW_OK)
- break;
- }
-
-beach:
-
- if (outbuf != NULL)
- gst_buffer_unref (outbuf);
-
- return ret;
-}
-
-static GstStateChangeReturn
-gst_visual_change_state (GstElement * element, GstStateChange transition)
-{
- GstVisual *visual = GST_VISUAL (element);
- GstStateChangeReturn ret;
-
- switch (transition) {
- case GST_STATE_CHANGE_NULL_TO_READY:
- visual->actor =
- visual_actor_new (GST_VISUAL_GET_CLASS (visual)->plugin->info->
- plugname);
- visual->video = visual_video_new ();
- visual->audio = visual_audio_new ();
- /* can't have a play without actors */
- if (!visual->actor || !visual->video)
- goto no_actors;
-
- if (visual_actor_realize (visual->actor) != 0)
- goto no_realize;
-
- visual_actor_set_video (visual->actor, visual->video);
- break;
- case GST_STATE_CHANGE_READY_TO_PAUSED:
- gst_visual_reset (visual);
- break;
- case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
- break;
- default:
- break;
}
-
- ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
-
- switch (transition) {
- case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
- break;
- case GST_STATE_CHANGE_PAUSED_TO_READY:
- if (visual->pool) {
- gst_buffer_pool_set_active (visual->pool, FALSE);
- gst_object_unref (visual->pool);
- visual->pool = NULL;
- }
- break;
- case GST_STATE_CHANGE_READY_TO_NULL:
- gst_visual_clear_actors (visual);
- break;
- default:
- break;
+#else
+  {
+    guint16 *ldata = visual->audio->plugpcm[0];
+    guint16 *rdata = visual->audio->plugpcm[1];
+
+    if (channels == 2) {
+      for (i = 0; i < VISUAL_SAMPLES; i++) {
+        ldata[i] = *adata++;
+        rdata[i] = *adata++;
+      }
+    } else {
+      for (i = 0; i < VISUAL_SAMPLES; i++) {
+        ldata[i] = *adata;
+        rdata[i] = *adata++;
+      }
+    }
  }
+#endif
- return ret;
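+  /* run libvisual's audio analysis, let the actor render one frame into the
+   * mapped video buffer, then detach the buffer again */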
+ visual_audio_analyze (visual->audio);
+ visual_actor_run (visual->actor, visual->audio);
+ visual_video_set_buffer (visual->video, NULL);
- /* ERRORS */
-no_actors:
- {
- GST_ELEMENT_ERROR (visual, LIBRARY, INIT, (NULL),
- ("could not create actors"));
- gst_visual_clear_actors (visual);
- return GST_STATE_CHANGE_FAILURE;
- }
-no_realize:
- {
- GST_ELEMENT_ERROR (visual, LIBRARY, INIT, (NULL),
- ("could not realize actor"));
- gst_visual_clear_actors (visual);
- return GST_STATE_CHANGE_FAILURE;
- }
+ GST_DEBUG_OBJECT (visual, "rendered one frame");
+done:
+ gst_buffer_unmap (video, &vmap);
+ gst_buffer_unmap (audio, &amap);
+ return res;
}
diff --git a/ext/libvisual/visual.h b/ext/libvisual/visual.h
index d8a9ec313..cc24941c6 100644
--- a/ext/libvisual/visual.h
+++ b/ext/libvisual/visual.h
@@ -28,6 +28,8 @@
#include <gst/audio/audio.h>
#include <libvisual/libvisual.h>
+#include "gstbaseaudiovisualizer.h"
+
G_BEGIN_DECLS
#define GST_TYPE_VISUAL (gst_visual_get_type())
@@ -42,45 +44,17 @@ typedef struct _GstVisualClass GstVisualClass;
struct _GstVisual
{
- GstElement element;
-
- /* pads */
- GstPad *sinkpad;
- GstPad *srcpad;
- GstSegment segment;
+ GstBaseAudioVisualizer element;
/* libvisual stuff */
VisAudio *audio;
VisVideo *video;
VisActor *actor;
-
- /* audio/video state */
- GstAudioInfo info;
-
- /* framerate numerator & denominator */
- gint fps_n;
- gint fps_d;
- gint width;
- gint height;
- GstClockTime duration;
- guint outsize;
- GstBufferPool *pool;
-
- /* samples per frame based on caps */
- guint spf;
-
- /* state stuff */
- GstAdapter *adapter;
- guint count;
-
- /* QoS stuff *//* with LOCK */
- gdouble proportion;
- GstClockTime earliest_time;
};
struct _GstVisualClass
{
- GstElementClass parent_class;
+ GstBaseAudioVisualizerClass parent_class;
VisPluginRef *plugin;
};