summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorGustavo Sverzut Barbieri <barbieri@gmail.com>2013-01-10 03:43:32 +0000
committerGustavo Sverzut Barbieri <barbieri@gmail.com>2013-01-10 03:43:32 +0000
commitdfb84c1657bfb14a5236b881193b81f4c0b8a69b (patch)
treeb51b210fc88a21eec8e5907b8bbfe12ebc669f90 /src
parent532284dbbe4259a9f2291f44d3eff376849e8031 (diff)
downloadefl-dfb84c1657bfb14a5236b881193b81f4c0b8a69b.tar.gz
efl: merge emotion.
this one was quite a huge work, but hopefully it's correct. NOTES: * removed vlc generic module, it should go into a separate package. * gstreamer is enabled by default (see --disable-gstreamer) * xine is disabled by default (see --enable-xine) * generic is always built statically if supported * gstreamer and xine can't be configured as static (just lacks command line options, build system supports it) * v4l2 is enabled by default on linux if eeze is built (see --disable-v4l2) * emotion_test moved to src/tests/emotion and depends on EFL_ENABLE_TESTS (--with-tests), but is still installed if enabled. TODO (need your help!): * fix warnings with gstreamer and xine engine * call engine shutdown functions if building as static * remove direct usage of PACKAGE_*_DIR and use eina_prefix * add eina_prefix checkme file as evas and others * add support for $EFL_RUN_IN_TREE * create separate package for emotion_generic_modules * check docs hierarchy (doxygen is segv'in here) SVN revision: 82501
Diffstat (limited to 'src')
-rw-r--r--src/Makefile.am5
-rw-r--r--src/Makefile_Edje.am10
-rw-r--r--src/Makefile_Emotion.am235
-rw-r--r--src/edje_external/emotion/emotion.c516
-rw-r--r--src/examples/Makefile.am2
-rw-r--r--src/examples/emotion/Makefile.am57
-rw-r--r--src/examples/emotion/emotion_basic_example.c81
-rw-r--r--src/examples/emotion/emotion_border_example.c238
-rw-r--r--src/examples/emotion/emotion_generic_example.c233
-rw-r--r--src/examples/emotion/emotion_generic_subtitle_example.c97
-rw-r--r--src/examples/emotion/emotion_signals_example.c173
-rw-r--r--src/lib/emotion/Emotion.h1332
-rw-r--r--src/lib/emotion/emotion_main.c464
-rw-r--r--src/lib/emotion/emotion_private.h137
-rw-r--r--src/lib/emotion/emotion_smart.c2133
-rw-r--r--src/modules/emotion/generic/Emotion_Generic_Plugin.h145
-rw-r--r--src/modules/emotion/generic/README79
-rw-r--r--src/modules/emotion/generic/emotion_generic.c1820
-rw-r--r--src/modules/emotion/generic/emotion_generic.h113
-rw-r--r--src/modules/emotion/gstreamer/emotion_alloc.c90
-rw-r--r--src/modules/emotion/gstreamer/emotion_convert.c251
-rw-r--r--src/modules/emotion/gstreamer/emotion_fakeeos.c70
-rw-r--r--src/modules/emotion/gstreamer/emotion_gstreamer.c2156
-rw-r--r--src/modules/emotion/gstreamer/emotion_gstreamer.h330
-rw-r--r--src/modules/emotion/gstreamer/emotion_sink.c1391
-rw-r--r--src/modules/emotion/xine/emotion_xine.c1723
-rw-r--r--src/modules/emotion/xine/emotion_xine.h98
-rw-r--r--src/modules/emotion/xine/emotion_xine_vo_out.c767
-rw-r--r--src/tests/emotion/data/bpause.pngbin0 -> 383 bytes
-rw-r--r--src/tests/emotion/data/bplay.pngbin0 -> 425 bytes
-rw-r--r--src/tests/emotion/data/bstop.pngbin0 -> 401 bytes
-rw-r--r--src/tests/emotion/data/e_logo.pngbin0 -> 7833 bytes
-rw-r--r--src/tests/emotion/data/fr1.pngbin0 -> 591 bytes
-rw-r--r--src/tests/emotion/data/fr2.pngbin0 -> 288 bytes
-rw-r--r--src/tests/emotion/data/fr3.pngbin0 -> 657 bytes
-rw-r--r--src/tests/emotion/data/fr4.pngbin0 -> 375 bytes
-rw-r--r--src/tests/emotion/data/fr5.pngbin0 -> 1366 bytes
-rw-r--r--src/tests/emotion/data/fr6.pngbin0 -> 699 bytes
-rw-r--r--src/tests/emotion/data/fr7.pngbin0 -> 1184 bytes
-rw-r--r--src/tests/emotion/data/h_slider.pngbin0 -> 917 bytes
-rw-r--r--src/tests/emotion/data/icon.edc14
-rw-r--r--src/tests/emotion/data/knob.pngbin0 -> 1076 bytes
-rw-r--r--src/tests/emotion/data/orb.pngbin0 -> 203 bytes
-rw-r--r--src/tests/emotion/data/pnl.pngbin0 -> 705 bytes
-rw-r--r--src/tests/emotion/data/sl.pngbin0 -> 225 bytes
-rw-r--r--src/tests/emotion/data/theme.edc1667
-rw-r--r--src/tests/emotion/data/tiles.pngbin0 -> 3026 bytes
-rw-r--r--src/tests/emotion/data/video_frame_bottom.pngbin0 -> 514 bytes
-rw-r--r--src/tests/emotion/data/video_frame_left.pngbin0 -> 2023 bytes
-rw-r--r--src/tests/emotion/data/video_frame_right.pngbin0 -> 2441 bytes
-rw-r--r--src/tests/emotion/data/video_frame_top.pngbin0 -> 471 bytes
-rw-r--r--src/tests/emotion/data/whb.pngbin0 -> 207 bytes
-rw-r--r--src/tests/emotion/data/window_inner_shadow.pngbin0 -> 30426 bytes
-rw-r--r--src/tests/emotion/emotion_test_main.c748
54 files changed, 17174 insertions, 1 deletions
diff --git a/src/Makefile.am b/src/Makefile.am
index 9dec122014..c530955d7b 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -42,6 +42,7 @@ include Makefile_Efreet.am
include Makefile_Eeze.am
include Makefile_EPhysics.am
include Makefile_Edje.am
+include Makefile_Emotion.am
.PHONY: benchmark examples
@@ -80,6 +81,7 @@ clean-local:
rm -rf lib/eeze/*.gcno
rm -rf lib/ephysics/*.gcno
rm -rf lib/edje/*.gcno
+ rm -rf lib/emotion/*.gcno
rm -rf modules/eina/mp/pass_through/*.gcno
rm -rf modules/eina/mp/one_big/*.gcno
rm -rf modules/eina/mp/chained_pool/*.gcno
@@ -119,5 +121,8 @@ clean-local:
rm -rf modules/edje/alsa_snd_player/*.gcno
rm -rf modules/edje/eet_snd_reader/*.gcno
rm -rf modules/edje/multisense_factory/*.gcno
+ rm -rf modules/emotion/xine/*.gcno
+ rm -rf modules/emotion/gstreamer/*.gcno
+ rm -rf modules/emotion/generic/*.gcno
rm -rf static_libs/liblinebreak/*.gcno
rm -rf static_libs/lz4/*.gcno
diff --git a/src/Makefile_Edje.am b/src/Makefile_Edje.am
index 6539c30087..f120f2cd54 100644
--- a/src/Makefile_Edje.am
+++ b/src/Makefile_Edje.am
@@ -257,3 +257,13 @@ $(EDJE_COMMON_USER_LDADD) \
@CHECK_LIBS@
endif
+
+# Useful to other modules that generate EDJ
+EDJE_CC = EFL_RUN_IN_TREE=1 $(top_builddir)/src/bin/edje/edje_cc
+EDJE_CC_FLAGS_VERBOSE_0 =
+EDJE_CC_FLAGS_VERBOSE_1 = -v
+EDJE_CC_FLAGS = $(EDJE_CC_FLAGS_VERBOSE_$(V)) -id $(srcdir) -fd $(srcdir)
+
+AM_V_EDJ = $(am__v_EDJ_$(V))
+am__v_EDJ_ = $(am__v_EDJ_$(AM_DEFAULT_VERBOSITY))
+am__v_EDJ_0 = @echo " EDJ " $@;
diff --git a/src/Makefile_Emotion.am b/src/Makefile_Emotion.am
new file mode 100644
index 0000000000..94dadc22b8
--- /dev/null
+++ b/src/Makefile_Emotion.am
@@ -0,0 +1,235 @@
+### Library
+
+lib_LTLIBRARIES += \
+lib/emotion/libemotion.la
+
+EMOTION_COMMON_CPPFLAGS = \
+-I$(top_srcdir)/src/lib/eina \
+-I$(top_builddir)/src/lib/eina \
+-I$(top_srcdir)/src/lib/eo \
+-I$(top_builddir)/src/lib/eo \
+-I$(top_srcdir)/src/lib/ecore \
+-I$(top_builddir)/src/lib/ecore \
+-I$(top_srcdir)/src/lib/ecore_x \
+-I$(top_builddir)/src/lib/ecore_x \
+-I$(top_srcdir)/src/lib/ecore_input \
+-I$(top_builddir)/src/lib/ecore_input \
+-I$(top_srcdir)/src/lib/ecore_evas \
+-I$(top_builddir)/src/lib/ecore_evas \
+-I$(top_srcdir)/src/lib/eet \
+-I$(top_builddir)/src/lib/eet \
+-I$(top_srcdir)/src/lib/evas \
+-I$(top_builddir)/src/lib/evas \
+-I$(top_srcdir)/src/lib/eio \
+-I$(top_builddir)/src/lib/eio \
+-I$(top_srcdir)/src/lib/eeze \
+-I$(top_builddir)/src/lib/eeze \
+-I$(top_srcdir)/src/lib/emotion \
+-I$(top_builddir)/src/lib/emotion \
+@EFL_COV_CFLAGS@ \
+@EMOTION_CFLAGS@
+
+EMOTION_COMMON_LDADD = \
+lib/eina/libeina.la \
+lib/eo/libeo.la \
+lib/ecore/libecore.la \
+lib/eet/libeet.la \
+lib/evas/libevas.la \
+lib/eio/libeio.la \
+@EFL_COV_LIBS@
+
+if EMOTION_HAVE_V4L2
+EMOTION_COMMON_LDADD += lib/eeze/libeeze.la
+endif
+
+installed_emotionmainheadersdir = $(includedir)/emotion-@VMAJ@
+dist_installed_emotionmainheaders_DATA = lib/emotion/Emotion.h
+
+# libemotion.la
+lib_emotion_libemotion_la_SOURCES = \
+lib/emotion/emotion_private.h \
+lib/emotion/emotion_smart.c \
+lib/emotion/emotion_main.c
+
+EMOTION_COMMON_LIBADD = $(EMOTION_COMMON_LDADD) @EMOTION_LIBS@
+EMOTION_COMMON_LDADD += @EMOTION_LDFLAGS@
+EMOTION_COMMON_USER_LIBADD = $(EMOTION_COMMON_LIBADD) lib/emotion/libemotion.la
+EMOTION_COMMON_USER_LDADD = $(EMOTION_COMMON_LDADD) lib/emotion/libemotion.la
+
+lib_emotion_libemotion_la_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS) \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/emotion\" \
+-DPACKAGE_BUILD_DIR=\"`pwd`/$(top_builddir)\" \
+-DEFL_EMOTION_BUILD
+
+lib_emotion_libemotion_la_LIBADD = $(EMOTION_COMMON_LIBADD)
+lib_emotion_libemotion_la_LDFLAGS = @EFL_LTLIBRARY_FLAGS@
+
+## Modules
+
+# Xine
+EMOTION_XINE_SOURCES = \
+modules/emotion/xine/emotion_xine.h \
+modules/emotion/xine/emotion_xine.c \
+modules/emotion/xine/emotion_xine_vo_out.c
+
+if EMOTION_STATIC_BUILD_XINE
+lib_emotion_libemotion_la_SOURCES += $(EMOTION_XINE_SOURCES)
+else
+if EMOTION_BUILD_XINE
+emotionmodulexinedir = $(libdir)/emotion/modules/xine/$(MODULE_ARCH)
+emotionmodulexine_LTLIBRARIES = modules/emotion/xine/module.la
+modules_emotion_xine_module_la_SOURCES = $(EMOTION_XINE_SOURCES)
+modules_emotion_xine_module_la_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS) \
+@EMOTION_MODULE_XINE_CFLAGS@
+modules_emotion_xine_module_la_LIBADD = \
+$(EMOTION_COMMON_USER_LIBADD) \
+@EMOTION_MODULE_XINE_LIBS@
+modules_emotion_xine_module_la_LDFLAGS = -module @EFL_LTMODULE_FLAGS@
+modules_emotion_xine_module_la_LIBTOOLFLAGS = --tag=disable-static
+endif
+endif
+
+# Gstreamer
+EMOTION_GSTREAMER_SOURCES = \
+modules/emotion/gstreamer/emotion_gstreamer.h \
+modules/emotion/gstreamer/emotion_gstreamer.c \
+modules/emotion/gstreamer/emotion_alloc.c \
+modules/emotion/gstreamer/emotion_convert.c \
+modules/emotion/gstreamer/emotion_fakeeos.c \
+modules/emotion/gstreamer/emotion_sink.c
+
+if EMOTION_STATIC_BUILD_GSTREAMER
+lib_emotion_libemotion_la_SOURCES += $(EMOTION_GSTREAMER_SOURCES)
+if HAVE_ECORE_X
+EMOTION_COMMON_LDADD += \
+lib/ecore_evas/libecore_evas.la \
+lib/ecore_x/libecore_x.la
+endif
+else
+if EMOTION_BUILD_GSTREAMER
+emotionmodulegstreamerdir = $(libdir)/emotion/modules/gstreamer/$(MODULE_ARCH)
+emotionmodulegstreamer_LTLIBRARIES = modules/emotion/gstreamer/module.la
+modules_emotion_gstreamer_module_la_SOURCES = $(EMOTION_GSTREAMER_SOURCES)
+modules_emotion_gstreamer_module_la_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS) \
+@EMOTION_MODULE_GSTREAMER_CFLAGS@
+modules_emotion_gstreamer_module_la_LIBADD = \
+$(EMOTION_COMMON_USER_LIBADD) \
+@EMOTION_MODULE_GSTREAMER_LIBS@
+modules_emotion_gstreamer_module_la_LDFLAGS = -module @EFL_LTMODULE_FLAGS@
+modules_emotion_gstreamer_module_la_LIBTOOLFLAGS = --tag=disable-static
+if HAVE_ECORE_X
+modules_emotion_gstreamer_module_la_LIBADD += \
+lib/ecore_evas/libecore_evas.la \
+lib/ecore_x/libecore_x.la
+endif
+endif
+endif
+
+# Generic
+EMOTION_GENERIC_SOURCES = \
+modules/emotion/generic/emotion_generic.h \
+modules/emotion/generic/emotion_generic.c
+
+if EMOTION_STATIC_BUILD_GENERIC
+lib_emotion_libemotion_la_SOURCES += $(EMOTION_GENERIC_SOURCES)
+else
+if EMOTION_BUILD_GENERIC
+emotionmodulegenericdir = $(libdir)/emotion/modules/generic/$(MODULE_ARCH)
+emotionmodulegeneric_LTLIBRARIES = modules/emotion/generic/module.la
+modules_emotion_generic_module_la_SOURCES = $(EMOTION_GENERIC_SOURCES)
+modules_emotion_generic_module_la_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS)
+modules_emotion_generic_module_la_LIBADD = \
+$(EMOTION_COMMON_USER_LIBADD)
+modules_emotion_generic_module_la_LDFLAGS = -module @EFL_LTMODULE_FLAGS@
+modules_emotion_generic_module_la_LIBTOOLFLAGS = --tag=disable-static
+endif
+endif
+
+if EMOTION_BUILD_GENERIC
+dist_installed_emotionmainheaders_DATA += \
+modules/emotion/generic/Emotion_Generic_Plugin.h
+endif
+
+
+# Edje_External
+emotionedjeexternaldir = $(libdir)/edje/modules/emotion/$(MODULE_ARCH)
+emotionedjeexternal_LTLIBRARIES = edje_external/emotion/module.la
+
+edje_external_emotion_module_la_SOURCES = \
+edje_external/emotion/emotion.c
+edje_external_emotion_module_la_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS) \
+$(EDJE_COMMON_CPPFLAGS)
+edje_external_emotion_module_la_LIBADD = \
+$(EMOTION_COMMON_USER_LIBADD) \
+$(EDJE_COMMON_USER_LIBADD)
+edje_external_emotion_module_la_LDFLAGS = -module @EFL_LTMODULE_FLAGS@
+edje_external_emotion_module_la_LIBTOOLFLAGS = --tag=disable-static
+
+### Binary
+
+### Unit tests
+
+if EFL_ENABLE_TESTS
+
+bin_PROGRAMS += tests/emotion/emotion_test
+
+tests_emotion_emotion_test_SOURCES = \
+tests/emotion/emotion_test_main.c
+
+tests_emotion_emotion_test_CPPFLAGS = \
+$(EMOTION_COMMON_CPPFLAGS) \
+-I$(top_srcdir)/src/lib/edje \
+-I$(top_builddir)/src/lib/edje \
+-DPACKAGE_BIN_DIR=\"$(bindir)\" \
+-DPACKAGE_LIB_DIR=\"$(libdir)\" \
+-DPACKAGE_DATA_DIR=\"$(datadir)/emotion\" \
+-DPACKAGE_BUILD_DIR=\"`pwd`/$(top_builddir)\"
+
+tests_emotion_emotion_test_LDADD = \
+$(EMOTION_COMMON_USER_LDADD) \
+lib/edje/libedje.la
+
+tests/emotion/data/theme.edj: tests/emotion/data/theme.edc
+ $(AM_V_EDJ)$(EDJE_CC) $(EDJE_CC_FLAGS) -id $(srcdir)/tests/emotion/data $< $@
+
+EMOTION_DATA_FILES = \
+tests/emotion/data/bpause.png \
+tests/emotion/data/bplay.png \
+tests/emotion/data/bstop.png \
+tests/emotion/data/e_logo.png \
+tests/emotion/data/fr1.png \
+tests/emotion/data/fr2.png \
+tests/emotion/data/fr3.png \
+tests/emotion/data/fr4.png \
+tests/emotion/data/fr5.png \
+tests/emotion/data/fr6.png \
+tests/emotion/data/fr7.png \
+tests/emotion/data/h_slider.png \
+tests/emotion/data/icon.edc \
+tests/emotion/data/knob.png \
+tests/emotion/data/orb.png \
+tests/emotion/data/pnl.png \
+tests/emotion/data/sl.png \
+tests/emotion/data/theme.edc \
+tests/emotion/data/tiles.png \
+tests/emotion/data/video_frame_bottom.png \
+tests/emotion/data/video_frame_left.png \
+tests/emotion/data/video_frame_right.png \
+tests/emotion/data/video_frame_top.png \
+tests/emotion/data/whb.png \
+tests/emotion/data/window_inner_shadow.png
+
+emotiondatafilesdir = $(datadir)/emotion/data
+emotiondatafiles_DATA = tests/emotion/data/theme.edj
+endif
+
+EXTRA_DIST += \
+$(EMOTION_DATA_FILES) \
+modules/emotion/generic/README
diff --git a/src/edje_external/emotion/emotion.c b/src/edje_external/emotion/emotion.c
new file mode 100644
index 0000000000..7ae0a0e5ca
--- /dev/null
+++ b/src/edje_external/emotion/emotion.c
@@ -0,0 +1,516 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Edje.h>
+
+#include "Emotion.h"
+
+typedef struct _External_Emotion_Params External_Emotion_Params;
+typedef struct _External_Emotion_Signals_Proxy_Context External_Emotion_Signals_Proxy_Context;
+
+struct _External_Emotion_Params
+{
+#define _STR(M) const char *M
+#define _BOOL(M) Eina_Bool M:1; Eina_Bool M##_exists:1
+#define _INT(M) int M; Eina_Bool M##_exists:1
+#define _DOUBLE(M) double M; Eina_Bool M##_exists:1
+ _STR(file);
+ _BOOL(play);
+ _DOUBLE(position);
+ _BOOL(smooth_scale);
+ _DOUBLE(audio_volume);
+ _BOOL(audio_mute);
+ _INT(audio_channel);
+ _BOOL(video_mute);
+ _INT(video_channel);
+ _BOOL(spu_mute);
+ _INT(spu_channel);
+ _INT(chapter);
+ _DOUBLE(play_speed);
+ _DOUBLE(play_length);
+ //_INT(vis);
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+};
+
+struct _External_Emotion_Signals_Proxy_Context
+{
+ const char *emission;
+ const char *source;
+ Evas_Object *edje;
+};
+
+static int _log_dom = -1;
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_log_dom, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_log_dom, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_log_dom, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_log_dom, __VA_ARGS__)
+#define DBG(...) EINA_LOG_DOM_DBG(_log_dom, __VA_ARGS__)
+
+static const char *_external_emotion_engines[] = {
+#ifdef EMOTION_BUILD_XINE
+ "xine",
+#endif
+#ifdef EMOTION_BUILD_GSTREAMER
+ "gstreamer",
+#endif
+#ifdef EMOTION_BUILD_GENERIC
+ "generic",
+#endif
+ NULL,
+};
+
+static const char _external_emotion_engine_def[] =
+#if defined(EMOTION_BUILD_XINE)
+ "xine";
+#elif defined(EMOTION_BUILD_GSTREAMER)
+ "gstreamer";
+#elif defined(EMOTION_BUILD_GENERIC)
+ "generic";
+#else
+ "impossible";
+#endif
+
+static void
+_external_emotion_signal_proxy_free_cb(void *data, Evas *e EINA_UNUSED, Evas_Object *obj EINA_UNUSED, void *event_info EINA_UNUSED)
+{
+ External_Emotion_Signals_Proxy_Context *ctxt = data;
+ free(ctxt);
+}
+
+static void
+_external_emotion_signal_proxy_cb(void *data, Evas_Object *obj EINA_UNUSED, void *event_info EINA_UNUSED)
+{
+ External_Emotion_Signals_Proxy_Context *ctxt = data;
+ // TODO: Is it worth to check Evas_Smart_Cb_Description and do something
+ // TODO: with event_info given its description?
+ edje_object_signal_emit(ctxt->edje, ctxt->emission, ctxt->source);
+}
+
+static Evas_Object *
+_external_emotion_add(void *data EINA_UNUSED, Evas *evas, Evas_Object *edje EINA_UNUSED, const Eina_List *params, const char *part_name)
+{
+ const Evas_Smart_Cb_Description **cls_descs, **inst_descs;
+ unsigned int cls_count, inst_count, total;
+ External_Emotion_Signals_Proxy_Context *ctxt;
+ Evas_Object *obj;
+ const char *engine;
+
+ if (!edje_external_param_choice_get(params, "engine", &engine))
+ engine = NULL;
+ if (!engine) engine = _external_emotion_engine_def;
+
+ obj = emotion_object_add(evas);
+ if (!emotion_object_init(obj, engine))
+ {
+ ERR("failed to initialize emotion with engine '%s'.", engine);
+ return NULL;
+ }
+
+ evas_object_smart_callbacks_descriptions_get
+ (obj, &cls_descs, &cls_count, &inst_descs, &inst_count);
+
+ total = cls_count + inst_count;
+ if (!total) goto end;
+ ctxt = malloc(sizeof(External_Emotion_Signals_Proxy_Context) * total);
+ if (!ctxt) goto end;
+ evas_object_event_callback_add
+ (obj, EVAS_CALLBACK_DEL, _external_emotion_signal_proxy_free_cb, ctxt);
+
+ for (; cls_count > 0; cls_count--, cls_descs++, ctxt++)
+ {
+ const Evas_Smart_Cb_Description *d = *cls_descs;
+ ctxt->emission = d->name;
+ ctxt->source = part_name;
+ ctxt->edje = edje;
+ evas_object_smart_callback_add
+ (obj, d->name, _external_emotion_signal_proxy_cb, ctxt);
+ }
+
+ for (; inst_count > 0; inst_count--, inst_descs++, ctxt++)
+ {
+ const Evas_Smart_Cb_Description *d = *inst_descs;
+ ctxt->emission = d->name;
+ ctxt->source = part_name;
+ ctxt->edje = edje;
+ evas_object_smart_callback_add
+ (obj, d->name, _external_emotion_signal_proxy_cb, ctxt);
+ }
+
+ end:
+ return obj;
+}
+
+static void
+_external_emotion_signal(void *data EINA_UNUSED, Evas_Object *obj EINA_UNUSED, const char *signal, const char *source)
+{
+ DBG("External Signal received: '%s' '%s'", signal, source);
+}
+
+static void
+_external_emotion_state_set(void *data EINA_UNUSED, Evas_Object *obj, const void *from_params, const void *to_params, float pos EINA_UNUSED)
+{
+ const External_Emotion_Params *p;
+
+ if (to_params) p = to_params;
+ else if (from_params) p = from_params;
+ else return;
+
+#define _STR(M) if (p->M) emotion_object_##M##_set(obj, p->M)
+#define _BOOL(M) if (p->M##_exists) emotion_object_##M##_set(obj, p->M)
+#define _INT(M) if (p->M##_exists) emotion_object_##M##_set(obj, p->M)
+#define _DOUBLE(M) if (p->M##_exists) emotion_object_##M##_set(obj, p->M)
+ _STR(file);
+ _BOOL(play);
+ //_DOUBLE(position);
+ if (p->position_exists)
+ WRN("position should not be set from state description! Ignored.");
+ _BOOL(smooth_scale);
+ _DOUBLE(audio_volume);
+ _BOOL(audio_mute);
+ _INT(audio_channel);
+ _BOOL(video_mute);
+ _INT(video_channel);
+ _BOOL(spu_mute);
+ _INT(spu_channel);
+ _INT(chapter);
+ _DOUBLE(play_speed);
+ if (p->play_length_exists) ERR("play_length is read-only");
+ //_INT(vis);
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+}
+
+static Eina_Bool
+_external_emotion_param_set(void *data EINA_UNUSED, Evas_Object *obj, const Edje_External_Param *param)
+{
+ if (!strcmp(param->name, "engine"))
+ {
+ // TODO
+ WRN("engine is a property that can be set only at object creation!");
+ return EINA_FALSE;
+ }
+
+#define _STR(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_STRING) \
+ { \
+ emotion_object_##M##_set(obj, param->s); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _BOOL(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_BOOL) \
+ { \
+ emotion_object_##M##_set(obj, param->i); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _INT(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_INT) \
+ { \
+ emotion_object_##M##_set(obj, param->i); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _DOUBLE(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_DOUBLE) \
+ { \
+ emotion_object_##M##_set(obj, param->d); \
+ return EINA_TRUE; \
+ } \
+ }
+
+ if (0) {} // so else if works...
+ _STR(file)
+ _BOOL(play)
+ _DOUBLE(position)
+ _BOOL(smooth_scale)
+ _DOUBLE(audio_volume)
+ _BOOL(audio_mute)
+ _INT(audio_channel)
+ _BOOL(video_mute)
+ _INT(video_channel)
+ _BOOL(spu_mute)
+ _INT(spu_channel)
+ _INT(chapter)
+ _DOUBLE(play_speed)
+ else if (!strcmp(param->name, "play_length"))
+ {
+ ERR("play_length is read-only");
+ return EINA_FALSE;
+ }
+ //_INT(vis);
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+
+ ERR("unknown parameter '%s' of type '%s'",
+ param->name, edje_external_param_type_str(param->type));
+
+ return EINA_FALSE;
+}
+
+static Eina_Bool
+_external_emotion_param_get(void *data EINA_UNUSED, const Evas_Object *obj, Edje_External_Param *param)
+{
+#define _STR(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_STRING) \
+ { \
+ param->s = emotion_object_##M##_get(obj); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _BOOL(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_BOOL) \
+ { \
+ param->i = emotion_object_##M##_get(obj); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _INT(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_INT) \
+ { \
+ param->i = emotion_object_##M##_get(obj); \
+ return EINA_TRUE; \
+ } \
+ }
+#define _DOUBLE(M) \
+ else if (!strcmp(param->name, #M)) \
+ { \
+ if (param->type == EDJE_EXTERNAL_PARAM_TYPE_DOUBLE) \
+ { \
+ param->d = emotion_object_##M##_get(obj); \
+ return EINA_TRUE; \
+ } \
+ }
+
+ if (0) {} // so else if works...
+ _STR(file)
+ _BOOL(play)
+ _DOUBLE(position)
+ _BOOL(smooth_scale)
+ _DOUBLE(audio_volume)
+ _BOOL(audio_mute)
+ _INT(audio_channel)
+ _BOOL(video_mute)
+ _INT(video_channel)
+ _BOOL(spu_mute)
+ _INT(spu_channel)
+ _INT(chapter)
+ _DOUBLE(play_speed)
+ _DOUBLE(play_length)
+ //_INT(vis)
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+
+ ERR("unknown parameter '%s' of type '%s'",
+ param->name, edje_external_param_type_str(param->type));
+
+ return EINA_FALSE;
+}
+
+static void *
+_external_emotion_params_parse(void *data EINA_UNUSED, Evas_Object *obj EINA_UNUSED, const Eina_List *params)
+{
+ const Edje_External_Param *param;
+ const Eina_List *l;
+ External_Emotion_Params *p = calloc(1, sizeof(External_Emotion_Params));
+ if (!p) return NULL;
+
+ EINA_LIST_FOREACH(params, l, param)
+ {
+#define _STR(M) \
+ if (!strcmp(param->name, #M)) p->M = eina_stringshare_add(param->s)
+#define _BOOL(M) \
+ if (!strcmp(param->name, #M)) \
+ { \
+ p->M = param->i; \
+ p->M##_exists = EINA_TRUE; \
+ }
+#define _INT(M) \
+ if (!strcmp(param->name, #M)) \
+ { \
+ p->M = param->i; \
+ p->M##_exists = EINA_TRUE; \
+ }
+#define _DOUBLE(M) \
+ if (!strcmp(param->name, #M)) \
+ { \
+ p->M = param->d; \
+ p->M##_exists = EINA_TRUE; \
+ }
+
+ _STR(file);
+ _BOOL(play);
+ _DOUBLE(position);
+ _BOOL(smooth_scale);
+ _DOUBLE(audio_volume);
+ _BOOL(audio_mute);
+ _INT(audio_channel);
+ _BOOL(video_mute);
+ _INT(video_channel);
+ _BOOL(spu_mute);
+ _INT(spu_channel);
+ _INT(chapter);
+ _DOUBLE(play_speed);
+ _DOUBLE(play_length);
+ //_INT(vis);
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+ }
+
+ return p;
+}
+
+static void
+_external_emotion_params_free(void *params)
+{
+ External_Emotion_Params *p = params;
+
+#define _STR(M) eina_stringshare_del(p->M)
+#define _BOOL(M) do {} while (0)
+#define _INT(M) do {} while (0)
+#define _DOUBLE(M) do {} while (0)
+ _STR(file);
+ _BOOL(play);
+ _DOUBLE(position);
+ _BOOL(smooth_scale);
+ _DOUBLE(audio_volume);
+ _BOOL(audio_mute);
+ _INT(audio_channel);
+ _BOOL(video_mute);
+ _INT(video_channel);
+ _BOOL(spu_mute);
+ _INT(spu_channel);
+ _INT(chapter);
+ _DOUBLE(play_speed);
+ _DOUBLE(play_length);
+ //_INT(vis);
+#undef _STR
+#undef _BOOL
+#undef _INT
+#undef _DOUBLE
+ free(p);
+}
+
+static const char *
+_external_emotion_label_get(void *data EINA_UNUSED)
+{
+ return "Emotion";
+}
+
+static Evas_Object *
+_external_emotion_icon_add(void *data EINA_UNUSED, Evas *e)
+{
+ Evas_Object *ic;
+ int w = 0, h = 0;
+
+ ic = edje_object_add(e);
+ edje_object_file_set(ic, PACKAGE_DATA_DIR"/data/icon.edj", "icon");
+ edje_object_size_min_get(ic, &w, &h);
+ if (w < 1) w = 20;
+ if (h < 1) h = 10;
+ evas_object_size_hint_min_set(ic, w, h);
+ evas_object_size_hint_max_set(ic, w, h);
+
+ return ic;
+}
+
+static const char *
+_external_emotion_translate(void *data EINA_UNUSED, const char *orig)
+{
+ // in future, mark all params as translatable and use dgettext()
+ // with "emotion" text domain here.
+ return orig;
+}
+
+static Edje_External_Param_Info _external_emotion_params[] = {
+ EDJE_EXTERNAL_PARAM_INFO_CHOICE_FULL
+ ("engine", _external_emotion_engine_def, _external_emotion_engines),
+ EDJE_EXTERNAL_PARAM_INFO_STRING("file"),
+ EDJE_EXTERNAL_PARAM_INFO_BOOL_DEFAULT("play", EINA_FALSE),
+ EDJE_EXTERNAL_PARAM_INFO_DOUBLE("position"),
+ EDJE_EXTERNAL_PARAM_INFO_BOOL_DEFAULT("smooth_scale", EINA_FALSE),
+ EDJE_EXTERNAL_PARAM_INFO_DOUBLE_DEFAULT("audio_volume", 0.9),
+ EDJE_EXTERNAL_PARAM_INFO_BOOL_DEFAULT("audio_mute", EINA_FALSE),
+ EDJE_EXTERNAL_PARAM_INFO_INT_DEFAULT("audio_channel", 0),
+ EDJE_EXTERNAL_PARAM_INFO_BOOL_DEFAULT("video_mute", EINA_FALSE),
+ EDJE_EXTERNAL_PARAM_INFO_INT_DEFAULT("video_channel", 0),
+ EDJE_EXTERNAL_PARAM_INFO_BOOL_DEFAULT("spu_mute", EINA_FALSE),
+ EDJE_EXTERNAL_PARAM_INFO_INT_DEFAULT("spu_channel", 0),
+ EDJE_EXTERNAL_PARAM_INFO_INT("chapter"),
+ EDJE_EXTERNAL_PARAM_INFO_DOUBLE_DEFAULT("play_speed", 1.0),
+ EDJE_EXTERNAL_PARAM_INFO_DOUBLE("play_length"),
+ //EDJE_EXTERNAL_PARAM_INFO_CHOICE_FULL("vis", ...),
+ EDJE_EXTERNAL_PARAM_INFO_SENTINEL
+};
+
+static const Edje_External_Type _external_emotion_type = {
+ .abi_version = EDJE_EXTERNAL_TYPE_ABI_VERSION,
+ .module = "emotion",
+ .module_name = "Emotion",
+ .add = _external_emotion_add,
+ .state_set = _external_emotion_state_set,
+ .signal_emit = _external_emotion_signal,
+ .param_set = _external_emotion_param_set,
+ .param_get = _external_emotion_param_get,
+ .params_parse = _external_emotion_params_parse,
+ .params_free = _external_emotion_params_free,
+ .label_get = _external_emotion_label_get,
+ .description_get = NULL,
+ .icon_add = _external_emotion_icon_add,
+ .preview_add = NULL,
+ .translate = _external_emotion_translate,
+ .parameters_info = _external_emotion_params,
+ .data = NULL
+};
+
+static Edje_External_Type_Info _external_emotion_types[] =
+{
+ {"emotion", &_external_emotion_type},
+ {NULL, NULL}
+};
+
+static Eina_Bool
+external_emotion_mod_init(void)
+{
+ _log_dom = eina_log_domain_register
+ ("emotion-externals", EINA_COLOR_LIGHTBLUE);
+ edje_external_type_array_register(_external_emotion_types);
+ return EINA_TRUE;
+}
+
+static void
+external_emotion_mod_shutdown(void)
+{
+ edje_external_type_array_unregister(_external_emotion_types);
+ eina_log_domain_unregister(_log_dom);
+ _log_dom = -1;
+}
+
+EINA_MODULE_INIT(external_emotion_mod_init);
+EINA_MODULE_SHUTDOWN(external_emotion_mod_shutdown);
diff --git a/src/examples/Makefile.am b/src/examples/Makefile.am
index d46cfedf64..610627764d 100644
--- a/src/examples/Makefile.am
+++ b/src/examples/Makefile.am
@@ -1,6 +1,6 @@
MAINTAINERCLEANFILES = Makefile.in
-SUBDIRS = eina eo eet evas ecore eio edbus ephysics edje
+SUBDIRS = eina eo eet evas ecore eio edbus ephysics edje emotion
.PHONY: examples install-examples
diff --git a/src/examples/emotion/Makefile.am b/src/examples/emotion/Makefile.am
new file mode 100644
index 0000000000..b1bf86e654
--- /dev/null
+++ b/src/examples/emotion/Makefile.am
@@ -0,0 +1,57 @@
+MAINTAINERCLEANFILES = Makefile.in
+
+AM_CPPFLAGS = \
+-I$(top_srcdir)/src/lib/eina \
+-I$(top_srcdir)/src/lib/eo \
+-I$(top_srcdir)/src/lib/evas \
+-I$(top_srcdir)/src/lib/ecore \
+-I$(top_srcdir)/src/lib/ecore_evas \
+-I$(top_srcdir)/src/lib/emotion \
+-I$(top_builddir)/src/lib/eina \
+-I$(top_builddir)/src/lib/eo \
+-I$(top_builddir)/src/lib/evas \
+-I$(top_builddir)/src/lib/ecore \
+-I$(top_builddir)/src/lib/ecore_evas \
+-I$(top_builddir)/src/lib/emotion \
+@EMOTION_CFLAGS@
+
+LDADD = \
+$(top_builddir)/src/lib/eina/libeina.la \
+$(top_builddir)/src/lib/eo/libeo.la \
+$(top_builddir)/src/lib/evas/libevas.la \
+$(top_builddir)/src/lib/ecore/libecore.la \
+$(top_builddir)/src/lib/ecore_evas/libecore_evas.la \
+$(top_builddir)/src/lib/emotion/libemotion.la \
+@EMOTION_LDFLAGS@
+
+EXTRA_PROGRAMS = \
+emotion_basic_example \
+emotion_generic_example \
+emotion_generic_subtitle_example \
+emotion_border_example \
+emotion_signals_example
+
+SRCS = \
+emotion_basic_example.c \
+emotion_generic_example.c \
+emotion_generic_subtitle_example.c \
+emotion_border_example.c \
+emotion_signals_example.c
+
+examples: $(EXTRA_PROGRAMS)
+
+clean-local:
+ rm -f $(EXTRA_PROGRAMS)
+
+install-examples:
+ mkdir -p $(datadir)/emotion/examples
+ $(install_sh_DATA) -c $(SRCS) $(datadir)/emotion/examples
+
+uninstall-local:
+ for f in $(SRCS); do \
+ rm -f $(datadir)/emotion/examples/$$f ; \
+ done
+
+if ALWAYS_BUILD_EXAMPLES
+noinst_PROGRAMS = $(EXTRA_PROGRAMS)
+endif
diff --git a/src/examples/emotion/emotion_basic_example.c b/src/examples/emotion/emotion_basic_example.c
new file mode 100644
index 0000000000..7e3e4c2e4b
--- /dev/null
+++ b/src/examples/emotion/emotion_basic_example.c
@@ -0,0 +1,81 @@
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Evas.h>
+#include <Emotion.h>
+#include <stdio.h>
+
+#define WIDTH (320)
+#define HEIGHT (240)
+
+static void
+_playback_started_cb(void *data, Evas_Object *o, void *event_info)
+{
+ printf("Emotion object started playback.\n");
+}
+
+int
+main(int argc, const char *argv[])
+{
+ Ecore_Evas *ee;
+ Evas *e;
+ Evas_Object *bg, *em;
+ const char *filename = NULL;
+
+ if (argc < 2)
+ {
+ printf("One argument is necessary. Usage:\n");
+ printf("\t%s <filename>\n", argv[0]);
+ }
+
+ filename = argv[1];
+
+ if (!ecore_evas_init())
+ return EXIT_FAILURE;
+
+ /* this will give you a window with an Evas canvas under the first
+ * engine available */
+ ee = ecore_evas_new(NULL, 10, 10, WIDTH, HEIGHT, NULL);
+ if (!ee)
+ goto error;
+
+ ecore_evas_show(ee);
+
+ /* the canvas pointer, de facto */
+ e = ecore_evas_get(ee);
+
+ /* adding a background to this example */
+ bg = evas_object_rectangle_add(e);
+ evas_object_name_set(bg, "our dear rectangle");
+ evas_object_color_set(bg, 255, 255, 255, 255); /* white bg */
+ evas_object_move(bg, 0, 0); /* at canvas' origin */
+ evas_object_resize(bg, WIDTH, HEIGHT); /* covers full canvas */
+ evas_object_show(bg);
+
+ /* Creating the emotion object */
+ em = emotion_object_add(e);
+ emotion_object_init(em, NULL);
+
+ evas_object_smart_callback_add(
+ em, "playback_started", _playback_started_cb, NULL);
+
+ emotion_object_file_set(em, filename);
+
+ evas_object_move(em, 0, 0);
+ evas_object_resize(em, WIDTH, HEIGHT);
+ evas_object_show(em);
+
+ emotion_object_play_set(em, EINA_TRUE);
+
+ ecore_main_loop_begin();
+
+ ecore_evas_free(ee);
+ ecore_evas_shutdown();
+ return 0;
+
+error:
+ fprintf(stderr, "you got to have at least one evas engine built and linked"
+ " up to ecore-evas for this example to run properly.\n");
+
+ ecore_evas_shutdown();
+ return -1;
+}
diff --git a/src/examples/emotion/emotion_border_example.c b/src/examples/emotion/emotion_border_example.c
new file mode 100644
index 0000000000..9df53f4333
--- /dev/null
+++ b/src/examples/emotion/emotion_border_example.c
@@ -0,0 +1,238 @@
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Evas.h>
+#include <Emotion.h>
+#include <stdio.h>
+#include <string.h>
+
+#define WIDTH (320)
+#define HEIGHT (240)
+
+static Eina_List *filenames = NULL;
+static Eina_List *curfile = NULL;
+
+/* "playback_started" smart callback: log that decoding began.
+ * data and event_info are unused. */
+static void
+_playback_started_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf("Emotion object started playback.\n");
+}
+
+/* Create an emotion object on canvas @e using the "gstreamer" engine
+ * and hook the "playback_started" smart signal.
+ * The returned object is owned by the caller. */
+static Evas_Object *
+_create_emotion_object(Evas *e)
+{
+   Evas_Object *em = emotion_object_add(e);
+
+   emotion_object_init(em, "gstreamer");
+
+   evas_object_smart_callback_add(
+       em, "playback_started", _playback_started_cb, NULL);
+
+   return em;
+}
+
+/* Keyboard driver for the example: play/pause, quit, playlist
+ * navigation (n/p) and the various border/aspect modes.
+ * @data is the emotion object registered in main(). */
+static void
+_on_key_down(void *data, Evas *e, Evas_Object *o, void *event_info)
+{
+   Evas_Event_Key_Down *ev = event_info;
+   Evas_Object *em = data;
+
+   if (!strcmp(ev->keyname, "Return"))
+     {
+        emotion_object_play_set(em, EINA_TRUE);
+     }
+   else if (!strcmp(ev->keyname, "space"))
+     {
+        emotion_object_play_set(em, EINA_FALSE);
+     }
+   else if (!strcmp(ev->keyname, "Escape"))
+     {
+        ecore_main_loop_quit();
+     }
+   else if (!strcmp(ev->keyname, "n"))
+     {
+        const char *file;
+        if (!curfile)
+          curfile = filenames;
+        else
+          curfile = eina_list_next(curfile);
+        /* wrap around: stepping past the last entry used to leave
+         * curfile NULL and feed a NULL filename to emotion */
+        if (!curfile)
+          curfile = filenames;
+        file = eina_list_data_get(curfile);
+        fprintf(stderr, "playing next file: %s\n", file);
+        emotion_object_file_set(em, file);
+     }
+   else if (!strcmp(ev->keyname, "p"))
+     {
+        const char *file;
+        if (!curfile)
+          curfile = eina_list_last(filenames);
+        else
+          curfile = eina_list_prev(curfile);
+        /* wrap around to the last entry when walking before the head */
+        if (!curfile)
+          curfile = eina_list_last(filenames);
+        file = eina_list_data_get(curfile);
+        /* fixed: this message used to claim "next" */
+        fprintf(stderr, "playing previous file: %s\n", file);
+        emotion_object_file_set(em, file);
+     }
+   else if (!strcmp(ev->keyname, "b"))
+     {
+        emotion_object_border_set(em, 0, 0, 50, 50);
+     }
+   else if (!strcmp(ev->keyname, "0"))
+     {
+        emotion_object_keep_aspect_set(em, EMOTION_ASPECT_KEEP_NONE);
+     }
+   else if (!strcmp(ev->keyname, "w"))
+     {
+        emotion_object_keep_aspect_set(em, EMOTION_ASPECT_KEEP_WIDTH);
+     }
+   else if (!strcmp(ev->keyname, "h"))
+     {
+        emotion_object_keep_aspect_set(em, EMOTION_ASPECT_KEEP_HEIGHT);
+     }
+   else if (!strcmp(ev->keyname, "2"))
+     {
+        emotion_object_keep_aspect_set(em, EMOTION_ASPECT_KEEP_BOTH);
+     }
+   else if (!strcmp(ev->keyname, "c"))
+     {
+        emotion_object_keep_aspect_set(em, EMOTION_ASPECT_CROP);
+     }
+   else
+     {
+        fprintf(stderr, "unhandled key: %s\n", ev->keyname);
+     }
+}
+
+/* "frame_decode" fires once per decoded frame; the log line is left
+ * commented out because it would be far too chatty. */
+static void
+_frame_decode_cb(void *data, Evas_Object *o, void *event_info)
+{
+   // fprintf(stderr, "smartcb: frame_decode\n");
+}
+
+/* "length_change": report the new total play length (seconds). */
+static void
+_length_change_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: length_change: %0.3f\n", emotion_object_play_length_get(o));
+}
+
+/* "position_update": report the current seek position (seconds). */
+static void
+_position_update_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: position_update: %0.3f\n", emotion_object_position_get(o));
+}
+
+/* "progress_change": report buffering/progress status and info text. */
+static void
+_progress_change_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: progress_change: %0.3f, %s\n",
+	   emotion_object_progress_status_get(o),
+	   emotion_object_progress_info_get(o));
+}
+
+/* "frame_resize": report the new native video frame size. */
+static void
+_frame_resize_cb(void *data, Evas_Object *o, void *event_info)
+{
+   int w, h;
+   emotion_object_size_get(o, &w, &h);
+   fprintf(stderr, "smartcb: frame_resize: %dx%d\n", w, h);
+}
+
+static void /* adjust canvas' contents on resizes */
+_canvas_resize_cb(Ecore_Evas *ee)
+{
+   int w, h;
+   Evas_Object *bg, *em;
+
+   /* current window geometry */
+   ecore_evas_geometry_get(ee, NULL, NULL, &w, &h);
+
+   /* objects stashed on the Ecore_Evas by main() */
+   bg = ecore_evas_data_get(ee, "bg");
+   em = ecore_evas_data_get(ee, "emotion");
+
+   /* background covers everything; the video keeps a 10px border */
+   evas_object_resize(bg, w, h);
+   evas_object_move(em, 10, 10);
+   evas_object_resize(em, w - 20, h - 20);
+}
+
+int
+main(int argc, const char *argv[])
+{
+   Ecore_Evas *ee;
+   Evas *e;
+   Evas_Object *bg, *em;
+   const char *fname;
+   int i;
+   int ret = 0;
+
+   if (argc < 2)
+     {
+        /* bail out here: the old code fell through with an empty
+         * playlist and a NULL current file */
+        printf("One argument is necessary. Usage:\n");
+        printf("\t%s <filename>\n", argv[0]);
+        return -1;
+     }
+
+   eina_init();
+   for (i = 1; i < argc; i++)
+     filenames = eina_list_append(filenames, eina_stringshare_add(argv[i]));
+
+   curfile = filenames;
+
+   if (!ecore_evas_init())
+     {
+        ret = EXIT_FAILURE;
+        goto cleanup_list;
+     }
+
+   /* this will give you a window with an Evas canvas under the first
+    * engine available */
+   ee = ecore_evas_new(NULL, 10, 10, WIDTH, HEIGHT, NULL);
+   if (!ee)
+     {
+        fprintf(stderr, "you got to have at least one evas engine built and linked"
+                " up to ecore-evas for this example to run properly.\n");
+        ret = -1;
+        goto cleanup;
+     }
+
+   ecore_evas_callback_resize_set(ee, _canvas_resize_cb);
+
+   ecore_evas_show(ee);
+
+   /* the canvas pointer, de facto */
+   e = ecore_evas_get(ee);
+
+   /* adding a background to this example */
+   bg = evas_object_rectangle_add(e);
+   evas_object_name_set(bg, "our dear rectangle");
+   evas_object_color_set(bg, 255, 0, 0, 255); /* red bg (comment used to say white) */
+   evas_object_move(bg, 0, 0); /* at canvas' origin */
+   evas_object_resize(bg, WIDTH, HEIGHT); /* covers full canvas */
+   evas_object_show(bg);
+
+   ecore_evas_data_set(ee, "bg", bg);
+
+   /* Creating the emotion object; 10px border matching _canvas_resize_cb()
+    * (the old code resized twice, the first call was dead) */
+   em = _create_emotion_object(e);
+   emotion_object_file_set(em, eina_list_data_get(curfile));
+   evas_object_move(em, 10, 10);
+   evas_object_resize(em, WIDTH - 20, HEIGHT - 20);
+   emotion_object_keep_aspect_set(em, EMOTION_ASPECT_KEEP_BOTH);
+   emotion_object_bg_color_set(em, 0, 128, 0, 255);
+   evas_object_show(em);
+
+   ecore_evas_data_set(ee, "emotion", em);
+
+   evas_object_smart_callback_add(em, "frame_decode", _frame_decode_cb, NULL);
+   evas_object_smart_callback_add(em, "length_change", _length_change_cb, NULL);
+   evas_object_smart_callback_add(em, "position_update", _position_update_cb, NULL);
+   evas_object_smart_callback_add(em, "progress_change", _progress_change_cb, NULL);
+   evas_object_smart_callback_add(em, "frame_resize", _frame_resize_cb, NULL);
+
+   evas_object_event_callback_add(bg, EVAS_CALLBACK_KEY_DOWN, _on_key_down, em);
+   evas_object_focus_set(bg, EINA_TRUE);
+
+   emotion_object_play_set(em, EINA_TRUE);
+
+   ecore_main_loop_begin();
+
+   ecore_evas_free(ee);
+
+cleanup:
+   ecore_evas_shutdown();
+cleanup_list:
+   /* EINA_LIST_FREE() yields each stored *data* pointer; the old code
+    * treated it as a list node and freed garbage. Also run on success
+    * so the stringshares are not leaked, and balance eina_init(). */
+   EINA_LIST_FREE(filenames, fname)
+     eina_stringshare_del(fname);
+   eina_shutdown();
+   return ret;
+}
diff --git a/src/examples/emotion/emotion_generic_example.c b/src/examples/emotion/emotion_generic_example.c
new file mode 100644
index 0000000000..b8382862d5
--- /dev/null
+++ b/src/examples/emotion/emotion_generic_example.c
@@ -0,0 +1,233 @@
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Evas.h>
+#include <Emotion.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#define WIDTH (320)
+#define HEIGHT (240)
+
+static Eina_List *filenames = NULL;
+static Eina_List *curfile = NULL;
+
+/* "playback_started" smart callback: log that decoding began.
+ * data and event_info are unused. */
+static void
+_playback_started_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf("Emotion object started playback.\n");
+}
+
+/* "playback_finished" handler: pause and rewind so playback can be
+ * restarted from the beginning with the Return key. */
+static void
+_playback_stopped_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf("Emotion playback stopped.\n");
+   emotion_object_play_set(o, EINA_FALSE);
+   emotion_object_position_set(o, 0);
+}
+
+/* Create an emotion object on canvas @e driven by the "generic"
+ * (out-of-process shm player) engine and hook the playback
+ * start/finish smart signals. Caller owns the returned object. */
+static Evas_Object *
+_create_emotion_object(Evas *e)
+{
+   Evas_Object *em = emotion_object_add(e);
+
+   emotion_object_init(em, "generic");
+
+   evas_object_smart_callback_add(
+       em, "playback_started", _playback_started_cb, NULL);
+   evas_object_smart_callback_add(
+       em, "playback_finished", _playback_stopped_cb, NULL);
+
+   return em;
+}
+
+/* Keyboard driver: play/pause/quit, size/seek/speed queries,
+ * playlist navigation (n/p), object deletion and a deliberate stall.
+ * @data is the emotion object registered in main(). */
+static void
+_on_key_down(void *data, Evas *e, Evas_Object *o, void *event_info)
+{
+   Evas_Event_Key_Down *ev = event_info;
+   Evas_Object *em = data;
+
+   if (!strcmp(ev->keyname, "Return"))
+     {
+        emotion_object_play_set(em, EINA_TRUE);
+     }
+   else if (!strcmp(ev->keyname, "space"))
+     {
+        emotion_object_play_set(em, EINA_FALSE);
+     }
+   else if (!strcmp(ev->keyname, "Escape"))
+     {
+        ecore_main_loop_quit();
+     }
+   else if (!strcmp(ev->keyname, "t"))
+     {
+        int w, h;
+        emotion_object_size_get(em, &w, &h);
+        fprintf(stderr, "example -> size: %dx%d\n", w, h);
+     }
+   else if (!strcmp(ev->keyname, "s"))
+     {
+        float len, pos;
+        len = emotion_object_play_length_get(em);
+        pos = 0.98 * len;
+        fprintf(stderr, "skipping to position %0.3f\n", pos);
+        emotion_object_position_set(em, pos);
+     }
+   else if (!strcmp(ev->keyname, "1"))
+     {
+        fprintf(stderr, "setting speed to 1.0\n");
+        emotion_object_play_speed_set(em, 1.0);
+     }
+   else if (!strcmp(ev->keyname, "2"))
+     {
+        fprintf(stderr, "setting speed to 2.0\n");
+        emotion_object_play_speed_set(em, 2.0);
+     }
+   else if (!strcmp(ev->keyname, "n"))
+     {
+        const char *file;
+        if (!curfile)
+          curfile = filenames;
+        else
+          curfile = eina_list_next(curfile);
+        /* wrap around: stepping past the last entry used to leave
+         * curfile NULL and feed a NULL filename to emotion */
+        if (!curfile)
+          curfile = filenames;
+        file = eina_list_data_get(curfile);
+        fprintf(stderr, "playing next file: %s\n", file);
+        emotion_object_file_set(em, file);
+     }
+   else if (!strcmp(ev->keyname, "p"))
+     {
+        const char *file;
+        if (!curfile)
+          curfile = eina_list_last(filenames);
+        else
+          curfile = eina_list_prev(curfile);
+        /* wrap around to the last entry when walking before the head */
+        if (!curfile)
+          curfile = eina_list_last(filenames);
+        file = eina_list_data_get(curfile);
+        /* fixed: this message used to claim "next" */
+        fprintf(stderr, "playing previous file: %s\n", file);
+        emotion_object_file_set(em, file);
+     }
+   else if (!strcmp(ev->keyname, "d"))
+     {
+        /* NOTE(review): em keeps dangling in main()'s callbacks after
+         * this; acceptable for an example, but further keys are unsafe */
+        evas_object_del(em);
+     }
+   else if (!strcmp(ev->keyname, "l"))
+     {
+        // force frame dropping
+        sleep(5);
+     }
+   else
+     {
+        fprintf(stderr, "unhandled key: %s\n", ev->keyname);
+     }
+}
+
+/* "frame_decode" fires once per decoded frame; the log line is left
+ * commented out because it would be far too chatty. */
+static void
+_frame_decode_cb(void *data, Evas_Object *o, void *event_info)
+{
+   // fprintf(stderr, "smartcb: frame_decode\n");
+}
+
+/* "length_change": report the new total play length (seconds). */
+static void
+_length_change_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: length_change: %0.3f\n", emotion_object_play_length_get(o));
+}
+
+/* "position_update": report the current seek position (seconds). */
+static void
+_position_update_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: position_update: %0.3f\n", emotion_object_position_get(o));
+}
+
+/* "progress_change": report buffering/progress status and info text. */
+static void
+_progress_change_cb(void *data, Evas_Object *o, void *event_info)
+{
+   fprintf(stderr, "smartcb: progress_change: %0.3f, %s\n",
+	   emotion_object_progress_status_get(o),
+	   emotion_object_progress_info_get(o));
+}
+
+/* "frame_resize": report the new native video frame size. */
+static void
+_frame_resize_cb(void *data, Evas_Object *o, void *event_info)
+{
+   int w, h;
+   emotion_object_size_get(o, &w, &h);
+   fprintf(stderr, "smartcb: frame_resize: %dx%d\n", w, h);
+}
+
+int
+main(int argc, const char *argv[])
+{
+   Ecore_Evas *ee;
+   Evas *e;
+   Evas_Object *bg, *em;
+   const char *fname;
+   int i;
+   int ret = 0;
+
+   if (argc < 2)
+     {
+        /* bail out here: the old code fell through with an empty
+         * playlist and a NULL current file */
+        printf("One argument is necessary. Usage:\n");
+        printf("\t%s <filename>\n", argv[0]);
+        return -1;
+     }
+
+   eina_init();
+   for (i = 1; i < argc; i++)
+     filenames = eina_list_append(filenames, eina_stringshare_add(argv[i]));
+
+   curfile = filenames;
+
+   if (!ecore_evas_init())
+     {
+        ret = EXIT_FAILURE;
+        goto cleanup_list;
+     }
+
+   /* this will give you a window with an Evas canvas under the first
+    * engine available */
+   ee = ecore_evas_new(NULL, 10, 10, WIDTH, HEIGHT, NULL);
+   if (!ee)
+     {
+        fprintf(stderr, "you got to have at least one evas engine built and linked"
+                " up to ecore-evas for this example to run properly.\n");
+        ret = -1;
+        goto cleanup;
+     }
+
+   ecore_evas_show(ee);
+
+   /* the canvas pointer, de facto */
+   e = ecore_evas_get(ee);
+
+   /* adding a background to this example */
+   bg = evas_object_rectangle_add(e);
+   evas_object_name_set(bg, "our dear rectangle");
+   evas_object_color_set(bg, 255, 255, 255, 255); /* white bg */
+   evas_object_move(bg, 0, 0); /* at canvas' origin */
+   evas_object_resize(bg, WIDTH, HEIGHT); /* covers full canvas */
+   evas_object_show(bg);
+
+   /* Creating the emotion object */
+   em = _create_emotion_object(e);
+   emotion_object_file_set(em, eina_list_data_get(curfile));
+   evas_object_move(em, 0, 0);
+   evas_object_resize(em, WIDTH, HEIGHT);
+   evas_object_show(em);
+
+   evas_object_smart_callback_add(em, "frame_decode", _frame_decode_cb, NULL);
+   evas_object_smart_callback_add(em, "length_change", _length_change_cb, NULL);
+   evas_object_smart_callback_add(em, "position_update", _position_update_cb, NULL);
+   evas_object_smart_callback_add(em, "progress_change", _progress_change_cb, NULL);
+   evas_object_smart_callback_add(em, "frame_resize", _frame_resize_cb, NULL);
+
+   evas_object_event_callback_add(bg, EVAS_CALLBACK_KEY_DOWN, _on_key_down, em);
+   evas_object_focus_set(bg, EINA_TRUE);
+
+   emotion_object_play_set(em, EINA_TRUE);
+
+   ecore_main_loop_begin();
+
+   ecore_evas_free(ee);
+
+cleanup:
+   ecore_evas_shutdown();
+cleanup_list:
+   /* EINA_LIST_FREE() yields each stored *data* pointer; the old code
+    * treated it as a list node and freed garbage. Also run on success
+    * so the stringshares are not leaked, and balance eina_init(). */
+   EINA_LIST_FREE(filenames, fname)
+     eina_stringshare_del(fname);
+   eina_shutdown();
+   return ret;
+}
diff --git a/src/examples/emotion/emotion_generic_subtitle_example.c b/src/examples/emotion/emotion_generic_subtitle_example.c
new file mode 100644
index 0000000000..448b505449
--- /dev/null
+++ b/src/examples/emotion/emotion_generic_subtitle_example.c
@@ -0,0 +1,97 @@
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Evas.h>
+#include <Emotion.h>
+#include <stdio.h>
+
+#define WIDTH (320)
+#define HEIGHT (240)
+
+/* "playback_started" smart callback: log that decoding began.
+ * data and event_info are unused. */
+static void
+_playback_started_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf("Emotion object started playback.\n");
+}
+
+/* Window close button handler: end the main loop so main() can run
+ * its cleanup path. */
+static void
+_on_delete(Ecore_Evas *ee)
+{
+   ecore_main_loop_quit();
+}
+
+int
+main(int argc, const char *argv[])
+{
+   Ecore_Evas *ee;
+   Evas *e;
+   Evas_Object *bg, *em;
+   const char *filename = NULL;
+   const char *subtitle_filename = NULL;
+
+   if (argc < 2)
+     {
+        printf("At least one argument is necessary. Usage:\n");
+        printf("\t%s <filename> <subtitle filename>\n", argv[0]);
+        return -1;
+     }
+
+   filename = argv[1];
+
+   /* the subtitle file is optional */
+   if (argc > 2)
+     subtitle_filename = argv[2];
+
+   if (!ecore_evas_init())
+     return EXIT_FAILURE;
+
+   /* this will give you a window with an Evas canvas under the first
+    * engine available */
+   ee = ecore_evas_new(NULL, 10, 10, WIDTH, HEIGHT, NULL);
+   if (!ee)
+     goto error;
+
+   ecore_evas_callback_delete_request_set(ee, _on_delete);
+
+   ecore_evas_show(ee);
+
+   /* the canvas pointer, de facto */
+   e = ecore_evas_get(ee);
+
+   /* adding a background to this example */
+   bg = evas_object_rectangle_add(e);
+   evas_object_name_set(bg, "our dear rectangle");
+   evas_object_color_set(bg, 255, 255, 255, 255); /* white bg */
+   evas_object_move(bg, 0, 0); /* at canvas' origin */
+   evas_object_resize(bg, WIDTH, HEIGHT); /* covers full canvas */
+   evas_object_show(bg);
+
+   /* Creating the emotion object */
+   em = emotion_object_add(e);
+   /* previously unchecked: warn if the generic engine is missing so
+    * the user knows why nothing plays */
+   if (!emotion_object_init(em, "generic"))
+     fprintf(stderr, "Emotion: \"generic\" module could not be initialized.\n");
+
+   /* NOTE(review): the subtitle is set before the media file,
+    * presumably so the engine picks it up at open time -- keep order */
+   if (subtitle_filename)
+     emotion_object_video_subtitle_file_set(em, subtitle_filename);
+
+   evas_object_smart_callback_add(
+       em, "playback_started", _playback_started_cb, NULL);
+
+   emotion_object_file_set(em, filename);
+
+   evas_object_move(em, 0, 0);
+   evas_object_resize(em, WIDTH, HEIGHT);
+   evas_object_show(em);
+
+   emotion_object_play_set(em, EINA_TRUE);
+
+   ecore_main_loop_begin();
+
+   ecore_evas_free(ee);
+   ecore_evas_shutdown();
+   return 0;
+
+error:
+   fprintf(stderr, "you got to have at least one evas engine built and linked"
+           " up to ecore-evas for this example to run properly.\n");
+
+   ecore_evas_shutdown();
+   return -1;
+}
diff --git a/src/examples/emotion/emotion_signals_example.c b/src/examples/emotion/emotion_signals_example.c
new file mode 100644
index 0000000000..2469c468ba
--- /dev/null
+++ b/src/examples/emotion/emotion_signals_example.c
@@ -0,0 +1,173 @@
+#include <Ecore.h>
+#include <Ecore_Evas.h>
+#include <Evas.h>
+#include <Emotion.h>
+#include <stdio.h>
+
+#define WIDTH (320)
+#define HEIGHT (240)
+
+/* Dump the current state of emotion object @o to stdout: play state,
+ * title metadata, position/length, seekability, video geometry and
+ * aspect ratio.
+ * NOTE(review): right after file_set() many of these values may still
+ * be unset; they become meaningful once signals like "open_done" or
+ * "playback_started" have been emitted (see the file's own docs). */
+static void
+_display_info(Evas_Object *o)
+{
+   int w, h;
+   printf("playing: %d\n", emotion_object_play_get(o));
+   printf("meta title: %s\n",
+          emotion_object_meta_info_get(o, EMOTION_META_INFO_TRACK_TITLE));
+   printf("seek position: %0.3f\n",
+          emotion_object_position_get(o));
+   printf("play length: %0.3f\n",
+          emotion_object_play_length_get(o));
+   printf("is seekable: %d\n",
+          emotion_object_seekable_get(o));
+   emotion_object_size_get(o, &w, &h);
+   printf("video geometry: %dx%d\n", w, h);
+   printf("video width / height ratio: %0.3f\n",
+          emotion_object_ratio_get(o));
+   printf("\n");
+}
+
+/* "playback_started": dump object state when playback begins. */
+static void
+_playback_started_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object started playback.\n");
+   _display_info(o);
+}
+
+/* "playback_finished": dump object state when playback ends. */
+static void
+_playback_finished_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object finished playback.\n");
+   _display_info(o);
+}
+
+/* "open_done": dump object state once the media file is opened. */
+static void
+_open_done_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object open done.\n");
+   _display_info(o);
+}
+
+/* "position_update": report only the FIRST update -- the callback
+ * unregisters itself before dumping state. */
+static void
+_position_update_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object first position update.\n");
+   evas_object_smart_callback_del(o, "position_update", _position_update_cb);
+   _display_info(o);
+}
+
+/* "frame_decode": report only the FIRST decoded frame -- the callback
+ * unregisters itself before dumping state. */
+static void
+_frame_decode_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object first frame decode.\n");
+   evas_object_smart_callback_del(o, "frame_decode", _frame_decode_cb);
+   _display_info(o);
+}
+
+/* "decode_stop": dump object state after the last frame is decoded. */
+static void
+_decode_stop_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object decode stop.\n");
+   _display_info(o);
+}
+
+/* "frame_resize": dump object state when the video frame size changes. */
+static void
+_frame_resize_cb(void *data, Evas_Object *o, void *event_info)
+{
+   printf(">>> Emotion object frame resize.\n");
+   _display_info(o);
+}
+
+/* Register every interesting emotion smart signal on @o, driven by a
+ * signal-name/handler table instead of one call per line. */
+static void
+_setup_emotion_callbacks(Evas_Object *o)
+{
+   static const struct
+     {
+        const char *event;
+        Evas_Smart_Cb handler;
+     } table[] = {
+        { "playback_started", _playback_started_cb },
+        { "playback_finished", _playback_finished_cb },
+        { "open_done", _open_done_cb },
+        { "position_update", _position_update_cb },
+        { "frame_decode", _frame_decode_cb },
+        { "decode_stop", _decode_stop_cb },
+        { "frame_resize", _frame_resize_cb }
+     };
+   size_t idx;
+
+   for (idx = 0; idx < sizeof(table) / sizeof(table[0]); idx++)
+     evas_object_smart_callback_add(o, table[idx].event, table[idx].handler, NULL);
+}
+
+int
+main(int argc, const char *argv[])
+{
+   Ecore_Evas *ee;
+   Evas *e;
+   Evas_Object *bg, *em;
+   const char *filename = NULL;
+   const char *module = NULL;
+
+   if (argc < 2)
+     {
+        /* return directly: ecore_evas is not initialized yet, so the
+         * old "goto error" path shut down a library it never set up */
+        printf("At least one argument is necessary. Usage:\n");
+        printf("\t%s <filename> [module_name]\n", argv[0]);
+        return -1;
+     }
+
+   filename = argv[1];
+
+   if (argc >= 3)
+     module = argv[2];
+
+   if (!ecore_evas_init())
+     return EXIT_FAILURE;
+
+   /* this will give you a window with an Evas canvas under the first
+    * engine available */
+   ee = ecore_evas_new(NULL, 10, 10, WIDTH, HEIGHT, NULL);
+   if (!ee)
+     goto error;
+
+   ecore_evas_show(ee);
+
+   /* the canvas pointer, de facto */
+   e = ecore_evas_get(ee);
+
+   /* adding a background to this example */
+   bg = evas_object_rectangle_add(e);
+   evas_object_name_set(bg, "our dear rectangle");
+   evas_object_color_set(bg, 255, 255, 255, 255); /* white bg */
+   evas_object_move(bg, 0, 0); /* at canvas' origin */
+   evas_object_resize(bg, WIDTH, HEIGHT); /* covers full canvas */
+   evas_object_show(bg);
+
+   /* Creating the emotion object */
+   em = emotion_object_add(e);
+
+   /* Try to load the specified module - NULL for auto-discover.
+    * Guard the NULL case: %s with a NULL pointer is undefined. */
+   if (!emotion_object_init(em, module))
+     fprintf(stderr, "Emotion: \"%s\" module could not be initialized.\n",
+             module ? module : "(auto-discovered)");
+
+   _display_info(em);
+   _setup_emotion_callbacks(em);
+
+   if (!emotion_object_file_set(em, filename))
+     fprintf(stderr, "Emotion: Could not load the file \"%s\"\n", filename);
+
+   evas_object_move(em, 0, 0);
+   evas_object_resize(em, WIDTH, HEIGHT);
+   evas_object_show(em);
+
+   emotion_object_play_set(em, EINA_TRUE);
+
+   ecore_main_loop_begin();
+
+   /* the old code had a second, unreachable ecore_evas_free() after
+    * this return -- removed */
+   ecore_evas_free(ee);
+   ecore_evas_shutdown();
+   return 0;
+
+error:
+   ecore_evas_shutdown();
+   return -1;
+}
diff --git a/src/lib/emotion/Emotion.h b/src/lib/emotion/Emotion.h
new file mode 100644
index 0000000000..454ee0f42e
--- /dev/null
+++ b/src/lib/emotion/Emotion.h
@@ -0,0 +1,1332 @@
+#ifndef EMOTION_H
+#define EMOTION_H
+
+/**
+ * @file
+ * @brief Emotion Media Library
+ *
+ * These routines are used for Emotion.
+ */
+
+/**
+ *
+ * @page emotion_main Emotion
+ *
+ * @date 2003 (created)
+ *
+ * @section toc Table of Contents
+ *
+ * @li @ref emotion_main_intro
+ * @li @ref emotion_main_work
+ * @li @ref emotion_main_compiling
+ * @li @ref emotion_main_next_steps
+ * @li @ref emotion_main_intro_example
+ *
+ * @section emotion_main_intro Introduction
+ *
+ * A media object library for Evas and Ecore.
+ *
+ * Emotion is a library that allows playing audio and video files, using one of
+ * its backends (gstreamer, xine or generic shm player).
+ *
+ * It is integrated into Ecore through its mainloop, and is transparent to the
+ * user of the library how the decoding of audio and video is being done. Once
+ * the objects are created, the user can set callbacks to the specific events
+ * and set options to this object, all in the main loop (no threads are needed).
+ *
+ * Emotion is also integrated with Evas. The emotion object returned by
+ * emotion_object_add() is an Evas smart object, so it can be manipulated with
+ * default Evas object functions. Callbacks can be added to the signals emitted
+ * by this object with evas_object_smart_callback_add().
+ *
+ * @section emotion_main_work How does Emotion work?
+ *
+ * The Emotion library uses Evas smart objects to allow you to manipulate the
+ * created object as any other Evas object, and to connect to its signals,
+ * handling them when needed. It's also possible to swallow Emotion objects
+ * inside Edje themes, and expect it to behave as a normal image or rectangle
+ * with regard to its dimensions.
+ *
+ * @section emotion_main_compiling How to compile
+ *
+ * Emotion is a library your application links to. The procedure for this is
+ * very simple. You simply have to compile your application with the
+ * appropriate compiler flags that the @c pkg-config script outputs. For
+ * example:
+ *
+ * Compiling C or C++ files into object files:
+ *
+ * @verbatim
+ gcc -c -o main.o main.c `pkg-config --cflags emotion`
+ @endverbatim
+ *
+ * Linking object files into a binary executable:
+ *
+ * @verbatim
+ gcc -o my_application main.o `pkg-config --libs emotion`
+ @endverbatim
+ *
+ * See @ref pkgconfig
+ *
+ * @section emotion_main_next_steps Next Steps
+ *
+ * After you understood what Emotion is and installed it in your
+ * system you should proceed understanding the programming
+ * interface. We'd recommend you to take a while to learn @ref Ecore and
+ * @ref Evas to get started.
+ *
+ * Recommended reading:
+ *
+ * @li @ref Emotion_Init to initialize the library.
+ * @li @ref Emotion_Video to control video parameters.
+ * @li @ref Emotion_Audio to control audio parameters.
+ * @li @ref Emotion_Play to control playback.
+ * @li @ref Emotion_Webcam to show cameras.
+ * @li @ref Emotion_API for general programming interface.
+ *
+ * @section emotion_main_intro_example Introductory Example
+ *
+ * @include emotion_basic_example.c
+ *
+ * More examples can be found at @ref emotion_examples.
+ */
+
+#include <Evas.h>
+
+#ifdef EAPI
+# undef EAPI
+#endif
+
+#ifdef _WIN32
+# ifdef EFL_EMOTION_BUILD
+# ifdef DLL_EXPORT
+# define EAPI __declspec(dllexport)
+# else
+# define EAPI
+# endif /* ! DLL_EXPORT */
+# else
+# define EAPI __declspec(dllimport)
+# endif /* ! EFL_EMOTION_BUILD */
+#else
+# ifdef __GNUC__
+# if __GNUC__ >= 4
+# define EAPI __attribute__ ((visibility("default")))
+# else
+# define EAPI
+# endif
+# else
+# define EAPI
+# endif
+#endif /* ! _WIN32 */
+
+/**
+ * @file Emotion.h
+ * @brief The file that provides Emotion the API, with functions available for
+ * play, seek, change volume, etc.
+ */
+
+/**
+ * @enum _Emotion_Module
+ *
+ * Identifier of a backend playback engine.
+ * NOTE(review): engines are selected by name string in
+ * emotion_object_init(); confirm whether this enum is still consumed.
+ */
+enum _Emotion_Module
+{
+  EMOTION_MODULE_XINE,
+  EMOTION_MODULE_GSTREAMER
+};
+
+/**
+ * @enum _Emotion_Event
+ *
+ * Simple events to be forwarded to the playback engine: DVD-style
+ * menu entries, navigation keys and the digits 0-10.
+ */
+enum _Emotion_Event
+{
+   EMOTION_EVENT_MENU1, // Escape Menu
+   EMOTION_EVENT_MENU2, // Title Menu
+   EMOTION_EVENT_MENU3, // Root Menu
+   EMOTION_EVENT_MENU4, // Subpicture Menu
+   EMOTION_EVENT_MENU5, // Audio Menu
+   EMOTION_EVENT_MENU6, // Angle Menu
+   EMOTION_EVENT_MENU7, // Part Menu
+   EMOTION_EVENT_UP,
+   EMOTION_EVENT_DOWN,
+   EMOTION_EVENT_LEFT,
+   EMOTION_EVENT_RIGHT,
+   EMOTION_EVENT_SELECT,
+   EMOTION_EVENT_NEXT,
+   EMOTION_EVENT_PREV,
+   EMOTION_EVENT_ANGLE_NEXT,
+   EMOTION_EVENT_ANGLE_PREV,
+   EMOTION_EVENT_FORCE,
+   EMOTION_EVENT_0,
+   EMOTION_EVENT_1,
+   EMOTION_EVENT_2,
+   EMOTION_EVENT_3,
+   EMOTION_EVENT_4,
+   EMOTION_EVENT_5,
+   EMOTION_EVENT_6,
+   EMOTION_EVENT_7,
+   EMOTION_EVENT_8,
+   EMOTION_EVENT_9,
+   EMOTION_EVENT_10
+};
+
+/**
+ * @enum _Emotion_Meta_Info
+ *
+ * Used for retrieving information about the media file being played.
+ *
+ * @see emotion_object_meta_info_get()
+ *
+ * @ingroup Emotion_Info
+ */
+enum _Emotion_Meta_Info
+{
+ EMOTION_META_INFO_TRACK_TITLE, /**< track title */
+ EMOTION_META_INFO_TRACK_ARTIST, /**< artist name */
+ EMOTION_META_INFO_TRACK_ALBUM, /**< album name */
+ EMOTION_META_INFO_TRACK_YEAR, /**< track year */
+ EMOTION_META_INFO_TRACK_GENRE, /**< track genre */
+ EMOTION_META_INFO_TRACK_COMMENT, /**< track comments */
+ EMOTION_META_INFO_TRACK_DISC_ID, /**< track disc ID */
+ EMOTION_META_INFO_TRACK_COUNT /**< track count - number of the track in the album */
+};
+
+/**
+ * @enum _Emotion_Vis
+ *
+ * Used for displaying a visualization on the emotion object.
+ *
+ * @see emotion_object_vis_set()
+ *
+ * @ingroup Emotion_Visualization
+ */
+enum _Emotion_Vis
+{
+ EMOTION_VIS_NONE, /**< no visualization set */
+ EMOTION_VIS_GOOM, /**< goom */
+ EMOTION_VIS_LIBVISUAL_BUMPSCOPE, /**< bumpscope */
+ EMOTION_VIS_LIBVISUAL_CORONA, /**< corona */
+ EMOTION_VIS_LIBVISUAL_DANCING_PARTICLES, /**< dancing particles */
+ EMOTION_VIS_LIBVISUAL_GDKPIXBUF, /**< gdkpixbuf */
+ EMOTION_VIS_LIBVISUAL_G_FORCE, /**< G force */
+ EMOTION_VIS_LIBVISUAL_GOOM, /**< goom */
+ EMOTION_VIS_LIBVISUAL_INFINITE, /**< infinite */
+ EMOTION_VIS_LIBVISUAL_JAKDAW, /**< jakdaw */
+ EMOTION_VIS_LIBVISUAL_JESS, /**< jess */
+ EMOTION_VIS_LIBVISUAL_LV_ANALYSER, /**< lv analyser */
+ EMOTION_VIS_LIBVISUAL_LV_FLOWER, /**< lv flower */
+ EMOTION_VIS_LIBVISUAL_LV_GLTEST, /**< lv gltest */
+ EMOTION_VIS_LIBVISUAL_LV_SCOPE, /**< lv scope */
+ EMOTION_VIS_LIBVISUAL_MADSPIN, /**< madspin */
+ EMOTION_VIS_LIBVISUAL_NEBULUS, /**< nebulus */
+ EMOTION_VIS_LIBVISUAL_OINKSIE, /**< oinksie */
+ EMOTION_VIS_LIBVISUAL_PLASMA, /**< plasma */
+ EMOTION_VIS_LAST /* sentinel */
+};
+
+/**
+ * @enum Emotion_Suspend
+ *
+ * Used for emotion pipeline resource management.
+ *
+ * @see emotion_object_suspend_set()
+ * @see emotion_object_suspend_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+typedef enum
+{
+ EMOTION_WAKEUP, /**< pipeline is up and running */
+   EMOTION_SLEEP, /**< turn off hardware resource usage like overlay */
+ EMOTION_DEEP_SLEEP, /**< destroy the pipeline, but keep full resolution pixels output around */
+ EMOTION_HIBERNATE /**< destroy the pipeline, and keep half resolution or object resolution if lower */
+} Emotion_Suspend;
+
+/**
+ * @enum _Emotion_Aspect
+ * Defines the aspect ratio option.
+ */
+enum _Emotion_Aspect
+{
+ EMOTION_ASPECT_KEEP_NONE, /**< ignore video aspect ratio */
+ EMOTION_ASPECT_KEEP_WIDTH, /**< respect video aspect, fitting its width inside the object width */
+ EMOTION_ASPECT_KEEP_HEIGHT, /**< respect video aspect, fitting its height inside the object height */
+ EMOTION_ASPECT_KEEP_BOTH, /**< respect video aspect, fitting it inside the object area */
+   EMOTION_ASPECT_CROP, /**< respect video aspect, cropping exceeding area */
+ EMOTION_ASPECT_CUSTOM, /**< use custom borders/crop for the video */
+};
+
+typedef enum _Emotion_Module Emotion_Module;
+typedef enum _Emotion_Event Emotion_Event;
+typedef enum _Emotion_Meta_Info Emotion_Meta_Info; /**< Meta info type to be retrieved. */
+typedef enum _Emotion_Vis Emotion_Vis; /**< Type of visualization. */
+typedef enum _Emotion_Aspect Emotion_Aspect; /**< Aspect ratio option. */
+
+#define EMOTION_CHANNEL_AUTO -1
+#define EMOTION_CHANNEL_DEFAULT 0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EMOTION_VERSION_MAJOR 1
+#define EMOTION_VERSION_MINOR 8
+
+ typedef struct _Emotion_Version
+ {
+ int major;
+ int minor;
+ int micro;
+ int revision;
+ } Emotion_Version;
+
+ EAPI extern Emotion_Version *emotion_version;
+
+/* api calls available */
+
+/**
+ * @brief How to create, initialize, manipulate and connect to signals of an
+ * Emotion object.
+ * @defgroup Emotion_API API available for manipulating Emotion object.
+ * @ingroup Emotion
+ *
+ * @{
+ *
+ * Emotion provides an Evas smart object that allows to play, control and
+ * display a video or audio file. The API is synchronous but not everything
+ * happens immediately. There are also some signals to report changed states.
+ *
+ * Basically, once the object is created and initialized, a file will be set to
+ * it, and then it can be resized, moved, and controlled by other Evas object
+ * functions.
+ *
+ * However, the decoding of the music and video occurs not in the Ecore main
+ * loop, but usually in another thread (this depends on the module being used).
+ * The synchronization between this other thread and the main loop is not visible
+ * to the end user of the library. The user can just register callbacks to the
+ * available signals to receive information about the changed states, and can
+ * call other functions from the API to request more changes on the current
+ * loaded file.
+ *
+ * There will be a delay between an API being called and it being really
+ * executed, since this request will be done in the main thread, and it needs to
+ * be sent to the decoding thread. For this reason, always call functions like
+ * emotion_object_size_get() or emotion_object_length_get() after some signal
+ * being sent, like "playback_started" or "open_done". @ref
+ * emotion_signals_example.c "This example demonstrates this behavior".
+ *
+ * @section signals Available signals
+ * The Evas_Object returned by emotion_object_add() has a number of signals that
+ * can be listened to using evas' smart callbacks mechanism. All signals have
+ * NULL as event info. The following is a list of interesting signals:
+ * @li "playback_started" - Emitted when the playback starts
+ * @li "playback_finished" - Emitted when the playback finishes
+ * @li "frame_decode" - Emitted every time a frame is decoded
+ * @li "open_done" - Emitted when the media file is opened
+ * @li "position_update" - Emitted when emotion_object_position_set is called
+ * @li "decode_stop" - Emitted after the last frame is decoded
+ *
+ * @section Examples
+ *
+ * The following examples exemplify the emotion usage. There's also the
+ * emotion_test binary that is distributed with this library and cover the
+ * entire API, but since it is too long and repetitive to be explained, its code
+ * is just displayed as another example.
+ *
+ * @li @ref emotion_basic_example_c
+ * @li @ref emotion_signals_example.c "Emotion signals"
+ * @li @ref emotion_test_main.c "emotion_test - full API usage"
+ *
+ */
+
+/**
+ * @defgroup Emotion_Init Creation and initialization functions
+ */
+
+/**
+ * @defgroup Emotion_Audio Audio control functions
+ */
+
+/**
+ * @defgroup Emotion_Video Video control functions
+ */
+
+/**
+ * @defgroup Emotion_Visualization Visualization control functions
+ */
+
+/**
+ * @defgroup Emotion_Info Miscellaneous information retrieval functions
+ */
+
+/**
+ * @defgroup Emotion_Ressource Video resource management
+ */
+
+EAPI Eina_Bool emotion_init(void);
+EAPI Eina_Bool emotion_shutdown(void);
+
+/**
+ * @brief Add an emotion object to the canvas.
+ *
+ * @param evas The canvas where the object will be added to.
+ * @return The emotion object just created.
+ *
+ * This function creates an emotion object and adds it to the specified @p evas.
+ * The returned object can be manipulated as any other Evas object, using the
+ * default object manipulation functions - evas_object_*.
+ *
+ * After creating the object with this function, it's still necessary to
+ * initialize it with emotion_object_init(), and if an audio file is going to be
+ * played with this object instead of a video, use
+ * emotion_object_video_mute_set().
+ *
+ * The next step is to open the desired file with emotion_object_file_set(), and
+ * start playing it with emotion_object_play_set().
+ *
+ * @see emotion_object_init()
+ * @see emotion_object_video_mute_set()
+ * @see emotion_object_file_set()
+ * @see emotion_object_play_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Evas_Object *emotion_object_add (Evas *evas);
+
+/**
+ * @brief Set the specified option for the current module.
+ *
+ * @param obj The emotion object which the option is being set to.
+ * @param opt The option that is being set. Currently supported optiosn: "video"
+ * and "audio".
+ * @param val The value of the option. Currently the only supported value is "off".
+ *
+ * This function allows one to mute the video or audio of the emotion object.
+ *
+ * @note Please don't use this function, consider using
+ * emotion_object_audio_mute_set() and emotion_object_video_mute_set() instead.
+ *
+ * @see emotion_object_audio_mute_set()
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI void emotion_object_module_option_set (Evas_Object *obj, const char *opt, const char *val);
+
+/**
+ * @brief Initializes an emotion object with the specified module.
+ *
+ * @param obj The emotion object to be initialized.
+ * @param module_filename The name of the module to be used (gstreamer or xine).
+ * @return @c EINA_TRUE if the specified module was successfully initialized for
+ * this object, @c EINA_FALSE otherwise.
+ *
+ * This function is required after creating the emotion object, in order to
+ * specify which module will be used with this object. Different objects can
+ * use different modules to play a media file. The current supported modules are
+ * @b gstreamer and @b xine.
+ *
+ * To use any of them, you need to make sure that support for them was compiled
+ * correctly.
+ *
+ * @note It's possible to disable the build of a module with
+ * --disable-module_name.
+ *
+ * @see emotion_object_add()
+ * @see emotion_object_file_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Eina_Bool emotion_object_init (Evas_Object *obj, const char *module_filename);
+
+/**
+ * @brief Set borders for the emotion object.
+ *
+ * @param obj The emotion object where borders are being set.
+ * @param l The left border.
+ * @param r The right border.
+ * @param t The top border.
+ * @param b The bottom border.
+ *
+ * This function sets borders for the emotion video object (just when a video is
+ * present). When positive values are given to one of the parameters, a border
+ * will be added to the respective position of the object, representing that
+ * size on the original video size. However, if the video is scaled up or down
+ * (i.e. the emotion object size is different from the video size), the borders
+ * will be scaled respectively too.
+ *
+ * If a negative value is given to one of the parameters, instead of a border,
+ * that respective side of the video will be cropped.
+ *
+ * It's possible to set a color for the added borders (default is transparent)
+ * with emotion_object_bg_color_set(). By default, an Emotion object doesn't
+ * have any border.
+ *
+ * @see emotion_object_border_get()
+ * @see emotion_object_bg_color_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_border_set(Evas_Object *obj, int l, int r, int t, int b);
+
+/**
+ * @brief Get the borders set for the emotion object.
+ *
+ * @param obj The emotion object from which the borders are being retrieved.
+ * @param l The left border.
+ * @param r The right border.
+ * @param t The top border.
+ * @param b The bottom border.
+ *
+ * @see emotion_object_border_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_border_get(const Evas_Object *obj, int *l, int *r, int *t, int *b);
+
+/**
+ * @brief Set a color for the background rectangle of this emotion object.
+ *
+ * @param obj The emotion object where the background color is being set.
+ * @param r Red component of the color.
+ * @param g Green component of the color.
+ * @param b Blue component of the color.
+ * @param a Alpha channel of the color.
+ *
+ * This is useful when a border is added to any side of the Emotion object. The
+ * area between the edge of the video and the edge of the object will be filled
+ * with the specified color.
+ *
+ * The default color is 0, 0, 0, 0 (transparent).
+ *
+ * @see emotion_object_bg_color_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_bg_color_set(Evas_Object *obj, int r, int g, int b, int a);
+
+/**
+ * @brief Get the background color set for the emotion object.
+ *
+ * @param obj The emotion object from which the background color is being retrieved.
+ * @param r Red component of the color.
+ * @param g Green component of the color.
+ * @param b Blue component of the color.
+ * @param a Alpha channel of the color.
+ *
+ * @see emotion_object_bg_color_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_bg_color_get(const Evas_Object *obj, int *r, int *g, int *b, int *a);
+
+/**
+ * @brief Set whether emotion should keep the aspect ratio of the video.
+ *
+ * @param obj The emotion object where to set the aspect.
+ * @param a The aspect ratio policy.
+ *
+ * Instead of manually calculating the required border to set with
+ * emotion_object_border_set(), and using this to fix the aspect ratio of the
+ * video when the emotion object has a different aspect, it's possible to just
+ * set the policy to be used.
+ *
+ * The options are:
+ *
+ * - @b #EMOTION_ASPECT_KEEP_NONE - ignore the video aspect ratio, and reset any
+ * border set to 0, stretching the video inside the emotion object area. This
+ * option is similar to EVAS_ASPECT_CONTROL_NONE size hint.
+ * - @b #EMOTION_ASPECT_KEEP_WIDTH - respect the video aspect ratio, fitting the
+ * video width inside the object width. This option is similar to
+ * EVAS_ASPECT_CONTROL_HORIZONTAL size hint.
+ * - @b #EMOTION_ASPECT_KEEP_HEIGHT - respect the video aspect ratio, fitting
+ * the video height inside the object height. This option is similar to
+ * EVAS_ASPECT_CONTROL_VERTICAL size hint.
+ * - @b #EMOTION_ASPECT_KEEP_BOTH - respect the video aspect ratio, fitting both
+ * its width and height inside the object area. This option is similar to
+ * EVAS_ASPECT_CONTROL_BOTH size hint. It's the effect called letterboxing.
+ * - @b #EMOTION_ASPECT_CROP - respect the video aspect ratio, fitting the width
+ * or height inside the object area, and cropping the exceeding areas of the
+ * video in height or width. It's the effect called pan-and-scan.
+ * - @b #EMOTION_ASPECT_CUSTOM - ignore the video aspect ratio, and use the
+ * current set from emotion_object_border_set().
+ *
+ * @note Calling this function with any value except #EMOTION_ASPECT_CUSTOM will
+ * invalidate borders set with emotion_object_border_set().
+ *
+ * @note Calling emotion_object_border_set() will automatically set the aspect
+ * policy to #EMOTION_ASPECT_CUSTOM.
+ *
+ * @see emotion_object_border_set()
+ * @see emotion_object_keep_aspect_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_keep_aspect_set(Evas_Object *obj, Emotion_Aspect a);
+
+/**
+ * @brief Get the current emotion aspect ratio policy.
+ *
+ * @param obj The emotion object from which we are fetching the aspect ratio
+ * policy.
+ * @return The current aspect ratio policy.
+ *
+ * @see emotion_object_keep_aspect_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Emotion_Aspect emotion_object_keep_aspect_get(const Evas_Object *obj);
+
+/**
+ * @brief Set the file to be played in the Emotion object.
+ *
+ * @param obj The emotion object where the file is being loaded.
+ * @param filename Path to the file to be loaded. It can be absolute or relative
+ * path.
+ * @return EINA_TRUE if the new file could be loaded successfully, and
+ * EINA_FALSE if the file could not be loaded. This happens when the file
+ * could not be found, when the module couldn't open the file, when no module is
+ * initialized in this object, or when the @p filename is the same as the
+ * one previously set.
+ *
+ * This function sets the file to be used with this emotion object. If the
+ * object already has another file set, this file will be unset and unloaded,
+ * and the new file will be loaded to this emotion object. The seek position
+ * will be set to 0, and the emotion object will be paused, instead of playing.
+ *
+ * If there was already a filename set, and it's the same as the one being set
+ * now, this function does nothing and returns EINA_FALSE.
+ *
+ * Use @c NULL as argument to @p filename if you want to unload the current file
+ * but don't want to load anything else.
+ *
+ * @see emotion_object_init()
+ * @see emotion_object_play_set()
+ * @see emotion_object_file_get()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Eina_Bool emotion_object_file_set (Evas_Object *obj, const char *filename);
+
+/**
+ * @brief Get the filename of the file associated with the emotion object.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The path to the file loaded into this emotion object.
+ *
+ * This function returns the path of the file loaded in this emotion object. If
+ * no object is loaded, it will return @c NULL.
+ *
+ * @note Don't free or change the string returned by this function in any way.
+ * If you want to unset it, use @c emotion_object_file_set(obj, NULL).
+ *
+ * @see emotion_object_file_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI const char *emotion_object_file_get (const Evas_Object *obj);
+/**
+ * @defgroup Emotion_Play Play control functions
+ * @ingroup Emotion
+ *
+ * @{
+ */
+/**
+ *
+ * @brief Set play/pause state of the media file.
+ *
+ * @param obj The emotion object whose state will be changed.
+ * @param play EINA_TRUE to play, EINA_FALSE to pause.
+ *
+ * This function sets the currently playing status of the video. Using this
+ * function to play or pause the video doesn't alter its current position.
+ */
+EAPI void emotion_object_play_set (Evas_Object *obj, Eina_Bool play);
+/**
+ * @brief Get play/pause state of the media file.
+ *
+ * @param obj The emotion object from which the state will be retrieved.
+ * @return EINA_TRUE if playing. EINA_FALSE if not playing.
+ */
+EAPI Eina_Bool emotion_object_play_get (const Evas_Object *obj);
+/**
+ * @brief Set the position in the media file.
+ *
+ * @param obj The emotion object whose position will be changed.
+ * @param sec The position(in seconds) to which the media file will be set.
+ *
+ * This function sets the current position of the media file to @p sec, this
+ * only works on seekable streams. Setting the position doesn't change the
+ * playing state of the media file.
+ *
+ * @see emotion_object_seekable_get
+ */
+EAPI void emotion_object_position_set (Evas_Object *obj, double sec);
+/**
+ * @brief Get the position in the media file.
+ *
+ * @param obj The emotion object from which the position will be retrieved.
+ * @return The position of the media file.
+ *
+ * The position is returned as the number of seconds since the beginning of the
+ * media file.
+ */
+EAPI double emotion_object_position_get (const Evas_Object *obj);
+
+/**
+ * @brief Get the percentual size of the buffering cache.
+ *
+ * @param obj The emotion object from which the buffer size will be retrieved.
+ * @return The buffer percent size, ranging from 0.0 to 1.0
+ *
+ * The buffer size is returned as a number between 0.0 and 1.0, 0.0 means
+ * the buffer is empty, 1.0 means full.
+ * If no buffering is in progress 1.0 is returned. In all other cases (maybe
+ * the backend don't support buffering) 1.0 is returned, thus you can always
+ * check for buffer_size < 1.0 to know if buffering is in progress.
+ *
+ * @warning The generic backend doesn't implement this (will return 1.0).
+ */
+EAPI double emotion_object_buffer_size_get (const Evas_Object *obj);
+
+/**
+ * @brief Get whether the media file is seekable.
+ *
+ * @param obj The emotion object from which the seekable status will be
+ * retrieved.
+ * @return EINA_TRUE if the media file is seekable, EINA_FALSE otherwise.
+ */
+EAPI Eina_Bool emotion_object_seekable_get (const Evas_Object *obj);
+/**
+ * @brief Get the length of play for the media file.
+ *
+ * @param obj The emotion object from which the length will be retrieved.
+ * @return The length of the media file in seconds.
+ *
+ * This function returns the length of the media file in seconds.
+ *
+ * @warning This will return 0 if called before the "length_change" signal has
+ * been emitted.
+ */
+EAPI double emotion_object_play_length_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the play speed of the media file.
+ *
+ * @param obj The emotion object whose speed will be set.
+ * @param speed The speed to be set in the range [0,infinity)
+ *
+ * This function sets the speed with which the media file will be played. 1.0
+ * represents the normal speed, 2 double speed, 0.5 half speed and so on.
+ *
+ * @warning The only backend that implements this is the experimental VLC
+ * backend.
+ */
+EAPI void emotion_object_play_speed_set (Evas_Object *obj, double speed);
+/**
+ * @brief Get the play speed of the media file.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The current speed of the media file.
+ *
+ * @see emotion_object_play_speed_set
+ */
+EAPI double emotion_object_play_speed_get (const Evas_Object *obj);
+/**
+ * @brief Get how much of the file has been played.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The progress of the media file.
+ *
+ * @warning Don't change or free the returned string.
+ * @warning The gstreamer and xine backends don't implement this (will return NULL).
+ */
+EAPI const char *emotion_object_progress_info_get (const Evas_Object *obj);
+/**
+ * @brief Get how much of the file has been played.
+ *
+ * @param obj The emotion object from which the filename will be retrieved
+ * @return The progress of the media file.
+ *
+ * This function gets the progress in playing the file, the return value is in
+ * the [0, 1] range.
+ *
+ * @warning The gstreamer and xine backends don't implement this (will return 0).
+ */
+EAPI double emotion_object_progress_status_get (const Evas_Object *obj);
+/**
+ * @}
+ */
+EAPI Eina_Bool emotion_object_video_handled_get (const Evas_Object *obj);
+EAPI Eina_Bool emotion_object_audio_handled_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve the video aspect ratio of the media file loaded.
+ *
+ * @param obj The emotion object which the video aspect ratio will be retrieved
+ * from.
+ * @return The video aspect ratio of the file loaded.
+ *
+ * This function returns the video aspect ratio (width / height) of the file
+ * loaded. It can be used to adapt the size of the emotion object in the canvas,
+ * so the aspect won't be changed (by wrongly resizing the object). Or to crop
+ * the video correctly, if necessary.
+ *
+ * The described behavior can be applied like following. Consider a given
+ * emotion object that we want to position inside an area, which we will
+ * represent by @c w and @c h. Since we want to position this object either
+ * stretching, or filling the entire area but overflowing the video, or just
+ * adjust the video to fit inside the area without keeping the aspect ratio, we
+ * must compare the video aspect ratio with the area aspect ratio:
+ * @code
+ * int w = 200, h = 300; // an arbitrary value which represents the area where
+ * // the video would be placed
+ * int vw, vh;
+ * double r, vr = emotion_object_ratio_get(obj);
+ * r = (double)w / h;
+ * @endcode
+ *
+ * Now, if we want to make the video fit inside the area, the following code
+ * would do it:
+ * @code
+ * if (vr > r) // the video is wider than the area
+ * {
+ * vw = w;
+ * vh = w / vr;
+ * }
+ * else // the video is taller than the area
+ * {
+ * vh = h;
+ * vw = h * vr;
+ * }
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * And for keeping the aspect ratio but making the video fill the entire area,
+ * overflowing the content which can't fit inside it, we would do:
+ * @code
+ * if (vr > r) // the video is wider than the area
+ * {
+ * vh = h;
+ * vw = h * vr;
+ * }
+ * else // the video is taller than the area
+ * {
+ * vw = w;
+ * vh = w / vr;
+ * }
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * Finally, by just resizing the video to the video area, we would have the
+ * video stretched:
+ * @code
+ * vw = w;
+ * vh = h;
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * The following diagram exemplifies what would happen to the video,
+ * respectively, in each case:
+ *
+ * @image html emotion_ratio.png
+ * @image latex emotion_ratio.eps width=\textwidth
+ *
+ * @note This function returns the aspect ratio that the video @b should be, but
+ * sometimes the reported size from emotion_object_size_get() represents a
+ * different aspect ratio. You can safely resize the video to respect the aspect
+ * ratio returned by @b this function.
+ *
+ * @see emotion_object_size_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI double emotion_object_ratio_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve the video size of the loaded file.
+ *
+ * @param obj The object from which we are retrieving the video size.
+ * @param iw A pointer to a variable where the width will be stored.
+ * @param ih A pointer to a variable where the height will be stored.
+ *
+ * This function returns the reported size of the loaded video file. If a file
+ * that doesn't contain a video channel is loaded, then this size can be
+ * ignored.
+ *
+ * The value reported by this function should be consistent with the aspect
+ * ratio returned by emotion_object_ratio_get(), but sometimes the information
+ * stored in the file is wrong. So use the ratio size reported by
+ * emotion_object_ratio_get(), since it is more likely going to be accurate.
+ *
+ * @note Use @c NULL for @p iw or @p ih if you don't need one of these values.
+ *
+ * @see emotion_object_ratio_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_size_get (const Evas_Object *obj, int *iw, int *ih);
+
+/**
+ * @brief Sets whether to use of high-quality image scaling algorithm
+ * of the given video object.
+ *
+ * When enabled, a higher quality video scaling algorithm is used when
+ * scaling videos to sizes other than the source video. This gives
+ * better results but is more computationally expensive.
+ *
+ * @param obj The given video object.
+ * @param smooth Whether to use smooth scale or not.
+ *
+ * @see emotion_object_smooth_scale_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_smooth_scale_set (Evas_Object *obj, Eina_Bool smooth);
+
+/**
+ * @brief Gets whether the high-quality image scaling algorithm
+ * of the given video object is used.
+ *
+ * @param obj The given video object.
+ * @return Whether the smooth scale is used or not.
+ *
+ * @see emotion_object_smooth_scale_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Eina_Bool emotion_object_smooth_scale_get (const Evas_Object *obj);
+EAPI void emotion_object_event_simple_send (Evas_Object *obj, Emotion_Event ev);
+
+/**
+ * @brief Set the audio volume.
+ *
+ * @param obj The object where the volume is being set.
+ * @param vol The new volume parameter. Range is from 0.0 to 1.0.
+ *
+ * Sets the audio volume of the stream being played. This has nothing to do with
+ * the system volume. This volume will be multiplied by the system volume. e.g.:
+ * if the current volume level is 0.5, and the system volume is 50%, it will be
+ * 0.5 * 0.5 = 0.25.
+ *
+ * The default value depends on the module used. This value doesn't get changed
+ * when another file is loaded.
+ *
+ * @see emotion_object_audio_volume_get()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI void emotion_object_audio_volume_set (Evas_Object *obj, double vol);
+
+/**
+ * @brief Get the audio volume.
+ *
+ * @param obj The object from which we are retrieving the volume.
+ * @return The current audio volume level for this object.
+ *
+ * Get the current value for the audio volume level. Range is from 0.0 to 1.0.
+ * This volume is set with emotion_object_audio_volume_set().
+ *
+ * @see emotion_object_audio_volume_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI double emotion_object_audio_volume_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the mute audio option for this object.
+ *
+ * @param obj The object which we are setting the mute audio option.
+ * @param mute Whether the audio should be muted (@c EINA_TRUE) or not (@c
+ * EINA_FALSE).
+ *
+ * This function sets the mute audio option for this emotion object. The current
+ * module used for this object can use this to avoid decoding the audio portion
+ * of the loaded media file.
+ *
+ * @see emotion_object_audio_mute_get()
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI void emotion_object_audio_mute_set (Evas_Object *obj, Eina_Bool mute);
+
+/**
+ * @brief Get the mute audio option of this object.
+ *
+ * @param obj The object which we are retrieving the mute audio option from.
+ * @return Whether the audio is muted (@c EINA_TRUE) or not (@c EINA_FALSE).
+ *
+ * This function return the mute audio option from this emotion object. It can
+ * be set with emotion_object_audio_mute_set().
+ *
+ * @see emotion_object_audio_mute_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI Eina_Bool emotion_object_audio_mute_get (const Evas_Object *obj);
+EAPI int emotion_object_audio_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_audio_channel_name_get(const Evas_Object *obj, int channel);
+EAPI void emotion_object_audio_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_audio_channel_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the mute video option for this object.
+ *
+ * @param obj The object which we are setting the mute video option.
+ * @param mute Whether the video should be muted (@c EINA_TRUE) or not (@c
+ * EINA_FALSE).
+ *
+ * This function sets the mute video option for this emotion object. The
+ * current module used for this object can use this information to avoid
+ * decoding the video portion of the loaded media file.
+ *
+ * @see emotion_object_video_mute_get()
+ * @see emotion_object_audio_mute_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_video_mute_set (Evas_Object *obj, Eina_Bool mute);
+
+/**
+ * @brief Get the mute video option of this object.
+ *
+ * @param obj The object which we are retrieving the mute video option from.
+ * @return Whether the video is muted (@c EINA_TRUE) or not (@c EINA_FALSE).
+ *
+ * This function returns the mute video option from this emotion object. It can
+ * be set with emotion_object_video_mute_set().
+ *
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Eina_Bool emotion_object_video_mute_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the video's subtitle file path.
+ *
+ * @param obj The object which we are setting a subtitle file path.
+ * @param filepath The subtitle file path.
+ *
+ * This function sets a video's subtitle file path(i.e an .srt file) for
+ * supported subtitle formats consult the backend's documentation.
+ *
+ * @see emotion_object_video_subtitle_file_get().
+ *
+ * @ingroup Emotion_Video
+ * @since 1.8
+ */
+EAPI void emotion_object_video_subtitle_file_set (Evas_Object *obj, const char *filepath);
+
+/**
+ * @brief Get the video's subtitle file path.
+ *
+ * @param obj The object which we are retrieving the subtitle file path from.
+ * @return The video's subtitle file path previously set, NULL otherwise.
+ *
+ * This function returns the video's subtitle file path, if not previously set
+ * or in error NULL is returned.
+ *
+ * @see emotion_object_video_subtitle_file_set().
+ *
+ * @ingroup Emotion_Video
+ * @since 1.8
+ */
+EAPI const char *emotion_object_video_subtitle_file_get (const Evas_Object *obj);
+
+/**
+ * @brief Get the number of available video channel
+ *
+ * @param obj The object which we are retrieving the channel count from
+ * @return the number of available channel.
+ *
+ * @see emotion_object_video_channel_name_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI int emotion_object_video_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_video_channel_name_get(const Evas_Object *obj, int channel);
+EAPI void emotion_object_video_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_video_channel_get (const Evas_Object *obj);
+EAPI void emotion_object_spu_mute_set (Evas_Object *obj, Eina_Bool mute);
+EAPI Eina_Bool emotion_object_spu_mute_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_spu_channel_name_get (const Evas_Object *obj, int channel);
+EAPI void emotion_object_spu_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_spu_channel_get (const Evas_Object *obj);
+EAPI int emotion_object_chapter_count (const Evas_Object *obj);
+EAPI void emotion_object_chapter_set (Evas_Object *obj, int chapter);
+EAPI int emotion_object_chapter_get (const Evas_Object *obj);
+EAPI const char *emotion_object_chapter_name_get (const Evas_Object *obj, int chapter);
+EAPI void emotion_object_eject (Evas_Object *obj);
+
+/**
+ * @brief Get the dvd title from this emotion object.
+ *
+ * @param obj The object which the title will be retrieved from.
+ * @return A string containing the title.
+ *
+ * This function is only useful when playing a DVD.
+ *
+ * @note Don't change or free the string returned by this function.
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI const char *emotion_object_title_get (const Evas_Object *obj);
+EAPI const char *emotion_object_ref_file_get (const Evas_Object *obj);
+EAPI int emotion_object_ref_num_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_button_count_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_button_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve meta information from this file being played.
+ *
+ * @param obj The object which the meta info will be extracted from.
+ * @param meta The type of meta information that will be extracted.
+ *
+ * This function retrieves information about the file loaded. It can retrieve
+ * the track title, artist name, album name, etc. See @ref Emotion_Meta_Info
+ * for all the possibilities.
+ *
+ * The meta info may be not available on all types of files. It will return @c
+ * NULL if the file doesn't have meta info, or if this specific field is
+ * empty.
+ *
+ * @note Don't change or free the string returned by this function.
+ *
+ * @see Emotion_Meta_Info
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI const char *emotion_object_meta_info_get (const Evas_Object *obj, Emotion_Meta_Info meta);
+
+/**
+ * @brief Set the visualization to be used with this object.
+ *
+ * @param obj The object where the visualization will be set on.
+ * @param visualization The type of visualization to be used.
+ *
+ * The @p visualization specified will be played instead of a video. This is
+ * commonly used to display a visualization for audio only files (musics).
+ *
+ * The available visualizations are @ref Emotion_Vis.
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_get()
+ * @see emotion_object_vis_supported()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI void emotion_object_vis_set (Evas_Object *obj, Emotion_Vis visualization);
+
+/**
+ * @brief Get the type of visualization in use by this emotion object.
+ *
+ * @param obj The emotion object which the visualization is being retrieved
+ * from.
+ * @return The type of visualization in use by this object.
+ *
+ * The type of visualization can be set by emotion_object_vis_set().
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_set()
+ * @see emotion_object_vis_supported()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI Emotion_Vis emotion_object_vis_get (const Evas_Object *obj);
+
+/**
+ * @brief Query whether a type of visualization is supported by this object.
+ *
+ * @param obj The object which the query is being ran on.
+ * @param visualization The type of visualization that is being queried.
+ * @return EINA_TRUE if the visualization is supported, EINA_FALSE otherwise.
+ *
+ * This can be used to check if a visualization is supported. e.g.: one wants to
+ * display a list of available visualizations for a specific object.
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_set()
+ * @see emotion_object_vis_get()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI Eina_Bool emotion_object_vis_supported (const Evas_Object *obj, Emotion_Vis visualization);
+
+/**
+ * @brief Raise priority of an object so it will have privileged access to hardware resources.
+ *
+ * @param obj The object which the query is being ran on.
+ * @param priority EINA_TRUE means give this object priority access to the hardware resource.
+ *
+ * Hardware usually has a few dedicated pipelines that process video at no cost for the CPU.
+ * Especially on SoC, you mostly have one (on mobile phone SoC) or two (on Set Top Box SoC) when
+ * Picture in Picture is needed. And most applications only have a few video streams that really
+ * deserve high frame rate, high quality output. That's what this call is for.
+ *
+ * Please note that if Emotion can't acquire a privileged hardware resource, it will fall back
+ * to the no-priority path. This works on a first-come, first-served basis.
+ *
+ * @see emotion_object_priority_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI void emotion_object_priority_set(Evas_Object *obj, Eina_Bool priority);
+
+/**
+ * @brief Get the actual priority of an object.
+ *
+ * @param obj The object which the query is being ran on.
+ * @return EINA_TRUE if the object has a priority access to the hardware.
+ *
+ * This actually returns the priority status of an object. If it failed to get privileged
+ * access to the hardware, it will return EINA_FALSE.
+ *
+ * @see emotion_object_priority_set()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI Eina_Bool emotion_object_priority_get(const Evas_Object *obj);
+
+/**
+ * @brief Change the state of an object pipeline.
+ *
+ * @param obj The object which the query is being ran on.
+ * @param state The new state for the object.
+ *
+ * Changing the state of a pipeline should help preserve the battery of an embedded device.
+ * But it will only work sanely if the pipeline is not playing at the time you change its
+ * state. Depending on the engine all state may be not implemented.
+ *
+ * @see Emotion_Suspend
+ * @see emotion_object_suspend_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI void emotion_object_suspend_set(Evas_Object *obj, Emotion_Suspend state);
+
+/**
+ * @brief Get the current state of the pipeline
+ *
+ * @param obj The object which the query is being ran on.
+ * @return the current state of the pipeline.
+ *
+ * @see Emotion_Suspend
+ * @see emotion_object_suspend_set()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI Emotion_Suspend emotion_object_suspend_get(Evas_Object *obj);
+
+/**
+ * @brief Load the last known position if available
+ *
+ * @param obj The object which the query is being ran on.
+ *
+ * By using Xattr, Emotion is able, if the system permits it, to store and retrieve
+ * the latest position. It should trigger some smart callback to let the application
+ * know when it succeeds or fails. Every operation is fully asynchronous and not
+ * linked to the actual engine used to play the video.
+ *
+ * @see emotion_object_last_position_save()
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI void emotion_object_last_position_load(Evas_Object *obj);
+
+/**
+ * @brief Save the lastest position if possible
+ *
+ * @param obj The object which the query is being ran on.
+ *
+ * By using Xattr, Emotion is able, if the system permits it, to store and retrieve
+ * the latest position. It should trigger some smart callback to let the application
+ * know when it succeeds or fails. Every operation is fully asynchronous and not
+ * linked to the actual engine used to play the video.
+ *
+ * @see emotion_object_last_position_load()
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI void emotion_object_last_position_save(Evas_Object *obj);
+
+/**
+ * @brief Do we have a chance to play that file
+ *
+ * @param file A stringshared filename that we want to know if Emotion can play.
+ *
+ * This actually just looks at the extension of the file; it doesn't check the mime-type
+ * nor whether the file is actually sane. So this is just a hint for your application.
+ *
+ * @see emotion_object_extension_may_play_get()
+ */
+EAPI Eina_Bool emotion_object_extension_may_play_fast_get(const char *file);
+
+/**
+ * @brief Do we have a chance to play that file
+ *
+ * @param file A filename that we want to know if Emotion can play.
+ *
+ * This actually just looks at the extension of the file; it doesn't check the mime-type
+ * nor whether the file is actually sane. So this is just a hint for your application.
+ *
+ * @see emotion_object_extension_may_play_fast_get()
+ */
+EAPI Eina_Bool emotion_object_extension_may_play_get(const char *file);
+
+/**
+ * @brief Get the actual image object that contains the pixels of the video stream
+ *
+ * @param obj The object which the query is being ran on.
+ *
+ * This function is useful when you want to get direct access to the pixels.
+ *
+ * @see emotion_object_image_get()
+ */
+EAPI Evas_Object *emotion_object_image_get(const Evas_Object *obj);
+
+/**
+ * @defgroup Emotion_Webcam API available for accessing webcam
+ * @ingroup Emotion
+ */
+
+typedef struct _Emotion_Webcam Emotion_Webcam; /**< Webcam description */
+
+EAPI extern int EMOTION_WEBCAM_UPDATE; /**< Ecore_Event triggered when a new webcam is plugged in */
+
+/**
+ * @brief Get a list of active and available webcam
+ *
+ * @return the list of available webcam at the time of the call.
+ *
+ * It will return the current live list of webcam. It is updated before
+ * triggering EMOTION_WEBCAM_UPDATE and should never be modified.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const Eina_List *emotion_webcams_get(void);
+
+/**
+ * @brief Get the human understandable name of a Webcam
+ *
+ * @param ew The webcam to get the name from.
+ * @return the actual human readable name.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const char *emotion_webcam_name_get(const Emotion_Webcam *ew);
+
+/**
+ * @brief Get the uri of a Webcam that will be understood by emotion
+ *
+ * @param ew The webcam to get the uri from.
+ * @return the actual uri that emotion will later understand.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const char *emotion_webcam_device_get(const Emotion_Webcam *ew);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/lib/emotion/emotion_main.c b/src/lib/emotion/emotion_main.c
new file mode 100644
index 0000000000..8416f50b1c
--- /dev/null
+++ b/src/lib/emotion/emotion_main.c
@@ -0,0 +1,464 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+
+#include <stdio.h>
+#include <string.h>
+#include <strings.h>
+#include <errno.h>
+
+#ifdef HAVE_EEZE
+# include <sys/types.h>
+# include <sys/stat.h>
+# include <fcntl.h>
+# include <unistd.h>
+# ifdef HAVE_V4L2
+# include <sys/ioctl.h>
+# include <linux/videodev2.h>
+# endif
+# include <Eeze.h>
+#endif
+
+#include <Ecore.h>
+#include <Eet.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+
+static Emotion_Version _version = { VMAJ, VMIN, VMIC, VREV };
+static int emotion_pending_objects = 0;
+EAPI Emotion_Version *emotion_version = &_version;
+
+EAPI int EMOTION_WEBCAM_UPDATE = 0;
+
+/* Extension match entry: 'length' is sizeof(literal), i.e. it INCLUDES the
+ * terminating NUL — the lookup below compensates by using strlen+1 too. */
+struct ext_match_s
+{
+   unsigned int length;
+   const char *extension;
+};
+
+#define MATCHING(Ext) \
+  { sizeof (Ext), Ext }
+
+static const struct ext_match_s matchs[] =
+{ /* map extensions to know if it's a emotion playable content for good first-guess tries */
+   MATCHING(".264"),
+   MATCHING(".3g2"),
+   MATCHING(".3gp"),
+   MATCHING(".3gp2"),
+   MATCHING(".3gpp"),
+   MATCHING(".3gpp2"),
+   MATCHING(".3p2"),
+   MATCHING(".asf"),
+   MATCHING(".avi"),
+   MATCHING(".bdm"),
+   MATCHING(".bdmv"),
+   MATCHING(".clpi"),
+   MATCHING(".clp"),
+   MATCHING(".fla"),
+   MATCHING(".flv"),
+   MATCHING(".m1v"),
+   MATCHING(".m2v"),
+   MATCHING(".m2t"),
+   MATCHING(".m4v"),
+   MATCHING(".mkv"),
+   MATCHING(".mov"),
+   MATCHING(".mp2"),
+   MATCHING(".mp2ts"),
+   MATCHING(".mp4"),
+   MATCHING(".mpe"),
+   MATCHING(".mpeg"),
+   MATCHING(".mpg"),
+   MATCHING(".mpl"),
+   MATCHING(".mpls"),
+   MATCHING(".mts"),
+   MATCHING(".mxf"),
+   MATCHING(".nut"),
+   MATCHING(".nuv"),
+   MATCHING(".ogg"),
+   MATCHING(".ogm"),
+   MATCHING(".ogv"),
+   MATCHING(".rm"),
+   MATCHING(".rmj"),
+   MATCHING(".rmm"),
+   MATCHING(".rms"),
+   MATCHING(".rmx"),
+   MATCHING(".rmvb"),
+   MATCHING(".swf"),
+   MATCHING(".ts"),
+   MATCHING(".weba"),
+   MATCHING(".webm"),
+   MATCHING(".wmv")
+};
+
+/* Case-insensitively match @p file against the known-extension table.
+ * BUG FIX: the original used eina_stringshare_strlen(), which is only valid
+ * on stringshare strings, yet emotion_object_extension_may_play_fast_get()
+ * forwards arbitrary C strings here. Plain strlen() is correct for both.
+ * Lengths include the trailing NUL, mirroring MATCHING()'s sizeof. */
+Eina_Bool
+_emotion_object_extension_can_play_generic_get(const void *data EINA_UNUSED, const char *file)
+{
+   unsigned int length;
+   unsigned int i;
+
+   length = strlen(file) + 1;
+   if (length < 5) return EINA_FALSE;
+
+   for (i = 0; i < sizeof (matchs) / sizeof (struct ext_match_s); ++i)
+     {
+        if (matchs[i].length > length) continue;
+
+        if (!strcasecmp(matchs[i].extension,
+                        file + length - matchs[i].length))
+          return EINA_TRUE;
+     }
+
+   return EINA_FALSE;
+}
+
+/* NULL-safe public wrapper: delegates the extension check to the generic
+ * matcher without duplicating the string. */
+EAPI Eina_Bool
+emotion_object_extension_may_play_fast_get(const char *file)
+{
+   if (!file) return EINA_FALSE;
+   return _emotion_object_extension_can_play_generic_get(NULL, file);
+}
+
+/* Same check as the fast variant, but interns @p file as a stringshare
+ * first so stringshare-aware helpers may safely be used on it. */
+EAPI Eina_Bool
+emotion_object_extension_may_play_get(const char *file)
+{
+   const char *tmp;
+   Eina_Bool result;
+
+   if (!file) return EINA_FALSE;
+   tmp = eina_stringshare_add(file);
+   result = emotion_object_extension_may_play_fast_get(tmp);
+   eina_stringshare_del(tmp);
+
+   return result;
+}
+
+typedef struct _Emotion_Webcams Emotion_Webcams;
+
+/* Container (de)serialized from the emotion.cfg Eet file. */
+struct _Emotion_Webcams
+{
+   Eina_List *webcams;
+};
+
+/* One detected (or config-provided) webcam device. */
+struct _Emotion_Webcam
+{
+   EINA_REFCOUNT;
+
+   const char *syspath;  /* udev syspath (stringshare) */
+   const char *device;   /* "v4l2://<devnode>" uri (stringshare) */
+   const char *name;     /* human readable name from the "name" sysattr */
+
+   const char *custom;   /* custom uri from config; NULL for detected devices */
+
+   const char *filename; /* aliases 'device' past "v4l2://" — not separately owned */
+};
+
+static int _emotion_webcams_count = 0;
+static Eet_Data_Descriptor *_webcam_edd;
+static Eet_Data_Descriptor *_webcams_edd;
+
+static Emotion_Webcams *_emotion_webcams = NULL;
+static Eet_File *_emotion_webcams_file = NULL;
+
+/* Build the Eet descriptors used to decode emotion.cfg: a list of
+ * Emotion_Webcam records inside an Emotion_Webcams container. The two
+ * descriptors are kept in the file-scope _webcam_edd/_webcams_edd so the
+ * caller can free them after reading. Returns the container descriptor. */
+static Eet_Data_Descriptor *
+_emotion_webcams_data(void)
+{
+   Eet_Data_Descriptor_Class eddc;
+
+   EET_EINA_FILE_DATA_DESCRIPTOR_CLASS_SET(&eddc, Emotion_Webcam);
+   _webcam_edd = eet_data_descriptor_file_new(&eddc);
+   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "device", device, EET_T_STRING);
+   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "name", name, EET_T_STRING);
+   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "custom", custom, EET_T_STRING);
+   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "filename", filename, EET_T_STRING);
+
+   EET_EINA_FILE_DATA_DESCRIPTOR_CLASS_SET(&eddc, Emotion_Webcams);
+   _webcams_edd = eet_data_descriptor_file_new(&eddc);
+   EET_DATA_DESCRIPTOR_ADD_LIST(_webcams_edd, Emotion_Webcams, "webcams", webcams, _webcam_edd);
+
+   return _webcams_edd;
+}
+
+/* Free a webcam record. Stringshares are only owned for detected devices;
+ * entries loaded from the config file (custom != NULL) keep their strings.
+ * NOTE(review): 'custom' itself is never released here — verify whether
+ * config-loaded entries leak their string fields. */
+static void
+emotion_webcam_destroy(Emotion_Webcam *ew)
+{
+   if (!ew->custom)
+     {
+        eina_stringshare_del(ew->syspath);
+        eina_stringshare_del(ew->device);
+        eina_stringshare_del(ew->name);
+     }
+   free(ew);
+}
+
+#ifdef HAVE_EEZE
+static Eeze_Udev_Watch *eeze_watcher = NULL;
+
+/* Validate that @p ew really is a V4L2 capture device; on success take
+ * ownership (append to the global list, init refcount), otherwise log,
+ * free it and return. Without V4L2 support every candidate is rejected.
+ * BUG FIXES vs. original:
+ *  - '!caps.capabilities & V4L2_CAP_VIDEO_CAPTURE' negated the whole
+ *    capability word before masking ('!' binds tighter than '&'), so the
+ *    capture check never worked; parenthesized correctly below.
+ *  - the open()ed fd was never closed (leaked on every path). */
+static void
+_emotion_check_device(Emotion_Webcam *ew)
+{
+#ifdef HAVE_V4L2
+   Emotion_Webcam *check;
+   Eina_List *l;
+   struct v4l2_capability caps;
+   int fd = -1;
+#endif
+
+   if (!ew) return ;
+#ifdef HAVE_V4L2
+   if (!ew->device) goto on_error;
+
+   fd = open(ew->filename, O_RDONLY);
+   if (fd < 0) goto on_error;
+
+   if (ioctl(fd, VIDIOC_QUERYCAP, &caps) == -1) goto on_error;
+
+   /* Likely not a webcam: must capture video, must not be a tuner/radio. */
+   if (!(caps.capabilities & V4L2_CAP_VIDEO_CAPTURE)) goto on_error;
+   if (caps.capabilities & V4L2_CAP_TUNER
+       || caps.capabilities & V4L2_CAP_RADIO
+       || caps.capabilities & V4L2_CAP_MODULATOR)
+     goto on_error;
+
+   /* Skip duplicates — device uris are stringshares, pointer compare is OK. */
+   EINA_LIST_FOREACH(_emotion_webcams->webcams, l, check)
+     if (check->device == ew->device)
+       goto on_error;
+
+   close(fd);
+
+   _emotion_webcams->webcams = eina_list_append(_emotion_webcams->webcams, ew);
+
+   EINA_REFCOUNT_INIT(ew);
+
+   return ;
+
+ on_error:
+   if (fd >= 0) close(fd);
+#endif
+   EINA_LOG_ERR("'%s' is not a webcam ['%s']", ew->name, strerror(errno));
+   eina_stringshare_del(ew->syspath);
+   eina_stringshare_del(ew->device);
+   eina_stringshare_del(ew->name);
+   free(ew);
+}
+
+/* Allocate a webcam record for a udev syspath, building its "v4l2://" uri
+ * from the DEVNAME property. Returns NULL on allocation failure or when the
+ * device has no DEVNAME (the original crashed in that case: it passed NULL
+ * to eina_stringshare_strlen()/snprintf()). */
+static Emotion_Webcam *
+_emotion_webcam_new(const char *syspath)
+{
+   Emotion_Webcam *test;
+   const char *device;
+   char *local;
+   size_t size;
+
+   test = malloc(sizeof (Emotion_Webcam));
+   if (!test) return NULL;
+
+   test->custom = NULL;
+   test->syspath = eina_stringshare_ref(syspath);
+   test->name = eeze_udev_syspath_get_sysattr(syspath, "name");
+
+   device = eeze_udev_syspath_get_property(syspath, "DEVNAME");
+   if (!device)
+     {
+        eina_stringshare_del(test->syspath);
+        eina_stringshare_del(test->name);
+        free(test);
+        return NULL;
+     }
+   size = eina_stringshare_strlen(device) + 8; /* strlen("v4l2://") + NUL */
+   local = alloca(size);
+   snprintf(local, size, "v4l2://%s", device);
+   test->device = eina_stringshare_add(local);
+   eina_stringshare_del(device);
+   test->filename = test->device + 7; /* skip the "v4l2://" prefix */
+
+   return test;
+}
+
+/* Scan all V4L devices known to eeze/udev and register the ones that pass
+ * _emotion_check_device() (which takes ownership of each candidate). */
+static void
+_emotion_enumerate_all_webcams(void)
+{
+   Eina_List *devices;
+   const char *syspath;
+
+   devices = eeze_udev_find_by_type(EEZE_UDEV_TYPE_V4L, NULL);
+
+   EINA_LIST_FREE(devices, syspath)
+     {
+        Emotion_Webcam *test;
+
+        test = _emotion_webcam_new(syspath);
+        if (test) _emotion_check_device(test);
+
+        eina_stringshare_del(syspath);
+     }
+}
+
+/* eeze udev hotplug callback: keep the global webcam list in sync and
+ * broadcast EMOTION_WEBCAM_UPDATE after every add/remove. */
+static void
+_emotion_eeze_events(const char *syspath,
+                     Eeze_Udev_Event ev,
+                     void *data EINA_UNUSED,
+                     Eeze_Udev_Watch *watcher EINA_UNUSED)
+{
+   if (ev == EEZE_UDEV_EVENT_REMOVE)
+     {
+        Emotion_Webcam *check;
+        Eina_List *l;
+
+        /* NOTE(review): pointer comparison assumes eeze hands us the same
+         * stringshare as stored at add time — confirm against eeze docs. */
+        EINA_LIST_FOREACH(_emotion_webcams->webcams, l, check)
+          if (check->syspath == syspath)
+            {
+               _emotion_webcams->webcams = eina_list_remove_list(_emotion_webcams->webcams, l);
+               EINA_REFCOUNT_UNREF(check)
+                 emotion_webcam_destroy(check);
+               break ;
+            }
+     }
+   else if (ev == EEZE_UDEV_EVENT_ADD)
+     {
+        Emotion_Webcam *test;
+
+        test = _emotion_webcam_new(syspath);
+        if (test) _emotion_check_device(test);
+     }
+   ecore_event_add(EMOTION_WEBCAM_UPDATE, NULL, NULL, NULL);
+}
+
+#endif
+
+/* Refcounted library init: first call loads the optional webcam config,
+ * enumerates V4L devices (with eeze) and installs the hotplug watcher.
+ * BUG FIX: on calloc failure the original returned EINA_FALSE while leaving
+ * the eet file open, ecore initialized and the init count incremented —
+ * now the partial init is rolled back so a later call can retry cleanly. */
+EAPI Eina_Bool
+emotion_init(void)
+{
+   char buffer[4096];
+
+   if (_emotion_webcams_count++) return EINA_TRUE;
+
+   ecore_init();
+
+   snprintf(buffer, sizeof (buffer), "%s/emotion.cfg", PACKAGE_DATA_DIR);
+   _emotion_webcams_file = eet_open(buffer, EET_FILE_MODE_READ);
+   if (_emotion_webcams_file)
+     {
+        Eet_Data_Descriptor *edd;
+
+        edd = _emotion_webcams_data();
+
+        _emotion_webcams = eet_data_read(_emotion_webcams_file, edd, "config");
+
+        eet_data_descriptor_free(_webcams_edd); _webcams_edd = NULL;
+        eet_data_descriptor_free(_webcam_edd); _webcam_edd = NULL;
+     }
+
+   if (!_emotion_webcams)
+     {
+        _emotion_webcams = calloc(1, sizeof (Emotion_Webcams));
+        if (!_emotion_webcams)
+          {
+             if (_emotion_webcams_file)
+               {
+                  eet_close(_emotion_webcams_file);
+                  _emotion_webcams_file = NULL;
+               }
+             ecore_shutdown();
+             _emotion_webcams_count--;
+             return EINA_FALSE;
+          }
+     }
+
+#ifdef HAVE_EEZE
+   EMOTION_WEBCAM_UPDATE = ecore_event_type_new();
+
+   eeze_init();
+
+   _emotion_enumerate_all_webcams();
+
+   eeze_watcher = eeze_udev_watch_add(EEZE_UDEV_TYPE_V4L,
+                                      (EEZE_UDEV_EVENT_ADD | EEZE_UDEV_EVENT_REMOVE),
+                                      _emotion_eeze_events, NULL);
+#endif
+
+   return EINA_TRUE;
+}
+
+/* Refcounted shutdown, mirror of emotion_init(). Tears down the webcam
+ * list, the config file handle and the eeze watcher, then gives still
+ * running backend pipelines up to 0.5s of main-loop iterations to finish. */
+EAPI Eina_Bool
+emotion_shutdown(void)
+{
+   Emotion_Webcam *ew;
+   double start;
+
+   if (_emotion_webcams_count <= 0)
+     {
+        EINA_LOG_ERR("Init count not greater than 0 in shutdown.");
+        return EINA_FALSE;
+     }
+   if (--_emotion_webcams_count) return EINA_TRUE;
+
+   EINA_LIST_FREE(_emotion_webcams->webcams, ew)
+     {
+        /* There is no way to take extra references from the outside yet, so
+         * a single unref here frees each entry — revisit if that changes. */
+        EINA_REFCOUNT_UNREF(ew)
+        emotion_webcam_destroy(ew);
+     }
+   free(_emotion_webcams);
+   _emotion_webcams = NULL;
+
+   if (_emotion_webcams_file)
+     {
+        /* Safe to close now: nothing references the eet-backed strings anymore. */
+        eet_close(_emotion_webcams_file);
+        _emotion_webcams_file = NULL;
+     }
+
+#ifdef HAVE_EEZE
+   eeze_udev_watch_del(eeze_watcher);
+   eeze_watcher = NULL;
+
+   eeze_shutdown();
+#endif
+
+   /* Grace period for backend pipelines still counted by
+    * _emotion_pending_object_ref(). */
+   start = ecore_time_get();
+   while (emotion_pending_objects && ecore_time_get() - start < 0.5)
+     ecore_main_loop_iterate();
+
+   if (emotion_pending_objects)
+     {
+        EINA_LOG_ERR("There is still %i Emotion pipeline running", emotion_pending_objects);
+     }
+
+   ecore_shutdown();
+
+   return EINA_TRUE;
+}
+
+/* Return the live list of detected webcams (owned by emotion, do not
+ * modify). BUG FIX: guard against calls before emotion_init() / after
+ * emotion_shutdown(), where the original dereferenced a NULL pointer. */
+EAPI const Eina_List *
+emotion_webcams_get(void)
+{
+   if (!_emotion_webcams) return NULL;
+   return _emotion_webcams->webcams;
+}
+
+/* Human readable name of @p ew, or NULL for a NULL handle. */
+EAPI const char *
+emotion_webcam_name_get(const Emotion_Webcam *ew)
+{
+   return ew ? ew->name : NULL;
+}
+
+/* Emotion-understandable uri ("v4l2://...") of @p ew, or NULL for a NULL handle. */
+EAPI const char *
+emotion_webcam_device_get(const Emotion_Webcam *ew)
+{
+   return ew ? ew->device : NULL;
+}
+
+/* Look up a custom uri configured for @p device in emotion.cfg.
+ * Returns NULL when no config was loaded or no entry matches. */
+EAPI const char *
+emotion_webcam_custom_get(const char *device)
+{
+   const Emotion_Webcam *ew;
+   const Eina_List *l;
+
+   if (_emotion_webcams)
+     {
+        EINA_LIST_FOREACH(_emotion_webcams->webcams, l, ew)
+          if (ew->device && strcmp(device, ew->device) == 0)
+            return ew->custom;
+     }
+
+   return NULL;
+}
+
+/* Count a live backend pipeline so emotion_shutdown() can wait for it. */
+EAPI void
+_emotion_pending_object_ref(void)
+{
+   emotion_pending_objects++;
+}
+
+/* Balance a previous _emotion_pending_object_ref(). */
+EAPI void
+_emotion_pending_object_unref(void)
+{
+   emotion_pending_objects--;
+}
diff --git a/src/lib/emotion/emotion_private.h b/src/lib/emotion/emotion_private.h
new file mode 100644
index 0000000000..73a1b7ddf0
--- /dev/null
+++ b/src/lib/emotion/emotion_private.h
@@ -0,0 +1,137 @@
+#ifndef EMOTION_PRIVATE_H
+#define EMOTION_PRIVATE_H
+
+#define META_TRACK_TITLE 1
+#define META_TRACK_ARTIST 2
+#define META_TRACK_GENRE 3
+#define META_TRACK_COMMENT 4
+#define META_TRACK_ALBUM 5
+#define META_TRACK_YEAR 6
+#define META_TRACK_DISCID 7
+#define META_TRACK_COUNT 8
+
+typedef enum _Emotion_Format Emotion_Format;
+typedef struct _Emotion_Video_Module Emotion_Video_Module;
+typedef struct _Emotion_Module_Options Emotion_Module_Options;
+typedef struct _Eina_Emotion_Plugins Eina_Emotion_Plugins;
+
+typedef Eina_Bool (*Emotion_Module_Open)(Evas_Object *, const Emotion_Video_Module **, void **, Emotion_Module_Options *);
+typedef void (*Emotion_Module_Close)(Emotion_Video_Module *module, void *);
+
+/* Pixel formats a backend may deliver frames in. */
+enum _Emotion_Format
+{
+   EMOTION_FORMAT_NONE,
+   EMOTION_FORMAT_I420,
+   EMOTION_FORMAT_YV12,
+   EMOTION_FORMAT_YUY2, /* unused for now since evas does not support yuy2 format */
+   EMOTION_FORMAT_BGRA
+};
+
+/* Options passed to a backend when it is opened. */
+struct _Emotion_Module_Options
+{
+   const char *player;      /* player binary/engine name (see "player" option) */
+   Eina_Bool no_video : 1;  /* request audio-only pipeline */
+   Eina_Bool no_audio : 1;  /* request video-only pipeline */
+};
+
+/* Open/close pair a backend registers via _emotion_module_register(). */
+struct _Eina_Emotion_Plugins
+{
+   Emotion_Module_Open open;
+   Emotion_Module_Close close;
+};
+
+/* Virtual table every video backend fills in. All 'ef' parameters receive
+ * the backend-private handle produced by init()/open. */
+struct _Emotion_Video_Module
+{
+   /* lifecycle */
+   unsigned char (*init) (Evas_Object *obj, void **video, Emotion_Module_Options *opt);
+   int (*shutdown) (void *video);
+   unsigned char (*file_open) (const char *file, Evas_Object *obj, void *video);
+   void (*file_close) (void *ef);
+   /* playback control and timing */
+   void (*play) (void *ef, double pos);
+   void (*stop) (void *ef);
+   void (*size_get) (void *ef, int *w, int *h);
+   void (*pos_set) (void *ef, double pos);
+   double (*len_get) (void *ef);
+   double (*buffer_size_get) (void *ef);
+   int (*fps_num_get) (void *ef);
+   int (*fps_den_get) (void *ef);
+   double (*fps_get) (void *ef);
+   double (*pos_get) (void *ef);
+   /* audio visualisation */
+   void (*vis_set) (void *ef, Emotion_Vis vis);
+   Emotion_Vis (*vis_get) (void *ef);
+   Eina_Bool (*vis_supported) (void *ef, Emotion_Vis vis);
+   /* stream capabilities */
+   double (*ratio_get) (void *ef);
+   int (*video_handled) (void *ef);
+   int (*audio_handled) (void *ef);
+   int (*seekable) (void *ef);
+   /* frame data access */
+   void (*frame_done) (void *ef);
+   Emotion_Format (*format_get) (void *ef);
+   void (*video_data_size_get) (void *ef, int *w, int *h);
+   int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+   int (*bgra_data_get) (void *ef, unsigned char **bgra_data);
+   /* input event injection (DVD menus etc.) */
+   void (*event_feed) (void *ef, int event);
+   void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
+   void (*event_mouse_move_feed) (void *ef, int x, int y);
+   /* video / audio / spu channel selection */
+   int (*video_channel_count) (void *ef);
+   void (*video_channel_set) (void *ef, int channel);
+   int (*video_channel_get) (void *ef);
+   void (*video_subtitle_file_set) (void *ef, const char *filepath);
+   const char * (*video_subtitle_file_get) (void *ef);
+   const char * (*video_channel_name_get) (void *ef, int channel);
+   void (*video_channel_mute_set) (void *ef, int mute);
+   int (*video_channel_mute_get) (void *ef);
+   int (*audio_channel_count) (void *ef);
+   void (*audio_channel_set) (void *ef, int channel);
+   int (*audio_channel_get) (void *ef);
+   const char * (*audio_channel_name_get) (void *ef, int channel);
+   void (*audio_channel_mute_set) (void *ef, int mute);
+   int (*audio_channel_mute_get) (void *ef);
+   void (*audio_channel_volume_set) (void *ef, double vol);
+   double (*audio_channel_volume_get) (void *ef);
+   int (*spu_channel_count) (void *ef);
+   void (*spu_channel_set) (void *ef, int channel);
+   int (*spu_channel_get) (void *ef);
+   const char * (*spu_channel_name_get) (void *ef, int channel);
+   void (*spu_channel_mute_set) (void *ef, int mute);
+   int (*spu_channel_mute_get) (void *ef);
+   /* chapters, speed, misc */
+   int (*chapter_count) (void *ef);
+   void (*chapter_set) (void *ef, int chapter);
+   int (*chapter_get) (void *ef);
+   const char * (*chapter_name_get) (void *ef, int chapter);
+   void (*speed_set) (void *ef, double speed);
+   double (*speed_get) (void *ef);
+   int (*eject) (void *ef);
+   const char * (*meta_get) (void *ef, int meta);
+   void (*priority_set) (void *ef, Eina_Bool priority);
+   Eina_Bool (*priority_get) (void *ef);
+
+   /* back-pointer to the open/close pair this module was created from */
+   Eina_Emotion_Plugins *plugin;
+};
+
+EAPI void *_emotion_video_get(const Evas_Object *obj);
+EAPI void _emotion_frame_new(Evas_Object *obj);
+EAPI void _emotion_video_pos_update(Evas_Object *obj, double pos, double len);
+EAPI void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
+EAPI void _emotion_frame_refill(Evas_Object *obj, double w, double h);
+EAPI void _emotion_decode_stop(Evas_Object *obj);
+EAPI void _emotion_open_done(Evas_Object *obj);
+EAPI void _emotion_playback_started(Evas_Object *obj);
+EAPI void _emotion_playback_finished(Evas_Object *obj);
+EAPI void _emotion_audio_level_change(Evas_Object *obj);
+EAPI void _emotion_channels_change(Evas_Object *obj);
+EAPI void _emotion_title_set(Evas_Object *obj, char *title);
+EAPI void _emotion_progress_set(Evas_Object *obj, char *info, double stat);
+EAPI void _emotion_file_ref_set(Evas_Object *obj, const char *file, int num);
+EAPI void _emotion_spu_button_num_set(Evas_Object *obj, int num);
+EAPI void _emotion_spu_button_set(Evas_Object *obj, int button);
+EAPI void _emotion_seek_done(Evas_Object *obj);
+EAPI void _emotion_image_reset(Evas_Object *obj);
+
+EAPI Eina_Bool _emotion_module_register(const char *name, Emotion_Module_Open open, Emotion_Module_Close close);
+EAPI Eina_Bool _emotion_module_unregister(const char *name);
+
+EAPI const char *emotion_webcam_custom_get(const char *device);
+
+EAPI void _emotion_pending_object_ref(void);
+EAPI void _emotion_pending_object_unref(void);
+
+#endif
diff --git a/src/lib/emotion/emotion_smart.c b/src/lib/emotion/emotion_smart.c
new file mode 100644
index 0000000000..709414459c
--- /dev/null
+++ b/src/lib/emotion/emotion_smart.c
@@ -0,0 +1,2133 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Evas.h>
+#include <Ecore.h>
+
+#ifdef HAVE_EIO
+# include <math.h>
+# include <Eio.h>
+#endif
+
+#include "Emotion.h"
+#include "emotion_private.h"
+
+#ifdef _WIN32
+# define FMT_UCHAR "%c"
+#else
+# define FMT_UCHAR "%hhu"
+#endif
+
+#define E_SMART_OBJ_GET(smart, o, type) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return; \
+ if (strcmp(_e_smart_str, type)) return; \
+ }
+
+#define E_SMART_OBJ_GET_RETURN(smart, o, type, ret) \
+ { \
+ char *_e_smart_str; \
+ \
+ if (!o) return ret; \
+ smart = evas_object_smart_data_get(o); \
+ if (!smart) return ret; \
+ _e_smart_str = (char *)evas_object_type_get(o); \
+ if (!_e_smart_str) return ret; \
+ if (strcmp(_e_smart_str, type)) return ret; \
+ }
+
+#define DBG(...) EINA_LOG_DOM_DBG(_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_log_domain, __VA_ARGS__)
+
+#define E_OBJ_NAME "emotion_object"
+
+typedef struct _Smart_Data Smart_Data;
+
+struct _Smart_Data
+{
+ EINA_REFCOUNT;
+ Emotion_Video_Module *module;
+ void *video_data;
+
+ char *module_name;
+
+ const char *file;
+ Evas_Object *obj;
+ Evas_Object *bg;
+
+ Ecore_Job *job;
+
+ char *title;
+
+#ifdef HAVE_EIO
+ Eio_File *load_xattr;
+ Eio_File *save_xattr;
+#endif
+
+ struct {
+ char *info;
+ double stat;
+ } progress;
+ struct {
+ char *file;
+ int num;
+ } ref;
+ struct {
+ int button_num;
+ int button;
+ } spu;
+ struct {
+ int l; /* left */
+ int r; /* right */
+ int t; /* top */
+ int b; /* bottom */
+ Evas_Object *clipper;
+ } crop;
+
+ struct {
+ int w, h;
+ } video;
+ struct {
+ double w, h;
+ } fill;
+
+ double ratio;
+ double pos;
+ double remember_jump;
+ double seek_pos;
+ double len;
+
+ Emotion_Module_Options module_options;
+
+ Emotion_Suspend state;
+ Emotion_Aspect aspect;
+
+ Ecore_Animator *anim;
+
+ Eina_Bool open : 1;
+ Eina_Bool play : 1;
+ Eina_Bool remember_play : 1;
+ Eina_Bool seek : 1;
+ Eina_Bool seeking : 1;
+};
+
+static void _mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info);
+static void _pos_set_job(void *data);
+static void _pixels_get(void *data, Evas_Object *obj);
+
+static void _smart_init(void);
+static void _smart_add(Evas_Object * obj);
+static void _smart_del(Evas_Object * obj);
+static void _smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y);
+static void _smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h);
+static void _smart_show(Evas_Object * obj);
+static void _smart_hide(Evas_Object * obj);
+static void _smart_color_set(Evas_Object * obj, int r, int g, int b, int a);
+static void _smart_clip_set(Evas_Object * obj, Evas_Object * clip);
+static void _smart_clip_unset(Evas_Object * obj);
+
+/**********************************/
+/* Globals for the E Video Object */
+/**********************************/
+static Evas_Smart *smart = NULL;
+static Eina_Hash *_backends = NULL;
+static Eina_Array *_modules = NULL;
+static int _log_domain = -1;
+
+static const char *_backend_priority[] = {
+ "gstreamer",
+ "xine",
+ "generic"
+};
+
+static const char SIG_FRAME_DECODE[] = "frame_decode";
+static const char SIG_POSITION_UPDATE[] = "position_update";
+static const char SIG_LENGTH_CHANGE[] = "length_change";
+static const char SIG_FRAME_RESIZE[] = "frame_resize";
+static const char SIG_DECODE_STOP[] = "decode_stop";
+static const char SIG_PLAYBACK_STARTED[] = "playback_started";
+static const char SIG_PLAYBACK_FINISHED[] = "playback_finished";
+static const char SIG_AUDIO_LEVEL_CHANGE[] = "audio_level_change";
+static const char SIG_CHANNELS_CHANGE[] = "channels_change";
+static const char SIG_TITLE_CHANGE[] = "title_change";
+static const char SIG_PROGRESS_CHANGE[] = "progress_change";
+static const char SIG_REF_CHANGE[] = "ref_change";
+static const char SIG_BUTTON_NUM_CHANGE[] = "button_num_change";
+static const char SIG_BUTTON_CHANGE[] = "button_change";
+static const char SIG_OPEN_DONE[] = "open_done";
+static const char SIG_POSITION_SAVE_SUCCEED[] = "position_save,succeed";
+static const char SIG_POSITION_SAVE_FAILED[] = "position_save,failed";
+static const char SIG_POSITION_LOAD_SUCCEED[] = "position_load,succeed";
+static const char SIG_POSITION_LOAD_FAILED[] = "position_load,failed";
+
+static const Evas_Smart_Cb_Description _smart_callbacks[] = {
+ {SIG_FRAME_DECODE, ""},
+ {SIG_POSITION_UPDATE, ""},
+ {SIG_LENGTH_CHANGE, ""},
+ {SIG_FRAME_RESIZE, ""},
+ {SIG_DECODE_STOP, ""},
+ {SIG_PLAYBACK_STARTED, ""},
+ {SIG_PLAYBACK_FINISHED, ""},
+ {SIG_AUDIO_LEVEL_CHANGE, ""},
+ {SIG_CHANNELS_CHANGE, ""},
+ {SIG_TITLE_CHANGE, ""},
+ {SIG_PROGRESS_CHANGE, ""},
+ {SIG_REF_CHANGE, ""},
+ {SIG_BUTTON_NUM_CHANGE, ""},
+ {SIG_BUTTON_CHANGE, ""},
+ {SIG_OPEN_DONE, ""},
+ {NULL, NULL}
+};
+
+/* Zero the image object's pixel buffer so stale frames don't show through:
+ * ARGB images hold w*h*4 bytes; planar YUV images hold h*2 row pointers at
+ * this level (only those pointers are cleared). */
+static void
+_emotion_image_data_zero(Evas_Object *img)
+{
+   void *data;
+
+   data = evas_object_image_data_get(img, 1);
+   if (data)
+     {
+        int w, h, sz = 0;
+        Evas_Colorspace cs;
+
+        evas_object_image_size_get(img, &w, &h);
+        cs = evas_object_image_colorspace_get(img);
+        if (cs == EVAS_COLORSPACE_ARGB8888)
+          sz = w * h * 4;
+        if ((cs == EVAS_COLORSPACE_YCBCR422P601_PL) ||
+            (cs == EVAS_COLORSPACE_YCBCR422P709_PL))
+          sz = h * 2 * sizeof(unsigned char *);
+        if (sz != 0) memset(data, 0, sz);
+     }
+   /* hand the (possibly cleared) buffer back to evas to mark it dirty */
+   evas_object_image_data_set(img, data);
+}
+
+/* Ask the backend to close its private state; the module itself stays
+ * loaded (see FIXME below). NULL-safe on both arguments. */
+static void
+_emotion_module_close(Emotion_Video_Module *mod, void *video)
+{
+   if (!mod) return;
+   if (mod->plugin->close && video)
+     mod->plugin->close(mod, video);
+   /* FIXME: we can't go dlclosing here as a thread still may be running from
+    * the module - this in theory will leak- but it shouldn't be too bad and
+    * mean that once a module is dlopened() it can't be closed - its refcount
+    * will just keep going up
+    */
+}
+
+/* Final teardown of a smart object's private data: close the backend,
+ * delete child objects, free owned strings/jobs/animators.
+ * NOTE(review): the trailing ecore_shutdown() presumably balances an
+ * ecore_init() in the smart add path (not visible here) — confirm. */
+static void
+_smart_data_free(Smart_Data *sd)
+{
+   if (sd->video_data) sd->module->file_close(sd->video_data);
+   _emotion_module_close(sd->module, sd->video_data);
+   evas_object_del(sd->obj);
+   evas_object_del(sd->crop.clipper);
+   evas_object_del(sd->bg);
+   eina_stringshare_del(sd->file);
+   free(sd->module_name);
+   if (sd->job) ecore_job_del(sd->job);
+   if (sd->anim) ecore_animator_del(sd->anim);
+   free(sd->progress.info);
+   free(sd->ref.file);
+   free(sd);
+
+   ecore_shutdown();
+}
+
+/* Register a backend's open/close pair under @p name in the _backends hash.
+ * Returns EINA_FALSE on allocation or hash failure.
+ * BUG FIX: the plugin struct leaked when eina_hash_add() failed (duplicate
+ * name or OOM) — it is now freed on that path. */
+EAPI Eina_Bool
+_emotion_module_register(const char *name, Emotion_Module_Open mod_open, Emotion_Module_Close mod_close)
+{
+   Eina_Emotion_Plugins *plugin;
+
+   plugin = malloc(sizeof (Eina_Emotion_Plugins));
+   if (!plugin) return EINA_FALSE;
+
+   plugin->open = mod_open;
+   plugin->close = mod_close;
+
+   if (!eina_hash_add(_backends, name, plugin))
+     {
+        free(plugin);
+        return EINA_FALSE;
+     }
+   return EINA_TRUE;
+}
+
+/* Remove the backend registered under @p name.
+ * NOTE(review): the stored Eina_Emotion_Plugins value is not freed here —
+ * this is only leak-free if _backends was created with a free callback;
+ * its creation is not visible in this file. */
+EAPI Eina_Bool
+_emotion_module_unregister(const char *name)
+{
+   return eina_hash_del(_backends, name, NULL);
+}
+
+/* Resolve and open a backend by name. Honors $EMOTION_ENGINE when no name
+ * is given, then falls back through _backend_priority until one opens.
+ * Returns the name of the backend actually opened, or NULL on failure.
+ * BUG FIX: one failure path returned EINA_FALSE from a 'const char *'
+ * function — changed to NULL (also for the macro's early return). */
+static const char *
+_emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **mod, void **video)
+{
+   Eina_Emotion_Plugins *plugin;
+   Smart_Data *sd;
+   unsigned int i = 0;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+   if (!_backends)
+     {
+        ERR("No backend loaded");
+        return NULL;
+     }
+
+   if (!name && getenv("EMOTION_ENGINE"))
+     name = getenv("EMOTION_ENGINE");
+
+   /* FIXME: Always look for a working backend. */
+ retry:
+   if (!name || i > 0)
+     name = _backend_priority[i++];
+
+   plugin = eina_hash_find(_backends, name);
+   if (!plugin)
+     {
+        if (i != 0 && i < (sizeof (_backend_priority) / sizeof (char*)))
+          goto retry;
+
+        ERR("No backend loaded");
+        return NULL;
+     }
+
+   if (plugin->open(obj, (const Emotion_Video_Module **) mod, video, &(sd->module_options)))
+     {
+        if (*mod)
+          {
+             (*mod)->plugin = plugin;
+             return name;
+          }
+     }
+
+   /* open failed: walk down the priority list before giving up */
+   if (i != 0 && i < (sizeof (_backend_priority) / sizeof (char*)))
+     goto retry;
+
+   ERR("Unable to load module: %s", name);
+
+   return NULL;
+}
+
+/* Position/scale the video image and its crop clipper for an object at
+ * (x,y,w,h) showing a vid_w x vid_h stream. With a zero-sized video the
+ * image is collapsed; otherwise the image is scaled so the cropped region
+ * (video minus crop.l/r/t/b) fills the object, and the clipper trims it. */
+static void
+_clipper_position_size_update(Evas_Object *obj, int x, int y, int w, int h, int vid_w, int vid_h)
+{
+   Smart_Data *sd;
+   double scale_w, scale_h;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+   if (vid_w == 0 || vid_h == 0)
+     {
+        evas_object_image_fill_set(sd->obj, 0, 0, 0, 0);
+        evas_object_move(sd->obj, x, y);
+        evas_object_resize(sd->obj, 0, 0);
+        evas_object_move(sd->crop.clipper, x, y);
+        evas_object_resize(sd->crop.clipper, 0, 0);
+     }
+   else
+     {
+        evas_object_move(sd->crop.clipper, x, y);
+        scale_w = (double)w / (double)(vid_w - sd->crop.l - sd->crop.r);
+        scale_h = (double)h / (double)(vid_h - sd->crop.t - sd->crop.b);
+
+        /* negative fill means "scale with the object"; otherwise fill is a
+         * relative tiling factor set via emotion's fill API */
+        if (sd->fill.w < 0 && sd->fill.h < 0)
+          evas_object_image_fill_set(sd->obj, 0, 0, vid_w * scale_w, vid_h * scale_h);
+        else
+          evas_object_image_fill_set(sd->obj, 0, 0, sd->fill.w * w, sd->fill.h * h);
+        evas_object_resize(sd->obj, vid_w * scale_w, vid_h * scale_h);
+        evas_object_move(sd->obj, x - sd->crop.l * scale_w, y - sd->crop.t * scale_h);
+        evas_object_resize(sd->crop.clipper, w, h);
+     }
+}
+
+/*******************************/
+/* Externally accessible calls */
+/*******************************/
+
+
+
+/* Create a new emotion smart object on @p evas (lazily registering the
+ * smart class on first use). */
+EAPI Evas_Object *
+emotion_object_add(Evas *evas)
+{
+   _smart_init();
+   return evas_object_smart_add(evas, smart);
+}
+
+/* Return the internal image object carrying the video pixels (owned by the
+ * smart object), or NULL if @p obj is not an emotion object. */
+EAPI Evas_Object *
+emotion_object_image_get(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   sd = evas_object_smart_data_get(obj);
+   if (!sd) return NULL;
+   return sd->obj;
+}
+
+/* Set a backend option before emotion_object_init(). Only "player" is
+ * recognized here; other options are silently ignored. */
+EAPI void
+emotion_object_module_option_set(Evas_Object *obj, const char *opt, const char *val)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if ((!opt) || (!val)) return;
+
+   if (!strcmp(opt, "player"))
+     eina_stringshare_replace(&sd->module_options.player, val);
+}
+
+/* (Re)initialize @p obj with the backend @p module_filename (NULL picks the
+ * default/priority backend). Resets all per-file state, closes any previous
+ * backend, and — if a file was already set — re-opens it on the new backend.
+ * Returns EINA_FALSE when no backend could be opened. */
+EAPI Eina_Bool
+emotion_object_init(Evas_Object *obj, const char *module_filename)
+{
+   Smart_Data *sd;
+   const char *file;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+
+   /* already on the requested backend: nothing to do */
+   if ((sd->module_name) && module_filename && (!strcmp(sd->module_name, module_filename)))
+     return EINA_TRUE;
+   free(sd->module_name);
+   sd->module_name = NULL;
+
+   /* stash the current file so it can be re-set after the backend swap */
+   file = sd->file;
+   sd->file = NULL;
+
+   free(sd->title);
+   sd->title = NULL;
+   free(sd->progress.info);
+   sd->progress.info = NULL;
+   sd->progress.stat = 0.0;
+   free(sd->ref.file);
+   sd->ref.file = NULL;
+   sd->ref.num = 0;
+   sd->spu.button_num = 0;
+   sd->spu.button = -1;
+   sd->ratio = 1.0;
+   sd->pos = 0;
+   sd->remember_jump = 0;
+   sd->seek_pos = 0;
+   sd->len = 0;
+   sd->remember_play = 0;
+
+   if (sd->anim) ecore_animator_del(sd->anim);
+   sd->anim = NULL;
+
+   _emotion_module_close(sd->module, sd->video_data);
+   sd->module = NULL;
+   sd->video_data = NULL;
+
+   /* returns the name of the backend actually opened (may differ from the
+    * requested one due to the priority fallback) */
+   module_filename = _emotion_module_open(module_filename, obj, &sd->module, &sd->video_data);
+   if (!module_filename)
+     return EINA_FALSE;
+
+   sd->module_name = strdup(module_filename);
+
+   if (file)
+     {
+        emotion_object_file_set(obj, file);
+        eina_stringshare_del(file);
+     }
+
+   return EINA_TRUE;
+}
+
+/* Set (or clear, with NULL/"") the file to play. Setting the same file
+ * again is a no-op returning EINA_FALSE. The previous file is closed and
+ * the image buffer zeroed before opening the new one; playback resumes
+ * automatically if the object was playing. */
+EAPI Eina_Bool
+emotion_object_file_set(Evas_Object *obj, const char *file)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EINA_FALSE);
+
+   DBG("file=%s", file);
+   if (!sd->module) return EINA_FALSE;
+
+   sd->video.w = 0;
+   sd->video.h = 0;
+   if ((file) && (sd->file) &&
+       ((file == sd->file) || (!strcmp(file, sd->file)))) return EINA_FALSE;
+   if ((file) && (file[0] != 0))
+     {
+        eina_stringshare_replace(&sd->file, file);
+        sd->module->file_close(sd->video_data);
+        evas_object_image_data_set(sd->obj, NULL);
+        evas_object_image_size_set(sd->obj, 1, 1);
+        _emotion_image_data_zero(sd->obj);
+        sd->open = 0;
+        if (!sd->module->file_open(sd->file, obj, sd->video_data))
+          return EINA_FALSE;
+        sd->pos = 0.0;
+        if (sd->play) sd->module->play(sd->video_data, 0.0);
+     }
+   else
+     {
+        if (sd->video_data && sd->module)
+          {
+             sd->module->file_close(sd->video_data);
+             evas_object_image_data_set(sd->obj, NULL);
+             evas_object_image_size_set(sd->obj, 1, 1);
+             _emotion_image_data_zero(sd->obj);
+          }
+        eina_stringshare_replace(&sd->file, NULL);
+     }
+
+   if (sd->anim) ecore_animator_del(sd->anim);
+   sd->anim = NULL;
+
+#ifdef HAVE_EIO
+   /* Only cancel the load_xattr or we will lose the ref to the time_seek
+    * stringshare */
+   if (sd->load_xattr) eio_file_cancel(sd->load_xattr);
+   sd->load_xattr = NULL;
+   if (sd->save_xattr) eio_file_cancel(sd->save_xattr);
+   sd->save_xattr = NULL;
+#endif
+
+   return EINA_TRUE;
+}
+
+/* Return the currently set file (stringshare owned by the object), or NULL. */
+EAPI const char *
+emotion_object_file_get(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+   return sd->file;
+}
+
+/* Apply the crop values computed by the aspect logic: create or drop the
+ * rectangle clipper depending on whether any cropping is active, preserving
+ * any external clip the caller had set, then update geometry. */
+static void
+_emotion_aspect_borders_apply(Evas_Object *obj, Smart_Data *sd, int w, int h, int iw, int ih)
+{
+   int x, y;
+
+   evas_object_geometry_get(obj, &x, &y, NULL, NULL);
+
+   /* applying calculated borders */
+   if (sd->crop.l == 0 && sd->crop.r == 0 &&
+       sd->crop.t == 0 && sd->crop.b == 0)
+     {
+        Evas_Object *old_clipper;
+        if (sd->crop.clipper)
+          {
+             /* re-chain the image to whatever clipped the clipper before */
+             old_clipper = evas_object_clip_get(sd->crop.clipper);
+             evas_object_clip_unset(sd->obj);
+             evas_object_clip_set(sd->obj, old_clipper);
+             evas_object_del(sd->crop.clipper);
+             sd->crop.clipper = NULL;
+          }
+     }
+   else
+     {
+        if (!sd->crop.clipper)
+          {
+             Evas_Object *old_clipper;
+             sd->crop.clipper = evas_object_rectangle_add(
+                evas_object_evas_get(obj));
+             evas_object_color_set(sd->crop.clipper, 255, 255, 255, 255);
+             evas_object_smart_member_add(sd->crop.clipper, obj);
+             old_clipper = evas_object_clip_get(sd->obj);
+             evas_object_clip_set(sd->obj, sd->crop.clipper);
+             evas_object_clip_set(sd->crop.clipper, old_clipper);
+             if (evas_object_visible_get(sd->obj))
+               evas_object_show(sd->crop.clipper);
+          }
+     }
+   _clipper_position_size_update(obj, x, y, w, h, iw, ih);
+}
+
+/* Recompute crop borders from the current aspect policy and the object size
+ * (w,h) vs. the video size, then apply them via the clipper helper. */
+static void
+_emotion_object_aspect_border_apply(Evas_Object *obj, Smart_Data *sd, int w, int h)
+{
+   int iw, ih;
+   double ir;
+   double r;
+
+   int aspect_opt = 0;
+
+   iw = sd->video.w;
+   ih = sd->video.h;
+
+   ir = (double)iw / ih;
+   r = (double)w / h;
+
+   /* First check if we should fit the width or height of the video inside the
+    * width/height of the object. This check takes into account the original
+    * aspect ratio and the object aspect ratio, if we are keeping both sizes or
+    * cropping the exceding area.
+    */
+   if (sd->aspect == EMOTION_ASPECT_KEEP_NONE)
+     {
+        sd->crop.l = 0;
+        sd->crop.r = 0;
+        sd->crop.t = 0;
+        sd->crop.b = 0;
+        aspect_opt = 0; // just ignore keep_aspect
+     }
+   else if (sd->aspect == EMOTION_ASPECT_KEEP_WIDTH)
+     {
+        aspect_opt = 1;
+     }
+   else if (sd->aspect == EMOTION_ASPECT_KEEP_HEIGHT)
+     {
+        aspect_opt = 2;
+     }
+   else if (sd->aspect == EMOTION_ASPECT_KEEP_BOTH)
+     {
+        if (ir > r)
+          aspect_opt = 1;
+        else
+          aspect_opt = 2;
+     }
+   else if (sd->aspect == EMOTION_ASPECT_CROP)
+     {
+        /* inverse of KEEP_BOTH: fill the object, cropping the overflow */
+        if (ir > r)
+          aspect_opt = 2;
+        else
+          aspect_opt = 1;
+     }
+   else if (sd->aspect == EMOTION_ASPECT_CUSTOM)
+     {
+        // nothing to do, just respect the border settings
+        aspect_opt = 0;
+     }
+
+   /* updating borders based on keep_aspect settings */
+   if (aspect_opt == 1) // keep width
+     {
+        int th, dh;
+        double scale;
+
+        sd->crop.l = 0;
+        sd->crop.r = 0;
+        scale = (double)iw / w;
+        th = h * scale;
+        dh = ih - th;
+        sd->crop.t = sd->crop.b = dh / 2;
+     }
+   else if (aspect_opt == 2) // keep height
+     {
+        int tw, dw;
+        double scale;
+
+        sd->crop.t = 0;
+        sd->crop.b = 0;
+        scale = (double)ih / h;
+        tw = w * scale;
+        dw = iw - tw;
+        sd->crop.l = sd->crop.r = dw / 2;
+     }
+
+   _emotion_aspect_borders_apply(obj, sd, w, h, iw, ih);
+}
+
+/* Set custom borders (switches aspect policy to CUSTOM). Borders are stored
+ * negated: a positive user border becomes a negative crop, i.e. padding. */
+EAPI void
+emotion_object_border_set(Evas_Object *obj, int l, int r, int t, int b)
+{
+   Smart_Data *sd;
+   int w, h;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+   sd->aspect = EMOTION_ASPECT_CUSTOM;
+   sd->crop.l = -l;
+   sd->crop.r = -r;
+   sd->crop.t = -t;
+   sd->crop.b = -b;
+   evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+   _emotion_object_aspect_border_apply(obj, sd, w, h);
+}
+
+/* Retrieve the borders set via emotion_object_border_set(). Borders are
+ * stored negated in sd->crop, so they are negated back on the way out.
+ * NULL output pointers are skipped, matching emotion_object_size_get(). */
+EAPI void
+emotion_object_border_get(const Evas_Object *obj, int *l, int *r, int *t, int *b)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   /* Guard each output pointer: callers may pass NULL for values they
+    * don't care about (the original dereferenced unconditionally). */
+   if (l) *l = -sd->crop.l;
+   if (r) *r = -sd->crop.r;
+   if (t) *t = -sd->crop.t;
+   if (b) *b = -sd->crop.b;
+}
+
+/* Set the color of the background rectangle behind the video. The rectangle
+ * is only shown when it is actually visible (alpha > 0) and the smart object
+ * itself is visible; _smart_show()/_smart_hide() apply the same rule. */
+EAPI void
+emotion_object_bg_color_set(Evas_Object *obj, int r, int g, int b, int a)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+   evas_object_color_set(sd->bg, r, g, b, a);
+
+   /* If the object is hidden, just record the color; visibility of sd->bg
+    * will be resolved by _smart_show(). */
+   if (!evas_object_visible_get(obj))
+     return;
+
+   if (a > 0)
+     evas_object_show(sd->bg);
+   else
+     evas_object_hide(sd->bg);
+}
+
+EAPI void
+emotion_object_bg_color_get(const Evas_Object *obj, int *r, int *g, int *b, int *a)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_color_get(sd->bg, r, g, b, a);
+}
+
+EAPI void
+emotion_object_keep_aspect_set(Evas_Object *obj, Emotion_Aspect a)
+{
+ Smart_Data *sd;
+ int w, h;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+ if (a == sd->aspect)
+ return;
+
+ sd->aspect = a;
+ evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+ _emotion_object_aspect_border_apply(obj, sd, w, h);
+}
+
+EAPI Emotion_Aspect
+emotion_object_keep_aspect_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_ASPECT_KEEP_NONE);
+
+ return sd->aspect;
+}
+
+/* Start or stop playback. If the media file is not fully opened yet, only
+ * remember the request; _emotion_open_done() replays it once the engine is
+ * ready. Also wakes the object up from any suspend state before playing. */
+EAPI void
+emotion_object_play_set(Evas_Object *obj, Eina_Bool play)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("play=" FMT_UCHAR ", was=" FMT_UCHAR, play, sd->play);
+   if (play == sd->play) return;
+   if (!sd->module) return;
+   if (!sd->video_data) return;
+   if (!sd->open)
+     {
+        /* engine still opening the file: defer to _emotion_open_done() */
+        sd->remember_play = play;
+        return;
+     }
+   sd->play = play;
+   sd->remember_play = play;
+   /* restore the rendering pipeline if we were suspended */
+   if (sd->state != EMOTION_WAKEUP) emotion_object_suspend_set(obj, EMOTION_WAKEUP);
+   if (sd->play) sd->module->play(sd->video_data, sd->pos);
+   else sd->module->stop(sd->video_data);
+}
+
+/* Report whether the object is currently set to play. */
+EAPI Eina_Bool
+emotion_object_play_get(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+   /* Without engine-side data nothing can be playing. */
+   return sd->video_data ? sd->play : EINA_FALSE;
+}
+
+/* Seek to @sec seconds. If the file is not opened yet the target is stored
+ * in remember_jump and applied by _emotion_open_done(). The actual engine
+ * seek is deferred to an ecore job (_pos_set_job) so that several calls in
+ * the same mainloop iteration collapse into a single seek. */
+EAPI void
+emotion_object_position_set(Evas_Object *obj, double sec)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   DBG("sec=%f", sec);
+   if (!sd->module) return;
+   if (!sd->video_data) return;
+   if (!sd->open)
+     {
+        sd->remember_jump = sec;
+        return ;
+     }
+   sd->remember_jump = 0;
+   sd->seek_pos = sec;
+   sd->seek = 1;
+   sd->pos = sd->seek_pos;
+   /* replace any pending seek job with a fresh one */
+   if (sd->job) ecore_job_del(sd->job);
+   sd->job = ecore_job_add(_pos_set_job, obj);
+}
+
+/* Return the current playback position in seconds, refreshing the cached
+ * value (sd->pos) from the engine. 0.0 when no media is loaded. */
+EAPI double
+emotion_object_position_get(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+   if ((!sd->module) || (!sd->video_data) || (!sd->module->pos_get))
+     return 0.0;
+   sd->pos = sd->module->pos_get(sd->video_data);
+   return sd->pos;
+}
+
+EAPI double
+emotion_object_buffer_size_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 1.0;
+ if (!sd->video_data) return 1.0;
+ if (!sd->module->buffer_size_get) return 1.0;
+ return sd->module->buffer_size_get(sd->video_data);
+}
+
+EAPI Eina_Bool
+emotion_object_seekable_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->seekable(sd->video_data);
+}
+
+EAPI Eina_Bool
+emotion_object_video_handled_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->video_handled(sd->video_data);
+}
+
+EAPI Eina_Bool
+emotion_object_audio_handled_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->audio_handled(sd->video_data);
+}
+
+EAPI double
+emotion_object_play_length_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video_data) return 0.0;
+ sd->len = sd->module->len_get(sd->video_data);
+ return sd->len;
+}
+
+EAPI void
+emotion_object_size_get(const Evas_Object *obj, int *iw, int *ih)
+{
+ Smart_Data *sd;
+
+ if (iw) *iw = 0;
+ if (ih) *ih = 0;
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (iw) *iw = sd->video.w;
+ if (ih) *ih = sd->video.h;
+}
+
+EAPI void
+emotion_object_smooth_scale_set(Evas_Object *obj, Eina_Bool smooth)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_image_smooth_scale_set(sd->obj, smooth);
+}
+
+EAPI Eina_Bool
+emotion_object_smooth_scale_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return evas_object_image_smooth_scale_get(sd->obj);
+}
+
+EAPI double
+emotion_object_ratio_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video_data) return 0.0;
+ return sd->ratio;
+}
+
+/*
+ * Send a control event to the DVD.
+ */
+EAPI void
+emotion_object_event_simple_send(Evas_Object *obj, Emotion_Event ev)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->event_feed(sd->video_data, ev);
+}
+
+EAPI void
+emotion_object_audio_volume_set(Evas_Object *obj, double vol)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("vol=%f", vol);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->audio_channel_volume_set(sd->video_data, vol);
+}
+
+EAPI double
+emotion_object_audio_volume_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video_data) return 0.0;
+ return sd->module->audio_channel_volume_get(sd->video_data);
+}
+
+EAPI void
+emotion_object_audio_mute_set(Evas_Object *obj, Eina_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("mute=" FMT_UCHAR, mute);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->audio_channel_mute_set(sd->video_data, mute);
+}
+
+EAPI Eina_Bool
+emotion_object_audio_mute_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->audio_channel_mute_get(sd->video_data);
+}
+
+EAPI int
+emotion_object_audio_channel_count(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->audio_channel_count(sd->video_data);
+}
+
+EAPI const char *
+emotion_object_audio_channel_name_get(const Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video_data) return NULL;
+ return sd->module->audio_channel_name_get(sd->video_data, channel);
+}
+
+EAPI void
+emotion_object_audio_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("channel=%d", channel);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->audio_channel_set(sd->video_data, channel);
+}
+
+EAPI int
+emotion_object_audio_channel_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->audio_channel_get(sd->video_data);
+}
+
+EAPI void
+emotion_object_video_mute_set(Evas_Object *obj, Eina_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("mute=" FMT_UCHAR, mute);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->video_channel_mute_set(sd->video_data, mute);
+}
+
+EAPI Eina_Bool
+emotion_object_video_mute_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->video_channel_mute_get(sd->video_data);
+}
+
+EAPI void
+emotion_object_video_subtitle_file_set(Evas_Object *obj, const char *filepath)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("subtitle=%s", filepath);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->video_subtitle_file_set(sd->video_data, filepath);
+}
+
+/* Return the subtitle file path previously set on the engine, or NULL when
+ * no module/media is loaded. */
+EAPI const char *
+emotion_object_video_subtitle_file_get(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   /* This is a pointer-returning API: fail with NULL, not EINA_FALSE
+    * (matches the other *_name_get/*_file_get accessors in this file). */
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+   if (!sd->module) return NULL;
+   if (!sd->video_data) return NULL;
+   return sd->module->video_subtitle_file_get(sd->video_data);
+}
+
+/* Return the number of video channels in the loaded media, or 0 when no
+ * module/media is loaded. */
+EAPI int
+emotion_object_video_channel_count(const Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+   /* integer count API: fail with 0, not EINA_FALSE — consistent with
+    * emotion_object_audio_channel_count()/spu_channel_count() */
+   if (!sd->module) return 0;
+   if (!sd->video_data) return 0;
+   return sd->module->video_channel_count(sd->video_data);
+}
+
+EAPI const char *
+emotion_object_video_channel_name_get(const Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video_data) return NULL;
+ return sd->module->video_channel_name_get(sd->video_data, channel);
+}
+
+EAPI void
+emotion_object_video_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("channel=%d", channel);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->video_channel_set(sd->video_data, channel);
+}
+
+EAPI int
+emotion_object_video_channel_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->video_channel_get(sd->video_data);
+}
+
+EAPI void
+emotion_object_spu_mute_set(Evas_Object *obj, Eina_Bool mute)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("mute=" FMT_UCHAR, mute);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->spu_channel_mute_set(sd->video_data, mute);
+}
+
+EAPI Eina_Bool
+emotion_object_spu_mute_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->spu_channel_mute_get(sd->video_data);
+}
+
+EAPI int
+emotion_object_spu_channel_count(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->spu_channel_count(sd->video_data);
+}
+
+EAPI const char *
+emotion_object_spu_channel_name_get(const Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video_data) return NULL;
+ return sd->module->spu_channel_name_get(sd->video_data, channel);
+}
+
+EAPI void
+emotion_object_spu_channel_set(Evas_Object *obj, int channel)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("channel=%d", channel);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->spu_channel_set(sd->video_data, channel);
+}
+
+EAPI int
+emotion_object_spu_channel_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->spu_channel_get(sd->video_data);
+}
+
+EAPI int
+emotion_object_chapter_count(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->chapter_count(sd->video_data);
+}
+
+EAPI void
+emotion_object_chapter_set(Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("chapter=%d", chapter);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->chapter_set(sd->video_data, chapter);
+}
+
+EAPI int
+emotion_object_chapter_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return 0;
+ if (!sd->video_data) return 0;
+ return sd->module->chapter_get(sd->video_data);
+}
+
+EAPI const char *
+emotion_object_chapter_name_get(const Evas_Object *obj, int chapter)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ if (!sd->module) return NULL;
+ if (!sd->video_data) return NULL;
+ return sd->module->chapter_name_get(sd->video_data, chapter);
+}
+
+EAPI void
+emotion_object_play_speed_set(Evas_Object *obj, double speed)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("speed=%f", speed);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->speed_set(sd->video_data, speed);
+}
+
+EAPI double
+emotion_object_play_speed_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ if (!sd->module) return 0.0;
+ if (!sd->video_data) return 0.0;
+ return sd->module->speed_get(sd->video_data);
+}
+
+EAPI void
+emotion_object_eject(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ sd->module->eject(sd->video_data);
+}
+
+EAPI const char *
+emotion_object_title_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->title;
+}
+
+EAPI const char *
+emotion_object_progress_info_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->progress.info;
+}
+
+EAPI double
+emotion_object_progress_status_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
+ return sd->progress.stat;
+}
+
+EAPI const char *
+emotion_object_ref_file_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->ref.file;
+}
+
+EAPI int
+emotion_object_ref_num_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->ref.num;
+}
+
+EAPI int
+emotion_object_spu_button_count_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->spu.button_num;
+}
+
+EAPI int
+emotion_object_spu_button_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ return sd->spu.button;
+}
+
+/* Fetch a piece of track metadata (title, artist, ...) from the engine.
+ * Maps the public Emotion_Meta_Info enum onto the module-private META_*
+ * values; returns NULL for unknown keys or when no media is loaded. */
+EAPI const char *
+emotion_object_meta_info_get(const Evas_Object *obj, Emotion_Meta_Info meta)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+   if (!sd->module) return NULL;
+   if (!sd->video_data) return NULL;
+   switch (meta)
+     {
+      case EMOTION_META_INFO_TRACK_TITLE:
+         return sd->module->meta_get(sd->video_data, META_TRACK_TITLE);
+      case EMOTION_META_INFO_TRACK_ARTIST:
+         return sd->module->meta_get(sd->video_data, META_TRACK_ARTIST);
+      case EMOTION_META_INFO_TRACK_ALBUM:
+         return sd->module->meta_get(sd->video_data, META_TRACK_ALBUM);
+      case EMOTION_META_INFO_TRACK_YEAR:
+         return sd->module->meta_get(sd->video_data, META_TRACK_YEAR);
+      case EMOTION_META_INFO_TRACK_GENRE:
+         return sd->module->meta_get(sd->video_data, META_TRACK_GENRE);
+      case EMOTION_META_INFO_TRACK_COMMENT:
+         return sd->module->meta_get(sd->video_data, META_TRACK_COMMENT);
+      case EMOTION_META_INFO_TRACK_DISC_ID:
+         return sd->module->meta_get(sd->video_data, META_TRACK_DISCID);
+      default:
+         break;
+     }
+   return NULL;
+}
+
+EAPI void
+emotion_object_vis_set(Evas_Object *obj, Emotion_Vis visualization)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ DBG("visualization=%d", visualization);
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ if (!sd->module->vis_set) return;
+ sd->module->vis_set(sd->video_data, visualization);
+}
+
+EAPI Emotion_Vis
+emotion_object_vis_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_VIS_NONE);
+ if (!sd->module) return EMOTION_VIS_NONE;
+ if (!sd->video_data) return EMOTION_VIS_NONE;
+ if (!sd->module->vis_get) return EMOTION_VIS_NONE;
+ return sd->module->vis_get(sd->video_data);
+}
+
+EAPI Eina_Bool
+emotion_object_vis_supported(const Evas_Object *obj, Emotion_Vis visualization)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ if (!sd->module->vis_supported) return EINA_FALSE;
+ return sd->module->vis_supported(sd->video_data, visualization);
+}
+
+EAPI void
+emotion_object_priority_set(Evas_Object *obj, Eina_Bool priority)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return ;
+ if (!sd->video_data) return ;
+ if (!sd->module->priority_set) return ;
+ sd->module->priority_set(sd->video_data, priority);
+}
+
+EAPI Eina_Bool
+emotion_object_priority_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ if (!sd->module->priority_get) return EINA_FALSE;
+ return sd->module->priority_get(sd->video_data);
+}
+
+#ifdef HAVE_EIO
+static void
+_eio_load_xattr_cleanup(Smart_Data *sd, Eio_File *handler)
+{
+ if (handler == sd->load_xattr) sd->load_xattr = NULL;
+
+ EINA_REFCOUNT_UNREF(sd)
+ _smart_data_free(sd);
+}
+
+static void
+_eio_load_xattr_done(void *data, Eio_File *handler, double xattr_double)
+{
+ Smart_Data *sd = data;
+
+ emotion_object_position_set(evas_object_smart_parent_get(sd->obj), xattr_double);
+ evas_object_smart_callback_call(evas_object_smart_parent_get(sd->obj), SIG_POSITION_LOAD_SUCCEED, NULL);
+ _eio_load_xattr_cleanup(sd, handler);
+}
+
+static void
+_eio_load_xattr_error(void *data, Eio_File *handler, int err EINA_UNUSED)
+{
+ Smart_Data *sd = data;
+
+ evas_object_smart_callback_call(evas_object_smart_parent_get(sd->obj), SIG_POSITION_LOAD_FAILED, NULL);
+ _eio_load_xattr_cleanup(sd, handler);
+}
+#endif
+
+/* Restore the last playback position from the file's "user.e.time_seek"
+ * extended attribute. Only works for local files: a "file://" prefix is
+ * stripped, any other URI scheme is rejected. With EIO the lookup is
+ * asynchronous; the refcount taken here keeps sd alive until the done/error
+ * callback runs and emits SIG_POSITION_LOAD_{SUCCEED,FAILED}. */
+EAPI void
+emotion_object_last_position_load(Evas_Object *obj)
+{
+   Smart_Data *sd;
+   const char *tmp;
+#ifndef HAVE_EIO
+   double xattr;
+#endif
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (!sd->file) return ;
+
+   if (!strncmp(sd->file, "file://", 7))
+     tmp = sd->file + 7;
+   else if (!strstr(sd->file, "://"))
+     tmp = sd->file;
+   else
+     return ;
+
+#ifdef HAVE_EIO
+   /* a load is already in flight: don't start another */
+   if (sd->load_xattr) return ;
+
+   EINA_REFCOUNT_REF(sd);
+
+   sd->load_xattr = eio_file_xattr_double_get(tmp,
+                                              "user.e.time_seek",
+                                              _eio_load_xattr_done,
+                                              _eio_load_xattr_error,
+                                              sd);
+#else
+   /* synchronous fallback without EIO */
+   if (eina_xattr_double_get(tmp, "user.e.time_seek", &xattr))
+     {
+        emotion_object_position_set(obj, xattr);
+        evas_object_smart_callback_call(obj, SIG_POSITION_LOAD_SUCCEED, NULL);
+     }
+   else
+     {
+        evas_object_smart_callback_call(obj, SIG_POSITION_LOAD_FAILED, NULL);
+     }
+#endif
+}
+
+#ifdef HAVE_EIO
+static void
+_eio_save_xattr_cleanup(Smart_Data *sd, Eio_File *handler)
+{
+ if (handler == sd->save_xattr) sd->save_xattr = NULL;
+
+ EINA_REFCOUNT_UNREF(sd)
+ _smart_data_free(sd);
+}
+
+static void
+_eio_save_xattr_done(void *data, Eio_File *handler)
+{
+ Smart_Data *sd = data;
+
+ evas_object_smart_callback_call(sd->obj, SIG_POSITION_SAVE_SUCCEED, NULL);
+ _eio_save_xattr_cleanup(sd, handler);
+}
+
+static void
+_eio_save_xattr_error(void *data, Eio_File *handler, int err EINA_UNUSED)
+{
+ Smart_Data *sd = data;
+
+ evas_object_smart_callback_call(sd->obj, SIG_POSITION_SAVE_FAILED, NULL);
+ _eio_save_xattr_cleanup(sd, handler);
+}
+#endif
+
+/* Persist the current playback position into the file's "user.e.time_seek"
+ * extended attribute (counterpart of emotion_object_last_position_load()).
+ * Local files only; with EIO the write is asynchronous and the refcount
+ * keeps sd alive until SIG_POSITION_SAVE_{SUCCEED,FAILED} is emitted. */
+EAPI void
+emotion_object_last_position_save(Evas_Object *obj)
+{
+   Smart_Data *sd;
+   const char *tmp;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (!sd->file) return ;
+
+   if (!strncmp(sd->file, "file://", 7))
+     tmp = sd->file + 7;
+   else if (!strstr(sd->file, "://"))
+     tmp = sd->file;
+   else
+     return ;
+
+#ifdef HAVE_EIO
+   /* a save is already in flight: don't start another */
+   if (sd->save_xattr) return ;
+
+   EINA_REFCOUNT_REF(sd);
+
+   sd->save_xattr = eio_file_xattr_double_set(tmp,
+                                              "user.e.time_seek",
+                                              emotion_object_position_get(obj),
+                                              0,
+                                              _eio_save_xattr_done,
+                                              _eio_save_xattr_error,
+                                              sd);
+#else
+   if (eina_xattr_double_set(tmp, "user.e.time_seek", emotion_object_position_get(obj), 0))
+     evas_object_smart_callback_call(obj, SIG_POSITION_SAVE_SUCCEED, NULL);
+   else
+     evas_object_smart_callback_call(obj, SIG_POSITION_SAVE_FAILED, NULL);
+#endif
+}
+
+/* Record the suspend state of the object. NOTE: every case in the switch
+ * currently falls through to the default — the per-state pipeline
+ * teardown/restore described in the comments is not implemented yet; only
+ * sd->state is updated (emotion_object_play_set() checks it to wake up). */
+EAPI void
+emotion_object_suspend_set(Evas_Object *obj, Emotion_Suspend state)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   switch (state)
+     {
+      case EMOTION_WAKEUP:
+         /* Restore the rendering pipeline, offset and everything back to play again (this will be called automatically by play_set) */
+      case EMOTION_SLEEP:
+         /* This destroy some part of the rendering pipeline */
+      case EMOTION_DEEP_SLEEP:
+         /* This destroy all the rendering pipeline and just keep the last rendered image (fullscreen) */
+      case EMOTION_HIBERNATE:
+         /* This destroy all the rendering pipeline and keep 1/4 of the last rendered image */
+      default:
+         break;
+     }
+
+   sd->state = state;
+}
+
+EAPI Emotion_Suspend
+emotion_object_suspend_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_WAKEUP);
+ return sd->state;
+}
+
+/*****************************/
+/* Utility calls for modules */
+/*****************************/
+
+EAPI void *
+_emotion_video_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->video_data;
+}
+
+static Eina_Bool
+_emotion_frame_anim(void *data)
+{
+ Evas_Object *obj = data;
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EINA_FALSE);
+
+ evas_object_image_pixels_dirty_set(sd->obj, 1);
+ evas_object_smart_callback_call(obj, SIG_FRAME_DECODE, NULL);
+ sd->anim = NULL;
+
+ return EINA_FALSE;
+}
+
+EAPI void
+_emotion_frame_new(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+ if (!sd->anim) sd->anim = ecore_animator_add(_emotion_frame_anim, obj);
+}
+
+EAPI void
+_emotion_video_pos_update(Evas_Object *obj, double pos, double len)
+{
+ Smart_Data *sd;
+ int npos = 0, nlen = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (pos != sd->pos) npos = 1;
+ if (len != sd->len) nlen = 1;
+ sd->pos = pos;
+ sd->len = len;
+ if (npos) evas_object_smart_callback_call(obj, SIG_POSITION_UPDATE, NULL);
+ if (nlen) evas_object_smart_callback_call(obj, SIG_LENGTH_CHANGE, NULL);
+}
+
+/* Module-facing helper: called by engines when the decoded frame size or
+ * aspect ratio changed. Updates the cached size/ratio, emits
+ * SIG_FRAME_RESIZE and reapplies the aspect borders when anything changed. */
+EAPI void
+_emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio)
+{
+   Smart_Data *sd;
+   double tmp;
+   int changed = 0;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if ((w != sd->video.w) || (h != sd->video.h))
+     {
+        sd->video.w = w;
+        sd->video.h = h;
+        /* old pixel data no longer matches the new size: blank it */
+        _emotion_image_data_zero(sd->obj);
+        changed = 1;
+     }
+   if (h > 0) tmp = (double)w / (double)h;
+   else tmp = 1.0;
+   /* an explicit engine-provided ratio overrides the computed w/h ratio */
+   if (ratio != tmp) tmp = ratio;
+   if (tmp != sd->ratio)
+     {
+        sd->ratio = tmp;
+        changed = 1;
+     }
+   if (changed)
+     {
+        evas_object_size_hint_request_set(obj, w, h);
+        evas_object_smart_callback_call(obj, SIG_FRAME_RESIZE, NULL);
+        /* w/h are reused here to hold the object geometry, not the frame size */
+        evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+        _emotion_object_aspect_border_apply(obj, sd, w, h);
+     }
+}
+
+EAPI void
+_emotion_image_reset(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ _emotion_image_data_zero(sd->obj);
+}
+
+/* Module-facing helper: engine reports that decoding stopped. Emits
+ * SIG_DECODE_STOP only on an actual play -> stop transition. */
+EAPI void
+_emotion_decode_stop(Evas_Object *obj)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (!sd->play) return;
+   sd->play = 0;
+   evas_object_smart_callback_call(obj, SIG_DECODE_STOP, NULL);
+}
+
+EAPI void
+_emotion_open_done(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->open = 1;
+
+ if (sd->remember_jump)
+ emotion_object_position_set(obj, sd->remember_jump);
+ if (sd->remember_play != sd->play)
+ emotion_object_play_set(obj, sd->remember_play);
+ evas_object_smart_callback_call(obj, SIG_OPEN_DONE, NULL);
+}
+
+EAPI void
+_emotion_playback_started(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_PLAYBACK_STARTED, NULL);
+}
+
+EAPI void
+_emotion_playback_finished(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_PLAYBACK_FINISHED, NULL);
+}
+
+EAPI void
+_emotion_audio_level_change(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_AUDIO_LEVEL_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_channels_change(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_smart_callback_call(obj, SIG_CHANNELS_CHANGE, NULL);
+}
+
+/* Module-facing helper: engine reports a new stream title. Replaces the
+ * cached copy and emits SIG_TITLE_CHANGE. */
+EAPI void
+_emotion_title_set(Evas_Object *obj, char *title)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   free(sd->title);
+   /* strdup(NULL) is undefined behavior; store NULL when the engine has
+    * no title to report. */
+   sd->title = title ? strdup(title) : NULL;
+   evas_object_smart_callback_call(obj, SIG_TITLE_CHANGE, NULL);
+}
+
+/* Module-facing helper: engine reports buffering/download progress. Stores
+ * the info string and status value, then emits SIG_PROGRESS_CHANGE. */
+EAPI void
+_emotion_progress_set(Evas_Object *obj, char *info, double st)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   free(sd->progress.info);
+   /* strdup(NULL) is undefined behavior; tolerate a missing info string */
+   sd->progress.info = info ? strdup(info) : NULL;
+   sd->progress.stat = st;
+   evas_object_smart_callback_call(obj, SIG_PROGRESS_CHANGE, NULL);
+}
+
+/* Module-facing helper: engine reports a referenced file (e.g. a playlist
+ * entry). Stores a copy of the path and its number, emits SIG_REF_CHANGE. */
+EAPI void
+_emotion_file_ref_set(Evas_Object *obj, const char *file, int num)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   free(sd->ref.file);
+   /* strdup(NULL) is undefined behavior; tolerate a missing file name */
+   sd->ref.file = file ? strdup(file) : NULL;
+   sd->ref.num = num;
+   evas_object_smart_callback_call(obj, SIG_REF_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_spu_button_num_set(Evas_Object *obj, int num)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button_num = num;
+ evas_object_smart_callback_call(obj, SIG_BUTTON_NUM_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_spu_button_set(Evas_Object *obj, int button)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button = button;
+ evas_object_smart_callback_call(obj, SIG_BUTTON_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_seek_done(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (sd->seeking)
+ {
+ sd->seeking = 0;
+ if (sd->seek) emotion_object_position_set(obj, sd->seek_pos);
+ }
+}
+
+/* Module-facing helper: engine requests a new fill ratio for the image.
+ * w/h <= 0 means "derive the fill from the object geometry and crop", any
+ * positive value is a direct multiplier of the object size.
+ * NOTE(review): the derived path divides by (video size - crops) — assumes
+ * the crops never sum up to the full video dimension; confirm engines. */
+EAPI void
+_emotion_frame_refill(Evas_Object *obj, double w, double h)
+{
+   Smart_Data *sd;
+
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   if (sd->fill.w != w || sd->fill.h != h)
+     {
+        Evas_Coord ow, oh;
+
+        evas_object_geometry_get(obj, NULL, NULL, &ow, &oh);
+        if (w <= 0 || h <= 0)
+          {
+             double scale_w, scale_h;
+
+             /* -1 marks "automatic fill" (same sentinel as _smart_add) */
+             sd->fill.w = -1;
+             sd->fill.h = -1;
+
+             scale_w = (double) ow / (double)(sd->video.w - sd->crop.l - sd->crop.r);
+             scale_h = (double) oh / (double)(sd->video.h - sd->crop.t - sd->crop.b);
+
+             evas_object_image_fill_set(sd->obj, 0, 0, scale_w * sd->video.w, scale_h * sd->video.h);
+          }
+        else
+          {
+             sd->fill.w = w;
+             sd->fill.h = h;
+
+             evas_object_image_fill_set(sd->obj, 0, 0, w * ow, h * oh);
+          }
+     }
+}
+
+/****************************/
+/* Internal object routines */
+/****************************/
+
+/* Mouse-move callback on the image object (sd->obj): translate canvas
+ * coordinates into video pixel coordinates and forward them to the engine
+ * (used e.g. for DVD menu navigation). */
+static void
+_mouse_move(void *data, Evas *ev EINA_UNUSED, Evas_Object *obj, void *event_info)
+{
+   Evas_Event_Mouse_Move *e;
+   Smart_Data *sd;
+   int x, y, iw, ih;
+   Evas_Coord ox, oy, ow, oh;
+
+   e = event_info;
+   sd = data;
+   if (!sd->module) return;
+   if (!sd->video_data) return;
+   evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+   evas_object_image_size_get(obj, &iw, &ih);
+   /* no valid image yet: nothing to map coordinates onto */
+   if ((iw < 1) || (ih < 1)) return;
+   /* scale from object space to image (video) space */
+   x = (((int)e->cur.canvas.x - ox) * iw) / ow;
+   y = (((int)e->cur.canvas.y - oy) * ih) / oh;
+   sd->module->event_mouse_move_feed(sd->video_data, x, y);
+}
+
+static void
+_mouse_down(void *data, Evas *ev EINA_UNUSED, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Down *e;
+ Smart_Data *sd;
+ int x, y, iw, ih;
+ Evas_Coord ox, oy, ow, oh;
+
+ e = event_info;
+ sd = data;
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+ evas_object_image_size_get(obj, &iw, &ih);
+ if ((iw < 1) || (ih < 1)) return;
+ x = (((int)e->canvas.x - ox) * iw) / ow;
+ y = (((int)e->canvas.y - oy) * ih) / oh;
+ sd->module->event_mouse_button_feed(sd->video_data, 1, x, y);
+}
+
+/* Deferred seek executor (scheduled by emotion_object_position_set()).
+ * If a seek is already in progress (sd->seeking) this does nothing;
+ * _emotion_seek_done() re-issues the pending position in that case. */
+static void
+_pos_set_job(void *data)
+{
+   Evas_Object *obj;
+   Smart_Data *sd;
+
+   obj = data;
+   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+   /* the job has fired: clear the handle before anything else */
+   sd->job = NULL;
+   if (sd->seeking) return;
+   if (sd->seek)
+     {
+        sd->seeking = 1;
+        sd->module->pos_set(sd->video_data, sd->seek_pos);
+        sd->seek = 0;
+     }
+}
+
+/* called by evas when it needs pixels for the image object.
+ * Queries the engine for the current frame size (forced even, as required
+ * by 4:2:0 planar YUV), resizes the image, then uploads the frame either as
+ * planar YUV rows (YV12/I420) or as a BGRA buffer, depending on the format
+ * reported by the engine. */
+static void
+_pixels_get(void *data, Evas_Object *obj)
+{
+   int iw, ih, w, h;
+   Smart_Data *sd;
+   Emotion_Format format;
+   unsigned char *bgra_data;
+
+   sd = data;
+   sd->module->video_data_size_get(sd->video_data, &w, &h);
+   /* round down to even dimensions */
+   w = (w >> 1) << 1;
+   h = (h >> 1) << 1;
+
+   /* default colorspace; may be overridden below once the real format
+    * is known (set before size_set so the size applies to this space) */
+   evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+   evas_object_image_alpha_set(obj, 0);
+   evas_object_image_size_set(obj, w, h);
+   iw = w;
+   ih = h;
+
+   if ((iw <= 1) || (ih <= 1))
+     {
+        /* degenerate frame: blank the image and stop marking it dirty */
+        _emotion_image_data_zero(sd->obj);
+        evas_object_image_pixels_dirty_set(obj, 0);
+     }
+   else
+     {
+        format = sd->module->format_get(sd->video_data);
+        if ((format == EMOTION_FORMAT_YV12) || (format == EMOTION_FORMAT_I420))
+          {
+             unsigned char **rows;
+
+             evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+             rows = evas_object_image_data_get(obj, 1);
+             if (rows)
+               {
+                  /* row pointer table layout: ih Y rows, then ih/2 U rows,
+                   * then ih/2 V rows */
+                  if (sd->module->yuv_rows_get(sd->video_data, iw, ih,
+                                               rows,
+                                               &rows[ih],
+                                               &rows[ih + (ih / 2)]))
+                    evas_object_image_data_update_add(obj, 0, 0, iw, ih);
+               }
+             evas_object_image_data_set(obj, rows);
+             evas_object_image_pixels_dirty_set(obj, 0);
+          }
+        else if (format == EMOTION_FORMAT_BGRA)
+          {
+             evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_ARGB8888);
+             if (sd->module->bgra_data_get(sd->video_data, &bgra_data))
+               {
+                  evas_object_image_data_set(obj, bgra_data);
+                  evas_object_image_pixels_dirty_set(obj, 0);
+               }
+          }
+     }
+}
+
+/*******************************************/
+/* Internal smart object required routines */
+/*******************************************/
+#ifdef EMOTION_STATIC_BUILD_XINE
+Eina_Bool xine_module_init(void);
+#endif
+#ifdef EMOTION_STATIC_BUILD_GSTREAMER
+Eina_Bool gstreamer_module_init(void);
+#endif
+#ifdef EMOTION_STATIC_BUILD_GENERIC
+Eina_Bool generic_module_init(void);
+#endif
+
+/* One-time initialization: set up logging, collect engine modules from the
+ * install dir, ~/.emotion, $EMOTION_MODULES_DIR and next to the library,
+ * load them (plus any statically linked engines), and register the smart
+ * class. Idempotent — returns immediately once `smart` is created.
+ * NOTE(review): the early returns on error leave eina initialized and the
+ * extra brace block after the guard looks vestigial — confirm intent. */
+static void
+_smart_init(void)
+{
+   char *path;
+
+   if (smart) return;
+     {
+        eina_init();
+
+        _log_domain = eina_log_domain_register("emotion", EINA_COLOR_LIGHTCYAN);
+        if (_log_domain < 0)
+          {
+             EINA_LOG_CRIT("Could not register log domain 'emotion'");
+             eina_shutdown();
+             return;
+          }
+
+        _backends = eina_hash_string_small_new(free);
+
+        /* system-installed engine modules */
+        _modules = eina_module_list_get(NULL, PACKAGE_LIB_DIR "/emotion/", 0, NULL, NULL);
+
+        /* per-user modules */
+        path = eina_module_environment_path_get("HOME", "/.emotion/");
+        _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
+        if (path) free(path);
+
+        /* explicit override via environment */
+        path = eina_module_environment_path_get("EMOTION_MODULES_DIR", "/emotion/");
+        _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
+        if (path) free(path);
+
+        /* modules next to wherever this library was loaded from */
+        path = eina_module_symbol_path_get(emotion_object_add, "/emotion/");
+        _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
+        if (path) free(path);
+
+        if (!_modules)
+          {
+             ERR("No module found!");
+             return;
+          }
+
+        eina_module_list_load(_modules);
+
+        /* Init static module */
+#ifdef EMOTION_STATIC_BUILD_XINE
+        xine_module_init();
+#endif
+#ifdef EMOTION_STATIC_BUILD_GSTREAMER
+        gstreamer_module_init();
+#endif
+#ifdef EMOTION_STATIC_BUILD_GENERIC
+        generic_module_init();
+#endif
+
+        static Evas_Smart_Class sc =
+          EVAS_SMART_CLASS_INIT_NAME_VERSION(E_OBJ_NAME);
+        if (!sc.add)
+          {
+             sc.add = _smart_add;
+             sc.del = _smart_del;
+             sc.move = _smart_move;
+             sc.resize = _smart_resize;
+             sc.show = _smart_show;
+             sc.hide = _smart_hide;
+             sc.color_set = _smart_color_set;
+             sc.clip_set = _smart_clip_set;
+             sc.clip_unset = _smart_clip_unset;
+             sc.callbacks = _smart_callbacks;
+          }
+        smart = evas_smart_class_new(&sc);
+     }
+}
+
+/* Smart "add" hook: allocate the per-object private data, create the image
+ * that displays video frames (sd->obj) and the background rectangle
+ * (sd->bg), wire up input/pixel callbacks, and paint an initial opaque
+ * black pixel. */
+static void
+_smart_add(Evas_Object * obj)
+{
+   Smart_Data *sd;
+   unsigned int *pixel;
+
+   sd = calloc(1, sizeof(Smart_Data));
+   if (!sd) return;
+   EINA_REFCOUNT_INIT(sd);
+   sd->state = EMOTION_WAKEUP;
+   sd->obj = evas_object_image_add(evas_object_evas_get(obj));
+   sd->bg = evas_object_rectangle_add(evas_object_evas_get(obj));
+   evas_object_color_set(sd->bg, 0, 0, 0, 0);
+   evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_MOVE, _mouse_move, sd);
+   evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_DOWN, _mouse_down, sd);
+   evas_object_image_pixels_get_callback_set(sd->obj, _pixels_get, sd);
+   evas_object_smart_member_add(sd->obj, obj);
+   evas_object_smart_member_add(sd->bg, obj);
+   evas_object_lower(sd->bg);
+   sd->ratio = 1.0;
+   sd->spu.button = -1;
+   /* -1 = automatic fill (see _emotion_frame_refill) */
+   sd->fill.w = -1;
+   sd->fill.h = -1;
+   evas_object_image_alpha_set(sd->obj, 0);
+   pixel = evas_object_image_data_get(sd->obj, 1);
+   if (pixel)
+     {
+        *pixel = 0xff000000;
+        /* BUGFIX: the pixel buffer was fetched from sd->obj (the image), so
+         * it must be handed back to sd->obj — not to the smart object. */
+        evas_object_image_data_set(sd->obj, pixel);
+     }
+   evas_object_smart_data_set(obj, sd);
+
+   ecore_init();
+}
+
+static void
+_smart_del(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ EINA_REFCOUNT_UNREF(sd)
+ _smart_data_free(sd);
+}
+
+static void
+_smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y)
+{
+ Smart_Data *sd;
+ int w, h;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+
+ evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+ _clipper_position_size_update(obj, x, y, w, h, sd->video.w, sd->video.h);
+ evas_object_move(sd->bg, x, y);
+}
+
+static void
+_smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+
+ _emotion_object_aspect_border_apply(obj, sd, w, h);
+ evas_object_resize(sd->bg, w, h);
+}
+
+static void
+_smart_show(Evas_Object * obj)
+{
+ Smart_Data *sd;
+ int a;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_show(sd->obj);
+ if (sd->crop.clipper)
+ evas_object_show(sd->crop.clipper);
+
+ evas_object_color_get(sd->bg, NULL, NULL, NULL, &a);
+ if (a > 0)
+ evas_object_show(sd->bg);
+}
+
+static void
+_smart_hide(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_hide(sd->obj);
+ if (sd->crop.clipper)
+ evas_object_hide(sd->crop.clipper);
+ evas_object_hide(sd->bg);
+}
+
+static void
+_smart_color_set(Evas_Object * obj, int r, int g, int b, int a)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_color_set(sd->obj, r, g, b, a);
+ evas_object_color_set(sd->crop.clipper, r, g, b, a);
+}
+
+static void
+_smart_clip_set(Evas_Object * obj, Evas_Object * clip)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ if (sd->crop.clipper)
+ evas_object_clip_set(sd->crop.clipper, clip);
+ else
+ evas_object_clip_set(sd->obj, clip);
+ evas_object_clip_set(sd->bg, clip);
+}
+
+static void
+_smart_clip_unset(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ if (sd->crop.clipper)
+ evas_object_clip_unset(sd->crop.clipper);
+ else
+ evas_object_clip_unset(sd->obj);
+ evas_object_clip_unset(sd->bg);
+}
+
diff --git a/src/modules/emotion/generic/Emotion_Generic_Plugin.h b/src/modules/emotion/generic/Emotion_Generic_Plugin.h
new file mode 100644
index 0000000000..f00ea6e405
--- /dev/null
+++ b/src/modules/emotion/generic/Emotion_Generic_Plugin.h
@@ -0,0 +1,145 @@
+#ifndef EMOTION_GENERIC_PLUGIN_H
+#define EMOTION_GENERIC_PLUGIN_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include <Eina.h>
+
+#define DEFAULTWIDTH 320
+#define DEFAULTHEIGHT 240
+#define DEFAULTPITCH 4
+
+typedef enum _Emotion_Generic_Cmd Emotion_Generic_Cmd;
+typedef enum _Emotion_Generic_Result Emotion_Generic_Result;
+typedef struct _Emotion_Generic_Video_Frame Emotion_Generic_Video_Frame;
+typedef struct _Emotion_Generic_Video_Shared Emotion_Generic_Video_Shared;
+
+/* Commands sent from the emotion process to the player over the
+ * emotion -> player pipe. Each opcode is written as a raw int, followed
+ * by its parameter as raw bytes (int/float) or a length-prefixed string. */
+enum _Emotion_Generic_Cmd
+{
+   EM_CMD_INIT = 0, // param: shared memory identifier (string)
+   EM_CMD_PLAY, // param: position (float)
+   EM_CMD_STOP, // param: none
+   EM_CMD_FILE_SET, // param: filename (string)
+   EM_CMD_FILE_SET_DONE, // param: success (int)
+   EM_CMD_FILE_CLOSE, // param: none
+   EM_CMD_POSITION_SET, // param: position (float)
+   EM_CMD_SPEED_SET, // param: speed (float)
+   EM_CMD_AUDIO_MUTE_SET, // param: muted (int)
+   EM_CMD_VIDEO_MUTE_SET, // param: muted (int)
+   EM_CMD_SPU_MUTE_SET, // param: muted (int)
+   EM_CMD_VOLUME_SET, // param: volume (float)
+   EM_CMD_AUDIO_TRACK_SET, // param: track id (int)
+   EM_CMD_VIDEO_TRACK_SET, // param: track id (int)
+   EM_CMD_SPU_TRACK_SET, // param: track id (int)
+   EM_CMD_SUBTITLE_SET, // param: subtitle filename (string)
+   EM_CMD_LAST
+};
+
+/* Results/notifications sent back from the player to emotion over the
+ * player -> emotion pipe, using the same wire format as the commands. */
+enum _Emotion_Generic_Result
+{
+   EM_RESULT_INIT = 0, // param: none
+   EM_RESULT_FILE_SET, // param: none
+   EM_RESULT_FILE_SET_DONE, // param: success (int)
+   EM_RESULT_PLAYBACK_STARTED, // param: none
+   EM_RESULT_PLAYBACK_STOPPED, // param: none
+   EM_RESULT_FILE_CLOSE, // param: none
+   EM_RESULT_FRAME_NEW, // param: none
+   EM_RESULT_FRAME_SIZE, // param: int, int (width, height)
+   EM_RESULT_LENGTH_CHANGED, // param: float
+   EM_RESULT_POSITION_CHANGED, // param: float
+   EM_RESULT_SEEKABLE_CHANGED, // param: int
+   EM_RESULT_AUDIO_TRACK_INFO, // param: current track, track count, track_id, track_name, track_id2, track_name2, ...
+   EM_RESULT_VIDEO_TRACK_INFO, // param: current track, track count, track_id, track_name, track_id2, track_name2, ...
+   EM_RESULT_SPU_TRACK_INFO, // param: current spu, spu count, spu_id, spu_name, spu_id2, spu_name2, ...
+                             // (int, int, int, string, int, string, ...)
+   EM_RESULT_META_INFO, // param: title, artist, album, year, genre, comments, disc id, count (all int)
+   EM_RESULT_LAST
+};
+
+/* Pointers to the three frame buffers that live inside the shared memory
+ * segment — triple buffering keeps each side's frame consistent. */
+struct _Emotion_Generic_Video_Frame
+{
+   unsigned char *frames[3];
+};
+
+/* Header of the shared memory segment; the three frame buffers follow
+ * immediately after this struct in the same mapping. */
+struct _Emotion_Generic_Video_Shared
+{
+   int size;
+   int width;
+   int height;
+   int pitch;
+   /**
+    * - "emotion" is the frame from where the Emotion process is reading pixels.
+    *   The player shouldn't touch this frame.
+    * - "player" is the frame where the player process is writing pixels.
+    *   The emotion process shouldn't touch this frame.
+    * - "last" is the last frame that was rendered by the player. Emotion will
+    *   use this frame the next time it will fetch pixels to Evas.
+    * - "next" is the unused frame. The player currently using the "player"
+    *   should, after finishing this frame, set "last" to "player", and "player"
+    *   to "next", and finally "next" to "last" so this operation can be done
+    *   many times in case that Emotion does not request pixels fast enough.
+    */
+   struct {
+      int emotion;
+      int player;
+      int last;
+      int next;
+   } frame;
+   Eina_Semaphore lock;  /* guards the frame-index rotation above */
+   int frame_drop;       /* frames the player produced but emotion skipped */
+};
+
+/* Map the shared video memory created by the emotion process.
+ * On success *vs points to the shared header and vf->frames[] to the three
+ * frame buffers that follow it. Returns 1 on success, 0 on failure.
+ * Fixes: the shm fd is now closed (it was leaked — the mapping stays valid
+ * without it), and errno is saved before the first fprintf() so the second
+ * one cannot print a clobbered value. */
+static inline int
+emotion_generic_shm_get(const char *shmname, Emotion_Generic_Video_Shared **vs, Emotion_Generic_Video_Frame *vf)
+{
+   int shmfd = -1;
+   int size;
+   Emotion_Generic_Video_Shared *t_vs;
+
+   shmfd = shm_open(shmname, O_RDWR, 0777);
+   if (shmfd == -1)
+     {
+        int err = errno;
+        fprintf(stderr, "player: could not open shm: %s\n", shmname);
+        fprintf(stderr, "player: %s\n", strerror(err));
+        return 0;
+     }
+
+   /* first map only the header to learn the full segment size */
+   t_vs = mmap(NULL, sizeof(*t_vs), PROT_READ|PROT_WRITE, MAP_SHARED, shmfd, 0);
+   if (t_vs == MAP_FAILED)
+     {
+        int err = errno;
+        fprintf(stderr, "player: could not map shared memory.\n");
+        fprintf(stderr, "player: %s\n", strerror(err));
+        close(shmfd);
+        return 0;
+     }
+   size = t_vs->size;
+   munmap(t_vs, sizeof(*t_vs));
+
+   /* now map the whole segment: header + three frame buffers */
+   t_vs = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, shmfd, 0);
+   close(shmfd); /* the mapping keeps the segment alive */
+   if (t_vs == MAP_FAILED)
+     {
+        int err = errno;
+        fprintf(stderr, "player: could not map shared memory.\n");
+        fprintf(stderr, "player: %s\n", strerror(err));
+        return 0;
+     }
+
+   vf->frames[0] = (unsigned char *)t_vs + sizeof(*t_vs);
+   vf->frames[1] = (unsigned char *)t_vs + sizeof(*t_vs) + t_vs->height * t_vs->width * t_vs->pitch;
+   vf->frames[2] = (unsigned char *)t_vs + sizeof(*t_vs) + 2 * t_vs->height * t_vs->width * t_vs->pitch;
+
+   *vs = t_vs;
+
+   return 1;
+}
+
+/* Unmap the shared segment previously mapped by emotion_generic_shm_get();
+ * vs->size covers the header plus the three frame buffers. */
+static inline void
+emotion_generic_shm_free(Emotion_Generic_Video_Shared *vs)
+{
+   munmap(vs, vs->size);
+}
+
+#endif // EMOTION_GENERIC_PLUGIN_H
diff --git a/src/modules/emotion/generic/README b/src/modules/emotion/generic/README
new file mode 100644
index 0000000000..c2a028de5d
--- /dev/null
+++ b/src/modules/emotion/generic/README
@@ -0,0 +1,79 @@
+Generic - emotion backend
+=========================
+
+This generic player backend executes a separate player in another
+process. It receives the bytes to be drawn on the emotion object through
+a shared memory, and communicates with the player through a pipe, using
+the player standard input/output.
+
+The player must communicate with emotion using the defined commands
+specified in the Emotion_Generic_Plugin.h. It doesn't need to link
+against emotion, just include this file for easier implementation.
+
+
+How does it work?
+=================
+
+When the module is initialized for an emotion object, it starts another process
+that runs the specified player. The player command line is specified using:
+
+ emotion_object_module_option_set(object, "player", <command>);
+
+A player using libvlc is being provided now, and the generic module internally
+checks if the command given was "vlc", in which case it will use this provided
+vlc player.
+
+When a file is set to this object, it will send the file name to the player, and
+expect an answer that will tell that the player already decoded a bit of the
+file, and the video size is already set on the module, so it can allocate a
+shared memory with correct size.
+
+The module then allocates the memory, sends a message to the player and expect
+an answer. After this last answer, the "open_done" signal is sent and the module
+knows that it is ready for playing. Commands sent before the module being ready
+are now applied (and play is resumed if necessary).
+
+During this setup stage, info about the file set will be stored in the module,
+so commands like meta data get, length get and so will be available to sync
+calls like emotion_object_play_length_get();
+
+If the player dies for any reason, a "decode_stop" signal is sent (should change
+to something more like an error signal), and if play is called again, it will be
+restarted. The playback should start from the same point it was before the
+player crashed (if the player supports seek on the current media format).
+
+TODO
+====
+
+ - Provide better description for commands;
+ - Explain in details the communication emotion <-> player;
+ - Make more common functions for players;
+ - (maybe) add support for named pipes, so we don't rely on standard in/out
+ for communication;
+ - Add a detection on the player to know that the emotion process died (so it
+ can just exit);
+ - shmname should contain the child pid too;
+ - better names for commands, maybe add namespace everywhere;
+
+
+questions
+=========
+
+ - Using semaphores to lock the critical region between processes, and pthread
+   mutexes for the threads inside the player. Should we move to only one type
+   (semaphores or mutexes)?
+ - There are 2 inline functions inside Emotion_Generic_Plugin.h to make it easier
+ for the player to get the shared memory correctly. Any problem with this?
+ Would be good to add more functions/macros to make common tasks like
+ parsing commands there too?
+ - Should move players to another project (outside of emotion)?
+
+
+problems
+========
+ - file_set has a critical window, while the file is not set yet, during which
+   we can't call some functions (I think only another file_set now);
+ - communication player -> emotion depends on '\n' to delimitate commands, will
+ remove this soon (fix this urgently!);
+ - need to implement missing APIs;
+
diff --git a/src/modules/emotion/generic/emotion_generic.c b/src/modules/emotion/generic/emotion_generic.c
new file mode 100644
index 0000000000..d66553f251
--- /dev/null
+++ b/src/modules/emotion/generic/emotion_generic.c
@@ -0,0 +1,1820 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_generic.h"
+
+static Eina_Prefix *pfx = NULL;
+
+static int _emotion_generic_log_domain = -1;
+#define DBG(...) EINA_LOG_DOM_DBG(_emotion_generic_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_emotion_generic_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_emotion_generic_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_emotion_generic_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_emotion_generic_log_domain, __VA_ARGS__)
+
+
+/* Maps a user-friendly player name ("vlc") to the helper binary shipped
+ * under <libdir>/emotion/utils. NULL-terminated; used by _get_player()
+ * both for lookup by name and as the fallback list. */
+struct _default_players {
+   const char *name;
+   const char *cmdline;
+};
+
+static struct _default_players players[] = {
+#ifdef EMOTION_BUILD_GENERIC_VLC
+   { "vlc", "em_generic_vlc" },
+#endif
+   { NULL, NULL }
+};
+
+static Eina_Bool _fork_and_exec(Emotion_Generic_Video *ev);
+static void em_partial_shutdown(Emotion_Generic_Video *ev);
+
+/* Idler callback: respawn the player process after it died. */
+static Eina_Bool
+_player_restart(void *data)
+{
+   Emotion_Generic_Video *video = data;
+
+   _fork_and_exec(video);
+   video->player_restart = NULL;
+   return EINA_FALSE; /* one-shot: remove the idler */
+}
+
+/* Resolve the player executable to run.
+ * @name may be a known alias from players[], an absolute path, or a bare
+ * binary name looked up under <libdir>/emotion/utils; when nothing usable
+ * is found we fall back to the built-in players[] list.
+ * Returns the command path (possibly pointing into a static buffer — not
+ * thread safe) or NULL when no player is available. */
+static const char *
+_get_player(const char *name)
+{
+   const char *selected_name = NULL;
+   const char *libdir = eina_prefix_lib_get(pfx);
+   static char buf[PATH_MAX];
+   int i;
+
+   if (name)
+     {
+        for (i = 0; players[i].name; i++)
+          {
+             if (!strcmp(players[i].name, name))
+               {
+                  selected_name = players[i].cmdline;
+                  break;
+               }
+          }
+     }
+
+   /* unknown alias: treat the given name as a binary name/path */
+   if ((!selected_name) && (name))
+     selected_name = name;
+
+   if (selected_name)
+     {
+        const char *cmd;
+
+        if (selected_name[0] == '/') cmd = selected_name;
+        else
+          {
+             snprintf(buf, sizeof(buf), "%s/emotion/utils/%s",
+                      libdir, selected_name);
+             cmd = buf;
+          }
+
+        DBG("Try generic player '%s'", cmd);
+        if (access(cmd, R_OK | X_OK) == 0)
+          {
+             INF("Using generic player '%s'", cmd);
+             return cmd;
+          }
+     }
+
+   /* requested player unusable: try every built-in player in order */
+   for (i = 0; players[i].name; i++)
+     {
+        snprintf(buf, sizeof(buf), "%s/emotion/utils/%s",
+                 libdir, players[i].cmdline);
+        DBG("Try generic player '%s'", buf);
+        if (access(buf, R_OK | X_OK) == 0)
+          {
+             INF("Using fallback player '%s'", buf);
+             return buf;
+          }
+     }
+
+   ERR("no generic player found, given name='%s'", name ? name : "");
+   return NULL;
+}
+
+/* Write a command opcode to the player pipe. Invalid opcodes and a pipe
+ * that is not yet established are reported and dropped. */
+static void
+_player_send_cmd(Emotion_Generic_Video *ev, int cmd)
+{
+   ssize_t written;
+
+   if (cmd >= EM_CMD_LAST)
+     {
+        ERR("invalid command to player.");
+        return;
+     }
+   if (ev->fd_write == -1)
+     {
+        ERR("you should wait for emotion to be ready to take action.");
+        return;
+     }
+   written = write(ev->fd_write, &cmd, sizeof(cmd));
+   if (written < 0) perror("write");
+}
+
+/* Write a raw int parameter to the player pipe (pipe must be up). */
+static void
+_player_send_int(Emotion_Generic_Video *ev, int number)
+{
+   ssize_t written;
+
+   if (ev->fd_write == -1)
+     {
+        ERR("you should wait for emotion to be ready to take action.");
+        return;
+     }
+   written = write(ev->fd_write, &number, sizeof(number));
+   if (written < 0) perror("write");
+}
+
+/* Write a raw float parameter to the player pipe (pipe must be up). */
+static void
+_player_send_float(Emotion_Generic_Video *ev, float number)
+{
+   ssize_t written;
+
+   if (ev->fd_write == -1)
+     {
+        ERR("you should wait for emotion to be ready to take action.");
+        return;
+     }
+   written = write(ev->fd_write, &number, sizeof(number));
+   if (written < 0) perror("write");
+}
+
+/* Write a length-prefixed string to the player pipe; the length includes
+ * the terminating NUL. @p stringshared avoids a strlen() for stringshares.
+ * Fix: check the pipe fd (and a NULL string) like the other _player_send_*
+ * helpers do, instead of writing to fd -1 / calling strlen(NULL). */
+static void
+_player_send_str(Emotion_Generic_Video *ev, const char *str, Eina_Bool stringshared)
+{
+   int len;
+
+   if ((!str) || (ev->fd_write == -1))
+     {
+        ERR("you should wait for emotion to be ready to take action.");
+        return;
+     }
+   if (stringshared)
+     len = eina_stringshare_strlen(str) + 1;
+   else
+     len = strlen(str) + 1;
+   if (write(ev->fd_write, &len, sizeof(len)) < 0) perror("write");
+   if (write(ev->fd_write, str, len) < 0) perror("write");
+}
+
+/* Create and map the shared segment used to exchange decoded frames with
+ * the player: an Emotion_Generic_Video_Shared header followed by three
+ * w*h*DEFAULTPITCH frame buffers, rounded up to whole pages.
+ * Returns EINA_TRUE on success.
+ * Fixes: the shm fd is closed on every path (it was leaked), failure
+ * paths now unlink/unmap what they created, the return values are
+ * consistently Eina_Bool, and %zu is used for the size_t format. */
+static Eina_Bool
+_create_shm_data(Emotion_Generic_Video *ev, const char *shmname)
+{
+   int shmfd;
+   int npages;
+   size_t size;
+   Emotion_Generic_Video_Shared *vs;
+
+   shmfd = shm_open(shmname, O_CREAT | O_RDWR | O_TRUNC, 0777);
+   if (shmfd == -1)
+     {
+        ERR("player: could not open shm: %s", shmname);
+        ERR("player: %s", strerror(errno));
+        return EINA_FALSE;
+     }
+   size = 3 * (ev->w * ev->h * DEFAULTPITCH) + sizeof(*vs);
+
+   /* round up to a whole number of pages for mmap */
+   npages = (int)(size / getpagesize()) + 1;
+   size = npages * getpagesize();
+
+   if (ftruncate(shmfd, size))
+     {
+        ERR("error when allocating shared memory (size = %zu): "
+            "%s", size, strerror(errno));
+        close(shmfd);
+        shm_unlink(shmname);
+        return EINA_FALSE;
+     }
+   vs = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, shmfd, 0);
+   close(shmfd); /* the mapping keeps the segment alive */
+   if (vs == MAP_FAILED)
+     {
+        ERR("error when mapping shared memory");
+        shm_unlink(shmname);
+        return EINA_FALSE;
+     }
+
+   vs->size = size;
+   vs->width = ev->w;
+   vs->height = ev->h;
+   vs->pitch = DEFAULTPITCH;
+   vs->frame.emotion = 0;
+   vs->frame.player = 1;
+   vs->frame.last = 2;
+   vs->frame.next = 2;
+   vs->frame_drop = 0;
+   if (!eina_semaphore_new(&vs->lock, 1))
+     {
+        ERR("can not create semaphore");
+        munmap(vs, size);
+        shm_unlink(shmname);
+        return EINA_FALSE;
+     }
+   ev->frame.frames[0] = (unsigned char *)vs + sizeof(*vs);
+   ev->frame.frames[1] = (unsigned char *)vs + sizeof(*vs) + vs->height * vs->width * vs->pitch;
+   ev->frame.frames[2] = (unsigned char *)vs + sizeof(*vs) + 2 * vs->height * vs->width * vs->pitch;
+
+   /* replace any mapping left over from a previous file */
+   if (ev->shared)
+     munmap(ev->shared, ev->shared->size);
+   ev->shared = vs;
+
+   return EINA_TRUE;
+}
+
+/* Forward a "new frame" notification to emotion, but only once the file
+ * is fully opened and the shared frame memory is in place. */
+static void
+_player_new_frame(Emotion_Generic_Video *ev)
+{
+   if (ev->file_ready)
+     _emotion_frame_new(ev->obj);
+}
+
+/* Ask the player to open ev->filename. Does nothing until the player has
+ * reported EM_RESULT_INIT and a filename was set.
+ * Fix: bail out before logging so a NULL filename is never passed to the
+ * "%s" format; the frame-drop counter is reset only when we really open. */
+static void
+_file_open(Emotion_Generic_Video *ev)
+{
+   if (!ev->ready || !ev->filename)
+     return;
+
+   INF("Opening file: %s", ev->filename);
+   ev->drop = 0;
+
+   _player_send_cmd(ev, EM_CMD_FILE_SET);
+   _player_send_str(ev, ev->filename, EINA_TRUE);
+}
+
+/* The player acknowledged EM_CMD_FILE_SET: allocate the shared frame
+ * memory and tell it to proceed — unless the user switched files while
+ * the request was in flight, in which case reopen with the new one. */
+static void
+_player_file_set_done(Emotion_Generic_Video *ev)
+{
+   if (ev->file_changed)
+     {
+        ev->file_changed = EINA_FALSE;
+        _file_open(ev);
+        return;
+     }
+
+   if (!_create_shm_data(ev, ev->shmname))
+     {
+        ERR("could not create shared memory.");
+        return;
+     }
+   _player_send_cmd(ev, EM_CMD_FILE_SET_DONE);
+}
+
+/* Handle EM_RESULT_INIT: the spawned player is alive and listening.
+ * If a file was already requested before the player came up, open it now. */
+static void
+_player_ready(Emotion_Generic_Video *ev)
+{
+   INF("received: player ready.");
+
+   ev->initializing = EINA_FALSE;
+   ev->ready = EINA_TRUE;
+
+   if (ev->filename)
+     _file_open(ev);
+}
+
+/* Read @p size bytes of a command parameter from the player pipe into
+ * @p param. The pipe is non-blocking, so a partial read is buffered in
+ * ev->cmd.tmp and completed on later calls; returns EINA_TRUE only when
+ * the full parameter is available. On a hard read error the parser state
+ * is reset (cmd.type = -1).
+ * Fix: the malloc() result is now checked. */
+static Eina_Bool
+_player_cmd_param_read(Emotion_Generic_Video *ev, void *param, size_t size)
+{
+   ssize_t done, todo, i;
+
+   /* When a parameter must be read, we cannot make sure it will be entirely
+    * available. Thus we store the bytes that could be read in a temp buffer,
+    * and when more data is read we try to complete the buffer and finally use
+    * the read value.
+    */
+   if (!ev->cmd.tmp)
+     {
+        ev->cmd.tmp = malloc(size);
+        if (!ev->cmd.tmp)
+          {
+             ERR("could not allocate %zu bytes for parameter.", size);
+             ev->cmd.type = -1;
+             return EINA_FALSE;
+          }
+        ev->cmd.i = 0;
+        ev->cmd.total = size;
+     }
+
+   todo = ev->cmd.total - ev->cmd.i;
+   i = ev->cmd.i;
+   done = read(ev->fd_read, &ev->cmd.tmp[i], todo);
+
+   if (done < 0 && errno != EINTR && errno != EAGAIN)
+     {
+        if (ev->cmd.tmp)
+          {
+             free(ev->cmd.tmp);
+             ev->cmd.tmp = NULL;
+          }
+        ERR("problem when reading parameter from pipe.");
+        ev->cmd.type = -1;
+        return EINA_FALSE;
+     }
+
+   if (done == todo)
+     {
+        memcpy(param, ev->cmd.tmp, size);
+        free(ev->cmd.tmp);
+        ev->cmd.tmp = NULL;
+        return EINA_TRUE;
+     }
+
+   /* partial read: remember progress, retry on the next fd event */
+   if (done > 0)
+     ev->cmd.i += done;
+
+   return EINA_FALSE;
+}
+
+/* Handle EM_RESULT_FRAME_SIZE: the decoded video dimensions changed.
+ * Caches the size/ratio and notifies emotion unless we are still opening.
+ * Fix: guard the aspect-ratio division — a zero height would produce an
+ * inf/NaN ratio; fall back to 1.0 (the em_file_open() default). */
+static void
+_player_frame_resize(Emotion_Generic_Video *ev)
+{
+   int w, h;
+
+   w = ev->cmd.param.size.width;
+   h = ev->cmd.param.size.height;
+
+   INF("received frame resize: %dx%d", w, h);
+   ev->w = w;
+   ev->h = h;
+   ev->ratio = (h > 0) ? (float)w / h : 1.0;
+
+   if (ev->opening)
+     return;
+
+   _emotion_frame_resize(ev->obj, ev->w, ev->h, ev->ratio);
+}
+
+/* Handle EM_RESULT_LENGTH_CHANGED: cache the new media length and push a
+ * position update to emotion. */
+static void
+_player_length_changed(Emotion_Generic_Video *ev)
+{
+   float len = ev->cmd.param.f_num;
+
+   INF("received length changed: %0.3f", len);
+   ev->len = len;
+   _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+}
+
+/* Handle EM_RESULT_POSITION_CHANGED: cache the playback position and push
+ * it to emotion. The disabled code below was an experiment in reporting
+ * it as "progress" too, but _emotion_progress_set() is meant for
+ * buffering progress — kept for reference. */
+static void
+_player_position_changed(Emotion_Generic_Video *ev)
+{
+   float position = ev->cmd.param.f_num;
+
+   INF("received position changed: %0.3f", position);
+
+   ev->pos = position;
+   _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+/* hmmm. no _emotion_progress_set() is for "buffering" progress.
+   if (ev->len == 0)
+     return;
+
+   float progress = ev->pos / ev->len;
+   char buf[16];
+   snprintf(buf, sizeof(buf), "%0.1f%%", progress * 100);
+
+   _emotion_progress_set(ev->obj, buf, progress);
+ */
+}
+
+/* Handle EM_RESULT_SEEKABLE_CHANGED: cache whether the stream is seekable,
+ * normalized to 0/1. */
+static void
+_player_seekable_changed(Emotion_Generic_Video *ev)
+{
+   int flag = ev->cmd.param.i_num;
+
+   INF("received seekable changed: %d", flag);
+   ev->seekable = !!flag;
+}
+
+/* Release the audio track table and its stringshared track names. */
+static void
+_audio_channels_free(Emotion_Generic_Video *ev)
+{
+   int idx;
+
+   for (idx = 0; idx < ev->audio_channels_count; idx++)
+     eina_stringshare_del(ev->audio_channels[idx].name);
+   free(ev->audio_channels);
+   ev->audio_channels = NULL;
+   ev->audio_channels_count = 0;
+}
+
+/* Release the video track table and its stringshared track names. */
+static void
+_video_channels_free(Emotion_Generic_Video *ev)
+{
+   int idx;
+
+   for (idx = 0; idx < ev->video_channels_count; idx++)
+     eina_stringshare_del(ev->video_channels[idx].name);
+   free(ev->video_channels);
+   ev->video_channels = NULL;
+   ev->video_channels_count = 0;
+}
+
+/* Release the SPU (subtitle) track table and its stringshared names. */
+static void
+_spu_channels_free(Emotion_Generic_Video *ev)
+{
+   int idx;
+
+   for (idx = 0; idx < ev->spu_channels_count; idx++)
+     eina_stringshare_del(ev->spu_channels[idx].name);
+   free(ev->spu_channels);
+   ev->spu_channels = NULL;
+   ev->spu_channels_count = 0;
+}
+
+/* Hand the just-parsed track table (ev->cmd.param.track) over to the
+ * caller-provided list/count/current slots, logging each entry. The
+ * channel array ownership moves to *channels. */
+static void
+_player_tracks_info(Emotion_Generic_Video *ev, Emotion_Generic_Channel **channels, int *count, int *current)
+{
+   Emotion_Generic_Channel *list = ev->cmd.param.track.channels;
+   int idx;
+
+   *count = ev->cmd.param.track.total;
+   *current = ev->cmd.param.track.current;
+
+   INF("number of tracks: %d (current = %d):", *count, *current);
+   for (idx = 0; idx < *count; idx++)
+     INF("\tchannel %d: %s", list[idx].id, list[idx].name);
+
+   *channels = list;
+}
+
+/* Handle EM_RESULT_AUDIO_TRACK_INFO: replace the cached audio tracks. */
+static void
+_player_audio_tracks_info(Emotion_Generic_Video *ev)
+{
+   INF("Receiving audio channels:");
+   if (ev->audio_channels_count)
+     _audio_channels_free(ev);
+   _player_tracks_info(ev, &ev->audio_channels, &ev->audio_channels_count,
+                       &ev->audio_channel_current);
+}
+
+/* Handle EM_RESULT_VIDEO_TRACK_INFO: replace the cached video tracks. */
+static void
+_player_video_tracks_info(Emotion_Generic_Video *ev)
+{
+   INF("Receiving video channels:");
+   if (ev->video_channels_count)
+     _video_channels_free(ev);
+   _player_tracks_info(ev, &ev->video_channels, &ev->video_channels_count,
+                       &ev->video_channel_current);
+}
+
+/* Handle EM_RESULT_SPU_TRACK_INFO: replace the cached subtitle tracks. */
+static void
+_player_spu_tracks_info(Emotion_Generic_Video *ev)
+{
+   INF("Receiving spu channels:");
+   if (ev->spu_channels_count)
+     _spu_channels_free(ev);
+   _player_tracks_info(ev, &ev->spu_channels, &ev->spu_channels_count,
+                       &ev->spu_channel_current);
+}
+
+/* Drop all cached meta-info stringshares, resetting each field to NULL
+ * (eina_stringshare_replace handles NULL old values safely). */
+static void
+_player_meta_info_free(Emotion_Generic_Video *ev)
+{
+   eina_stringshare_replace(&ev->meta.title, NULL);
+   eina_stringshare_replace(&ev->meta.artist, NULL);
+   eina_stringshare_replace(&ev->meta.album, NULL);
+   eina_stringshare_replace(&ev->meta.year, NULL);
+   eina_stringshare_replace(&ev->meta.genre, NULL);
+   eina_stringshare_replace(&ev->meta.comment, NULL);
+   eina_stringshare_replace(&ev->meta.disc_id, NULL);
+   eina_stringshare_replace(&ev->meta.count, NULL);
+}
+
+/* Handle EM_RESULT_META_INFO: move the freshly parsed meta strings from
+ * the command buffer into the cached ev->meta fields. Ownership of the
+ * stringshares transfers here; the old values are released first. */
+static void
+_player_meta_info_read(Emotion_Generic_Video *ev)
+{
+   INF("Receiving meta info:");
+   _player_meta_info_free(ev);
+   ev->meta.title = ev->cmd.param.meta.title;
+   ev->meta.artist = ev->cmd.param.meta.artist;
+   ev->meta.album = ev->cmd.param.meta.album;
+   ev->meta.year = ev->cmd.param.meta.year;
+   ev->meta.genre = ev->cmd.param.meta.genre;
+   ev->meta.comment = ev->cmd.param.meta.comment;
+   ev->meta.disc_id = ev->cmd.param.meta.disc_id;
+   ev->meta.count = ev->cmd.param.meta.count;
+   INF("title: '%s'", ev->meta.title);
+   INF("artist: '%s'", ev->meta.artist);
+   INF("album: '%s'", ev->meta.album);
+   INF("year: '%s'", ev->meta.year);
+   INF("genre: '%s'", ev->meta.genre);
+   INF("comment: '%s'", ev->meta.comment);
+   INF("disc_id: '%s'", ev->meta.disc_id);
+   INF("count: '%s'", ev->meta.count);
+}
+
+/* Handle EM_RESULT_FILE_CLOSE: the previous file is closed on the player
+ * side, so the shared-segment semaphore can be released. If an open was
+ * queued behind the close, start it now.
+ * Fix: guard ev->shared — the segment may already have been unmapped
+ * (e.g. after a player restart), which would crash here. */
+static void
+_player_file_closed(Emotion_Generic_Video *ev)
+{
+   INF("Closed previous file.");
+   if (ev->shared)
+     eina_semaphore_free(&ev->shared->lock);
+   ev->closing = EINA_FALSE;
+
+   if (ev->opening)
+     _file_open(ev);
+}
+
+/* Handle EM_RESULT_FILE_SET_DONE: the player finished opening the file
+ * (param.i_num is the success flag). The shm name can be unlinked now —
+ * both processes hold their mappings. On success, replays the pending
+ * play/volume/speed/mute state and emits "open_done". */
+static void
+_player_open_done(Emotion_Generic_Video *ev)
+{
+   int success;
+
+   success = ev->cmd.param.i_num;
+   shm_unlink(ev->shmname);
+
+   /* the user switched files while this open was in flight: reopen */
+   if (ev->file_changed)
+     {
+        _file_open(ev);
+        ev->file_changed = EINA_FALSE;
+        return;
+     }
+
+   ev->opening = EINA_FALSE;
+   if (!success)
+     {
+        ERR("Could not open file.");
+        return;
+     }
+
+   ev->file_ready = EINA_TRUE;
+
+   _emotion_open_done(ev->obj);
+
+   if (ev->play)
+     {
+        _player_send_cmd(ev, EM_CMD_PLAY);
+        _player_send_float(ev, ev->pos);
+     }
+
+   _player_send_cmd(ev, EM_CMD_VOLUME_SET);
+   _player_send_float(ev, ev->volume);
+
+   _player_send_cmd(ev, EM_CMD_SPEED_SET);
+   _player_send_float(ev, ev->speed);
+
+   int mute = ev->audio_mute;
+   _player_send_cmd(ev, EM_CMD_AUDIO_MUTE_SET);
+   _player_send_int(ev, mute);
+
+   mute = ev->video_mute;
+   _player_send_cmd(ev, EM_CMD_VIDEO_MUTE_SET);
+   _player_send_int(ev, mute);
+
+   mute = ev->spu_mute;
+   _player_send_cmd(ev, EM_CMD_SPU_MUTE_SET);
+   _player_send_int(ev, mute);
+
+   INF("Open done");
+}
+
+/* Dispatch a fully-parsed player result (ev->cmd) to its handler, then
+ * reset the parser state (cmd.type = -1) so _player_cmd_read() starts a
+ * fresh message next time. */
+static void
+_player_cmd_process(Emotion_Generic_Video *ev)
+{
+   switch (ev->cmd.type) {
+      case EM_RESULT_INIT:
+         _player_ready(ev);
+         break;
+      case EM_RESULT_FRAME_NEW:
+         _player_new_frame(ev);
+         break;
+      case EM_RESULT_FILE_SET:
+         _player_file_set_done(ev);
+         break;
+      case EM_RESULT_FILE_SET_DONE:
+         _player_open_done(ev);
+         break;
+      case EM_RESULT_FILE_CLOSE:
+         _player_file_closed(ev);
+         break;
+      case EM_RESULT_PLAYBACK_STARTED:
+         _emotion_playback_started(ev->obj);
+         break;
+      case EM_RESULT_PLAYBACK_STOPPED:
+         /* playback ended (or the player stopped): notify emotion, tear
+          * the player down and schedule a respawn from an idler */
+         ev->pos = 0;
+         _emotion_playback_finished(ev->obj);
+         _emotion_decode_stop(ev->obj);
+
+         em_partial_shutdown(ev);
+         ev->player_restart = ecore_idler_add(_player_restart, ev);
+         break;
+      case EM_RESULT_FRAME_SIZE:
+         _player_frame_resize(ev);
+         break;
+      case EM_RESULT_LENGTH_CHANGED:
+         _player_length_changed(ev);
+         break;
+      case EM_RESULT_POSITION_CHANGED:
+         _player_position_changed(ev);
+         break;
+      case EM_RESULT_SEEKABLE_CHANGED:
+         _player_seekable_changed(ev);
+         break;
+      case EM_RESULT_AUDIO_TRACK_INFO:
+         _player_audio_tracks_info(ev);
+         break;
+      case EM_RESULT_VIDEO_TRACK_INFO:
+         _player_video_tracks_info(ev);
+         break;
+      case EM_RESULT_SPU_TRACK_INFO:
+         _player_spu_tracks_info(ev);
+         break;
+      case EM_RESULT_META_INFO:
+         _player_meta_info_read(ev);
+         break;
+      default:
+         WRN("received wrong command: %d", ev->cmd.type);
+   }
+
+   ev->cmd.type = -1;
+}
+
+/* Read a single int parameter (possibly over several fd events) and
+ * dispatch the message once it is complete. */
+static void
+_player_cmd_single_int_process(Emotion_Generic_Video *ev)
+{
+   if (_player_cmd_param_read(ev, &ev->cmd.param.i_num, sizeof(ev->cmd.param.i_num)))
+     _player_cmd_process(ev);
+}
+
+/* Read a single float parameter (possibly over several fd events) and
+ * dispatch the message once it is complete. */
+static void
+_player_cmd_single_float_process(Emotion_Generic_Video *ev)
+{
+   if (_player_cmd_param_read(ev, &ev->cmd.param.f_num, sizeof(ev->cmd.param.f_num)))
+     _player_cmd_process(ev);
+}
+
+/* Read two int parameters (width, height for EM_RESULT_FRAME_SIZE),
+ * resuming across fd events via cur_param/num_params, and dispatch once
+ * both have arrived. */
+static void
+_player_cmd_double_int_process(Emotion_Generic_Video *ev)
+{
+   int param;
+
+   if (ev->cmd.num_params == 0)
+     {
+        /* first call for this message: initialize the parse state */
+        ev->cmd.num_params = 2;
+        ev->cmd.cur_param = 0;
+        ev->cmd.param.size.width = 0;
+        ev->cmd.param.size.height = 0;
+     }
+
+   if (!_player_cmd_param_read(ev, &param, sizeof(param)))
+     return;
+
+   if (ev->cmd.cur_param == 0)
+     ev->cmd.param.size.width = param;
+   else
+     ev->cmd.param.size.height = param;
+
+   ev->cmd.cur_param++;
+   if (ev->cmd.cur_param == ev->cmd.num_params)
+     _player_cmd_process(ev);
+}
+
+/* Incrementally parse a *_TRACK_INFO result: first the current track and
+ * the track count, then an (id, length-prefixed name) pair per track.
+ * State lives in ev->cmd so parsing resumes when the pipe runs dry.
+ * Fixes: the calloc() result is checked, and the incoming string length
+ * is validated against the stack buffer before reading (overflow). */
+static void
+_player_cmd_track_info(Emotion_Generic_Video *ev)
+{
+   int param;
+   int i;
+
+   if (ev->cmd.num_params == 0)
+     {
+        ev->cmd.cur_param = 0;
+        ev->cmd.num_params = 2;
+        ev->cmd.param.track.channels = NULL;
+        ev->cmd.s_len = -1;
+     }
+
+   while (ev->cmd.cur_param < 2)
+     {
+        if (!_player_cmd_param_read(ev, &param, sizeof(param)))
+          return;
+
+        if (ev->cmd.cur_param == 0)
+          ev->cmd.param.track.current = param;
+        else
+          {
+             ev->cmd.param.track.total = param;
+             ev->cmd.num_params += param * 2;
+             ev->cmd.param.track.channels =
+                calloc(param, sizeof(*ev->cmd.param.track.channels));
+             if (!ev->cmd.param.track.channels)
+               {
+                  ERR("could not allocate %d track entries.", param);
+                  ev->cmd.type = -1;
+                  return;
+               }
+          }
+        ev->cmd.cur_param++;
+     }
+
+   if (ev->cmd.cur_param == ev->cmd.num_params)
+     {
+        _player_cmd_process(ev);
+        return;
+     }
+
+   i = (ev->cmd.cur_param - 2) / 2;
+   if ((ev->cmd.cur_param % 2) == 0) // reading track id
+     {
+        if (!_player_cmd_param_read(ev, &param, sizeof(param)))
+          return;
+        ev->cmd.param.track.channels[i].id = param;
+        ev->cmd.cur_param++;
+     }
+   else // reading track name
+     {
+        char buf[PATH_MAX];
+
+        if (ev->cmd.s_len == -1)
+          {
+             if (!_player_cmd_param_read(ev, &param, sizeof(param)))
+               return;
+             ev->cmd.s_len = param;
+          }
+
+        /* never read more than the stack buffer can hold */
+        if ((ev->cmd.s_len < 0) || (ev->cmd.s_len > (int)sizeof(buf)))
+          {
+             ERR("invalid track name length: %d", ev->cmd.s_len);
+             ev->cmd.s_len = -1;
+             ev->cmd.type = -1;
+             return;
+          }
+
+        if (!_player_cmd_param_read(ev, buf, ev->cmd.s_len))
+          return;
+        ev->cmd.param.track.channels[i].name =
+           eina_stringshare_add_length(buf, ev->cmd.s_len);
+        ev->cmd.cur_param++;
+        ev->cmd.s_len = -1;
+     }
+
+   if (ev->cmd.cur_param == ev->cmd.num_params)
+     _player_cmd_process(ev);
+}
+
+/* Incrementally parse EM_RESULT_META_INFO: eight length-prefixed strings
+ * (title, artist, album, year, genre, comment, disc id, count), resuming
+ * across fd events via ev->cmd state.
+ * Fix: the incoming string length is validated against the stack buffer
+ * before reading (overflow). */
+static void
+_player_cmd_meta_info(Emotion_Generic_Video *ev)
+{
+   int param;
+   const char *info;
+   char buf[PATH_MAX];
+
+   if (ev->cmd.num_params == 0)
+     {
+        ev->cmd.cur_param = 0;
+        ev->cmd.num_params = 8;
+        ev->cmd.param.meta.title = NULL;
+        ev->cmd.param.meta.artist = NULL;
+        ev->cmd.param.meta.album = NULL;
+        ev->cmd.param.meta.year = NULL;
+        ev->cmd.param.meta.genre = NULL;
+        ev->cmd.param.meta.comment = NULL;
+        ev->cmd.param.meta.disc_id = NULL;
+        ev->cmd.param.meta.count = NULL;
+        ev->cmd.s_len = -1;
+     }
+
+   if (ev->cmd.s_len == -1)
+     {
+        if (!_player_cmd_param_read(ev, &param, sizeof(param)))
+          return;
+        ev->cmd.s_len = param;
+     }
+
+   /* never read more than the stack buffer can hold */
+   if ((ev->cmd.s_len < 0) || (ev->cmd.s_len > (int)sizeof(buf)))
+     {
+        ERR("invalid meta string length: %d", ev->cmd.s_len);
+        ev->cmd.s_len = -1;
+        ev->cmd.type = -1;
+        return;
+     }
+
+   if (!_player_cmd_param_read(ev, buf, ev->cmd.s_len))
+     return;
+
+   info = eina_stringshare_add_length(buf, ev->cmd.s_len);
+   ev->cmd.s_len = -1;
+
+   if (ev->cmd.cur_param == 0)
+     ev->cmd.param.meta.title = info;
+   else if (ev->cmd.cur_param == 1)
+     ev->cmd.param.meta.artist = info;
+   else if (ev->cmd.cur_param == 2)
+     ev->cmd.param.meta.album = info;
+   else if (ev->cmd.cur_param == 3)
+     ev->cmd.param.meta.year = info;
+   else if (ev->cmd.cur_param == 4)
+     ev->cmd.param.meta.genre = info;
+   else if (ev->cmd.cur_param == 5)
+     ev->cmd.param.meta.comment = info;
+   else if (ev->cmd.cur_param == 6)
+     ev->cmd.param.meta.disc_id = info;
+   else if (ev->cmd.cur_param == 7)
+     ev->cmd.param.meta.count = info;
+
+   ev->cmd.cur_param++;
+
+   if (ev->cmd.cur_param == 8)
+     _player_cmd_process(ev);
+}
+
+/* Entry point of the incremental protocol parser: read the message type
+ * if we don't have one yet (cmd.type == -1), then delegate to the
+ * parameter reader matching that message's payload shape. Each helper
+ * returns early when the pipe runs dry and resumes on the next fd event. */
+static void
+_player_cmd_read(Emotion_Generic_Video *ev)
+{
+   if (ev->cmd.type < 0)
+     {
+        if (!_player_cmd_param_read(ev, &ev->cmd.type, sizeof(ev->cmd.type)))
+          return;
+        ev->cmd.num_params = 0;
+     }
+
+   switch (ev->cmd.type) {
+      case EM_RESULT_INIT:
+      case EM_RESULT_FILE_SET:
+      case EM_RESULT_PLAYBACK_STARTED:
+      case EM_RESULT_PLAYBACK_STOPPED:
+      case EM_RESULT_FILE_CLOSE:
+      case EM_RESULT_FRAME_NEW:
+         /* no payload: dispatch immediately */
+         _player_cmd_process(ev);
+         break;
+      case EM_RESULT_FILE_SET_DONE:
+      case EM_RESULT_SEEKABLE_CHANGED:
+         _player_cmd_single_int_process(ev);
+         break;
+      case EM_RESULT_LENGTH_CHANGED:
+      case EM_RESULT_POSITION_CHANGED:
+         _player_cmd_single_float_process(ev);
+         break;
+      case EM_RESULT_FRAME_SIZE:
+         _player_cmd_double_int_process(ev);
+         break;
+      case EM_RESULT_AUDIO_TRACK_INFO:
+      case EM_RESULT_VIDEO_TRACK_INFO:
+      case EM_RESULT_SPU_TRACK_INFO:
+         _player_cmd_track_info(ev);
+         break;
+      case EM_RESULT_META_INFO:
+         _player_cmd_meta_info(ev);
+         break;
+
+      default:
+         WRN("received wrong command: %d", ev->cmd.type);
+         ev->cmd.type = -1;
+   }
+}
+
+/* Ecore fd handler for the player -> emotion pipe: parse incoming results
+ * and stop watching the fd on error. */
+static Eina_Bool
+_player_cmd_handler_cb(void *data, Ecore_Fd_Handler *fd_handler)
+{
+   Emotion_Generic_Video *video = data;
+
+   if (ecore_main_fd_handler_active_get(fd_handler, ECORE_FD_ERROR))
+     {
+        ERR("an error occurred on fd_read %d.", video->fd_read);
+        return ECORE_CALLBACK_CANCEL;
+     }
+
+   _player_cmd_read(video);
+   return ECORE_CALLBACK_RENEW;
+}
+
+/* ECORE_EXE_EVENT_DATA: line-buffered stdout from the player. Only used
+ * for logging — the real protocol flows through the dedicated pipes. */
+static Eina_Bool
+_player_data_cb(void *data, int type EINA_UNUSED, void *event)
+{
+   Ecore_Exe_Event_Data *edata = event;
+   Emotion_Generic_Video *video = data;
+   int idx;
+
+   if (edata->exe != video->player.exe)
+     {
+        INF("slave != ev->exe");
+        return ECORE_CALLBACK_PASS_ON;
+     }
+
+   for (idx = 0; edata->lines[idx].line; idx++)
+     INF("received input from player: \"%s\"", edata->lines[idx].line);
+
+   return ECORE_CALLBACK_DONE;
+}
+
+/* ECORE_EXE_EVENT_ADD: our player process came up — send EM_CMD_INIT with
+ * the shared memory name it should attach to. */
+static Eina_Bool
+_player_add_cb(void *data, int type EINA_UNUSED, void *event)
+{
+   Ecore_Exe_Event_Add *event_add = event;
+   Emotion_Generic_Video *video = data;
+
+   if (video->player.exe != event_add->exe)
+     {
+        INF("ev->player != player.");
+        return ECORE_CALLBACK_PASS_ON;
+     }
+
+   _player_send_cmd(video, EM_CMD_INIT);
+   _player_send_str(video, video->shmname, EINA_TRUE);
+
+   return ECORE_CALLBACK_DONE;
+}
+
+/* ECORE_EXE_EVENT_DEL: the player process died. Tear down the pipe ends
+ * and notify emotion that decoding stopped.
+ * Fixes: 'event' was marked EINA_UNUSED although it is dereferenced; the
+ * fd handler pointer is cleared after deletion (em_partial_shutdown would
+ * otherwise delete a stale handler); close() is no longer called on -1. */
+static Eina_Bool
+_player_del_cb(void *data, int type EINA_UNUSED, void *event)
+{
+   Ecore_Exe_Event_Del *event_del = event;
+   Ecore_Exe *player = event_del->exe;
+   Emotion_Generic_Video *ev = data;
+
+   if (ev->player.exe != player)
+     {
+        INF("ev->player != player.");
+        return ECORE_CALLBACK_PASS_ON;
+     }
+
+   ERR("player died.");
+
+   ev->player.exe = NULL;
+   ev->ready = EINA_FALSE;
+   ev->file_ready = EINA_FALSE;
+   if (ev->fd_handler)
+     ecore_main_fd_handler_del(ev->fd_handler);
+   ev->fd_handler = NULL;
+   if (ev->fd_read >= 0) close(ev->fd_read);
+   if (ev->fd_write >= 0) close(ev->fd_write);
+   ev->fd_read = -1;
+   ev->fd_write = -1;
+   _emotion_decode_stop(ev->obj);
+
+   return ECORE_CALLBACK_DONE;
+}
+
+/* Start the player binary with two anonymous pipes: pipe_out carries
+ * emotion -> player commands, pipe_in carries player -> emotion results.
+ * The child's fd numbers are passed on its command line; the parent keeps
+ * the opposite ends (fd_write / fd_read) and watches fd_read through an
+ * Ecore fd handler. Returns EINA_TRUE when the process was spawned. */
+static Eina_Bool
+_player_exec(Emotion_Generic_Video *ev)
+{
+   int pipe_out[2];
+   int pipe_in[2];
+   char buf[PATH_MAX];
+
+   if (pipe(pipe_out) == -1)
+     {
+        ERR("could not create pipe for communication emotion -> player: %s", strerror(errno));
+        return EINA_FALSE;
+     }
+
+   if (pipe(pipe_in) == -1)
+     {
+        ERR("could not create pipe for communication player -> emotion: %s", strerror(errno));
+        close(pipe_out[0]);
+        close(pipe_out[1]);
+        return EINA_FALSE;
+     }
+
+   /* the child inherits its read/write fd numbers via argv */
+   snprintf(buf, sizeof(buf), "%s %d %d\n", ev->cmdline, pipe_out[0], pipe_in[1]);
+
+   ev->player.exe = ecore_exe_pipe_run(
+      buf,
+      ECORE_EXE_PIPE_READ | ECORE_EXE_PIPE_WRITE |
+      ECORE_EXE_PIPE_READ_LINE_BUFFERED | ECORE_EXE_NOT_LEADER,
+      ev);
+
+   INF("created pipe emotion -> player: %d -> %d", pipe_out[1], pipe_out[0]);
+   INF("created pipe player -> emotion: %d -> %d", pipe_in[1], pipe_in[0]);
+
+   /* parent does not use the child's pipe ends */
+   close(pipe_in[1]);
+   close(pipe_out[0]);
+
+   if (!ev->player.exe)
+     {
+        close(pipe_in[0]);
+        close(pipe_out[1]);
+        return EINA_FALSE;
+     }
+
+   ev->fd_read = pipe_in[0];
+   ev->fd_write = pipe_out[1];
+
+   ev->fd_handler = ecore_main_fd_handler_add(
+      ev->fd_read, ECORE_FD_READ | ECORE_FD_ERROR, _player_cmd_handler_cb, ev,
+      NULL, NULL);
+
+   return EINA_TRUE;
+}
+
+/* Spawn the player: generate a unique shm name (timestamp-based), hook
+ * the exe add/del/data event handlers and start the process.
+ * Fix: use eina_stringshare_replace() so respawning after a player death
+ * does not leak the previous shm name stringshare. */
+static Eina_Bool
+_fork_and_exec(Emotion_Generic_Video *ev)
+{
+   char shmname[256];
+   struct timeval tv;
+
+   gettimeofday(&tv, NULL);
+   snprintf(shmname, sizeof(shmname), "/em-generic-shm_%d_%d",
+            (int)tv.tv_sec, (int)tv.tv_usec);
+
+   eina_stringshare_replace(&ev->shmname, shmname);
+
+   ev->player_add = ecore_event_handler_add(
+      ECORE_EXE_EVENT_ADD, _player_add_cb, ev);
+   ev->player_del = ecore_event_handler_add(
+      ECORE_EXE_EVENT_DEL, _player_del_cb, ev);
+   ev->player_data = ecore_event_handler_add(
+      ECORE_EXE_EVENT_DATA, _player_data_cb, ev);
+
+   if (!_player_exec(ev))
+     {
+        ERR("could not start player.");
+        return EINA_FALSE;
+     }
+
+   ev->initializing = EINA_TRUE;
+
+   return EINA_TRUE;
+}
+
+/* Module init for one emotion object: resolve the player binary, set up
+ * default playback state and spawn the player process.
+ * Returns 1 on success and stores the new handle in *emotion_video.
+ * Fix: on spawn failure the half-initialized handle is torn down and
+ * freed instead of being leaked into *emotion_video. */
+static unsigned char
+em_init(Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt)
+{
+   Emotion_Generic_Video *ev;
+   const char *player;
+
+   if (!emotion_video) return 0;
+   player = _get_player(opt ? opt->player : NULL);
+   if (!player) return 0;
+
+   ev = (Emotion_Generic_Video *)calloc(1, sizeof(*ev));
+   if (!ev) return 0;
+
+   ev->fd_read = -1;
+   ev->fd_write = -1;
+   ev->speed = 1.0;
+   ev->volume = 0.5;
+   ev->audio_mute = EINA_FALSE;
+   ev->cmd.type = -1; /* protocol parser idle */
+
+   ev->obj = obj;
+   ev->cmdline = eina_stringshare_add(player);
+
+   if (!_fork_and_exec(ev))
+     {
+        em_partial_shutdown(ev); /* drops any registered handlers */
+        eina_stringshare_del(ev->cmdline);
+        eina_stringshare_del(ev->shmname);
+        free(ev);
+        return 0;
+     }
+
+   *emotion_video = ev;
+   return 1;
+}
+
/* Tear down everything tied to the running player child: the process,
 * the shared frame memory, both pipes, and all ecore handlers.  Leaves
 * the Emotion_Generic_Video itself (and its string fields) intact so
 * the player can be restarted. */
static void
em_partial_shutdown(Emotion_Generic_Video *ev)
{
   _emotion_image_reset(ev->obj);

   if (ev->player.exe)
     {
        ecore_exe_terminate(ev->player.exe);
        ecore_exe_free(ev->player.exe);
        ev->player.exe = NULL;
     }

   ev->file_ready = EINA_FALSE;

   /* Unmap using the size stored inside the mapping itself, then drop
    * the pointer so nothing touches the stale mapping afterwards. */
   if (ev->shared)
     munmap(ev->shared, ev->shared->size);
   ev->shared = NULL;

   if (ev->fd_read >= 0)
     close(ev->fd_read);
   ev->fd_read = -1;
   if (ev->fd_write >= 0)
     close(ev->fd_write);
   ev->fd_write = -1;
   if (ev->fd_handler)
     ecore_main_fd_handler_del(ev->fd_handler);
   ev->fd_handler = NULL;

   if (ev->player_add) ecore_event_handler_del(ev->player_add);
   ev->player_add = NULL;
   if (ev->player_data) ecore_event_handler_del(ev->player_data);
   ev->player_data = NULL;
   if (ev->player_del) ecore_event_handler_del(ev->player_del);
   ev->player_del = NULL;
   if (ev->player_restart) ecore_idler_del(ev->player_restart);
   ev->player_restart = NULL;
}
+
+static int
+em_shutdown(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (!ev) return 0;
+
+ eina_stringshare_del(ev->cmdline);
+ eina_stringshare_del(ev->shmname);
+
+ em_partial_shutdown(ev);
+
+ return 1;
+}
+
/* Set a new media file.  Resets the cached geometry/position and either
 * opens the file immediately or, if an open/close is already in flight,
 * flags it so the pending operation picks the new name up. */
static unsigned char
em_file_open(const char *file, Evas_Object *obj EINA_UNUSED, void *data)
{
   Emotion_Generic_Video *ev = data;
   INF("file set: %s", file);
   if (!ev) return 0;

   eina_stringshare_replace(&ev->filename, file);

   ev->pos = 0;
   ev->w = 0;
   ev->h = 0;
   ev->ratio = 1;
   ev->len = 0;

   /* The player is up but still opening a previous file: remember the
    * change instead of issuing a second open. */
   if (ev->ready && ev->opening)
     {
        INF("file changed while opening.");
        ev->file_changed = EINA_TRUE;
        return 1;
     }

   ev->opening = EINA_TRUE;

   /* If a close is pending, _file_open() will be triggered once the
    * player acknowledges the close. */
   if (!ev->closing)
     _file_open(ev);

   return 1;
}
+
/* Close the current file: drop the cached name/subtitle, free all
 * per-file track and metadata state, and tell the player to close
 * (unless an open is still in flight, in which case the close is
 * deferred to the open-done path). */
static void
em_file_close(void *data)
{
   Emotion_Generic_Video *ev = data;

   if (!ev || !ev->filename) return;

   INF("file close: %s", ev->filename);

   eina_stringshare_replace(&ev->filename, NULL);
   eina_stringshare_replace(&ev->subtitle_path, NULL);

   ev->file_ready = EINA_FALSE;
   _audio_channels_free(ev);
   _video_channels_free(ev);
   _spu_channels_free(ev);
   _player_meta_info_free(ev);

   if (ev->opening)
     return;

   _player_send_cmd(ev, EM_CMD_FILE_CLOSE);
   ev->closing = EINA_TRUE;
}
+
+static Emotion_Format
+em_format_get(void *ef EINA_UNUSED)
+{
+ return EMOTION_FORMAT_BGRA;
+}
+
+static void
+em_video_data_size_get(void *data, int *w, int *h)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (!ev) return;
+ if (w) *w = ev->w;
+ if (h) *h = ev->h;
+}
+
/* Start (or resume) playback at the current position.  The play flag is
 * always recorded so a player that is still initializing/opening starts
 * automatically once ready; a dead player is re-spawned. */
static void
em_play(void *data, double pos)
{
   Emotion_Generic_Video *ev = data;

   if (!ev)
     return;

   ev->play = EINA_TRUE;
   /* NOTE(review): 'pos' is only logged — playback resumes from ev->pos.
    * Confirm that matches the emotion front-end contract. */
   INF("play: %0.3f", pos);

   if (ev->initializing || ev->opening)
     return;

   if (ev->ready)
     {
        /* Re-apply the subtitle file before playing so a restart of the
         * player does not lose it. */
        if (ev->subtitle_path)
          {
             _player_send_cmd(ev, EM_CMD_SUBTITLE_SET);
             _player_send_str(ev, ev->subtitle_path, EINA_TRUE);
          }

        _player_send_cmd(ev, EM_CMD_PLAY);
        _player_send_float(ev, ev->pos);

        return;
     }

   /* Player gone (crashed or never started): try to respawn it. */
   if (!_player_exec(ev))
     ERR("could not start player.");
}
+
+static void
+em_stop(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (!ev)
+ return;
+
+ ev->play = EINA_FALSE;
+
+ if (!ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_STOP);
+ _emotion_decode_stop(ev->obj);
+}
+
+static void
+em_size_get(void *data, int *w, int *h)
+{
+ Emotion_Generic_Video *ev = data;
+ if (w) *w = ev->w;
+ if (h) *h = ev->h;
+}
+
/* Seek to an absolute position (seconds).  The command only goes out
 * once the file is ready; the protocol carries positions as float. */
static void
em_pos_set(void *data, double pos)
{
   Emotion_Generic_Video *ev = data;
   float position = pos;

   if (!ev->file_ready)
     return;

   _player_send_cmd(ev, EM_CMD_POSITION_SET);
   _player_send_float(ev, position);
   /* NOTE(review): seek-done is signalled immediately, before the player
    * confirms the seek — verify this is intentional. */
   _emotion_seek_done(ev->obj);
}
+
+static double
+em_len_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->len;
+}
+
+static double
+em_buffer_size_get(void *data EINA_UNUSED)
+{
+ return 1.0;
+}
+
+static int
+em_fps_num_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return (int)(ev->fps * 1000.0);
+}
+
+static int
+em_fps_den_get(void *ef EINA_UNUSED)
+{
+ return 1000;
+}
+
+static double
+em_fps_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->fps;
+}
+
+static double
+em_pos_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->pos;
+}
+
+static void
+em_vis_set(void *ef EINA_UNUSED, Emotion_Vis vis EINA_UNUSED)
+{
+}
+
+static Emotion_Vis
+em_vis_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->vis;
+}
+
+static Eina_Bool
+em_vis_supported(void *ef EINA_UNUSED, Emotion_Vis vis EINA_UNUSED)
+{
+ return EINA_FALSE;
+}
+
+static double
+em_ratio_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->ratio;
+}
+
+static int em_video_handled(void *ef EINA_UNUSED)
+{
+ DBG("video handled!");
+ return 1;
+}
+
+static int em_audio_handled(void *ef EINA_UNUSED)
+{
+ DBG("audio handled!");
+ return 1;
+}
+
+static int em_seekable(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->seekable;
+}
+
+static void em_frame_done(void *ef EINA_UNUSED)
+{
+}
+
+static int
+em_yuv_rows_get(void *data EINA_UNUSED, int w EINA_UNUSED, int h EINA_UNUSED, unsigned char **yrows EINA_UNUSED, unsigned char **urows EINA_UNUSED, unsigned char **vrows EINA_UNUSED)
+{
+ return 0;
+}
+
/* Hand the most recent decoded BGRA frame to emotion.  The triple-buffer
 * indices live in shared memory and are swapped under the shared
 * semaphore so the player never writes into the frame emotion reads. */
static int
em_bgra_data_get(void *data, unsigned char **bgra_data)
{
   Emotion_Generic_Video *ev = data;

   if (!ev || !ev->file_ready)
     return 0;

   // lock frame here
   if (!eina_semaphore_lock(&ev->shared->lock))
     return 0;

   // send current frame to emotion
   /* Swap only when the player produced a newer frame: the one emotion
    * held becomes the player's next target, the latest becomes ours. */
   if (ev->shared->frame.emotion != ev->shared->frame.last)
     {
        ev->shared->frame.next = ev->shared->frame.emotion;
        ev->shared->frame.emotion = ev->shared->frame.last;
     }
   *bgra_data = ev->frame.frames[ev->shared->frame.emotion];

   /* frame_drop counts frames produced since the last fetch; >1 means
    * emotion skipped some. */
   if (ev->shared->frame_drop > 1)
     WRN("dropped frames: %d", ev->shared->frame_drop - 1);
   ev->shared->frame_drop = 0;

   // unlock frame here
   eina_semaphore_release(&ev->shared->lock, 1);
   ev->drop = 0;

   return 1;
}
+
+static void
+em_event_feed(void *ef EINA_UNUSED, int event EINA_UNUSED)
+{
+}
+
+static void
+em_event_mouse_button_feed(void *ef EINA_UNUSED, int button EINA_UNUSED, int x EINA_UNUSED, int y EINA_UNUSED)
+{
+}
+
+static void
+em_event_mouse_move_feed(void *ef EINA_UNUSED, int x EINA_UNUSED, int y EINA_UNUSED)
+{
+}
+
+static int
+em_video_channel_count(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->video_channels_count;
+}
+
/* Select a video track by index into the announced channel table. */
static void
em_video_channel_set(void *data, int channel)
{
   Emotion_Generic_Video *ev = data;

   if (channel < 0 || channel >= ev->video_channels_count)
     {
        WRN("video channel out of range.");
        return;
     }

   /* The protocol wants the player-side track id, not our index. */
   _player_send_cmd(ev, EM_CMD_VIDEO_TRACK_SET);
   _player_send_int(ev, ev->video_channels[channel].id);
   ev->video_channel_current = channel;
}
+
+static int
+em_video_channel_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->video_channel_current;
+}
+
+static void
+em_video_subtitle_file_set(void *data, const char *filepath)
+{
+ Emotion_Generic_Video *ev = data;
+ eina_stringshare_replace(&ev->subtitle_path, filepath);
+}
+
+static const char *
+em_video_subtitle_file_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->subtitle_path;
+}
+
+static const char *
+em_video_channel_name_get(void *data, int channel)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (channel < 0 || channel >= ev->video_channels_count)
+ {
+ WRN("video channel out of range.");
+ return NULL;
+ }
+
+ return ev->video_channels[channel].name;
+}
+
+static void
+em_video_channel_mute_set(void *data, int mute)
+{
+ Emotion_Generic_Video *ev = data;
+
+ ev->video_mute = !!mute;
+
+ if (!ev || !ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_VIDEO_MUTE_SET);
+ _player_send_int(ev, mute);
+}
+
+static int
+em_video_channel_mute_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->video_mute;
+}
+
+static int
+em_audio_channel_count(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->audio_channels_count;
+}
+
+static void
+em_audio_channel_set(void *data, int channel)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (channel < 0 || channel >= ev->audio_channels_count)
+ {
+ WRN("audio channel out of range.");
+ return;
+ }
+
+ _player_send_cmd(ev, EM_CMD_AUDIO_TRACK_SET);
+ _player_send_int(ev, ev->audio_channels[channel].id);
+ ev->audio_channel_current = channel;
+}
+
+static int
+em_audio_channel_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->audio_channel_current;
+}
+
+static const char *
+em_audio_channel_name_get(void *data, int channel)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (channel < 0 || channel >= ev->audio_channels_count)
+ {
+ WRN("audio channel out of range.");
+ return NULL;
+ }
+
+ return ev->audio_channels[channel].name;
+}
+
+static void
+em_audio_channel_mute_set(void *data, int mute)
+{
+ Emotion_Generic_Video *ev = data;
+
+ ev->audio_mute = !!mute;
+
+ if (!ev || !ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_AUDIO_MUTE_SET);
+ _player_send_int(ev, mute);
+}
+
+static int
+em_audio_channel_mute_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->audio_mute;
+}
+
+static void
+em_audio_channel_volume_set(void *data, double vol)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (vol > 1.0) vol = 1.0;
+ if (vol < 0.0) vol = 0.0;
+
+ ev->volume = vol;
+
+ if (!ev || !ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_VOLUME_SET);
+ _player_send_float(ev, ev->volume);
+}
+
+static double
+em_audio_channel_volume_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->volume;
+}
+
+static int
+em_spu_channel_count(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->spu_channels_count;
+}
+
+static void
+em_spu_channel_set(void *data, int channel)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (channel < 0 || channel >= ev->spu_channels_count)
+ {
+ WRN("spu channel out of range.");
+ return;
+ }
+
+ _player_send_cmd(ev, EM_CMD_SPU_TRACK_SET);
+ _player_send_int(ev, ev->spu_channels[channel].id);
+ ev->spu_channel_current = channel;
+}
+
+static int
+em_spu_channel_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->spu_channel_current;
+}
+
+static const char *
+em_spu_channel_name_get(void *data, int channel)
+{
+ Emotion_Generic_Video *ev = data;
+
+ if (channel < 0 || channel >= ev->spu_channels_count)
+ {
+ WRN("spu channel out of range.");
+ return NULL;
+ }
+
+ return ev->spu_channels[channel].name;
+}
+
+static void
+em_spu_channel_mute_set(void *data, int mute)
+{
+ Emotion_Generic_Video *ev = data;
+
+ ev->spu_mute = !!mute;
+
+ if (!ev || !ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_SPU_MUTE_SET);
+ _player_send_int(ev, mute);
+}
+
+static int
+em_spu_channel_mute_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return ev->spu_mute;
+}
+
+static int
+em_chapter_count(void *ef EINA_UNUSED)
+{
+ int num = 0;
+ return num;
+}
+
+static void
+em_chapter_set(void *ef EINA_UNUSED, int chapter EINA_UNUSED)
+{
+}
+
+static int
+em_chapter_get(void *ef EINA_UNUSED)
+{
+ int num = 0;
+ return num;
+}
+
+static const char *
+em_chapter_name_get(void *ef EINA_UNUSED, int chapter EINA_UNUSED)
+{
+ return NULL;
+}
+
+static void
+em_speed_set(void *data, double speed)
+{
+ Emotion_Generic_Video *ev = data;
+ float rate = speed;
+ ev->speed = rate;
+
+ if (!ev || !ev->file_ready)
+ return;
+
+ _player_send_cmd(ev, EM_CMD_SPEED_SET);
+ _player_send_float(ev, rate);
+}
+
+static double
+em_speed_get(void *data)
+{
+ Emotion_Generic_Video *ev = data;
+ return (double)ev->speed;
+}
+
+static int
+em_eject(void *ef EINA_UNUSED)
+{
+ return 1;
+}
+
+static const char *
+em_meta_get(void *data, int meta)
+{
+ Emotion_Generic_Video *ev = data;
+
+ switch (meta) {
+ case EMOTION_META_INFO_TRACK_TITLE:
+ return ev->meta.title;
+ case EMOTION_META_INFO_TRACK_ARTIST:
+ return ev->meta.artist;
+ case EMOTION_META_INFO_TRACK_ALBUM:
+ return ev->meta.album;
+ case EMOTION_META_INFO_TRACK_YEAR:
+ return ev->meta.year;
+ case EMOTION_META_INFO_TRACK_GENRE:
+ return ev->meta.genre;
+ case EMOTION_META_INFO_TRACK_COMMENT:
+ return ev->meta.comment;
+ case EMOTION_META_INFO_TRACK_DISC_ID:
+ return ev->meta.disc_id;
+ case EMOTION_META_INFO_TRACK_COUNT:
+ return ev->meta.count;
+ }
+
+ return NULL;
+}
+
/* Vtable wiring this backend into emotion's module interface.  Entry
 * order must match Emotion_Video_Module exactly; unimplemented slots
 * (priority, handle) are NULL. */
static Emotion_Video_Module em_module =
{
   em_init, /* init */
   em_shutdown, /* shutdown */
   em_file_open, /* file_open */
   em_file_close, /* file_close */
   em_play, /* play */
   em_stop, /* stop */
   em_size_get, /* size_get */
   em_pos_set, /* pos_set */
   em_len_get, /* len_get */
   em_buffer_size_get, /* buffer_size_get */
   em_fps_num_get, /* fps_num_get */
   em_fps_den_get, /* fps_den_get */
   em_fps_get, /* fps_get */
   em_pos_get, /* pos_get */
   em_vis_set, /* vis_set */
   em_vis_get, /* vis_get */
   em_vis_supported, /* vis_supported */
   em_ratio_get, /* ratio_get */
   em_video_handled, /* video_handled */
   em_audio_handled, /* audio_handled */
   em_seekable, /* seekable */
   em_frame_done, /* frame_done */
   em_format_get, /* format_get */
   em_video_data_size_get, /* video_data_size_get */
   em_yuv_rows_get, /* yuv_rows_get */
   em_bgra_data_get, /* bgra_data_get */
   em_event_feed, /* event_feed */
   em_event_mouse_button_feed, /* event_mouse_button_feed */
   em_event_mouse_move_feed, /* event_mouse_move_feed */
   em_video_channel_count, /* video_channel_count */
   em_video_channel_set, /* video_channel_set */
   em_video_channel_get, /* video_channel_get */
   em_video_subtitle_file_set, /* video_subtitle_file_set */
   em_video_subtitle_file_get, /* video_subtitle_file_get */
   em_video_channel_name_get, /* video_channel_name_get */
   em_video_channel_mute_set, /* video_channel_mute_set */
   em_video_channel_mute_get, /* video_channel_mute_get */
   em_audio_channel_count, /* audio_channel_count */
   em_audio_channel_set, /* audio_channel_set */
   em_audio_channel_get, /* audio_channel_get */
   em_audio_channel_name_get, /* audio_channel_name_get */
   em_audio_channel_mute_set, /* audio_channel_mute_set */
   em_audio_channel_mute_get, /* audio_channel_mute_get */
   em_audio_channel_volume_set, /* audio_channel_volume_set */
   em_audio_channel_volume_get, /* audio_channel_volume_get */
   em_spu_channel_count, /* spu_channel_count */
   em_spu_channel_set, /* spu_channel_set */
   em_spu_channel_get, /* spu_channel_get */
   em_spu_channel_name_get, /* spu_channel_name_get */
   em_spu_channel_mute_set, /* spu_channel_mute_set */
   em_spu_channel_mute_get, /* spu_channel_mute_get */
   em_chapter_count, /* chapter_count */
   em_chapter_set, /* chapter_set */
   em_chapter_get, /* chapter_get */
   em_chapter_name_get, /* chapter_name_get */
   em_speed_set, /* speed_set */
   em_speed_get, /* speed_get */
   em_eject, /* eject */
   em_meta_get, /* meta_get */
   NULL, /* priority_set */
   NULL, /* priority_get */
   NULL /* handle */
};
+
/* emotion module-open hook: lazily register the log domain, initialize
 * the backend via em_init(), and expose the vtable to the caller. */
static Eina_Bool
module_open(Evas_Object *obj, const Emotion_Video_Module **module, void **video, Emotion_Module_Options *opt)
{
   if (!module) {
      return EINA_FALSE;
   }

   /* One-time log-domain registration (domain id < 0 means not yet
    * registered). */
   if (_emotion_generic_log_domain < 0)
     {
        eina_threads_init();
        eina_log_threads_enable();
        _emotion_generic_log_domain = eina_log_domain_register
           ("emotion-generic", EINA_COLOR_LIGHTCYAN);
        if (_emotion_generic_log_domain < 0)
          {
             EINA_LOG_CRIT("Could not register log domain 'emotion-generic'");
             return EINA_FALSE;
          }
     }


   if (!em_module.init(obj, video, opt)) {
      return EINA_FALSE;
   }

   *module = &em_module;

   return EINA_TRUE;
}
+
+static void module_close(Emotion_Video_Module *module EINA_UNUSED, void *video)
+{
+ em_module.shutdown(video);
+}
+
+
/* Module entry: set up the eina prefix (used to locate player binaries
 * relative to the install prefix) and register this backend with
 * emotion under the name "generic". */
Eina_Bool
generic_module_init(void)
{
   if (!pfx)
     {
        pfx = eina_prefix_new(NULL, emotion_object_add,
                              "EMOTION", "emotion", NULL,
                              PACKAGE_BIN_DIR,
                              PACKAGE_LIB_DIR,
                              PACKAGE_DATA_DIR,
                              "");
        if (!pfx) return EINA_FALSE;
     }
   return _emotion_module_register("generic", module_open, module_close);
}
+
+static void
+generic_module_shutdown(void)
+{
+ if (pfx)
+ {
+ eina_prefix_free(pfx);
+ pfx = NULL;
+ }
+ _emotion_module_unregister("generic");
+}
+
+#ifndef EMOTION_STATIC_BUILD_GENERIC
+
+EINA_MODULE_INIT(generic_module_init);
+EINA_MODULE_SHUTDOWN(generic_module_shutdown);
+
+#endif
+
diff --git a/src/modules/emotion/generic/emotion_generic.h b/src/modules/emotion/generic/emotion_generic.h
new file mode 100644
index 0000000000..864abb3f41
--- /dev/null
+++ b/src/modules/emotion/generic/emotion_generic.h
@@ -0,0 +1,113 @@
+#ifndef EMOTION_GENERIC_H
+#define EMOTION_GENERIC_H
+
+#include "Emotion_Generic_Plugin.h"
+
+/* default values */
+
+typedef struct _Emotion_Generic_Video Emotion_Generic_Video;
+typedef struct _Emotion_Generic_Player Emotion_Generic_Player;
+typedef struct _Emotion_Generic_Cmd_Buffer Emotion_Generic_Cmd_Buffer;
+typedef struct _Emotion_Generic_Channel Emotion_Generic_Channel;
+typedef struct _Emotion_Generic_Meta Emotion_Generic_Meta;
+
/* Handle to the spawned external player process. */
struct _Emotion_Generic_Player
{
   Ecore_Exe *exe;
};
+
/* One audio/video/subtitle track as announced by the player: its
 * player-side id and a display name (stringshared). */
struct _Emotion_Generic_Channel
{
   int id;
   const char *name;
};
+
/* Track metadata strings parsed from the player (all stringshared,
 * any of them may be NULL). */
struct _Emotion_Generic_Meta
{
   const char *title;
   const char *artist;
   const char *album;
   const char *year;
   const char *genre;
   const char *comment;
   const char *disc_id;
   const char *count;
};
+
/* Incremental decode state for one command arriving from the player
 * over the pipe: which command, how far into its payload we are, and a
 * union holding the partially assembled parameter. */
struct _Emotion_Generic_Cmd_Buffer
{
   char *tmp;              /* scratch buffer for string payloads */
   int type;               /* command being parsed (-1 = none pending) */
   ssize_t i, total;       /* bytes read / bytes expected */
   int s_len;              /* expected length of the current string */
   int num_params, cur_param;
   int padding;
   union {
      struct {
         int width;
         int height;
      } size;
      int i_num;
      float f_num;
      struct {
         int total;
         int current;
         Emotion_Generic_Channel *channels;
      } track;
      Emotion_Generic_Meta meta;
   } param;
};
+
+/* emotion/generic main structure */
+struct _Emotion_Generic_Video
+{
+ const char *cmdline;
+ const char *shmname;
+
+ Emotion_Generic_Player player;
+ Emotion_Generic_Cmd_Buffer cmd;
+ Ecore_Event_Handler *player_add, *player_del, *player_data;
+ Ecore_Idler *player_restart;
+ int drop;
+ int fd_read, fd_write;
+ Ecore_Fd_Handler *fd_handler;
+
+ const char *filename;
+ volatile double len;
+ volatile double pos;
+ double fps;
+ double ratio;
+ int w, h;
+ Evas_Object *obj;
+ Emotion_Generic_Video_Shared *shared;
+ Emotion_Generic_Video_Frame frame;
+ volatile int fq;
+ float volume;
+ float speed;
+ Emotion_Vis vis;
+ Eina_Bool initializing : 1;
+ Eina_Bool ready : 1;
+ Eina_Bool play : 1;
+ Eina_Bool video_mute : 1;
+ Eina_Bool audio_mute : 1;
+ Eina_Bool spu_mute : 1;
+ Eina_Bool seekable : 1;
+ volatile Eina_Bool opening : 1;
+ volatile Eina_Bool closing : 1;
+ Eina_Bool file_changed : 1;
+ Eina_Bool file_ready : 1;
+ int audio_channels_count;
+ int audio_channel_current;
+ Emotion_Generic_Channel *audio_channels;
+ int video_channels_count;
+ int video_channel_current;
+ Emotion_Generic_Channel *video_channels;
+ int spu_channels_count;
+ int spu_channel_current;
+ Emotion_Generic_Channel *spu_channels;
+ Emotion_Generic_Meta meta;
+ const char *subtitle_path;
+};
+
+#endif
+
diff --git a/src/modules/emotion/gstreamer/emotion_alloc.c b/src/modules/emotion/gstreamer/emotion_alloc.c
new file mode 100644
index 0000000000..b7eecd43ab
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_alloc.c
@@ -0,0 +1,90 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#include <glib.h>
+#include <gst/gst.h>
+#include <gst/video/video.h>
+#include <gst/video/gstvideosink.h>
+
+#ifdef HAVE_ECORE_X
+# include <Ecore_X.h>
+# ifdef HAVE_XOVERLAY_H
+# include <gst/interfaces/xoverlay.h>
+# endif
+#endif
+
+#include "Emotion.h"
+#include "emotion_gstreamer.h"
+
/* Wrap a GstBuffer for hand-off between the streaming thread and the
 * main loop.  Takes its own ref on the buffer and bumps ev->out so the
 * in/out counters can detect when all in-flight buffers are back. */
Emotion_Gstreamer_Buffer *
emotion_gstreamer_buffer_alloc(EvasVideoSinkPrivate *sink,
                               GstBuffer *buffer,
                               Eina_Bool preroll)
{
   Emotion_Gstreamer_Buffer *send;

   if (!sink->ev) return NULL;

   send = malloc(sizeof (Emotion_Gstreamer_Buffer));
   if (!send) return NULL;

   send->sink = sink;
   send->frame = gst_buffer_ref(buffer);
   send->preroll = preroll;
   send->force = EINA_FALSE;
   sink->ev->out++;
   send->ev = sink->ev;

   return send;
}
+
/* Release a wrapped buffer.  When this was the last in-flight buffer
 * (in == out), no threads remain and a shutdown was requested, perform
 * the deferred em_shutdown() now. */
void
emotion_gstreamer_buffer_free(Emotion_Gstreamer_Buffer *send)
{
   send->ev->in++;

   if (send->ev->in == send->ev->out
       && send->ev->threads == NULL
       && send->ev->delete_me)
     em_shutdown(send->ev);

   gst_buffer_unref(send->frame);
   free(send);
}
+
/* Wrap a GstMessage for hand-off to the main loop.  Takes its own ref
 * on the message and bumps ev->out (same in/out accounting as the
 * buffer wrapper). */
Emotion_Gstreamer_Message *
emotion_gstreamer_message_alloc(Emotion_Gstreamer_Video *ev,
                                GstMessage *msg)
{
   Emotion_Gstreamer_Message *send;

   if (!ev) return NULL;

   send = malloc(sizeof (Emotion_Gstreamer_Message));
   if (!send) return NULL;

   ev->out++;
   send->ev = ev;
   send->msg = gst_message_ref(msg);

   return send;
}
+
/* Release a wrapped message; runs the same deferred-shutdown check as
 * emotion_gstreamer_buffer_free(). */
void
emotion_gstreamer_message_free(Emotion_Gstreamer_Message *send)
{
   send->ev->in++;

   if (send->ev->in == send->ev->out
       && send->ev->threads == NULL
       && send->ev->delete_me)
     em_shutdown(send->ev);

   gst_message_unref(send->msg);
   free(send);
}
diff --git a/src/modules/emotion/gstreamer/emotion_convert.c b/src/modules/emotion/gstreamer/emotion_convert.c
new file mode 100644
index 0000000000..2664d28be6
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_convert.c
@@ -0,0 +1,251 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Eina.h>
+#include <Evas.h>
+
+#include <glib.h>
+#include <gst/gst.h>
+#include <gst/video/video.h>
+#include <gst/video/gstvideosink.h>
+
+#ifdef HAVE_ECORE_X
+# include <Ecore_X.h>
+# ifdef HAVE_XOVERLAY_H
+# include <gst/interfaces/xoverlay.h>
+# endif
+#endif
+
+#include "Emotion.h"
+#include "emotion_gstreamer.h"
+
/* Copy a packed BGR(x) frame into evas BGRA, forcing alpha opaque.
 * 'step' is the per-pixel byte stride of the source (3 for BGR, 4 for
 * BGRx).  NOTE(review): rows are assumed tightly packed (no row
 * padding in gst_data) — confirm against the caps negotiated upstream. */
static inline void
_evas_video_bgrx_step(unsigned char *evas_data, const unsigned char *gst_data,
                      unsigned int w, unsigned int h EINA_UNUSED, unsigned int output_height, unsigned int step)
{
   unsigned int x;
   unsigned int y;

   for (y = 0; y < output_height; ++y)
     {
        for (x = 0; x < w; x++)
          {
             evas_data[0] = gst_data[0];
             evas_data[1] = gst_data[1];
             evas_data[2] = gst_data[2];
             evas_data[3] = 255;
             gst_data += step;
             evas_data += 4;
          }
     }
}
+
/* Packed 24-bit BGR source: 3 bytes per pixel. */
static void
_evas_video_bgr(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height)
{
   _evas_video_bgrx_step(evas_data, gst_data, w, h, output_height, /* step */ 3);
}
+
/* Packed 32-bit BGRx source: 4 bytes per pixel, padding byte ignored. */
static void
_evas_video_bgrx(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height)
{
   _evas_video_bgrx_step(evas_data, gst_data, w, h, output_height, /* step */ 4);
}
+
+static void
+_evas_video_bgra(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h EINA_UNUSED, unsigned int output_height)
+{
+ unsigned int x;
+ unsigned int y;
+
+ for (y = 0; y < output_height; ++y)
+ {
+ unsigned char alpha;
+
+ for (x = 0; x < w; ++x)
+ {
+ alpha = gst_data[3];
+ evas_data[0] = (gst_data[0] * alpha) / 255;
+ evas_data[1] = (gst_data[1] * alpha) / 255;
+ evas_data[2] = (gst_data[2] * alpha) / 255;
+ evas_data[3] = alpha;
+ gst_data += 4;
+ evas_data += 4;
+ }
+ }
+}
+
/* I420 (planar Y, then U, then V): evas takes a table of row pointers
 * rather than copied pixels, so fill evas_data with pointers into the
 * gst buffer — Y rows first, then U rows, then V rows.  Strides follow
 * GStreamer's I420 layout (Y rounded to 4, chroma to half of
 * round-up-8). */
static void
_evas_video_i420(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height)
{
   const unsigned char **rows;
   unsigned int i, j;
   unsigned int rh;
   unsigned int stride_y, stride_uv;

   rh = output_height;

   rows = (const unsigned char **)evas_data;

   stride_y = GST_ROUND_UP_4(w);
   stride_uv = GST_ROUND_UP_8(w) / 2;

   for (i = 0; i < rh; i++)
     rows[i] = &gst_data[i * stride_y];

   for (j = 0; j < (rh / 2); j++, i++)
     rows[i] = &gst_data[h * stride_y + j * stride_uv];

   for (j = 0; j < (rh / 2); j++, i++)
     rows[i] = &gst_data[h * stride_y +
                         (rh / 2) * stride_uv +
                         j * stride_uv];
}
+
/* YV12 is I420 with the chroma planes swapped (V before U in memory):
 * identical row-pointer setup, but the last two loops point at the
 * planes in the opposite order so evas still receives U rows then V
 * rows. */
static void
_evas_video_yv12(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height)
{
   const unsigned char **rows;
   unsigned int i, j;
   unsigned int rh;
   unsigned int stride_y, stride_uv;

   rh = output_height;

   rows = (const unsigned char **)evas_data;

   stride_y = GST_ROUND_UP_4(w);
   stride_uv = GST_ROUND_UP_8(w) / 2;

   for (i = 0; i < rh; i++)
     rows[i] = &gst_data[i * stride_y];

   for (j = 0; j < (rh / 2); j++, i++)
     rows[i] = &gst_data[h * stride_y +
                         (rh / 2) * stride_uv +
                         j * stride_uv];

   for (j = 0; j < (rh / 2); j++, i++)
     rows[i] = &gst_data[h * stride_y + j * stride_uv];
}
+
/* YUY2 (packed 4:2:2): one row pointer per output row, stride is the
 * packed row length (2 bytes/pixel) rounded up to 4. */
static void
_evas_video_yuy2(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h EINA_UNUSED, unsigned int output_height)
{
   const unsigned char **rows;
   unsigned int i;
   unsigned int stride;

   rows = (const unsigned char **)evas_data;

   stride = GST_ROUND_UP_4(w * 2);

   for (i = 0; i < output_height; i++)
     rows[i] = &gst_data[i * stride];
}
+
/* NV12 (planar Y followed by interleaved UV): Y row pointers first,
 * then rh/2 pointers into the interleaved chroma plane.
 * NOTE(review): the chroma plane offset uses rh (output_height) * w,
 * not h * w — confirm this is right when output_height != h. */
static void
_evas_video_nv12(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h EINA_UNUSED, unsigned int output_height)
{
   const unsigned char **rows;
   unsigned int i, j;
   unsigned int rh;

   rh = output_height;

   rows = (const unsigned char **)evas_data;

   for (i = 0; i < rh; i++)
     rows[i] = &gst_data[i * w];

   for (j = 0; j < (rh / 2); j++, i++)
     rows[i] = &gst_data[rh * w + j * w];
}
+
/* TM12/MT12 (64x32 macro-block tiled NV12 variant): one row pointer
 * per 64-row band of the luma plane (handling an odd trailing band),
 * then pointers into the tiled chroma plane. */
static void
_evas_video_mt12(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height EINA_UNUSED)
{
   const unsigned char **rows;
   unsigned int i;
   unsigned int j;

   rows = (const unsigned char **)evas_data;

   for (i = 0; i < (h / 32) / 2; i++)
     rows[i] = &gst_data[i * w * 2 * 32];

   if ((h / 32) % 2)
     {
        rows[i] = &gst_data[i * w * 2 * 32];
        i++;
     }

   /* NOTE(review): chroma loop divides by 32 here but the multi-plane
    * variant uses 16 — confirm which tile height is correct for TM12. */
   for (j = 0; j < ((h / 2) / 32) / 2; ++j, ++i)
     rows[i] = &gst_data[h * w + j * (w / 2) * 2 * 16];
}
+
/* ST12 delivered as a GstMultiPlaneImageBuffer: the "pixel data" is
 * actually the multi-plane descriptor; build row pointers into its
 * separately mapped luma (uaddr[0]) and chroma (uaddr[1]) planes. */
void
_evas_video_st12_multiplane(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height EINA_UNUSED)
{
   const GstMultiPlaneImageBuffer *mp_buf = (const GstMultiPlaneImageBuffer *) gst_data;
   const unsigned char **rows;
   unsigned int i;
   unsigned int j;

   rows = (const unsigned char **)evas_data;

   for (i = 0; i < (h / 32) / 2; i++)
     rows[i] = mp_buf->uaddr[0] + i * w * 2 * 32;
   if ((h / 32) % 2)
     {
        rows[i] = mp_buf->uaddr[0] + i * w * 2 * 32;
        i++;
     }

   for (j = 0; j < ((h / 2) / 16) / 2; j++, i++)
     {
        rows[i] = mp_buf->uaddr[1] + j * w * 2 * 16 * 2;
     }
   if (((h / 2) / 16) % 2)
     rows[i] = mp_buf->uaddr[1] + j * w * 2 * 16 * 2;
}
+
/* ST12 delivered as a Samsung SCMN_IMGB descriptor: build row pointers
 * using the descriptor's own per-plane stride and elevation instead of
 * the nominal frame width. */
void
_evas_video_st12(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w EINA_UNUSED, unsigned int h, unsigned int output_height EINA_UNUSED)
{
   const SCMN_IMGB *imgb = (const SCMN_IMGB *) gst_data;
   const unsigned char **rows;
   unsigned int i, j;

   rows = (const unsigned char **)evas_data;

   for (i = 0; i < (h / 32) / 2; i++)
     rows[i] = imgb->uaddr[0] + i * imgb->stride[0] * 2 * 32;
   if ((h / 32) % 2)
     {
        rows[i] = imgb->uaddr[0] + i * imgb->stride[0] * 2 * 32;
        i++;
     }

   for (j = 0; j < (unsigned int) imgb->elevation[1] / 32 / 2; j++, i++)
     rows[i] = imgb->uaddr[1] + j * imgb->stride[1] * 32 * 2;
   if ((imgb->elevation[1] / 32) % 2)
     rows[i++] = imgb->uaddr[1] + j * imgb->stride[1] * 32 * 2;
}
+
/* FourCC-keyed converter table (YUV family): maps a gst FourCC to the
 * evas colorspace and the row-pointer/copy function above.  The last
 * flag marks converters whose output height must stay even.
 * NULL-terminated. */
const ColorSpace_FourCC_Convertion colorspace_fourcc_convertion[] = {
   { "I420", GST_MAKE_FOURCC('I', '4', '2', '0'), EVAS_COLORSPACE_YCBCR422P601_PL, _evas_video_i420, EINA_TRUE },
   { "YV12", GST_MAKE_FOURCC('Y', 'V', '1', '2'), EVAS_COLORSPACE_YCBCR422P601_PL, _evas_video_yv12, EINA_TRUE },
   { "YUY2", GST_MAKE_FOURCC('Y', 'U', 'Y', '2'), EVAS_COLORSPACE_YCBCR422601_PL, _evas_video_yuy2, EINA_FALSE },
   { "NV12", GST_MAKE_FOURCC('N', 'V', '1', '2'), EVAS_COLORSPACE_YCBCR420NV12601_PL, _evas_video_nv12, EINA_TRUE },
   { "TM12", GST_MAKE_FOURCC('T', 'M', '1', '2'), EVAS_COLORSPACE_YCBCR420TM12601_PL, _evas_video_mt12, EINA_TRUE },
   { NULL, 0, 0, NULL, 0 }
};
+
/* GstVideoFormat-keyed converter table (packed RGB family); all map to
 * ARGB8888 in evas.  NULL-terminated. */
const ColorSpace_Format_Convertion colorspace_format_convertion[] = {
   { "BGR", GST_VIDEO_FORMAT_BGR, EVAS_COLORSPACE_ARGB8888, _evas_video_bgr },
   { "BGRx", GST_VIDEO_FORMAT_BGRx, EVAS_COLORSPACE_ARGB8888, _evas_video_bgrx },
   { "BGRA", GST_VIDEO_FORMAT_BGRA, EVAS_COLORSPACE_ARGB8888, _evas_video_bgra },
   { NULL, 0, 0, NULL }
};
diff --git a/src/modules/emotion/gstreamer/emotion_fakeeos.c b/src/modules/emotion/gstreamer/emotion_fakeeos.c
new file mode 100644
index 0000000000..6296ce3acf
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_fakeeos.c
@@ -0,0 +1,70 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Eina.h>
+#include <Evas.h>
+
+#include <glib.h>
+#include <gst/gst.h>
+#include <gst/video/video.h>
+#include <gst/video/gstvideosink.h>
+
+#ifdef HAVE_ECORE_X
+# include <Ecore_X.h>
+# ifdef HAVE_XOVERLAY_H
+# include <gst/interfaces/xoverlay.h>
+# endif
+#endif
+
+#include "Emotion.h"
+#include "emotion_gstreamer.h"
+
/* A GstBin subclass whose only job is to swallow EOS messages (see
 * fakeeos_bin_handle_message below). */
typedef struct _FakeEOSBin
{
   GstBin parent;
} FakeEOSBin;

typedef struct _FakeEOSBinClass
{
   GstBinClass parent;
} FakeEOSBinClass;

/* GStreamer 0.10 boilerplate: declares fakeeos_bin_get_type() and the
 * parent_class pointer, and wires up the *_base_init/_class_init/_init
 * functions defined below. */
GST_BOILERPLATE(FakeEOSBin, fakeeos_bin, GstBin,
                GST_TYPE_BIN);
+
/* Intercept bus messages from children: drop EOS on the floor so it
 * never propagates out of this bin; forward everything else to the
 * normal GstBin handler. */
static void
fakeeos_bin_handle_message(GstBin * bin, GstMessage * message)
{
   /* FakeEOSBin *fakeeos = (FakeEOSBin *)(bin); */

   switch (GST_MESSAGE_TYPE(message)) {
    case GST_MESSAGE_EOS:
      /* what to do here ? just returning at the moment */
      return ;
    default:
      break;
   }

   GST_BIN_CLASS(parent_class)->handle_message(bin, message);
}
+
+static void
+fakeeos_bin_base_init(gpointer g_class EINA_UNUSED)
+{
+}
+
/* Class init: override GstBin's handle_message with our EOS-eating
 * version. */
static void
fakeeos_bin_class_init(FakeEOSBinClass * klass)
{
   GstBinClass *gstbin_class = GST_BIN_CLASS(klass);

   gstbin_class->handle_message =
      GST_DEBUG_FUNCPTR (fakeeos_bin_handle_message);
}
+
+static void
+fakeeos_bin_init(FakeEOSBin *src EINA_UNUSED,
+ FakeEOSBinClass *klass EINA_UNUSED)
+{
+}
diff --git a/src/modules/emotion/gstreamer/emotion_gstreamer.c b/src/modules/emotion/gstreamer/emotion_gstreamer.c
new file mode 100644
index 0000000000..8138ae6413
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_gstreamer.c
@@ -0,0 +1,2156 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+#include <fcntl.h>
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#define HTTP_STREAM 0
+#define RTSP_STREAM 1
+#include <glib.h>
+#include <gst/gst.h>
+#include <glib-object.h>
+#include <gst/video/gstvideosink.h>
+#include <gst/video/video.h>
+
+#ifdef HAVE_ECORE_X
+# include <Ecore_X.h>
+# ifdef HAVE_XOVERLAY_H
+# include <gst/interfaces/xoverlay.h>
+# endif
+#endif
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_gstreamer.h"
+
+/* Set in module_open() when the window manager advertises the E video
+ * parent/position hints needed for the special Xv overlay window. */
+Eina_Bool window_manager_video = EINA_FALSE;
+int _emotion_gstreamer_log_domain = -1;
+/* Enabled via the EMOTION_FPS_DEBUG environment variable. */
+Eina_Bool debug_fps = EINA_FALSE;
+Eina_Bool _ecore_x_available = EINA_FALSE;
+
+/* Idler used to rebuild the pipeline after a priority change
+ * (see em_priority_set / _em_restart_stream). */
+static Ecore_Idler *restart_idler;
+
+/* Callbacks to get the eos */
+static void _for_each_tag (GstTagList const* list, gchar const* tag, void *data);
+static void _free_metadata (Emotion_Gstreamer_Metadata *m);
+
+/* Interface */
+
+static unsigned char em_init (Evas_Object *obj,
+ void **emotion_video,
+ Emotion_Module_Options *opt);
+
+static unsigned char em_file_open (const char *file,
+ Evas_Object *obj,
+ void *video);
+
+static void em_file_close (void *video);
+
+static void em_play (void *video,
+ double pos);
+
+static void em_stop (void *video);
+
+static void em_size_get (void *video,
+ int *width,
+ int *height);
+
+static void em_pos_set (void *video,
+ double pos);
+
+
+static double em_len_get (void *video);
+
+static double em_buffer_size_get (void *video);
+
+static int em_fps_num_get (void *video);
+
+static int em_fps_den_get (void *video);
+
+static double em_fps_get (void *video);
+
+static double em_pos_get (void *video);
+
+static void em_vis_set (void *video,
+ Emotion_Vis vis);
+
+static Emotion_Vis em_vis_get (void *video);
+
+static Eina_Bool em_vis_supported (void *video,
+ Emotion_Vis vis);
+
+static double em_ratio_get (void *video);
+
+static int em_video_handled (void *video);
+
+static int em_audio_handled (void *video);
+
+static int em_seekable (void *video);
+
+static void em_frame_done (void *video);
+
+static Emotion_Format em_format_get (void *video);
+
+static void em_video_data_size_get (void *video,
+ int *w,
+ int *h);
+
+static int em_yuv_rows_get (void *video,
+ int w,
+ int h,
+ unsigned char **yrows,
+ unsigned char **urows,
+ unsigned char **vrows);
+
+static int em_bgra_data_get (void *video,
+ unsigned char **bgra_data);
+
+static void em_event_feed (void *video,
+ int event);
+
+static void em_event_mouse_button_feed (void *video,
+ int button,
+ int x,
+ int y);
+
+static void em_event_mouse_move_feed (void *video,
+ int x,
+ int y);
+
+static int em_video_channel_count (void *video);
+
+static void em_video_channel_set (void *video,
+ int channel);
+
+static int em_video_channel_get (void *video);
+
+static void em_video_subtitle_file_set (void *video,
+ const char *filepath);
+
+static const char *em_video_subtitle_file_get (void *video);
+
+static const char *em_video_channel_name_get (void *video,
+ int channel);
+
+static void em_video_channel_mute_set (void *video,
+ int mute);
+
+static int em_video_channel_mute_get (void *video);
+
+static int em_audio_channel_count (void *video);
+
+static void em_audio_channel_set (void *video,
+ int channel);
+
+static int em_audio_channel_get (void *video);
+
+static const char *em_audio_channel_name_get (void *video,
+ int channel);
+
+static void em_audio_channel_mute_set (void *video,
+ int mute);
+
+static int em_audio_channel_mute_get (void *video);
+
+static void em_audio_channel_volume_set (void *video,
+ double vol);
+
+static double em_audio_channel_volume_get (void *video);
+
+static int em_spu_channel_count (void *video);
+
+static void em_spu_channel_set (void *video,
+ int channel);
+
+static int em_spu_channel_get (void *video);
+
+static const char *em_spu_channel_name_get (void *video,
+ int channel);
+
+static void em_spu_channel_mute_set (void *video,
+ int mute);
+
+static int em_spu_channel_mute_get (void *video);
+
+static int em_chapter_count (void *video);
+
+static void em_chapter_set (void *video,
+ int chapter);
+
+static int em_chapter_get (void *video);
+
+static const char *em_chapter_name_get (void *video,
+ int chapter);
+
+static void em_speed_set (void *video,
+ double speed);
+
+static double em_speed_get (void *video);
+
+static int em_eject (void *video);
+
+static const char *em_meta_get (void *video,
+ int meta);
+
+static void em_priority_set (void *video,
+ Eina_Bool pri);
+static Eina_Bool em_priority_get (void *video);
+
+static GstBusSyncReply _eos_sync_fct(GstBus *bus,
+ GstMessage *message,
+ gpointer data);
+
+static Eina_Bool _em_restart_stream(void *data);
+
+/* Module interface */
+
+/* Backend vtable: maps the generic Emotion module interface onto the
+ * gstreamer implementations in this file. Entry order must match the
+ * Emotion_Video_Module declaration. */
+static Emotion_Video_Module em_module =
+{
+ em_init, /* init */
+ em_shutdown, /* shutdown */
+ em_file_open, /* file_open */
+ em_file_close, /* file_close */
+ em_play, /* play */
+ em_stop, /* stop */
+ em_size_get, /* size_get */
+ em_pos_set, /* pos_set */
+ em_len_get, /* len_get */
+ em_buffer_size_get, /* buffer_size_get */
+ em_fps_num_get, /* fps_num_get */
+ em_fps_den_get, /* fps_den_get */
+ em_fps_get, /* fps_get */
+ em_pos_get, /* pos_get */
+ em_vis_set, /* vis_set */
+ em_vis_get, /* vis_get */
+ em_vis_supported, /* vis_supported */
+ em_ratio_get, /* ratio_get */
+ em_video_handled, /* video_handled */
+ em_audio_handled, /* audio_handled */
+ em_seekable, /* seekable */
+ em_frame_done, /* frame_done */
+ em_format_get, /* format_get */
+ em_video_data_size_get, /* video_data_size_get */
+ em_yuv_rows_get, /* yuv_rows_get */
+ em_bgra_data_get, /* bgra_data_get */
+ em_event_feed, /* event_feed */
+ em_event_mouse_button_feed, /* event_mouse_button_feed */
+ em_event_mouse_move_feed, /* event_mouse_move_feed */
+ em_video_channel_count, /* video_channel_count */
+ em_video_channel_set, /* video_channel_set */
+ em_video_channel_get, /* video_channel_get */
+ em_video_subtitle_file_set, /* video_subtitle_file_set */
+ em_video_subtitle_file_get, /* video_subtitle_file_get */
+ em_video_channel_name_get, /* video_channel_name_get */
+ em_video_channel_mute_set, /* video_channel_mute_set */
+ em_video_channel_mute_get, /* video_channel_mute_get */
+ em_audio_channel_count, /* audio_channel_count */
+ em_audio_channel_set, /* audio_channel_set */
+ em_audio_channel_get, /* audio_channel_get */
+ em_audio_channel_name_get, /* audio_channel_name_get */
+ em_audio_channel_mute_set, /* audio_channel_mute_set */
+ em_audio_channel_mute_get, /* audio_channel_mute_get */
+ em_audio_channel_volume_set, /* audio_channel_volume_set */
+ em_audio_channel_volume_get, /* audio_channel_volume_get */
+ em_spu_channel_count, /* spu_channel_count */
+ em_spu_channel_set, /* spu_channel_set */
+ em_spu_channel_get, /* spu_channel_get */
+ em_spu_channel_name_get, /* spu_channel_name_get */
+ em_spu_channel_mute_set, /* spu_channel_mute_set */
+ em_spu_channel_mute_get, /* spu_channel_mute_get */
+ em_chapter_count, /* chapter_count */
+ em_chapter_set, /* chapter_set */
+ em_chapter_get, /* chapter_get */
+ em_chapter_name_get, /* chapter_name_get */
+ em_speed_set, /* speed_set */
+ em_speed_get, /* speed_get */
+ em_eject, /* eject */
+ em_meta_get, /* meta_get */
+ em_priority_set, /* priority_set */
+ em_priority_get, /* priority_get */
+ NULL /* handle */
+};
+
+/* Failure counter consulted by em_priority_set(); after too many
+ * failed pipeline rebuilds the priority toggle becomes a no-op.
+ * (Spelling "overide" kept: the name is referenced as-is elsewhere.) */
+static int priority_overide = 0;
+
+/* Allocate a zeroed video stream record and append it to
+ * ev->video_streams. Returns the new record, or NULL on failure
+ * (NULL ev, allocation failure, or list append error). */
+static Emotion_Video_Stream *
+emotion_video_stream_new(Emotion_Gstreamer_Video *ev)
+{
+ Emotion_Video_Stream *vstream;
+
+ if (!ev) return NULL;
+
+ vstream = (Emotion_Video_Stream *)calloc(1, sizeof(Emotion_Video_Stream));
+ if (!vstream) return NULL;
+
+ ev->video_streams = eina_list_append(ev->video_streams, vstream);
+ /* NOTE(review): eina_error_get() can report a stale error from an
+ * earlier call — confirm the error state is clean before the append. */
+ if (eina_error_get())
+ {
+ free(vstream);
+ return NULL;
+ }
+ return vstream;
+}
+
+/* Map an Emotion_Vis value to the name of the GStreamer element that
+ * implements it. Returns NULL for EMOTION_VIS_NONE, and falls back to
+ * "goom" for unknown values. Note the intentionally odd element names
+ * ("libvisual_lv_analyzer", "libvisual_plazma") — they match the
+ * actual libvisual plugin names, not the enum spelling. */
+static const char *
+emotion_visualization_element_name_get(Emotion_Vis visualisation)
+{
+ switch (visualisation)
+ {
+ case EMOTION_VIS_NONE:
+ return NULL;
+ case EMOTION_VIS_GOOM:
+ return "goom";
+ case EMOTION_VIS_LIBVISUAL_BUMPSCOPE:
+ return "libvisual_bumpscope";
+ case EMOTION_VIS_LIBVISUAL_CORONA:
+ return "libvisual_corona";
+ case EMOTION_VIS_LIBVISUAL_DANCING_PARTICLES:
+ return "libvisual_dancingparticles";
+ case EMOTION_VIS_LIBVISUAL_GDKPIXBUF:
+ return "libvisual_gdkpixbuf";
+ case EMOTION_VIS_LIBVISUAL_G_FORCE:
+ return "libvisual_G-Force";
+ case EMOTION_VIS_LIBVISUAL_GOOM:
+ return "libvisual_goom";
+ case EMOTION_VIS_LIBVISUAL_INFINITE:
+ return "libvisual_infinite";
+ case EMOTION_VIS_LIBVISUAL_JAKDAW:
+ return "libvisual_jakdaw";
+ case EMOTION_VIS_LIBVISUAL_JESS:
+ return "libvisual_jess";
+ case EMOTION_VIS_LIBVISUAL_LV_ANALYSER:
+ return "libvisual_lv_analyzer";
+ case EMOTION_VIS_LIBVISUAL_LV_FLOWER:
+ return "libvisual_lv_flower";
+ case EMOTION_VIS_LIBVISUAL_LV_GLTEST:
+ return "libvisual_lv_gltest";
+ case EMOTION_VIS_LIBVISUAL_LV_SCOPE:
+ return "libvisual_lv_scope";
+ case EMOTION_VIS_LIBVISUAL_MADSPIN:
+ return "libvisual_madspin";
+ case EMOTION_VIS_LIBVISUAL_NEBULUS:
+ return "libvisual_nebulus";
+ case EMOTION_VIS_LIBVISUAL_OINKSIE:
+ return "libvisual_oinksie";
+ case EMOTION_VIS_LIBVISUAL_PLASMA:
+ return "libvisual_plazma";
+ default:
+ return "goom";
+ }
+}
+
+/* Module init entry point: allocate the per-object backend context and
+ * bring up GStreamer.
+ * @param obj the owning Evas object
+ * @param emotion_video out: receives the new Emotion_Gstreamer_Video
+ * @param opt module options (unused)
+ * @return 1 on success, 0 on failure.
+ * FIX: 'error' must be initialized to NULL before being passed as a
+ * GError** out-parameter (GLib contract), and must be freed if
+ * gst_init_check() fails — previously it was uninitialized and leaked. */
+static unsigned char
+em_init(Evas_Object *obj,
+ void **emotion_video,
+ Emotion_Module_Options *opt EINA_UNUSED)
+{
+ Emotion_Gstreamer_Video *ev;
+ GError *error = NULL;
+
+ if (!emotion_video)
+ return 0;
+
+ ev = calloc(1, sizeof(Emotion_Gstreamer_Video));
+ if (!ev) return 0;
+
+ ev->obj = obj;
+
+ /* Initialization of gstreamer */
+ if (!gst_init_check(NULL, NULL, &error))
+ goto failure;
+
+ /* Default values */
+ ev->ratio = 1.0;
+ ev->vis = EMOTION_VIS_NONE;
+ ev->volume = 0.8;
+ ev->play_started = 0;
+ ev->delete_me = EINA_FALSE;
+ ev->threads = NULL;
+
+ *emotion_video = ev;
+
+ return 1;
+
+failure:
+ if (error) g_error_free(error); /* don't leak the GError from gst_init_check */
+ free(ev);
+
+ return 0;
+}
+
+/* Tear down all per-file state: pending buffers, bus, metadata,
+ * pipeline, Xv window and stream lists. Shared by em_file_close(),
+ * em_shutdown() and em_priority_set(). */
+static void
+em_cleanup(Emotion_Gstreamer_Video *ev)
+{
+ Emotion_Audio_Stream *astream;
+ Emotion_Video_Stream *vstream;
+
+ if (ev->send)
+ {
+ emotion_gstreamer_buffer_free(ev->send);
+ ev->send = NULL;
+ }
+
+ if (ev->eos_bus)
+ {
+ gst_object_unref(GST_OBJECT(ev->eos_bus));
+ ev->eos_bus = NULL;
+ }
+
+ if (ev->metadata)
+ {
+ _free_metadata(ev->metadata);
+ ev->metadata = NULL;
+ }
+
+ if (ev->last_buffer)
+ {
+ gst_buffer_unref(ev->last_buffer);
+ ev->last_buffer = NULL;
+ }
+
+ if (!ev->stream)
+ {
+ /* detach the video surface from the Evas image before teardown */
+ evas_object_image_video_surface_set(emotion_object_image_get(ev->obj), NULL);
+ ev->stream = EINA_TRUE;
+ }
+
+ if (ev->pipeline)
+ {
+ /* NOTE(review): called with a NULL uri — presumably resets the
+ * sink state rather than building a pipeline; confirm against
+ * gstreamer_video_sink_new(). */
+ gstreamer_video_sink_new(ev, ev->obj, NULL);
+
+ /* break the sink's back-references before dropping the pipeline */
+ g_object_set(G_OBJECT(ev->esink), "ev", NULL, NULL);
+ g_object_set(G_OBJECT(ev->esink), "evas-object", NULL, NULL);
+ gst_element_set_state(ev->pipeline, GST_STATE_NULL);
+ gst_object_unref(ev->pipeline);
+
+ ev->pipeline = NULL;
+ ev->sink = NULL;
+
+ if (ev->eteepad) gst_object_unref(ev->eteepad);
+ ev->eteepad = NULL;
+ if (ev->xvteepad) gst_object_unref(ev->xvteepad);
+ ev->xvteepad = NULL;
+ if (ev->xvpad) gst_object_unref(ev->xvpad);
+ ev->xvpad = NULL;
+
+ ev->src_width = 0;
+ ev->src_height = 0;
+
+#ifdef HAVE_ECORE_X
+ INF("destroying window: %i", ev->win);
+ if (ev->win) ecore_x_window_free(ev->win);
+ ev->win = 0;
+#endif
+ }
+
+ if (restart_idler)
+ {
+ ecore_idler_del(restart_idler);
+ restart_idler = NULL;
+ }
+
+ EINA_LIST_FREE(ev->audio_streams, astream)
+ free(astream);
+ EINA_LIST_FREE(ev->video_streams, vstream)
+ free(vstream);
+}
+
+/* Module shutdown. If worker threads or in-flight frames remain, the
+ * actual destruction is deferred (delete_me is set and 0 is returned);
+ * otherwise the context is cleaned up and freed.
+ * @return 1 when fully shut down, 0 when deferred or ev is NULL. */
+int
+em_shutdown(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+ if (!ev)
+ return 0;
+
+ if (ev->threads)
+ {
+ Ecore_Thread *t;
+
+ EINA_LIST_FREE(ev->threads, t)
+ ecore_thread_cancel(t);
+
+ /* threads still winding down: defer the free to their callbacks */
+ ev->delete_me = EINA_TRUE;
+ return EINA_FALSE;
+ }
+
+ if (ev->in != ev->out)
+ {
+ /* frames still in flight between decode and render */
+ ev->delete_me = EINA_TRUE;
+ return EINA_FALSE;
+ }
+
+ em_cleanup(ev);
+
+ free(ev);
+
+ return 1;
+}
+
+
+/* Open a media file: normalize the path to a URI (adding "file://",
+ * resolving "./" and relative paths, and handling Windows drive
+ * letters), build the pipeline and install the EOS bus sync handler.
+ * @return 1 on success, EINA_FALSE (0) on failure. */
+static unsigned char
+em_file_open(const char *file,
+ Evas_Object *obj,
+ void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Eina_Strbuf *sbuf = NULL;
+ const char *uri;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!file) return EINA_FALSE;
+ if (strstr(file, "://") == NULL)
+ {
+ /* no scheme present: build a file:// URI from the path */
+ sbuf = eina_strbuf_new();
+ eina_strbuf_append(sbuf, "file://");
+ if (strncmp(file, "./", 2) == 0)
+ file += 2;
+ if (strstr(file, ":/") != NULL)
+ { /* We absolutely need file:///C:/ under Windows, so adding it here */
+ eina_strbuf_append(sbuf, "/");
+ }
+ else if (*file != '/')
+ {
+ /* relative path: prepend the current working directory */
+ char tmp[PATH_MAX];
+
+ if (getcwd(tmp, PATH_MAX))
+ {
+ eina_strbuf_append(sbuf, tmp);
+ eina_strbuf_append(sbuf, "/");
+ }
+ }
+ eina_strbuf_append(sbuf, file);
+ }
+
+ ev->play_started = 0;
+ ev->pipeline_parsed = 0;
+
+ uri = sbuf ? eina_strbuf_string_get(sbuf) : file;
+ DBG("setting file to '%s'", uri);
+ ev->pipeline = gstreamer_video_sink_new(ev, obj, uri);
+ if (sbuf) eina_strbuf_free(sbuf);
+
+ if (!ev->pipeline)
+ return EINA_FALSE;
+
+ ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
+ if (!ev->eos_bus)
+ {
+ ERR("could not get the bus");
+ return EINA_FALSE;
+ }
+
+ /* EOS and errors are handled synchronously on the streaming thread */
+ gst_bus_set_sync_handler(ev->eos_bus, _eos_sync_fct, ev);
+
+ /* Evas Object */
+ ev->obj = obj;
+
+ ev->position = 0.0;
+
+ return 1;
+}
+
+/* Close the current file: cancel pending worker threads, tear down the
+ * pipeline and reset the parse/play state so another file can be opened. */
+static void
+em_file_close(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+ if (!ev)
+ return;
+
+ if (ev->threads)
+ {
+ Ecore_Thread *t;
+
+ EINA_LIST_FREE(ev->threads, t)
+ ecore_thread_cancel(t);
+ }
+
+ em_cleanup(ev);
+
+ ev->pipeline_parsed = EINA_FALSE;
+ ev->play_started = 0;
+}
+
+/* Start playback. If the pipeline is not yet parsed, only the play
+ * flags are set; the state change happens once parsing completes.
+ * The 'pos' argument is unused (seeking is done via em_pos_set). */
+static void
+em_play(void *video,
+ double pos EINA_UNUSED)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+ if (!ev->pipeline) return ;
+
+ if (ev->pipeline_parsed)
+ gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+ ev->play = 1;
+ ev->play_started = 1;
+}
+
+/* Pause playback (the pipeline is set to PAUSED, not stopped/NULL). */
+static void
+em_stop(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!ev->pipeline) return ;
+
+ if (ev->pipeline_parsed)
+ gst_element_set_state(ev->pipeline, GST_STATE_PAUSED);
+ ev->play = 0;
+}
+
+/* Report the size of the current video stream. Both out-pointers may
+ * be NULL; on any failure they are set to 0. */
+static void
+em_size_get(void *video,
+ int *width,
+ int *height)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ goto on_error;
+
+ /* video_stream_nbr is 1-based; 0 means no stream selected */
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ {
+ if (width) *width = vstream->width;
+ if (height) *height = vstream->height;
+
+ return ;
+ }
+
+ on_error:
+ if (width) *width = 0;
+ if (height) *height = 0;
+}
+
+/* Seek to 'pos' (seconds). The pipeline is paused around the seek when
+ * playing so the flush+accurate seek lands cleanly. */
+static void
+em_pos_set(void *video,
+ double pos)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!ev->pipeline) return ;
+
+ if (ev->play)
+ gst_element_set_state(ev->pipeline, GST_STATE_PAUSED);
+
+ gst_element_seek(ev->pipeline, 1.0,
+ GST_FORMAT_TIME,
+ GST_SEEK_FLAG_ACCURATE | GST_SEEK_FLAG_FLUSH,
+ GST_SEEK_TYPE_SET,
+ (gint64)(pos * (double)GST_SECOND),
+ GST_SEEK_TYPE_NONE, -1);
+
+ if (ev->play)
+ gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+}
+
+/* Media duration in seconds. Primary path: query the pipeline in TIME
+ * format; on failure fall back to the per-stream length recorded during
+ * pipeline parsing. Returns 0.0 when nothing is known. */
+static double
+em_len_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+ Emotion_Audio_Stream *astream;
+ Eina_List *l;
+ GstFormat fmt;
+ gint64 val;
+ gboolean ret;
+
+ ev = video;
+ fmt = GST_FORMAT_TIME;
+
+ if (!ev->pipeline) return 0.0;
+
+ ret = gst_element_query_duration(ev->pipeline, &fmt, &val);
+ if (!ret)
+ goto fallback;
+
+ if (fmt != GST_FORMAT_TIME)
+ {
+ DBG("requrested duration in time, but got %s instead.",
+ gst_format_get_name(fmt));
+ goto fallback;
+ }
+
+ /* NOTE: gint64 compared against a double literal; works, but the
+ * intent is simply "val <= 0" (unknown/invalid duration). */
+ if (val <= 0.0)
+ goto fallback;
+
+ /* nanoseconds -> seconds */
+ return val / 1000000000.0;
+
+ fallback:
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ return 0.0;
+
+ EINA_LIST_FOREACH(ev->audio_streams, l, astream)
+ if (astream->length_time >= 0)
+ return astream->length_time;
+
+ EINA_LIST_FOREACH(ev->video_streams, l, vstream)
+ if (vstream->length_time >= 0)
+ return vstream->length_time;
+
+ return 0.0;
+}
+
+/* Buffering progress as a fraction in [0.0, 1.0]. When the buffering
+ * query is unsupported (e.g. local files) 100% is assumed. */
+static double
+em_buffer_size_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ GstQuery *query;
+ gboolean busy; /* set by the query parse; value itself is unused */
+ gint percent;
+
+ ev = video;
+
+ if (!ev->pipeline) return 0.0;
+
+ query = gst_query_new_buffering(GST_FORMAT_DEFAULT);
+ if (gst_element_query(ev->pipeline, query))
+ gst_query_parse_buffering_percent(query, &busy, &percent);
+ else
+ percent = 100;
+
+ gst_query_unref(query);
+ return ((float)(percent)) / 100.0;
+}
+
+/* Frame-rate numerator of the current video stream (0 when unknown). */
+static int
+em_fps_num_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ return 0;
+
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ return vstream->fps_num;
+
+ return 0;
+}
+
+/* Frame-rate denominator of the current video stream (1 when unknown,
+ * so callers dividing by it stay safe). */
+static int
+em_fps_den_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ return 1;
+
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ return vstream->fps_den;
+
+ return 1;
+}
+
+/* Frame rate as a double (num/den). NOTE(review): divides by fps_den
+ * without a zero check — confirm parsing guarantees fps_den != 0. */
+static double
+em_fps_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ return 0.0;
+
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ return (double)vstream->fps_num / (double)vstream->fps_den;
+
+ return 0.0;
+}
+
+/* Current playback position in seconds. Falls back to the last cached
+ * position when the pipeline query fails or returns a non-TIME format. */
+static double
+em_pos_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ GstFormat fmt;
+ gint64 val;
+ gboolean ret;
+
+ ev = video;
+ fmt = GST_FORMAT_TIME;
+
+ if (!ev->pipeline) return 0.0;
+
+ ret = gst_element_query_position(ev->pipeline, &fmt, &val);
+ if (!ret)
+ return ev->position;
+
+ if (fmt != GST_FORMAT_TIME)
+ {
+ ERR("requrested position in time, but got %s instead.",
+ gst_format_get_name(fmt));
+ return ev->position;
+ }
+
+ /* nanoseconds -> seconds; cache for the failure paths above */
+ ev->position = val / 1000000000.0;
+ return ev->position;
+}
+
+/* Record the requested audio visualization (applied on pipeline build). */
+static void
+em_vis_set(void *video,
+ Emotion_Vis vis)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ ev->vis = vis;
+}
+
+/* Return the currently selected visualization. */
+static Emotion_Vis
+em_vis_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ return ev->vis;
+}
+
+/* Check whether the GStreamer element implementing 'vis' is installed.
+ * EMOTION_VIS_NONE is always supported. */
+static Eina_Bool
+em_vis_supported(void *ef EINA_UNUSED, Emotion_Vis vis)
+{
+ const char *name;
+ GstElementFactory *factory;
+
+ if (vis == EMOTION_VIS_NONE)
+ return EINA_TRUE;
+
+ name = emotion_visualization_element_name_get(vis);
+ if (!name)
+ return EINA_FALSE;
+
+ factory = gst_element_factory_find(name);
+ if (!factory)
+ return EINA_FALSE;
+
+ gst_object_unref(factory);
+ return EINA_TRUE;
+}
+
+/* Pixel/display aspect ratio recorded during pipeline parsing. */
+static double
+em_ratio_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ return ev->ratio;
+}
+
+/* 1 when the media has at least one video stream, 0 otherwise. */
+static int
+em_video_handled(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ if (!eina_list_count(ev->video_streams))
+ return 0;
+
+ return 1;
+}
+
+/* 1 when the media has at least one audio stream, 0 otherwise. */
+static int
+em_audio_handled(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ if (!eina_list_count(ev->audio_streams))
+ return 0;
+
+ return 1;
+}
+
+/* All media are reported as seekable by this backend. */
+static int
+em_seekable(void *video EINA_UNUSED)
+{
+ return 1;
+}
+
+/* Frame-done hook: nothing to do for gstreamer. */
+static void
+em_frame_done(void *video EINA_UNUSED)
+{
+}
+
+/* Map the current video stream's fourcc to the Emotion pixel format.
+ * Note: the 'ARGB' fourcc is reported as EMOTION_FORMAT_BGRA — this
+ * mirrors how the sink stores frames. */
+static Emotion_Format
+em_format_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ return EMOTION_FORMAT_NONE;
+
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ {
+ switch (vstream->fourcc)
+ {
+ case GST_MAKE_FOURCC('I', '4', '2', '0'):
+ return EMOTION_FORMAT_I420;
+ case GST_MAKE_FOURCC('Y', 'V', '1', '2'):
+ return EMOTION_FORMAT_YV12;
+ case GST_MAKE_FOURCC('Y', 'U', 'Y', '2'):
+ return EMOTION_FORMAT_YUY2;
+ case GST_MAKE_FOURCC('A', 'R', 'G', 'B'):
+ return EMOTION_FORMAT_BGRA;
+ default:
+ return EMOTION_FORMAT_NONE;
+ }
+ }
+ return EMOTION_FORMAT_NONE;
+}
+
+/* Report the decoded frame size of the current video stream.
+ * FIX: guard the w/h out-pointers against NULL before dereferencing,
+ * matching em_size_get() which already checks both — previously a NULL
+ * out-pointer crashed here. On failure both are set to 0 (when given). */
+static void
+em_video_data_size_get(void *video, int *w, int *h)
+{
+ Emotion_Gstreamer_Video *ev;
+ Emotion_Video_Stream *vstream;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ /* lazily parse the pipeline if the streams are not yet known */
+ if (ev->pipeline && (!ev->video_stream_nbr || !ev->video_streams))
+ if (!_emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE))
+ goto on_error;
+
+ vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);
+ if (vstream)
+ {
+ if (w) *w = vstream->width;
+ if (h) *h = vstream->height;
+
+ return ;
+ }
+
+ on_error:
+ if (w) *w = 0;
+ if (h) *h = 0;
+}
+
+/* Row-pointer access is not used by this backend (frames are delivered
+ * through the Evas video surface instead); always reports no data. */
+static int
+em_yuv_rows_get(void *video EINA_UNUSED,
+ int w EINA_UNUSED,
+ int h EINA_UNUSED,
+ unsigned char **yrows EINA_UNUSED,
+ unsigned char **urows EINA_UNUSED,
+ unsigned char **vrows EINA_UNUSED)
+{
+ return 0;
+}
+
+/* See em_yuv_rows_get: direct BGRA access is not provided. */
+static int
+em_bgra_data_get(void *video EINA_UNUSED, unsigned char **bgra_data EINA_UNUSED)
+{
+ return 0;
+}
+
+/* DVD-style menu navigation events: not supported by this backend. */
+static void
+em_event_feed(void *video EINA_UNUSED, int event EINA_UNUSED)
+{
+}
+
+/* Mouse button events for menus: not supported. */
+static void
+em_event_mouse_button_feed(void *video EINA_UNUSED, int button EINA_UNUSED, int x EINA_UNUSED, int y EINA_UNUSED)
+{
+}
+
+/* Mouse move events for menus: not supported. */
+static void
+em_event_mouse_move_feed(void *video EINA_UNUSED, int x EINA_UNUSED, int y EINA_UNUSED)
+{
+}
+
+/* Video channels */
+
+/* Number of video streams found during pipeline parsing. */
+static int
+em_video_channel_count(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ return eina_list_count(ev->video_streams);
+}
+
+/* Selecting a video channel is not implemented yet. */
+static void
+em_video_channel_set(void *video EINA_UNUSED,
+ int channel EINA_UNUSED)
+{
+#if 0
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (channel < 0) channel = 0;
+#endif
+ /* FIXME: to be implemented */
+}
+
+/* Index of the currently selected video stream. */
+static int
+em_video_channel_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ return ev->video_stream_nbr;
+}
+
+/* External subtitle files are not supported by this backend yet. */
+static void
+em_video_subtitle_file_set(void *video EINA_UNUSED,
+ const char *filepath EINA_UNUSED)
+{
+ DBG("video_subtitle_file_set not implemented for gstreamer yet.");
+}
+
+/* See em_video_subtitle_file_set. */
+static const char *
+em_video_subtitle_file_get(void *video EINA_UNUSED)
+{
+ DBG("video_subtitle_file_get not implemented for gstreamer yet.");
+ return NULL;
+}
+
+/* Video channel names are not available. */
+static const char *
+em_video_channel_name_get(void *video EINA_UNUSED,
+ int channel EINA_UNUSED)
+{
+ return NULL;
+}
+
+/* Record the video-mute flag (consumed elsewhere by the render path). */
+static void
+em_video_channel_mute_set(void *video,
+ int mute)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ ev->video_mute = mute;
+}
+
+/* Return the video-mute flag. */
+static int
+em_video_channel_mute_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ return ev->video_mute;
+}
+
+/* Audio channels */
+
+/* Number of audio streams found during pipeline parsing. */
+static int
+em_audio_channel_count(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ return eina_list_count(ev->audio_streams);
+}
+
+/* Selecting an audio channel is not implemented yet. */
+static void
+em_audio_channel_set(void *video EINA_UNUSED,
+ int channel EINA_UNUSED)
+{
+#if 0
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (channel < -1) channel = -1;
+#endif
+ /* FIXME: to be implemented */
+}
+
+/* Index of the currently selected audio stream. */
+static int
+em_audio_channel_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ _emotion_gstreamer_video_pipeline_parse(ev, EINA_FALSE);
+
+ return ev->audio_stream_nbr;
+}
+
+/* Audio channel names are not available. */
+static const char *
+em_audio_channel_name_get(void *video EINA_UNUSED,
+ int channel EINA_UNUSED)
+{
+ return NULL;
+}
+
+/* playbin2 flag for enabling the audio path; kept for reference,
+ * NOTE(review): not referenced in this translation unit. */
+#define GST_PLAY_FLAG_AUDIO (1 << 1)
+
+/* Mute audio by toggling the playbin "mute" property. */
+static void
+em_audio_channel_mute_set(void *video,
+ int mute)
+{
+ /* NOTE: at first I wanted to completly shutdown the audio path on mute,
+ but that's not possible as the audio sink could be the clock source
+ for the pipeline (at least that's the case on some of the hardware
+ I have been tested emotion on.
+ */
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!ev->pipeline) return ;
+
+ ev->audio_mute = mute;
+
+ /* !!mute normalizes any non-zero value to a gboolean TRUE */
+ g_object_set(G_OBJECT(ev->pipeline), "mute", !!mute, NULL);
+}
+
+/* Return the cached audio-mute flag. */
+static int
+em_audio_channel_mute_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ return ev->audio_mute;
+}
+
+/* Set the audio volume, clamped to [0.0, 1.0], via the playbin
+ * "volume" property; the value is cached for em_audio_channel_volume_get. */
+static void
+em_audio_channel_volume_set(void *video,
+ double vol)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!ev->pipeline) return ;
+
+ if (vol < 0.0)
+ vol = 0.0;
+ if (vol > 1.0)
+ vol = 1.0;
+ ev->volume = vol;
+ g_object_set(G_OBJECT(ev->pipeline), "volume", vol, NULL);
+}
+
+/* Return the cached volume (not re-queried from the pipeline). */
+static double
+em_audio_channel_volume_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ return ev->volume;
+}
+
+/* spu stuff */
+
+/* SPU (subpicture) channels are not supported by this backend;
+ * the functions below are interface-completing stubs. */
+static int
+em_spu_channel_count(void *video EINA_UNUSED)
+{
+ return 0;
+}
+
+static void
+em_spu_channel_set(void *video EINA_UNUSED, int channel EINA_UNUSED)
+{
+}
+
+static int
+em_spu_channel_get(void *video EINA_UNUSED)
+{
+ return 1;
+}
+
+static const char *
+em_spu_channel_name_get(void *video EINA_UNUSED, int channel EINA_UNUSED)
+{
+ return NULL;
+}
+
+static void
+em_spu_channel_mute_set(void *video EINA_UNUSED, int mute EINA_UNUSED)
+{
+}
+
+static int
+em_spu_channel_mute_get(void *video EINA_UNUSED)
+{
+ return 0;
+}
+
+/* Chapters are likewise unsupported: count is 0, setting is a no-op. */
+static int
+em_chapter_count(void *video EINA_UNUSED)
+{
+ return 0;
+}
+
+static void
+em_chapter_set(void *video EINA_UNUSED, int chapter EINA_UNUSED)
+{
+}
+
+static int
+em_chapter_get(void *video EINA_UNUSED)
+{
+ return 0;
+}
+
+static const char *
+em_chapter_name_get(void *video EINA_UNUSED, int chapter EINA_UNUSED)
+{
+ return NULL;
+}
+
+/* Variable playback speed is not implemented; speed is always 1.0. */
+static void
+em_speed_set(void *video EINA_UNUSED, double speed EINA_UNUSED)
+{
+}
+
+static double
+em_speed_get(void *video EINA_UNUSED)
+{
+ return 1.0;
+}
+
+/* Eject is a no-op that reports success. */
+static int
+em_eject(void *video EINA_UNUSED)
+{
+ return 1;
+}
+
+/* Return the requested metadata field collected by _for_each_tag(),
+ * or NULL when no metadata is available or the field is unknown.
+ * The returned string is owned by ev->metadata — callers must not free. */
+static const char *
+em_meta_get(void *video, int meta)
+{
+ Emotion_Gstreamer_Video *ev;
+ const char *str = NULL;
+
+ ev = (Emotion_Gstreamer_Video *)video;
+
+ if (!ev || !ev->metadata) return NULL;
+ switch (meta)
+ {
+ case META_TRACK_TITLE:
+ str = ev->metadata->title;
+ break;
+ case META_TRACK_ARTIST:
+ str = ev->metadata->artist;
+ break;
+ case META_TRACK_ALBUM:
+ str = ev->metadata->album;
+ break;
+ case META_TRACK_YEAR:
+ str = ev->metadata->year;
+ break;
+ case META_TRACK_GENRE:
+ str = ev->metadata->genre;
+ break;
+ case META_TRACK_COMMENT:
+ str = ev->metadata->comment;
+ break;
+ case META_TRACK_DISCID:
+ str = ev->metadata->disc_id;
+ break;
+ default:
+ break;
+ }
+
+ return str;
+}
+
+/* Toggle between the prioritized (Xv/overlay) and plain pipeline.
+ * Changing priority tears the current pipeline down and schedules a
+ * rebuild from an idler (_em_restart_stream). */
+static void
+em_priority_set(void *video, Eina_Bool pri)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = video;
+ if (priority_overide > 3) return ; /* If we failed too much to create that pipeline, let's not waste our time anymore */
+
+ if (ev->priority != pri && ev->pipeline)
+ {
+ if (ev->threads)
+ {
+ Ecore_Thread *t;
+
+ EINA_LIST_FREE(ev->threads, t)
+ ecore_thread_cancel(t);
+ }
+ em_cleanup(ev);
+ restart_idler = ecore_idler_add(_em_restart_stream, ev);
+ }
+ ev->priority = pri;
+}
+
+/* NOTE(review): returns the inverse of ev->stream rather than
+ * ev->priority — presumably "prioritized" means "not using the
+ * software stream path"; confirm against the Xv sink setup. */
+static Eina_Bool
+em_priority_get(void *video)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = video;
+ return !ev->stream;
+}
+
+#ifdef HAVE_ECORE_X
+/* Ecore-X window-destroy handler: only logs the event; returning
+ * EINA_TRUE lets other handlers run as well. */
+static Eina_Bool
+_ecore_event_x_destroy(void *data EINA_UNUSED, int type EINA_UNUSED, void *event EINA_UNUSED)
+{
+ Ecore_X_Event_Window_Destroy *ev = event;
+
+ INF("killed window: %x (%x).", ev->win, ev->event_win);
+
+ return EINA_TRUE;
+}
+#endif
+
+/* Emotion module entry: register the log domain, init the backend for
+ * this object, and (under X11) probe whether the window manager
+ * supports the E video-parent/position hints needed for Xv overlay
+ * windows (result cached in window_manager_video).
+ * @return EINA_TRUE on success with *module set to the vtable. */
+static Eina_Bool
+module_open(Evas_Object *obj,
+ const Emotion_Video_Module **module,
+ void **video,
+ Emotion_Module_Options *opt)
+{
+#ifdef HAVE_ECORE_X
+ Ecore_X_Window *roots;
+ int num;
+#endif
+
+ if (!module)
+ return EINA_FALSE;
+
+ if (_emotion_gstreamer_log_domain < 0)
+ {
+ eina_threads_init();
+ eina_log_threads_enable();
+ _emotion_gstreamer_log_domain = eina_log_domain_register
+ ("emotion-gstreamer", EINA_COLOR_LIGHTCYAN);
+ if (_emotion_gstreamer_log_domain < 0)
+ {
+ EINA_LOG_CRIT("Could not register log domain 'emotion-gstreamer'");
+ return EINA_FALSE;
+ }
+ }
+
+ if (!em_module.init(obj, video, opt))
+ return EINA_FALSE;
+
+#ifdef HAVE_ECORE_X
+ ecore_event_handler_add(ECORE_X_EVENT_WINDOW_DESTROY, _ecore_event_x_destroy, NULL);
+#endif
+
+ if (getenv("EMOTION_FPS_DEBUG")) debug_fps = EINA_TRUE;
+
+ /* NOTE(review): eina_threads_init() may run twice on the first open
+ * (once above, once here) while module_close() shuts down once —
+ * verify the init/shutdown refcount balance. */
+ eina_threads_init();
+
+#ifdef HAVE_ECORE_X
+ if (ecore_x_init(NULL) > 0)
+ {
+ _ecore_x_available = EINA_TRUE;
+ }
+
+ /* Check if the window manager is able to handle our special Xv window. */
+ roots = _ecore_x_available ? ecore_x_window_root_list(&num) : NULL;
+ if (roots && num > 0)
+ {
+ Ecore_X_Window win, twin;
+ int nwins;
+
+ /* verify _NET_SUPPORTING_WM_CHECK points to a window that points
+ * back at itself (the EWMH handshake for a live WM) */
+ nwins = ecore_x_window_prop_window_get(roots[0],
+ ECORE_X_ATOM_NET_SUPPORTING_WM_CHECK,
+ &win, 1);
+ if (nwins > 0)
+ {
+ nwins = ecore_x_window_prop_window_get(win,
+ ECORE_X_ATOM_NET_SUPPORTING_WM_CHECK,
+ &twin, 1);
+ if (nwins > 0 && twin == win)
+ {
+ Ecore_X_Atom *supported;
+ int supported_num;
+ int i;
+
+ if (ecore_x_netwm_supported_get(roots[0], &supported, &supported_num))
+ {
+ Eina_Bool parent = EINA_FALSE;
+ Eina_Bool video_position = EINA_FALSE;
+
+ for (i = 0; i < supported_num; ++i)
+ {
+ if (supported[i] == ECORE_X_ATOM_E_VIDEO_PARENT)
+ parent = EINA_TRUE;
+ else if (supported[i] == ECORE_X_ATOM_E_VIDEO_POSITION)
+ video_position = EINA_TRUE;
+ if (parent && video_position)
+ break;
+ }
+
+ if (parent && video_position)
+ {
+ window_manager_video = EINA_TRUE;
+ }
+ }
+ }
+ }
+ }
+ free(roots); /* free(NULL) is a no-op when no roots were listed */
+#endif
+
+ *module = &em_module;
+ return EINA_TRUE;
+}
+
+/* Emotion module exit: shut the backend instance down and release the
+ * X11 and Eina thread subsystems acquired in module_open(). */
+static void
+module_close(Emotion_Video_Module *module EINA_UNUSED,
+ void *video)
+{
+ em_module.shutdown(video);
+
+#ifdef HAVE_ECORE_X
+ if (_ecore_x_available)
+ {
+ ecore_x_shutdown();
+ }
+#endif
+
+ eina_threads_shutdown();
+}
+
+/* Global module init: bring up GStreamer, register the static
+ * "emotion-sink" video sink plugin and register this backend with
+ * Emotion under the name "gstreamer".
+ * FIX: 'error' must be initialized to NULL before being passed as a
+ * GError** out-parameter (GLib contract), and freed on failure —
+ * previously it was uninitialized and leaked; its message is now
+ * logged too. */
+Eina_Bool
+gstreamer_module_init(void)
+{
+ GError *error = NULL;
+
+ if (!gst_init_check(0, NULL, &error))
+ {
+ EINA_LOG_CRIT("Could not init GStreamer");
+ if (error)
+ {
+ EINA_LOG_CRIT("%s", error->message);
+ g_error_free(error);
+ }
+ return EINA_FALSE;
+ }
+
+ if (gst_plugin_register_static(GST_VERSION_MAJOR, GST_VERSION_MINOR,
+ "emotion-sink",
+ "video sink plugin for Emotion",
+ gstreamer_plugin_init,
+ VERSION,
+ "LGPL",
+ "Enlightenment",
+ PACKAGE,
+ "http://www.enlightenment.org/") == FALSE)
+ {
+ EINA_LOG_CRIT("Could not load static gstreamer video sink for Emotion.");
+ return EINA_FALSE;
+ }
+
+ return _emotion_module_register("gstreamer", module_open, module_close);
+}
+
+/* Global module shutdown: unregister from Emotion and deinit GStreamer. */
+void
+gstreamer_module_shutdown(void)
+{
+ _emotion_module_unregister("gstreamer");
+
+ gst_deinit();
+}
+
+#ifndef EMOTION_STATIC_BUILD_GSTREAMER
+
+/* When built as a shared module, hook init/shutdown into Eina's module
+ * loader; static builds call them explicitly instead. */
+EINA_MODULE_INIT(gstreamer_module_init);
+EINA_MODULE_SHUTDOWN(gstreamer_module_shutdown);
+
+#endif
+
+/* gst_tag_list_foreach callback: copy the tags we care about into
+ * ev->metadata. Strings returned by gst_tag_list_get_string are owned
+ * by the metadata struct and freed in _free_metadata(); the previous
+ * value is g_free'd first so repeated tag callbacks do not leak.
+ * Note the loop shape: each matching branch handles only the first
+ * value of the tag and then breaks out of the count loop. */
+static void
+_for_each_tag(GstTagList const* list,
+ gchar const* tag,
+ void *data)
+{
+ Emotion_Gstreamer_Video *ev;
+ int i;
+ int count;
+
+
+ ev = (Emotion_Gstreamer_Video*)data;
+
+ if (!ev || !ev->metadata) return;
+
+ count = gst_tag_list_get_tag_size(list, tag);
+
+ for (i = 0; i < count; i++)
+ {
+ if (!strcmp(tag, GST_TAG_TITLE))
+ {
+ char *str;
+ g_free(ev->metadata->title);
+ if (gst_tag_list_get_string(list, GST_TAG_TITLE, &str))
+ ev->metadata->title = str;
+ else
+ ev->metadata->title = NULL;
+ break;
+ }
+ if (!strcmp(tag, GST_TAG_ALBUM))
+ {
+ gchar *str;
+ g_free(ev->metadata->album);
+ if (gst_tag_list_get_string(list, GST_TAG_ALBUM, &str))
+ ev->metadata->album = str;
+ else
+ ev->metadata->album = NULL;
+ break;
+ }
+ if (!strcmp(tag, GST_TAG_ARTIST))
+ {
+ gchar *str;
+ g_free(ev->metadata->artist);
+ if (gst_tag_list_get_string(list, GST_TAG_ARTIST, &str))
+ ev->metadata->artist = str;
+ else
+ ev->metadata->artist = NULL;
+ break;
+ }
+ if (!strcmp(tag, GST_TAG_GENRE))
+ {
+ gchar *str;
+ g_free(ev->metadata->genre);
+ if (gst_tag_list_get_string(list, GST_TAG_GENRE, &str))
+ ev->metadata->genre = str;
+ else
+ ev->metadata->genre = NULL;
+ break;
+ }
+ if (!strcmp(tag, GST_TAG_COMMENT))
+ {
+ gchar *str;
+ g_free(ev->metadata->comment);
+ if (gst_tag_list_get_string(list, GST_TAG_COMMENT, &str))
+ ev->metadata->comment = str;
+ else
+ ev->metadata->comment = NULL;
+ break;
+ }
+ if (!strcmp(tag, GST_TAG_DATE))
+ {
+ gchar *str;
+ const GValue *date;
+ g_free(ev->metadata->year);
+ /* GST_TAG_DATE is a GDate value, not a string: stringify it */
+ date = gst_tag_list_get_value_index(list, GST_TAG_DATE, 0);
+ if (date)
+ str = g_strdup_value_contents(date);
+ else
+ str = NULL;
+ ev->metadata->year = str;
+ break;
+ }
+
+ if (!strcmp(tag, GST_TAG_TRACK_NUMBER))
+ {
+ gchar *str;
+ const GValue *track;
+ g_free(ev->metadata->count);
+ /* track number is numeric: stringify via GValue */
+ track = gst_tag_list_get_value_index(list, GST_TAG_TRACK_NUMBER, 0);
+ if (track)
+ str = g_strdup_value_contents(track);
+ else
+ str = NULL;
+ ev->metadata->count = str;
+ break;
+ }
+
+#ifdef GST_TAG_CDDA_CDDB_DISCID
+ if (!strcmp(tag, GST_TAG_CDDA_CDDB_DISCID))
+ {
+ gchar *str;
+ const GValue *discid;
+ g_free(ev->metadata->disc_id);
+ discid = gst_tag_list_get_value_index(list, GST_TAG_CDDA_CDDB_DISCID, 0);
+ if (discid)
+ str = g_strdup_value_contents(discid);
+ else
+ str = NULL;
+ ev->metadata->disc_id = str;
+ break;
+ }
+#endif
+ }
+
+}
+
+/* Release every tag string (all g_malloc'd) and the container itself.
+ * g_free(NULL) is a safe no-op, so no per-field checks are needed. */
+static void
+_free_metadata(Emotion_Gstreamer_Metadata *md)
+{
+   if (!md) return;
+
+   g_free(md->disc_id);
+   g_free(md->count);
+   g_free(md->year);
+   g_free(md->comment);
+   g_free(md->genre);
+   g_free(md->artist);
+   g_free(md->album);
+   g_free(md->title);
+
+   free(md);
+}
+
+static Eina_Bool
+_em_restart_stream(void *data)
+{
+ Emotion_Gstreamer_Video *ev;
+
+ ev = data;
+
+ ev->pipeline = gstreamer_video_sink_new(ev, ev->obj, ev->uri);
+
+ if (ev->pipeline)
+ {
+ ev->eos_bus = gst_pipeline_get_bus(GST_PIPELINE(ev->pipeline));
+ if (!ev->eos_bus)
+ {
+ ERR("could not get the bus");
+ return EINA_FALSE;
+ }
+
+ gst_bus_set_sync_handler(ev->eos_bus, _eos_sync_fct, ev);
+ }
+
+ restart_idler = NULL;
+
+ return ECORE_CALLBACK_CANCEL;
+}
+
+/* Walk the element's src pads looking for negotiated video caps; on
+ * success fill *width / *height and return EINA_TRUE. */
+static Eina_Bool
+_video_size_get(GstElement *elem, int *width, int *height)
+{
+   GstIterator *itr = NULL;
+   GstCaps *caps;
+   GstStructure *str;
+   gpointer pad;
+   Eina_Bool ret = EINA_FALSE;
+
+   itr = gst_element_iterate_src_pads(elem);
+   /* BUG FIX: only GST_ITERATOR_OK guarantees 'pad' was filled in; the
+    * old truthiness test also accepted GST_ITERATOR_RESYNC/ERROR and
+    * then dereferenced an uninitialized pointer. */
+   while (gst_iterator_next(itr, &pad) == GST_ITERATOR_OK && !ret)
+     {
+        caps = gst_pad_get_caps(GST_PAD(pad));
+        str = gst_caps_get_structure(caps, 0);
+        if (g_strrstr(gst_structure_get_name(str), "video"))
+          {
+             if (gst_structure_get_int(str, "width", width) && gst_structure_get_int(str, "height", height))
+               ret = EINA_TRUE;
+          }
+        gst_caps_unref(caps);
+        gst_object_unref(pad);
+     }
+   gst_iterator_free(itr);
+
+   return ret;
+}
+
+/* Main-loop trampoline: propagate the detected source size (set by
+ * _no_more_pads() on the streaming thread) to the emotion object. */
+static void
+_main_frame_resize(void *data)
+{
+   Emotion_Gstreamer_Video *ev = data;
+   double ratio;
+
+   /* BUG FIX: guard the division — src_height can still be 0 if no
+    * size was negotiated yet. */
+   if (ev->src_height <= 0) return;
+
+   ratio = (double)ev->src_width / (double)ev->src_height;
+   _emotion_frame_resize(ev->obj, ev->src_width, ev->src_height, ratio);
+}
+
+/* "no-more-pads" handler: once decodebin exposed everything, find the
+ * first child element that knows the video size and schedule a frame
+ * resize on the main loop. */
+static void
+_no_more_pads(GstElement *decodebin, gpointer data)
+{
+   GstIterator *itr = NULL;
+   gpointer elem;
+   Emotion_Gstreamer_Video *ev = data;
+
+   itr = gst_bin_iterate_elements(GST_BIN(decodebin));
+   /* BUG FIX: only dereference 'elem' when the iterator reports
+    * GST_ITERATOR_OK; RESYNC/ERROR leave it uninitialized. */
+   while (gst_iterator_next(itr, &elem) == GST_ITERATOR_OK)
+     {
+        if (_video_size_get(GST_ELEMENT(elem), &ev->src_width, &ev->src_height))
+          {
+             ecore_main_loop_thread_safe_call_async(_main_frame_resize, ev);
+             gst_object_unref(elem);
+             break;
+          }
+        gst_object_unref(elem);
+     }
+   gst_iterator_free(itr);
+}
+
+/* Main-loop half of bus message handling: messages are captured on the
+ * streaming thread by _eos_sync_fct() and forwarded here wrapped in an
+ * Emotion_Gstreamer_Message. Frees the wrapper when done. */
+static void
+_eos_main_fct(void *data)
+{
+   Emotion_Gstreamer_Message *send;
+   Emotion_Gstreamer_Video *ev;
+   GstMessage *msg;
+
+   send = data;
+   ev = send->ev;
+   msg = send->msg;
+
+   /* Deliver a pending "playback started" exactly once, unless the
+    * object is already being torn down. */
+   if (ev->play_started && !ev->delete_me)
+     {
+        _emotion_playback_started(ev->obj);
+        ev->play_started = 0;
+     }
+
+   switch (GST_MESSAGE_TYPE(msg))
+     {
+      case GST_MESSAGE_EOS:
+         if (!ev->delete_me)
+           {
+              ev->play = 0;
+              _emotion_decode_stop(ev->obj);
+              _emotion_playback_finished(ev->obj);
+           }
+         break;
+      case GST_MESSAGE_TAG:
+         if (!ev->delete_me)
+           {
+              GstTagList *new_tags;
+              gst_message_parse_tag(msg, &new_tags);
+              if (new_tags)
+                {
+                   /* copy interesting tags into ev->metadata */
+                   gst_tag_list_foreach(new_tags,
+                                        (GstTagForeachFunc)_for_each_tag,
+                                        ev);
+                   gst_tag_list_free(new_tags);
+                }
+           }
+         break;
+      case GST_MESSAGE_ASYNC_DONE:
+         if (!ev->delete_me) _emotion_seek_done(ev->obj);
+         break;
+      case GST_MESSAGE_STREAM_STATUS:
+         break;
+      case GST_MESSAGE_STATE_CHANGED:
+         if (!ev->delete_me)
+           {
+              /* Connect "no-more-pads" on the message source at most
+               * once: disconnect returns 0 if no handler was attached
+               * yet, in which case we attach one. */
+              if (!g_signal_handlers_disconnect_by_func(msg->src, _no_more_pads, ev))
+                g_signal_connect(msg->src, "no-more-pads", G_CALLBACK(_no_more_pads), ev);
+           }
+         break;
+      case GST_MESSAGE_ERROR:
+         /* Fatal: tear the pipeline down; if we were using the priority
+          * (overlay) path, fall back to canvas rendering and retry from
+          * an idler. */
+         em_cleanup(ev);
+
+         if (ev->priority)
+           {
+              ERR("Switching back to canvas rendering.");
+              ev->priority = EINA_FALSE;
+              priority_overide++;
+
+              restart_idler = ecore_idler_add(_em_restart_stream, ev);
+           }
+         break;
+      default:
+         ERR("bus say: %s [%i - %s]",
+             GST_MESSAGE_SRC_NAME(msg),
+             GST_MESSAGE_TYPE(msg),
+             GST_MESSAGE_TYPE_NAME(msg));
+         break;
+     }
+
+   emotion_gstreamer_message_free(send);
+}
+
+/* Synchronous bus handler — runs on the GStreamer streaming thread, so
+ * it must not touch Evas/Ecore state directly. Interesting messages are
+ * wrapped and forwarded to _eos_main_fct() on the main loop; everything
+ * is dropped here (GST_BUS_DROP) and unreffed manually. */
+static GstBusSyncReply
+_eos_sync_fct(GstBus *bus EINA_UNUSED, GstMessage *msg, gpointer data)
+{
+   Emotion_Gstreamer_Video *ev = data;
+   Emotion_Gstreamer_Message *send;
+
+   switch (GST_MESSAGE_TYPE(msg))
+     {
+      case GST_MESSAGE_EOS:
+      case GST_MESSAGE_TAG:
+      case GST_MESSAGE_ASYNC_DONE:
+      case GST_MESSAGE_STREAM_STATUS:
+         INF("bus say: %s [%i - %s]",
+             GST_MESSAGE_SRC_NAME(msg),
+             GST_MESSAGE_TYPE(msg),
+             GST_MESSAGE_TYPE_NAME(msg));
+         send = emotion_gstreamer_message_alloc(ev, msg);
+
+         if (send) ecore_main_loop_thread_safe_call_async(_eos_main_fct, send);
+
+         break;
+
+      case GST_MESSAGE_STATE_CHANGED:
+        {
+           GstState old_state, new_state;
+
+           gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
+           INF("Element %s changed state from %s to %s.",
+               GST_OBJECT_NAME(msg->src),
+               gst_element_state_get_name(old_state),
+               gst_element_state_get_name(new_state));
+
+           /* Only decodebin reaching READY is interesting: that is when
+            * _eos_main_fct() hooks up the "no-more-pads" callback. */
+           if (!strncmp(GST_OBJECT_NAME(msg->src), "decodebin", 9) && !strcmp(gst_element_state_get_name(new_state), "READY"))
+             {
+                send = emotion_gstreamer_message_alloc(ev, msg);
+
+                if (send) ecore_main_loop_thread_safe_call_async(_eos_main_fct, send);
+             }
+           break;
+        }
+      case GST_MESSAGE_ERROR:
+        {
+           GError *error;
+           gchar *debug;
+
+           gst_message_parse_error(msg, &error, &debug);
+           ERR("ERROR from element %s: %s", GST_OBJECT_NAME(msg->src), error->message);
+           ERR("Debugging info: %s", (debug) ? debug : "none");
+           g_error_free(error);
+           g_free(debug);
+
+           /* An xvimagesink failure triggers the canvas-rendering
+            * fallback handled in _eos_main_fct()'s ERROR case. */
+           if (strncmp(GST_OBJECT_NAME(msg->src), "xvimagesink", 11) == 0)
+             {
+                send = emotion_gstreamer_message_alloc(ev, msg);
+
+                if (send) ecore_main_loop_thread_safe_call_async(_eos_main_fct, send);
+             }
+           break;
+        }
+      case GST_MESSAGE_WARNING:
+        {
+           GError *error;
+           gchar *debug;
+
+           gst_message_parse_warning(msg, &error, &debug);
+           WRN("WARNING from element %s: %s", GST_OBJECT_NAME(msg->src), error->message);
+           WRN("Debugging info: %s", (debug) ? debug : "none");
+           g_error_free(error);
+           g_free(debug);
+           break;
+        }
+      default:
+         WRN("bus say: %s [%i - %s]",
+             GST_MESSAGE_SRC_NAME(msg),
+             GST_MESSAGE_TYPE(msg),
+             GST_MESSAGE_TYPE_NAME(msg));
+         break;
+     }
+
+   /* We returned GST_BUS_DROP, so the message must be unreffed here. */
+   gst_message_unref(msg);
+
+   return GST_BUS_DROP;
+}
+
+/* Inspect the prerolled playbin2 pipeline and build the video/audio
+ * stream lists, set up a visualization for audio-only media, and emit
+ * the open-done/playback-started callbacks. Returns EINA_FALSE when the
+ * pipeline cannot be brought to a usable state. */
+Eina_Bool
+_emotion_gstreamer_video_pipeline_parse(Emotion_Gstreamer_Video *ev,
+                                        Eina_Bool force)
+{
+   /* BUG FIX: gst_element_get_state() returns GstStateChangeReturn,
+    * not gboolean — use the proper enum type. */
+   GstStateChangeReturn res;
+   int i;
+
+   if (ev->pipeline_parsed)
+     return EINA_TRUE;
+
+   /* On a forced parse, cancel any outstanding prefetch threads first. */
+   if (force && ev->threads)
+     {
+        Ecore_Thread *t;
+
+        EINA_LIST_FREE(ev->threads, t)
+          ecore_thread_cancel(t);
+     }
+
+   if (ev->threads)
+     return EINA_FALSE;
+
+   res = gst_element_get_state(ev->pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
+   if (res == GST_STATE_CHANGE_NO_PREROLL)
+     {
+        /* Live sources never preroll in PAUSED: push to PLAYING instead. */
+        gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+
+        res = gst_element_get_state(ev->pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
+     }
+
+   /** NOTE: you need to set: GST_DEBUG_DUMP_DOT_DIR=/tmp EMOTION_ENGINE=gstreamer to save the $EMOTION_GSTREAMER_DOT file in '/tmp' */
+   /** then call dot -Tpng -oemotion_pipeline.png /tmp/$TIMESTAMP-$EMOTION_GSTREAMER_DOT.dot */
+   if (getenv("EMOTION_GSTREAMER_DOT"))
+     GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(ev->pipeline),
+                                       GST_DEBUG_GRAPH_SHOW_ALL,
+                                       getenv("EMOTION_GSTREAMER_DOT"));
+
+   if (!(res == GST_STATE_CHANGE_SUCCESS
+         || res == GST_STATE_CHANGE_NO_PREROLL))
+     {
+        ERR("Unable to get GST_CLOCK_TIME_NONE.");
+        return EINA_FALSE;
+     }
+
+   g_object_get(G_OBJECT(ev->pipeline),
+                "n-audio", &ev->audio_stream_nbr,
+                "n-video", &ev->video_stream_nbr,
+                NULL);
+
+   if ((ev->video_stream_nbr == 0) && (ev->audio_stream_nbr == 0))
+     {
+        ERR("No audio nor video stream found");
+        return EINA_FALSE;
+     }
+
+   /* video streams: record size, framerate, fourcc and duration */
+   for (i = 0; i < ev->video_stream_nbr; i++)
+     {
+        Emotion_Video_Stream *vstream;
+        GstPad *pad = NULL;
+        GstCaps *caps;
+        GstStructure *structure;
+        GstQuery *query;
+        const GValue *val;
+        gchar *str;
+
+        gdouble length_time = 0.0;
+        gint width;
+        gint height;
+        gint fps_num;
+        gint fps_den;
+        guint32 fourcc = 0;
+
+        g_signal_emit_by_name(ev->pipeline, "get-video-pad", i, &pad);
+        if (!pad)
+          continue;
+
+        caps = gst_pad_get_negotiated_caps(pad);
+        if (!caps)
+          goto unref_pad_v;
+        structure = gst_caps_get_structure(caps, 0);
+        str = gst_caps_to_string(caps);
+
+        if (!gst_structure_get_int(structure, "width", &width))
+          goto unref_caps_v;
+        if (!gst_structure_get_int(structure, "height", &height))
+          goto unref_caps_v;
+        if (!gst_structure_get_fraction(structure, "framerate", &fps_num, &fps_den))
+          goto unref_caps_v;
+
+        if (g_str_has_prefix(str, "video/x-raw-yuv"))
+          {
+             val = gst_structure_get_value(structure, "format");
+             fourcc = gst_value_get_fourcc(val);
+          }
+        else if (g_str_has_prefix(str, "video/x-raw-rgb"))
+          fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
+        else
+          goto unref_caps_v;
+
+        query = gst_query_new_duration(GST_FORMAT_TIME);
+        if (gst_pad_peer_query(pad, query))
+          {
+             gint64 t;
+
+             gst_query_parse_duration(query, NULL, &t);
+             length_time = (double)t / (double)GST_SECOND;
+          }
+        else
+          goto unref_query_v;
+
+        vstream = emotion_video_stream_new(ev);
+        if (!vstream) goto unref_query_v;
+
+        vstream->length_time = length_time;
+        vstream->width = width;
+        vstream->height = height;
+        vstream->fps_num = fps_num;
+        vstream->fps_den = fps_den;
+        vstream->fourcc = fourcc;
+        vstream->index = i;
+
+     unref_query_v:
+        gst_query_unref(query);
+     unref_caps_v:
+        /* BUG FIX: gst_caps_to_string() returns an allocated string
+         * that was previously leaked on every iteration. */
+        g_free(str);
+        gst_caps_unref(caps);
+     unref_pad_v:
+        gst_object_unref(pad);
+     }
+
+   /* Audio streams: record channels, samplerate and duration */
+   for (i = 0; i < ev->audio_stream_nbr; i++)
+     {
+        Emotion_Audio_Stream *astream;
+        GstPad *pad;
+        GstCaps *caps;
+        GstStructure *structure;
+        GstQuery *query;
+
+        gdouble length_time = 0.0;
+        gint channels;
+        gint samplerate;
+
+        g_signal_emit_by_name(ev->pipeline, "get-audio-pad", i, &pad);
+        if (!pad)
+          continue;
+
+        caps = gst_pad_get_negotiated_caps(pad);
+        if (!caps)
+          goto unref_pad_a;
+        structure = gst_caps_get_structure(caps, 0);
+
+        if (!gst_structure_get_int(structure, "channels", &channels))
+          goto unref_caps_a;
+        if (!gst_structure_get_int(structure, "rate", &samplerate))
+          goto unref_caps_a;
+
+        query = gst_query_new_duration(GST_FORMAT_TIME);
+        if (gst_pad_peer_query(pad, query))
+          {
+             gint64 t;
+
+             gst_query_parse_duration(query, NULL, &t);
+             length_time = (double)t / (double)GST_SECOND;
+          }
+        else
+          goto unref_query_a;
+
+        astream = calloc(1, sizeof(Emotion_Audio_Stream));
+        if (!astream) continue;
+        ev->audio_streams = eina_list_append(ev->audio_streams, astream);
+        if (eina_error_get())
+          {
+             free(astream);
+             continue;
+          }
+
+        astream->length_time = length_time;
+        astream->channels = channels;
+        astream->samplerate = samplerate;
+
+     unref_query_a:
+        gst_query_unref(query);
+     unref_caps_a:
+        gst_caps_unref(caps);
+     unref_pad_a:
+        gst_object_unref(pad);
+     }
+
+   /* Visualization sink: audio-only media gets a synthetic 320x200
+    * video stream rendered by a visualization plugin. */
+   if (ev->video_stream_nbr == 0)
+     {
+        GstElement *vis = NULL;
+        Emotion_Video_Stream *vstream;
+        Emotion_Audio_Stream *astream;
+        gint flags;
+        const char *vis_name;
+
+        if (!(vis_name = emotion_visualization_element_name_get(ev->vis)))
+          {
+             WRN("pb vis name %d", ev->vis);
+             goto finalize;
+          }
+
+        astream = eina_list_data_get(ev->audio_streams);
+        /* BUG FIX: every audio stream may have been skipped above, so
+         * the list can be empty; the old code dereferenced a NULL
+         * astream a few lines below. */
+        if (!astream) goto finalize;
+
+        vis = gst_element_factory_make(vis_name, "vissink");
+        vstream = emotion_video_stream_new(ev);
+        if (!vstream)
+          {
+             /* BUG FIX: this message was on the success branch before
+              * (if/else inverted); also release the vis element we just
+              * created instead of leaking it. */
+             DBG("could not create visualization stream");
+             if (vis) gst_object_unref(vis);
+             goto finalize;
+          }
+
+        vstream->length_time = astream->length_time;
+        vstream->width = 320;
+        vstream->height = 200;
+        vstream->fps_num = 25;
+        vstream->fps_den = 1;
+        vstream->fourcc = GST_MAKE_FOURCC('A', 'R', 'G', 'B');
+
+        /* 0x00000008 == GST_PLAY_FLAG_VIS on playbin2 */
+        g_object_set(G_OBJECT(ev->pipeline), "vis-plugin", vis, NULL);
+        g_object_get(G_OBJECT(ev->pipeline), "flags", &flags, NULL);
+        flags |= 0x00000008;
+        g_object_set(G_OBJECT(ev->pipeline), "flags", flags, NULL);
+     }
+
+ finalize:
+
+   ev->video_stream_nbr = eina_list_count(ev->video_streams);
+   ev->audio_stream_nbr = eina_list_count(ev->audio_streams);
+
+   if (ev->video_stream_nbr == 1)
+     {
+        Emotion_Video_Stream *vstream;
+
+        vstream = eina_list_data_get(ev->video_streams);
+        ev->ratio = (double)vstream->width / (double)vstream->height;
+        _emotion_frame_resize(ev->obj, vstream->width, vstream->height, ev->ratio);
+     }
+
+   {
+      /* summary of what was found, for debugging */
+      Emotion_Video_Stream *vstream;
+      Emotion_Audio_Stream *astream;
+
+      vstream = eina_list_data_get(ev->video_streams);
+      if (vstream)
+        {
+           DBG("video size=%dx%d, fps=%d/%d, "
+               "fourcc=%"GST_FOURCC_FORMAT", length=%"GST_TIME_FORMAT,
+               vstream->width, vstream->height, vstream->fps_num, vstream->fps_den,
+               GST_FOURCC_ARGS(vstream->fourcc),
+               GST_TIME_ARGS((guint64)(vstream->length_time * GST_SECOND)));
+        }
+
+      astream = eina_list_data_get(ev->audio_streams);
+      if (astream)
+        {
+           DBG("audio channels=%d, rate=%d, length=%"GST_TIME_FORMAT,
+               astream->channels, astream->samplerate,
+               GST_TIME_ARGS((guint64)(astream->length_time * GST_SECOND)));
+        }
+   }
+
+   if (ev->metadata)
+     _free_metadata(ev->metadata);
+   ev->metadata = calloc(1, sizeof(Emotion_Gstreamer_Metadata));
+
+   ev->pipeline_parsed = EINA_TRUE;
+
+   em_audio_channel_volume_set(ev, ev->volume);
+   em_audio_channel_mute_set(ev, ev->audio_mute);
+
+   if (ev->play_started)
+     {
+        _emotion_playback_started(ev->obj);
+        ev->play_started = 0;
+     }
+
+   _emotion_open_done(ev->obj);
+
+   return EINA_TRUE;
+}
diff --git a/src/modules/emotion/gstreamer/emotion_gstreamer.h b/src/modules/emotion/gstreamer/emotion_gstreamer.h
new file mode 100644
index 0000000000..3218a2b7f7
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_gstreamer.h
@@ -0,0 +1,330 @@
+#ifndef __EMOTION_GSTREAMER_H__
+#define __EMOTION_GSTREAMER_H__
+
+/* Converter from a GStreamer frame buffer into Evas image data;
+ * output_height lets converters that crop/pad rows differ from h. */
+typedef void (*Evas_Video_Convert_Cb)(unsigned char *evas_data,
+                                      const unsigned char *gst_data,
+                                      unsigned int w,
+                                      unsigned int h,
+                                      unsigned int output_height);
+
+/* Forward typedefs for the sink GObject and the backend's bookkeeping
+ * structures defined below. */
+typedef struct _EvasVideoSinkPrivate EvasVideoSinkPrivate;
+typedef struct _EvasVideoSink EvasVideoSink;
+typedef struct _EvasVideoSinkClass EvasVideoSinkClass;
+typedef struct _Emotion_Gstreamer_Video Emotion_Gstreamer_Video;
+typedef struct _Emotion_Audio_Stream Emotion_Audio_Stream;
+typedef struct _Emotion_Gstreamer_Metadata Emotion_Gstreamer_Metadata;
+typedef struct _Emotion_Gstreamer_Buffer Emotion_Gstreamer_Buffer;
+typedef struct _Emotion_Gstreamer_Message Emotion_Gstreamer_Message;
+typedef struct _Emotion_Video_Stream Emotion_Video_Stream;
+
+/* Per-video-stream properties collected while parsing the pipeline
+ * (see _emotion_gstreamer_video_pipeline_parse()). */
+struct _Emotion_Video_Stream
+{
+   gdouble length_time;   /* duration in seconds */
+   gint width;
+   gint height;
+   gint fps_num;          /* framerate as a num/den fraction */
+   gint fps_den;
+   guint32 fourcc;        /* pixel format fourcc */
+   int index;             /* stream index within the pipeline */
+};
+
+/* Per-audio-stream properties collected while parsing the pipeline. */
+struct _Emotion_Audio_Stream
+{
+   gdouble length_time;   /* duration in seconds */
+   gint channels;
+   gint samplerate;
+};
+
+/* Tag metadata extracted in _for_each_tag(); every field is a
+ * g_malloc'd string owned by this struct (see _free_metadata()). */
+struct _Emotion_Gstreamer_Metadata
+{
+   char *title;
+   char *album;
+   char *artist;
+   char *genre;
+   char *comment;
+   char *year;      /* stringified GST_TAG_DATE */
+   char *count;     /* stringified GST_TAG_TRACK_NUMBER */
+   char *disc_id;   /* stringified GST_TAG_CDDA_CDDB_DISCID */
+};
+
+/* Per-playback state of the GStreamer backend: pipeline handles,
+ * discovered streams, the target Evas object and playback flags. */
+struct _Emotion_Gstreamer_Video
+{
+   /* Gstreamer elements */
+   GstElement *pipeline;
+   GstElement *sink;
+   GstElement *esink;    /* Evas software sink */
+   GstElement *xvsink;   /* optional Xv overlay sink */
+   GstElement *tee;
+   GstElement *convert;
+
+   GstPad *eteepad;
+   GstPad *xvteepad;
+   GstPad *xvpad;
+   Eina_List *threads;   /* pending Ecore_Thread prefetch jobs */
+
+   /* eos */
+   GstBus *eos_bus;
+
+   /* Streams */
+   Eina_List *video_streams;   /* of Emotion_Video_Stream */
+   Eina_List *audio_streams;   /* of Emotion_Audio_Stream */
+
+   int video_stream_nbr;
+   int audio_stream_nbr;
+
+   /* We need to keep a copy of the last inserted buffer as evas doesn't copy YUV data around */
+   GstBuffer *last_buffer;
+
+   /* Evas object */
+   Evas_Object *obj;
+
+   /* Characteristics of stream */
+   double position;
+   double ratio;      /* width / height aspect ratio */
+   double volume;
+
+   volatile int seek_to;
+   volatile int get_poslen;
+
+   Emotion_Gstreamer_Metadata *metadata;
+
+#ifdef HAVE_ECORE_X
+   Ecore_X_Window win;   /* window used for Xv overlay rendering */
+#endif
+
+   const char *uri;
+
+   Emotion_Gstreamer_Buffer *send;
+
+   EvasVideoSinkPrivate *sink_data;
+
+   Emotion_Vis vis;   /* visualization type for audio-only media */
+
+   int in;
+   int out;
+
+   /* FPS debugging counters (see debug_fps) */
+   int frames;
+   int flapse;
+   double rtime;
+   double rlapse;
+
+   struct
+   {
+      double width;
+      double height;
+   } fill;
+
+   Eina_Bool play : 1;
+   Eina_Bool play_started : 1;
+   Eina_Bool video_mute : 1;
+   Eina_Bool audio_mute : 1;
+   Eina_Bool pipeline_parsed : 1;
+   Eina_Bool delete_me : 1;       /* object teardown in progress */
+   Eina_Bool samsung : 1;         /* Samsung ST12/TM12 buffer handling */
+   Eina_Bool kill_buffer : 1;
+   Eina_Bool stream : 1;
+   Eina_Bool priority : 1;        /* overlay (Xv) rendering path active */
+
+   int src_width;    /* size detected by _no_more_pads() */
+   int src_height;
+};
+
+/* GObject instance of the Evas video sink element. */
+struct _EvasVideoSink {
+   /*< private >*/
+   GstVideoSink parent;
+   EvasVideoSinkPrivate *priv;
+};
+
+/* GObject class structure of the Evas video sink element. */
+struct _EvasVideoSinkClass {
+   /*< private >*/
+   GstVideoSinkClass parent_class;
+};
+
+/* Private sink state, shared between the GStreamer streaming thread
+ * and the Ecore main loop; the lock/condition pair serializes frame
+ * hand-off (see evas_video_sink_render()). */
+struct _EvasVideoSinkPrivate {
+   EINA_REFCOUNT;
+
+   Evas_Object *o;              /* target Evas image object (may be NULL) */
+
+   Emotion_Gstreamer_Video *ev;
+
+   Evas_Video_Convert_Cb func; /* colorspace converter for current caps */
+
+   unsigned int width;
+   unsigned int height;
+   unsigned int source_height;  /* height before force_height rounding */
+   Evas_Colorspace eformat;
+
+   Eina_Lock m;
+   Eina_Condition c;            /* created on m; render() waits on it */
+
+   // If this is TRUE all processing should finish ASAP
+   // This is necessary because there could be a race between
+   // unlock() and render(), where unlock() wins, signals the
+   // GCond, then render() tries to render a frame although
+   // everything else isn't running anymore. This will lead
+   // to deadlocks because render() holds the stream lock.
+   //
+   // Protected by the buffer mutex
+   Eina_Bool unlocked : 1;
+   Eina_Bool samsung : 1; /** ST12 will only define a Samsung specific GstBuffer */
+};
+
+/* Frame wrapper passed from the sink (streaming thread) to the main
+ * loop for rendering; owns a ref on the GstBuffer. */
+struct _Emotion_Gstreamer_Buffer
+{
+   Emotion_Gstreamer_Video *ev;
+   EvasVideoSinkPrivate *sink;
+
+   GstBuffer *frame;
+
+   Eina_Bool preroll : 1;   /* frame came from preroll, not render */
+   Eina_Bool force : 1;
+};
+
+/* Bus message wrapper forwarded from _eos_sync_fct() (streaming
+ * thread) to _eos_main_fct() (main loop). */
+struct _Emotion_Gstreamer_Message
+{
+   Emotion_Gstreamer_Video *ev;
+
+   GstMessage *msg;
+};
+
+/* Globals defined in emotion_gstreamer.c */
+extern Eina_Bool window_manager_video;
+extern Eina_Bool debug_fps;
+extern int _emotion_gstreamer_log_domain;
+extern Eina_Bool _ecore_x_available;
+
+/* Logging helpers bound to this backend's Eina log domain. */
+#define DBG(...) EINA_LOG_DOM_DBG(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define INF(...) EINA_LOG_DOM_INFO(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define WRN(...) EINA_LOG_DOM_WARN(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define ERR(...) EINA_LOG_DOM_ERR(_emotion_gstreamer_log_domain, __VA_ARGS__)
+#define CRITICAL(...) EINA_LOG_DOM_CRIT(_emotion_gstreamer_log_domain, __VA_ARGS__)
+
+/* Standard GObject type-cast/check boilerplate for EvasVideoSink. */
+#define EVAS_TYPE_VIDEO_SINK evas_video_sink_get_type()
+
+GType fakeeos_bin_get_type(void);
+
+#define EVAS_VIDEO_SINK(obj) \
+   (G_TYPE_CHECK_INSTANCE_CAST((obj), \
+                               EVAS_TYPE_VIDEO_SINK, EvasVideoSink))
+
+#define EVAS_VIDEO_SINK_CLASS(klass) \
+   (G_TYPE_CHECK_CLASS_CAST((klass), \
+                            EVAS_TYPE_VIDEO_SINK, EvasVideoSinkClass))
+
+#define EVAS_IS_VIDEO_SINK(obj) \
+   (G_TYPE_CHECK_INSTANCE_TYPE((obj), \
+                               EVAS_TYPE_VIDEO_SINK))
+
+#define EVAS_IS_VIDEO_SINK_CLASS(klass) \
+   (G_TYPE_CHECK_CLASS_TYPE((klass), \
+                            EVAS_TYPE_VIDEO_SINK))
+
+#define EVAS_VIDEO_SINK_GET_CLASS(obj) \
+   (G_TYPE_INSTANCE_GET_CLASS((obj), \
+                              EVAS_TYPE_VIDEO_SINK, EvasVideoSinkClass))
+
+#define GST_TYPE_FAKEEOS_BIN fakeeos_bin_get_type()
+
+/* Build the complete playback pipeline for 'uri', rendering into 'obj'. */
+GstElement *gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
+                                     Evas_Object *obj,
+                                     const char *uri);
+
+gboolean gstreamer_plugin_init(GstPlugin *plugin);
+
+/* Frame/message wrappers crossing the streaming-thread boundary. */
+Emotion_Gstreamer_Buffer *emotion_gstreamer_buffer_alloc(EvasVideoSinkPrivate *sink,
+                                                         GstBuffer *buffer,
+                                                         Eina_Bool preroll);
+void emotion_gstreamer_buffer_free(Emotion_Gstreamer_Buffer *send);
+
+Emotion_Gstreamer_Message *emotion_gstreamer_message_alloc(Emotion_Gstreamer_Video *ev,
+                                                           GstMessage *msg);
+void emotion_gstreamer_message_free(Emotion_Gstreamer_Message *send);
+Eina_Bool _emotion_gstreamer_video_pipeline_parse(Emotion_Gstreamer_Video *ev,
+                                                  Eina_Bool force);
+
+int em_shutdown(void *video);
+
+/* Lookup-table entries mapping a GStreamer pixel format to the Evas
+ * colorspace and conversion callback used by the sink (see the
+ * NULL-name-terminated tables in the converter source). */
+typedef struct _ColorSpace_FourCC_Convertion ColorSpace_FourCC_Convertion;
+typedef struct _ColorSpace_Format_Convertion ColorSpace_Format_Convertion;
+
+struct _ColorSpace_FourCC_Convertion
+{
+   const char *name;
+   guint32 fourcc;
+   Evas_Colorspace eformat;
+   Evas_Video_Convert_Cb func;
+   Eina_Bool force_height;   /* round height down to an even value */
+};
+
+struct _ColorSpace_Format_Convertion
+{
+   const char *name;
+   GstVideoFormat format;
+   Evas_Colorspace eformat;
+   Evas_Video_Convert_Cb func;
+};
+
+extern const ColorSpace_FourCC_Convertion colorspace_fourcc_convertion[];
+extern const ColorSpace_Format_Convertion colorspace_format_convertion[];
+
+/** Samsung specific infrastructure - do not touch, do not modify */
+#define MPLANE_IMGB_MAX_COUNT 4
+#define SCMN_IMGB_MAX_PLANE 4
+
+typedef struct _GstMultiPlaneImageBuffer GstMultiPlaneImageBuffer;
+typedef struct _SCMN_IMGB SCMN_IMGB;
+
+/* Vendor GstBuffer subclass carrying per-plane geometry and pointers
+ * for ST12/TM12 multi-plane frames. Layout is fixed by the vendor
+ * driver — do not reorder fields. */
+struct _GstMultiPlaneImageBuffer
+{
+   GstBuffer buffer;
+
+   /* width of each image plane */
+   gint width[MPLANE_IMGB_MAX_COUNT];
+   /* height of each image plane */
+   gint height[MPLANE_IMGB_MAX_COUNT];
+   /* stride of each image plane */
+   gint stride[MPLANE_IMGB_MAX_COUNT];
+   /* elevation of each image plane */
+   gint elevation[MPLANE_IMGB_MAX_COUNT];
+   /* user space address of each image plane */
+   gpointer uaddr[MPLANE_IMGB_MAX_COUNT];
+   /* Index of real address of each image plane, if needs */
+   gpointer index[MPLANE_IMGB_MAX_COUNT];
+   /* left position, if needs */
+   gint x;
+   /* top position, if needs */
+   gint y;
+   /* to align memory */
+   gint __dummy2;
+   /* arbitrary data */
+   gint data[16];
+};
+
+/* Vendor single-buffer image descriptor for ST12 frames. Layout is
+ * fixed by the vendor driver — do not reorder fields. */
+struct _SCMN_IMGB
+{
+   /* width of each image plane */
+   int width[SCMN_IMGB_MAX_PLANE];
+   /* height of each image plane */
+   int height[SCMN_IMGB_MAX_PLANE];
+   /* stride of each image plane */
+   int stride[SCMN_IMGB_MAX_PLANE];
+   /* elevation of each image plane */
+   int elevation[SCMN_IMGB_MAX_PLANE];
+   /* user space address of each image plane */
+   void * uaddr[SCMN_IMGB_MAX_PLANE];
+   /* physical address of each image plane, if needs */
+   void * p[SCMN_IMGB_MAX_PLANE];
+   /* color space type of image */
+   int cs;
+   /* left position, if needs */
+   int x;
+   /* top position, if needs */
+   int y;
+   /* to align memory */
+   int __dummy2;
+   /* arbitrary data */
+   int data[16];
+};
+
+/* Converters for the Samsung ST12 formats declared above. */
+void _evas_video_st12_multiplane(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w, unsigned int h, unsigned int output_height EINA_UNUSED);
+void _evas_video_st12(unsigned char *evas_data, const unsigned char *gst_data, unsigned int w EINA_UNUSED, unsigned int h, unsigned int output_height EINA_UNUSED);
+
+#endif /* __EMOTION_GSTREAMER_H__ */
diff --git a/src/modules/emotion/gstreamer/emotion_sink.c b/src/modules/emotion/gstreamer/emotion_sink.c
new file mode 100644
index 0000000000..dcba379200
--- /dev/null
+++ b/src/modules/emotion/gstreamer/emotion_sink.c
@@ -0,0 +1,1391 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#define HTTP_STREAM 0
+#define RTSP_STREAM 1
+#include <glib.h>
+#include <gst/gst.h>
+#include <glib-object.h>
+#include <gst/video/gstvideosink.h>
+#include <gst/video/video.h>
+
+#ifdef HAVE_ECORE_X
+# include <Ecore_X.h>
+# include <Ecore_Evas.h>
+# ifdef HAVE_XOVERLAY_H
+# include <gst/interfaces/xoverlay.h>
+# endif
+#endif
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_gstreamer.h"
+
+/* Sink pad template: the YUV layouts the converters support (including
+ * the Samsung ST12/TM12 variants) plus packed RGB. */
+static GstStaticPadTemplate sinktemplate = GST_STATIC_PAD_TEMPLATE("sink",
+                                                                   GST_PAD_SINK, GST_PAD_ALWAYS,
+                                                                   GST_STATIC_CAPS(GST_VIDEO_CAPS_YUV("{ I420, YV12, YUY2, NV12, ST12, TM12 }") ";"
+                                                                                   GST_VIDEO_CAPS_BGRx ";" GST_VIDEO_CAPS_BGR ";" GST_VIDEO_CAPS_BGRA));
+
+GST_DEBUG_CATEGORY_STATIC(evas_video_sink_debug);
+#define GST_CAT_DEFAULT evas_video_sink_debug
+
+/* GObject signal ids emitted by the sink. */
+enum {
+  REPAINT_REQUESTED,
+  LAST_SIGNAL
+};
+
+/* GObject property ids. */
+enum {
+  PROP_0,
+  PROP_EVAS_OBJECT,
+  PROP_WIDTH,
+  PROP_HEIGHT,
+  PROP_EV,
+  PROP_LAST
+};
+
+static guint evas_video_sink_signals[LAST_SIGNAL] = { 0, };
+
+/* Hook run by GST_BOILERPLATE_FULL before type registration: set up
+ * the "emotion-sink" debug category. */
+#define _do_init(bla) \
+   GST_DEBUG_CATEGORY_INIT(evas_video_sink_debug, \
+                           "emotion-sink", \
+                           0, \
+                           "emotion video sink")
+
+/* Generates evas_video_sink_get_type() plus base_init/class_init/init
+ * scaffolding (GStreamer 0.10 idiom). */
+GST_BOILERPLATE_FULL(EvasVideoSink,
+                     evas_video_sink,
+                     GstVideoSink,
+                     GST_TYPE_VIDEO_SINK,
+                     _do_init);
+
+
+static void unlock_buffer_mutex(EvasVideoSinkPrivate* priv);
+static void evas_video_sink_main_render(void *data);
+static void evas_video_sink_samsung_main_render(void *data);
+
+static void
+evas_video_sink_base_init(gpointer g_class)
+{
+   GstElementClass* klass = GST_ELEMENT_CLASS(g_class);
+
+   /* Advertise the sink pad template and human-readable element details. */
+   gst_element_class_add_pad_template(klass,
+                                      gst_static_pad_template_get(&sinktemplate));
+   gst_element_class_set_details_simple(klass, "Evas video sink",
+       "Sink/Video", "Sends video data from a GStreamer pipeline to an Evas object",
+       "Vincent Torri <vtorri@univ-evry.fr>");
+}
+
+static void
+evas_video_sink_init(EvasVideoSink* sink, EvasVideoSinkClass* klass EINA_UNUSED)
+{
+   EvasVideoSinkPrivate* p;
+
+   INF("sink init");
+
+   /* Attach the GObject-managed private area and reset it to a known state. */
+   sink->priv = p = G_TYPE_INSTANCE_GET_PRIVATE(sink, EVAS_TYPE_VIDEO_SINK, EvasVideoSinkPrivate);
+   p->o = NULL;
+   p->width = 0;
+   p->height = 0;
+   p->func = NULL;
+   p->eformat = EVAS_COLORSPACE_ARGB8888;
+   p->samsung = EINA_FALSE;
+   p->unlocked = EINA_FALSE;
+   /* The condition is created on the lock it will be waited on. */
+   eina_lock_new(&p->m);
+   eina_condition_new(&p->c, &p->m);
+}
+
+/**** Object methods ****/
+static void
+_cleanup_priv(void *data, Evas *e EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   EvasVideoSinkPrivate* p = data;
+
+   /* EVAS_CALLBACK_FREE handler: the Evas object is being deleted, so
+    * drop our pointer to it under the same lock the render path takes. */
+   eina_lock_take(&p->m);
+   if (p->o == obj) p->o = NULL;
+   eina_lock_release(&p->m);
+}
+
+/* GObject property setter: the writable properties are the target Evas
+ * object and the owning Emotion_Gstreamer_Video; both are updated under
+ * the buffer lock since the render path reads them. */
+static void
+evas_video_sink_set_property(GObject * object, guint prop_id,
+                             const GValue * value, GParamSpec * pspec)
+{
+   EvasVideoSink* sink;
+   EvasVideoSinkPrivate* priv;
+
+   sink = EVAS_VIDEO_SINK (object);
+   priv = sink->priv;
+
+   switch (prop_id) {
+    case PROP_EVAS_OBJECT:
+       eina_lock_take(&priv->m);
+       /* re-targeting: detach the FREE callback from the old object
+        * before tracking the new one */
+       evas_object_event_callback_del(priv->o, EVAS_CALLBACK_FREE, _cleanup_priv);
+       priv->o = g_value_get_pointer (value);
+       INF("sink set Evas_Object %p.", priv->o);
+       evas_object_event_callback_add(priv->o, EVAS_CALLBACK_FREE, _cleanup_priv, priv);
+       eina_lock_release(&priv->m);
+       break;
+    case PROP_EV:
+       INF("sink set ev.");
+       eina_lock_take(&priv->m);
+       priv->ev = g_value_get_pointer (value);
+       if (priv->ev)
+         priv->ev->samsung = EINA_TRUE;
+       eina_lock_release(&priv->m);
+       break;
+    default:
+       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+       ERR("invalid property");
+       break;
+   }
+}
+
+/* GObject property getter: every readable property is fetched under
+ * the buffer lock since the streaming thread may be updating it. */
+static void
+evas_video_sink_get_property(GObject * object, guint prop_id,
+                             GValue * value, GParamSpec * pspec)
+{
+   EvasVideoSink* sink;
+   EvasVideoSinkPrivate* priv;
+
+   sink = EVAS_VIDEO_SINK (object);
+   priv = sink->priv;
+
+   switch (prop_id) {
+    case PROP_EVAS_OBJECT:
+       INF("sink get property.");
+       eina_lock_take(&priv->m);
+       g_value_set_pointer(value, priv->o);
+       eina_lock_release(&priv->m);
+       break;
+    case PROP_WIDTH:
+       INF("sink get width.");
+       eina_lock_take(&priv->m);
+       g_value_set_int(value, priv->width);
+       eina_lock_release(&priv->m);
+       break;
+    case PROP_HEIGHT:
+       INF("sink get height.");
+       eina_lock_take(&priv->m);
+       g_value_set_int (value, priv->height);
+       eina_lock_release(&priv->m);
+       break;
+    case PROP_EV:
+       INF("sink get ev.");
+       eina_lock_take(&priv->m);
+       g_value_set_pointer (value, priv->ev);
+       eina_lock_release(&priv->m);
+       break;
+    default:
+       G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+       /* BUG FIX: typo in the error message ("invalide"), now matching
+        * the setter's wording. */
+       ERR("invalid property");
+       break;
+   }
+}
+
+/* GObject dispose: release the lock/condition pair and chain up. */
+static void
+evas_video_sink_dispose(GObject* object)
+{
+   EvasVideoSink* sink;
+   EvasVideoSinkPrivate* priv;
+
+   INF("dispose.");
+
+   sink = EVAS_VIDEO_SINK(object);
+   priv = sink->priv;
+
+   /* BUG FIX: destroy the condition before the lock it was created on
+    * (eina_condition_new(&priv->c, &priv->m) ties them together);
+    * freeing the lock first leaves the condition referencing freed
+    * state during its own teardown. */
+   eina_condition_free(&priv->c);
+   eina_lock_free(&priv->m);
+
+   G_OBJECT_CLASS(parent_class)->dispose(object);
+}
+
+
+/**** BaseSink methods ****/
+
+/* Caps negotiation: pick the Evas colorspace and conversion callback
+ * matching the negotiated format. Tries the fourcc tables first (YUV
+ * path, including the vendor ST12 format), then falls back to
+ * gst_video_format_parse_caps() for RGB formats. */
+gboolean evas_video_sink_set_caps(GstBaseSink *bsink, GstCaps *caps)
+{
+   EvasVideoSink* sink;
+   EvasVideoSinkPrivate* priv;
+   GstStructure *structure;
+   GstVideoFormat format;
+   guint32 fourcc;
+   unsigned int i;
+
+   sink = EVAS_VIDEO_SINK(bsink);
+   priv = sink->priv;
+
+   structure = gst_caps_get_structure(caps, 0);
+
+   if (gst_structure_get_int(structure, "width", (int*) &priv->width)
+       && gst_structure_get_int(structure, "height", (int*) &priv->height)
+       && gst_structure_get_fourcc(structure, "format", &fourcc))
+     {
+        priv->source_height = priv->height;
+
+        /* table is terminated by a NULL name entry */
+        for (i = 0; colorspace_fourcc_convertion[i].name != NULL; ++i)
+          if (fourcc == colorspace_fourcc_convertion[i].fourcc)
+            {
+               DBG("Found '%s'", colorspace_fourcc_convertion[i].name);
+               priv->eformat = colorspace_fourcc_convertion[i].eformat;
+               priv->func = colorspace_fourcc_convertion[i].func;
+               if (colorspace_fourcc_convertion[i].force_height)
+                 {
+                    /* round down to an even height for subsampled YUV */
+                    priv->height = (priv->height >> 1) << 1;
+                 }
+               if (priv->ev)
+                 priv->ev->kill_buffer = EINA_TRUE;
+               return TRUE;
+            }
+
+        if (fourcc == GST_MAKE_FOURCC('S', 'T', '1', '2'))
+          {
+             /* Samsung zero-copy format: converter is chosen lazily in
+              * preroll/render once the multiplane flag is known. */
+             DBG("Found '%s'", "ST12");
+             priv->eformat = EVAS_COLORSPACE_YCBCR420TM12601_PL;
+             priv->samsung = EINA_TRUE;
+             priv->func = NULL;
+             if (priv->ev)
+               {
+                  priv->ev->samsung = EINA_TRUE;
+                  priv->ev->kill_buffer = EINA_TRUE;
+               }
+             return TRUE;
+          }
+     }
+
+   INF("fallback code !");
+   if (!gst_video_format_parse_caps(caps, &format, (int*) &priv->width, (int*) &priv->height))
+     {
+        ERR("Unable to parse caps.");
+        return FALSE;
+     }
+
+   priv->source_height = priv->height;
+
+   for (i = 0; colorspace_format_convertion[i].name != NULL; ++i)
+     if (format == colorspace_format_convertion[i].format)
+       {
+          DBG("Found '%s'", colorspace_format_convertion[i].name);
+          priv->eformat = colorspace_format_convertion[i].eformat;
+          priv->func = colorspace_format_convertion[i].func;
+          if (priv->ev)
+            priv->ev->kill_buffer = EINA_FALSE;
+          return TRUE;
+       }
+
+   ERR("unsupported : %d\n", format);
+   return FALSE;
+}
+
+static gboolean
+evas_video_sink_start(GstBaseSink* base_sink)
+{
+   EvasVideoSinkPrivate* priv;
+   gboolean ok;
+
+   INF("sink start");
+
+   priv = EVAS_VIDEO_SINK(base_sink)->priv;
+
+   /* Starting requires a target Evas object; reset the unlock flag so
+    * render() is allowed to block again. */
+   eina_lock_take(&priv->m);
+   ok = (priv->o != NULL);
+   if (ok) priv->unlocked = EINA_FALSE;
+   eina_lock_release(&priv->m);
+   return ok;
+}
+
+static gboolean
+evas_video_sink_stop(GstBaseSink* base_sink)
+{
+   EvasVideoSinkPrivate* sink_priv = EVAS_VIDEO_SINK(base_sink)->priv;
+
+   INF("sink stop");
+
+   /* Wake any render() call blocked on the buffer condition so the
+    * streaming thread can wind down. */
+   unlock_buffer_mutex(sink_priv);
+   return TRUE;
+}
+
+static gboolean
+evas_video_sink_unlock(GstBaseSink* object)
+{
+   EvasVideoSink* self = EVAS_VIDEO_SINK(object);
+
+   INF("sink unlock");
+
+   /* Abort any in-flight render wait, then chain up to the base class
+    * (defaulting to TRUE if the parent has no unlock vfunc). */
+   unlock_buffer_mutex(self->priv);
+
+   return GST_CALL_PARENT_WITH_DEFAULT(GST_BASE_SINK_CLASS, unlock,
+                                       (object), TRUE);
+}
+
+/* End of the flushing state: allow render() to block on the buffer
+ * condition again, then chain up to the base class. */
+static gboolean
+evas_video_sink_unlock_stop(GstBaseSink* object)
+{
+   EvasVideoSink* sink;
+   EvasVideoSinkPrivate* priv;
+
+   sink = EVAS_VIDEO_SINK(object);
+   priv = sink->priv;
+
+   INF("sink unlock stop");
+
+   eina_lock_take(&priv->m);
+   /* consistency fix: 'unlocked' is an Eina_Bool, so use EINA_FALSE
+    * like every other assignment to it (glib's FALSE happens to share
+    * the value, but mixing the two invites mistakes). */
+   priv->unlocked = EINA_FALSE;
+   eina_lock_release(&priv->m);
+
+   return GST_CALL_PARENT_WITH_DEFAULT(GST_BASE_SINK_CLASS, unlock_stop,
+                                       (object), TRUE);
+}
+
+/* Preroll: forward the first frame to the main loop for display.
+ * Unlike render(), this never blocks waiting for the main loop. */
+static GstFlowReturn
+evas_video_sink_preroll(GstBaseSink* bsink, GstBuffer* buffer)
+{
+   Emotion_Gstreamer_Buffer *send;
+   EvasVideoSinkPrivate *priv;
+   EvasVideoSink *sink;
+
+   INF("sink preroll %p [%i]", GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
+
+   sink = EVAS_VIDEO_SINK(bsink);
+   priv = sink->priv;
+
+   if (GST_BUFFER_SIZE(buffer) <= 0 && !priv->samsung)
+     {
+        WRN("empty buffer");
+        return GST_FLOW_OK;
+     }
+
+   send = emotion_gstreamer_buffer_alloc(priv, buffer, EINA_TRUE);
+
+   if (send)
+     {
+        if (priv->samsung)
+          {
+             /* ST12: pick the converter lazily, once the caps tell us
+              * whether the buffer is multiplane. */
+             if (!priv->func)
+               {
+                  GstStructure *structure;
+                  GstCaps *caps;
+                  gboolean is_multiplane = FALSE;
+
+                  /* BUG FIX: GST_BUFFER_CAPS() returns a borrowed
+                   * reference, so unreffing it corrupted the buffer's
+                   * caps refcount; gst_buffer_get_caps() returns a ref
+                   * we own and may release. */
+                  caps = gst_buffer_get_caps(buffer);
+                  structure = gst_caps_get_structure (caps, 0);
+                  gst_structure_get_boolean(structure, "multiplane", &is_multiplane);
+                  gst_caps_unref(caps);
+
+                  if (is_multiplane)
+                    priv->func = _evas_video_st12_multiplane;
+                  else
+                    priv->func = _evas_video_st12;
+               }
+
+             ecore_main_loop_thread_safe_call_async(evas_video_sink_samsung_main_render, send);
+          }
+        else
+          ecore_main_loop_thread_safe_call_async(evas_video_sink_main_render, send);
+     }
+
+   return GST_FLOW_OK;
+}
+
+static GstFlowReturn
+evas_video_sink_render(GstBaseSink* bsink, GstBuffer* buffer)
+{
+ Emotion_Gstreamer_Buffer *send;
+ EvasVideoSinkPrivate *priv;
+ EvasVideoSink *sink;
+
+ INF("sink render %p", buffer);
+
+ sink = EVAS_VIDEO_SINK(bsink);
+ priv = sink->priv;
+
+ eina_lock_take(&priv->m);
+
+ if (priv->unlocked) {
+ ERR("LOCKED");
+ eina_lock_release(&priv->m);
+ return GST_FLOW_OK;
+ }
+
+ send = emotion_gstreamer_buffer_alloc(priv, buffer, EINA_FALSE);
+ if (!send) {
+ eina_lock_release(&priv->m);
+ return GST_FLOW_ERROR;
+ }
+
+ if (priv->samsung)
+ {
+ if (!priv->func)
+ {
+ GstStructure *structure;
+ GstCaps *caps;
+ gboolean is_multiplane = FALSE;
+
+ caps = GST_BUFFER_CAPS(buffer);
+ structure = gst_caps_get_structure (caps, 0);
+ gst_structure_get_boolean(structure, "multiplane", &is_multiplane);
+ gst_caps_unref(caps);
+
+ if (is_multiplane)
+ priv->func = _evas_video_st12_multiplane;
+ else
+ priv->func = _evas_video_st12;
+ }
+
+ ecore_main_loop_thread_safe_call_async(evas_video_sink_samsung_main_render, send);
+ }
+ else
+ ecore_main_loop_thread_safe_call_async(evas_video_sink_main_render, send);
+
+ eina_condition_wait(&priv->c);
+ eina_lock_release(&priv->m);
+
+ return GST_FLOW_OK;
+}
+
+static void
+_update_emotion_fps(Emotion_Gstreamer_Video *ev)
+{
+ double tim;
+
+ if (!debug_fps) return ;
+
+ tim = ecore_time_get();
+ ev->frames++;
+
+ if (ev->rlapse == 0.0)
+ {
+ ev->rlapse = tim;
+ ev->flapse = ev->frames;
+ }
+ else if ((tim - ev->rlapse) >= 0.5)
+ {
+ printf("FRAME: %i, FPS: %3.1f\n",
+ ev->frames,
+ (ev->frames - ev->flapse) / (tim - ev->rlapse));
+ ev->rlapse = tim;
+ ev->flapse = ev->frames;
+ }
+}
+
static void
evas_video_sink_samsung_main_render(void *data)
{
   /* Main-loop callback (queued via ecore_main_loop_thread_safe_call_async)
    * for Samsung ST12 hardware buffers: upload one frame into the Evas
    * image object and update emotion's position/size bookkeeping.
    * 'data' is an Emotion_Gstreamer_Buffer consumed here (freed at
    * exit_point), except when it is parked in ev->send for a later
    * _video_update_pixels() call. */
   Emotion_Gstreamer_Buffer *send;
   Emotion_Video_Stream *vstream;
   EvasVideoSinkPrivate *priv = NULL;
   GstBuffer* buffer;
   unsigned char *evas_data;
   const guint8 *gst_data;
   GstFormat fmt = GST_FORMAT_TIME;
   gint64 pos;
   Eina_Bool preroll = EINA_FALSE;
   int stride, elevation;
   Evas_Coord w, h;

   send = data;

   if (!send) goto exit_point;

   priv = send->sink;
   buffer = send->frame;
   preroll = send->preroll;

   /* frame after cleanup */
   if (!preroll && !send->ev->last_buffer)
     {
        priv = NULL;
        goto exit_point;
     }

   if (!priv || !priv->o || priv->unlocked)
     goto exit_point;

   /* drop any previously parked frame: only the newest one matters */
   if (send->ev->send)
     {
        emotion_gstreamer_buffer_free(send->ev->send);
        send->ev->send = NULL;
     }

   /* video-surface mode (no stream): park the frame and just notify
    * Evas; _video_update_pixels() renders it later with force set */
   if (!send->ev->stream && !send->force)
     {
        send->ev->send = send;
        _emotion_frame_new(send->ev->obj);
        goto exit_stream;
     }

   _emotion_gstreamer_video_pipeline_parse(send->ev, EINA_TRUE);

   /* Getting stride to compute the right size and then fill the object properly */
   /* Y => [0] and UV in [1] */
   if (priv->func == _evas_video_st12_multiplane)
     {
        const GstMultiPlaneImageBuffer *mp_buf = (const GstMultiPlaneImageBuffer *) buffer;

        stride = mp_buf->stride[0];
        elevation = mp_buf->elevation[0];
        priv->width = mp_buf->width[0];
        priv->height = mp_buf->height[0];

        gst_data = (const guint8 *) mp_buf;
     }
   else
     {
        const SCMN_IMGB *imgb = (const SCMN_IMGB *) GST_BUFFER_MALLOCDATA(buffer);

        stride = imgb->stride[0];
        elevation = imgb->elevation[0];
        priv->width = imgb->width[0];
        priv->height = imgb->height[0];

        gst_data = (const guint8 *) imgb;
     }

   evas_object_geometry_get(priv->o, NULL, NULL, &w, &h);

   /* fill factors compensate for padding between the visible size
    * (width/height) and the allocated size (stride/elevation) */
   send->ev->fill.width = (double) stride / priv->width;
   send->ev->fill.height = (double) elevation / priv->height;

   evas_object_image_alpha_set(priv->o, 0);
   evas_object_image_colorspace_set(priv->o, priv->eformat);
   evas_object_image_size_set(priv->o, stride, elevation);

   _update_emotion_fps(send->ev);

   evas_data = evas_object_image_data_get(priv->o, 1);

   if (priv->func)
     priv->func(evas_data, gst_data, stride, elevation, elevation);
   else
     WRN("No way to decode %x colorspace !", priv->eformat);

   evas_object_image_data_set(priv->o, evas_data);
   evas_object_image_data_update_add(priv->o, 0, 0, priv->width, priv->height);
   evas_object_image_pixels_dirty_set(priv->o, 0);

   /* first rendered frame after a play request: report playback start */
   if (!preroll && send->ev->play_started)
     {
        _emotion_playback_started(send->ev->obj);
        send->ev->play_started = 0;
     }

   if (!send->force)
     {
        _emotion_frame_new(send->ev->obj);
     }

   vstream = eina_list_nth(send->ev->video_streams, send->ev->video_stream_nbr - 1);

   gst_element_query_position(send->ev->pipeline, &fmt, &pos);
   send->ev->position = (double)pos / (double)GST_SECOND;

   if (vstream)
     {
        vstream->width = priv->width;
        vstream->height = priv->height;

        _emotion_video_pos_update(send->ev->obj, send->ev->position, vstream->length_time);
     }

   send->ev->ratio = (double) priv->width / (double) priv->height;
   _emotion_frame_refill(send->ev->obj, send->ev->fill.width, send->ev->fill.height);
   _emotion_frame_resize(send->ev->obj, priv->width, priv->height, send->ev->ratio);

   /* keep the GstBuffer alive until the next frame replaces it — the
    * Evas image may still reference its pixels */
   buffer = gst_buffer_ref(buffer);
   if (send->ev->last_buffer) gst_buffer_unref(send->ev->last_buffer);
   send->ev->last_buffer = buffer;

 exit_point:
   emotion_gstreamer_buffer_free(send);

 exit_stream:
   /* unblock evas_video_sink_render() waiting on priv->c, unless this
    * was a preroll (render is not waiting) or the sink went away */
   if (priv)
     {
        if (preroll || !priv->o) return;

        if (!priv->unlocked)
          eina_condition_signal(&priv->c);
     }
}
+
static void
evas_video_sink_main_render(void *data)
{
   /* Main-loop callback (queued via ecore_main_loop_thread_safe_call_async)
    * for regular buffers: convert/copy the frame into the Evas image
    * object via priv->func and update emotion's bookkeeping.
    * 'data' is an Emotion_Gstreamer_Buffer consumed here (freed at
    * exit_point), except when it is parked in ev->send for a later
    * _video_update_pixels() call. */
   Emotion_Gstreamer_Buffer *send;
   Emotion_Gstreamer_Video *ev = NULL;
   Emotion_Video_Stream *vstream;
   EvasVideoSinkPrivate *priv = NULL;
   GstBuffer *buffer;
   unsigned char *evas_data;
   GstFormat fmt = GST_FORMAT_TIME;
   gint64 pos;
   Eina_Bool preroll = EINA_FALSE;

   send = data;

   if (!send) goto exit_point;

   priv = send->sink;
   buffer = send->frame;
   preroll = send->preroll;
   ev = send->ev;

   /* frame after cleanup */
   if (!preroll && !ev->last_buffer)
     {
        priv = NULL;
        goto exit_point;
     }

   if (!priv || !priv->o || priv->unlocked)
     goto exit_point;

   /* drop any previously parked frame (unless it is this very one) */
   if (ev->send && send != ev->send)
     {
        emotion_gstreamer_buffer_free(ev->send);
        ev->send = NULL;
     }

   /* video-surface mode (no stream): park the frame and just notify
    * Evas; _video_update_pixels() renders it later with force set */
   if (!ev->stream && !send->force)
     {
        ev->send = send;
        _emotion_frame_new(ev->obj);
        evas_object_image_data_update_add(priv->o, 0, 0, priv->width, priv->height);
        goto exit_stream;
     }

   _emotion_gstreamer_video_pipeline_parse(ev, EINA_TRUE);

   INF("sink main render [%i, %i] (source height: %i)", priv->width, priv->height, priv->source_height);

   evas_object_image_alpha_set(priv->o, 0);
   evas_object_image_colorspace_set(priv->o, priv->eformat);
   evas_object_image_size_set(priv->o, priv->width, priv->height);

   evas_data = evas_object_image_data_get(priv->o, 1);

   if (priv->func)
     priv->func(evas_data, GST_BUFFER_DATA(buffer), priv->width, priv->source_height, priv->height);
   else
     WRN("No way to decode %x colorspace !", priv->eformat);

   evas_object_image_data_set(priv->o, evas_data);
   evas_object_image_data_update_add(priv->o, 0, 0, priv->width, priv->height);
   evas_object_image_pixels_dirty_set(priv->o, 0);

   _update_emotion_fps(ev);

   /* first rendered frame after a play request: report playback start */
   if (!preroll && ev->play_started)
     {
        _emotion_playback_started(ev->obj);
        ev->play_started = 0;
     }

   if (!send->force)
     {
        _emotion_frame_new(ev->obj);
     }

   gst_element_query_position(ev->pipeline, &fmt, &pos);
   ev->position = (double)pos / (double)GST_SECOND;

   vstream = eina_list_nth(ev->video_streams, ev->video_stream_nbr - 1);

   if (vstream)
     {
        vstream->width = priv->width;
        vstream->height = priv->height;
        _emotion_video_pos_update(ev->obj, ev->position, vstream->length_time);
     }

   ev->ratio = (double) priv->width / (double) priv->height;

   _emotion_frame_resize(ev->obj, priv->width, priv->height, ev->ratio);

   /* keep the GstBuffer alive until the next frame replaces it — the
    * Evas image may still reference its pixels */
   buffer = gst_buffer_ref(buffer);
   if (ev->last_buffer) gst_buffer_unref(ev->last_buffer);
   ev->last_buffer = buffer;

 exit_point:
   emotion_gstreamer_buffer_free(send);

 exit_stream:
   /* unblock evas_video_sink_render() waiting on priv->c, unless this
    * was a preroll (render is not waiting) or the sink went away */
   if (priv)
     {
        if (preroll || !priv->o) return;

        if (!priv->unlocked)
          eina_condition_signal(&priv->c);
     }
}
+
static void
unlock_buffer_mutex(EvasVideoSinkPrivate* priv)
{
   /* Mark the sink as unlocked and wake any streaming thread blocked in
    * evas_video_sink_render() waiting on the condition.
    * NOTE(review): the flag is written and the condition signalled
    * without holding priv->m; a waiter that checks 'unlocked' and then
    * waits under the lock could in principle miss this wakeup — confirm
    * against the render/main-render handshake. */
   priv->unlocked = EINA_TRUE;

   eina_condition_signal(&priv->c);
}
+
/* GClosure marshaller for signals of signature
 *   void handler(gpointer instance, GstMiniObject *obj, gpointer user_data)
 * needed because GLib ships no VOID:MINIOBJECT marshaller.  Follows the
 * standard glib-genmarshal pattern: swap instance/user_data ordering
 * when the closure was connected with G_CONNECT_SWAPPED. */
static void
marshal_VOID__MINIOBJECT(GClosure * closure, GValue * return_value EINA_UNUSED,
                         guint n_param_values, const GValue * param_values,
                         gpointer invocation_hint EINA_UNUSED, gpointer marshal_data)
{
   typedef void (*marshalfunc_VOID__MINIOBJECT) (gpointer obj, gpointer arg1, gpointer data2);
   marshalfunc_VOID__MINIOBJECT callback;
   GCClosure *cc;
   gpointer data1, data2;

   cc = (GCClosure *) closure;

   /* signal carries exactly: instance + one GstMiniObject parameter */
   g_return_if_fail(n_param_values == 2);

   if (G_CCLOSURE_SWAP_DATA(closure)) {
      data1 = closure->data;
      data2 = g_value_peek_pointer(param_values + 0);
   } else {
      data1 = g_value_peek_pointer(param_values + 0);
      data2 = closure->data;
   }
   /* marshal_data overrides the closure callback when set (genmarshal idiom) */
   callback = (marshalfunc_VOID__MINIOBJECT) (marshal_data ? marshal_data : cc->callback);

   callback(data1, gst_value_get_mini_object(param_values + 1), data2);
}
+
+static void
+evas_video_sink_class_init(EvasVideoSinkClass* klass)
+{
+ GObjectClass* gobject_class;
+ GstBaseSinkClass* gstbase_sink_class;
+
+ gobject_class = G_OBJECT_CLASS(klass);
+ gstbase_sink_class = GST_BASE_SINK_CLASS(klass);
+
+ g_type_class_add_private(klass, sizeof(EvasVideoSinkPrivate));
+
+ gobject_class->set_property = evas_video_sink_set_property;
+ gobject_class->get_property = evas_video_sink_get_property;
+
+ g_object_class_install_property (gobject_class, PROP_EVAS_OBJECT,
+ g_param_spec_pointer ("evas-object", "Evas Object",
+ "The Evas object where the display of the video will be done",
+ G_PARAM_READWRITE));
+
+ g_object_class_install_property (gobject_class, PROP_WIDTH,
+ g_param_spec_int ("width", "Width",
+ "The width of the video",
+ 0, 65536, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_HEIGHT,
+ g_param_spec_int ("height", "Height",
+ "The height of the video",
+ 0, 65536, 0, G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
+ g_object_class_install_property (gobject_class, PROP_EV,
+ g_param_spec_pointer ("ev", "Emotion_Gstreamer_Video",
+ "THe internal data of the emotion object",
+ G_PARAM_READWRITE));
+
+ gobject_class->dispose = evas_video_sink_dispose;
+
+ gstbase_sink_class->set_caps = evas_video_sink_set_caps;
+ gstbase_sink_class->stop = evas_video_sink_stop;
+ gstbase_sink_class->start = evas_video_sink_start;
+ gstbase_sink_class->unlock = evas_video_sink_unlock;
+ gstbase_sink_class->unlock_stop = evas_video_sink_unlock_stop;
+ gstbase_sink_class->render = evas_video_sink_render;
+ gstbase_sink_class->preroll = evas_video_sink_preroll;
+
+ evas_video_sink_signals[REPAINT_REQUESTED] = g_signal_new("repaint-requested",
+ G_TYPE_FROM_CLASS(klass),
+ (GSignalFlags)(G_SIGNAL_RUN_LAST | G_SIGNAL_ACTION),
+ 0,
+ 0,
+ 0,
+ marshal_VOID__MINIOBJECT,
+ G_TYPE_NONE, 1, GST_TYPE_BUFFER);
+}
+
+gboolean
+gstreamer_plugin_init (GstPlugin * plugin)
+{
+ return gst_element_register (plugin,
+ "emotion-sink",
+ GST_RANK_NONE,
+ EVAS_TYPE_VIDEO_SINK);
+}
+
+static void
+_emotion_gstreamer_pause(void *data, Ecore_Thread *thread)
+{
+ Emotion_Gstreamer_Video *ev = data;
+ gboolean res;
+
+ if (ecore_thread_check(thread) || !ev->pipeline) return ;
+
+ gst_element_set_state(ev->pipeline, GST_STATE_PAUSED);
+ res = gst_element_get_state(ev->pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
+ if (res == GST_STATE_CHANGE_NO_PREROLL)
+ {
+ gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+ gst_element_get_state(ev->pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
+ }
+}
+
+static void
+_emotion_gstreamer_cancel(void *data, Ecore_Thread *thread)
+{
+ Emotion_Gstreamer_Video *ev = data;
+
+ ev->threads = eina_list_remove(ev->threads, thread);
+
+ if (getenv("EMOTION_GSTREAMER_DOT")) GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(ev->pipeline), GST_DEBUG_GRAPH_SHOW_ALL, getenv("EMOTION_GSTREAMER_DOT"));
+
+ if (ev->in == ev->out && ev->delete_me)
+ em_shutdown(ev);
+}
+
+static void
+_emotion_gstreamer_end(void *data, Ecore_Thread *thread)
+{
+ Emotion_Gstreamer_Video *ev = data;
+
+ ev->threads = eina_list_remove(ev->threads, thread);
+
+ if (ev->play)
+ {
+ gst_element_set_state(ev->pipeline, GST_STATE_PLAYING);
+ ev->play_started = 1;
+ }
+
+ if (getenv("EMOTION_GSTREAMER_DOT")) GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(ev->pipeline), GST_DEBUG_GRAPH_SHOW_ALL, getenv("EMOTION_GSTREAMER_DOT"));
+
+ if (ev->in == ev->out && ev->delete_me)
+ em_shutdown(ev);
+ else
+ _emotion_gstreamer_video_pipeline_parse(data, EINA_TRUE);
+}
+
static void
_video_resize(void *data, Evas_Object *obj EINA_UNUSED, const Evas_Video_Surface *surface EINA_UNUSED,
              Evas_Coord w, Evas_Coord h)
{
   /* Evas video-surface callback: mirror the Evas object's new size
    * onto the external X window the Xv sink renders into.
    * Compiled to a no-op without ecore_x. */
#ifdef HAVE_ECORE_X
   Emotion_Gstreamer_Video *ev = data;

   ecore_x_window_resize(ev->win, w, h);
   DBG("resize: %i, %i", w, h);
#endif
}
+
static void
_video_move(void *data, Evas_Object *obj EINA_UNUSED, const Evas_Video_Surface *surface EINA_UNUSED,
            Evas_Coord x, Evas_Coord y)
{
   /* Evas video-surface callback: publish the new position through the
    * E_VIDEO_POSITION window property so the window manager can place
    * the video window accordingly.  No-op without ecore_x. */
#ifdef HAVE_ECORE_X
   Emotion_Gstreamer_Video *ev = data;
   unsigned int pos[2];

   DBG("move: %i, %i", x, y);
   pos[0] = x; pos[1] = y;
   ecore_x_window_prop_card32_set(ev->win, ECORE_X_ATOM_E_VIDEO_POSITION, pos, 2);
#endif
}
+
#if 0
/* Much better idea to always feed the XvImageSink and let him handle optimizing the rendering as we do */
/* Disabled pad-blocking helpers that would detach (unlink + EOS) or
 * re-attach the Xv branch of the tee when the video surface is hidden
 * or shown.  Kept for reference only; not compiled. */
static void
_block_pad_unlink_cb(GstPad *pad, gboolean blocked, gpointer user_data)
{
   if (blocked)
     {
        Emotion_Gstreamer_Video *ev = user_data;
        GstEvent *gev;

        gst_pad_unlink(ev->xvteepad, ev->xvpad);
        gev = gst_event_new_eos();
        gst_pad_send_event(ev->xvpad, gev);
        gst_pad_set_blocked_async(pad, FALSE, _block_pad_unlink_cb, NULL);
     }
}

static void
_block_pad_link_cb(GstPad *pad, gboolean blocked, gpointer user_data)
{
   if (blocked)
     {
        Emotion_Gstreamer_Video *ev = user_data;

        gst_pad_link(ev->xvteepad, ev->xvpad);
        if (ev->play)
          gst_element_set_state(ev->xvsink, GST_STATE_PLAYING);
        else
          gst_element_set_state(ev->xvsink, GST_STATE_PAUSED);
        gst_pad_set_blocked_async(pad, FALSE, _block_pad_link_cb, NULL);
     }
}
#endif
+
static void
_video_show(void *data, Evas_Object *obj EINA_UNUSED, const Evas_Video_Surface *surface EINA_UNUSED)
{
   /* Evas video-surface callback: make the external Xv window visible.
    * No-op without ecore_x. */
#ifdef HAVE_ECORE_X
   Emotion_Gstreamer_Video *ev = data;

   DBG("show xv");
   ecore_x_window_show(ev->win);
#endif
   /* gst_pad_set_blocked_async(ev->xvteepad, TRUE, _block_pad_link_cb, ev); */
}
+
static void
_video_hide(void *data, Evas_Object *obj EINA_UNUSED, const Evas_Video_Surface *surface EINA_UNUSED)
{
   /* Evas video-surface callback: hide the external Xv window.
    * No-op without ecore_x. */
#ifdef HAVE_ECORE_X
   Emotion_Gstreamer_Video *ev = data;

   DBG("hide xv");
   ecore_x_window_hide(ev->win);
#endif
   /* gst_pad_set_blocked_async(ev->xvteepad, TRUE, _block_pad_unlink_cb, ev); */
}
+
+static void
+_video_update_pixels(void *data, Evas_Object *obj EINA_UNUSED, const Evas_Video_Surface *surface EINA_UNUSED)
+{
+ Emotion_Gstreamer_Video *ev = data;
+ Emotion_Gstreamer_Buffer *send;
+ EvasVideoSinkPrivate *priv = NULL;
+
+ if (!ev->send) return ;
+
+ send = ev->send;
+ priv = send->sink;
+ send->force = EINA_TRUE;
+ ev->send = NULL;
+
+ if (priv->samsung)
+ evas_video_sink_samsung_main_render(send);
+ else
+ evas_video_sink_main_render(send);
+}
+
+static void
+_image_resize(void *data, Evas *e EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+ Emotion_Gstreamer_Video *ev = data;
+ Evas_Coord width, height;
+ int image_area, src_area;
+ double ratio;
+
+ GstElementFactory *cfactory = NULL;
+ GstElement *convert = NULL, *filter = NULL, *queue = NULL;
+ GstPad *pad = NULL, *teepad = NULL;
+ GstCaps *caps = NULL;
+ Eina_List *l, *engines;
+ const char *ename, *engine = NULL;
+
+ evas_object_geometry_get(obj, NULL, NULL, &width, &height);
+ image_area = width * height;
+ src_area = ev->src_width * ev->src_height;
+ ratio = (double)image_area / (double)src_area;
+
+ // when an image is much smaller than original video size,
+ // add fimcconvert element to the pipeline
+ if (ratio < 0.8 && ev->stream && !ev->convert)
+ {
+ cfactory = gst_element_factory_find("fimcconvert");
+ if (!cfactory) return;
+
+ convert = gst_element_factory_create(cfactory, NULL);
+ if (!convert) return;
+
+ // add capsfilter to limit size and formats based on the backend
+ filter = gst_element_factory_make("capsfilter", "fimccapsfilter");
+ if (!filter)
+ {
+ gst_object_unref(convert);
+ return;
+ }
+
+ engines = evas_render_method_list();
+ EINA_LIST_FOREACH(engines, l, ename)
+ {
+ if (evas_render_method_lookup(ename) ==
+ evas_output_method_get(evas_object_evas_get(obj)))
+ {
+ engine = ename;
+ break;
+ }
+ }
+
+ if (strstr(engine, "software") != NULL)
+ {
+ caps = gst_caps_new_simple("video/x-raw-rgb",
+ "width", G_TYPE_INT, width,
+ "height", G_TYPE_INT, height,
+ NULL);
+ }
+ else if (strstr(engine, "gl") != NULL)
+ {
+ caps = gst_caps_new_simple("video/x-raw-yuv",
+ "width", G_TYPE_INT, width,
+ "height", G_TYPE_INT, height,
+ NULL);
+ }
+ g_object_set(G_OBJECT(filter), "caps", caps, NULL);
+ gst_caps_unref(caps);
+
+ // add new elements to the pipeline
+ queue = gst_bin_get_by_name(GST_BIN(ev->sink), "equeue");
+ gst_element_unlink(ev->tee, queue);
+ gst_element_release_request_pad(ev->tee, ev->eteepad);
+ gst_object_unref(ev->eteepad);
+
+ gst_bin_add_many(GST_BIN(ev->sink), convert, filter, NULL);
+ gst_element_link_many(ev->tee, convert, filter, queue, NULL);
+
+ pad = gst_element_get_pad(convert, "sink");
+ teepad = gst_element_get_request_pad(ev->tee, "src%d");
+ gst_pad_link(teepad, pad);
+ gst_object_unref(pad);
+
+ gst_element_sync_state_with_parent(convert);
+ gst_element_sync_state_with_parent(filter);
+
+ ev->eteepad = teepad;
+ ev->convert = convert;
+ evas_render_method_list_free(engines);
+
+ INF("add fimcconvert element. video size: %dx%d. emotion object size: %dx%d",
+ ev->src_width, ev->src_height, width, height);
+ }
+ // set size again to the capsfilter when the image is resized
+ else if (ev->convert)
+ {
+ filter = gst_bin_get_by_name(GST_BIN(ev->sink), "fimccapsfilter");
+
+ engines = evas_render_method_list();
+ EINA_LIST_FOREACH(engines, l, ename)
+ {
+ if (evas_render_method_lookup(ename) ==
+ evas_output_method_get(evas_object_evas_get(obj)))
+ {
+ engine = ename;
+ break;
+ }
+ }
+
+ if (strstr(engine, "software") != NULL)
+ {
+ caps = gst_caps_new_simple("video/x-raw-rgb",
+ "width", G_TYPE_INT, width,
+ "height", G_TYPE_INT, height,
+ NULL);
+ }
+ else if (strstr(engine, "gl") != NULL)
+ {
+ caps = gst_caps_new_simple("video/x-raw-yuv",
+ "width", G_TYPE_INT, width,
+ "height", G_TYPE_INT, height,
+ NULL);
+ }
+
+ g_object_set(G_OBJECT(filter), "caps", caps, NULL);
+ gst_caps_unref(caps);
+ evas_render_method_list_free(engines);
+
+ INF("set capsfilter size again:. video size: %dx%d. emotion object size: %dx%d",
+ ev->src_width, ev->src_height, width, height);
+ }
+}
+
/* Build the playback pipeline for 'uri' on emotion object 'o':
 * a playbin2 (or a custom webcam launch line) feeding a private bin of
 * tee -> queue -> emotion-sink, optionally with a second tee branch to
 * an xvimagesink wrapped in a fake-EOS bin when window-manager video
 * overlay is available.  Returns the pipeline, or NULL on failure.
 * On success stores pipeline/sink/tee/pads in 'ev' and kicks off an
 * async pause via an Ecore worker thread. */
GstElement *
gstreamer_video_sink_new(Emotion_Gstreamer_Video *ev,
                         Evas_Object *o,
                         const char *uri)
{
   GstElement *playbin;
   GstElement *bin = NULL;
   GstElement *esink = NULL;
   GstElement *xvsink = NULL;
   GstElement *tee = NULL;
   GstElement *queue = NULL;
   Evas_Object *obj;
   GstPad *pad;
   GstPad *teepad;
   int flags;
   const char *launch;
#if defined HAVE_ECORE_X && defined HAVE_XOVERLAY_H
   const char *engine = NULL;
   Eina_List *engines;
#endif

   obj = emotion_object_image_get(o);
   if (!obj)
     {
        ERR("Not Evas_Object specified");
        return NULL;
     }

   if (!uri)
     return NULL;

   /* webcam URIs may map to a custom gst-launch style description */
   launch = emotion_webcam_custom_get(uri);
   if (launch)
     {
        GError *error = NULL;

        playbin = gst_parse_bin_from_description(launch, 1, &error);
        if (!playbin)
          {
             ERR("Unable to setup command : '%s' got error '%s'.", launch, error->message);
             g_error_free(error);
             return NULL;
          }
        if (error)
          {
             WRN("got recoverable error '%s' for command : '%s'.", error->message, launch);
             g_error_free(error);
          }
     }
   else
     {
        playbin = gst_element_factory_make("playbin2", "playbin");
        if (!playbin)
          {
             ERR("Unable to create 'playbin' GstElement.");
             return NULL;
          }
     }

   bin = gst_bin_new(NULL);
   if (!bin)
     {
        ERR("Unable to create GstBin !");
        goto unref_pipeline;
     }

   tee = gst_element_factory_make("tee", NULL);
   if (!tee)
     {
        ERR("Unable to create 'tee' GstElement.");
        goto unref_pipeline;
     }

#if defined HAVE_ECORE_X && defined HAVE_XOVERLAY_H
   /* With WM cooperation and an X11 Evas engine, create a hidden child
    * X window and an xvimagesink rendering into it; the WM positions it
    * using the E_VIDEO_PARENT/E_VIDEO_POSITION properties. */
   if (window_manager_video)
     {
        Eina_List *l;
        const char *ename;

        engines = evas_render_method_list();

        EINA_LIST_FOREACH(engines, l, ename)
          {
             if (evas_render_method_lookup(ename) ==
                 evas_output_method_get(evas_object_evas_get(obj)))
               {
                  engine = ename;
                  break;
               }
          }

        if (ev->priority && engine && strstr(engine, "_x11") != NULL)
          {
             Ecore_Evas *ee;
             Evas_Coord x, y, w, h;
             Ecore_X_Window win;
             Ecore_X_Window parent;

             evas_object_geometry_get(obj, &x, &y, &w, &h);

             ee = ecore_evas_ecore_evas_get(evas_object_evas_get(obj));

             /* Xv surfaces need a minimal sane size */
             if (w < 4) w = 4;
             if (h < 2) h = 2;

             /* Here we really need to have the help of the window manager, this code will change when we update E17. */
             parent = (Ecore_X_Window) ecore_evas_window_get(ee);
             DBG("parent: %x", parent);

             win = ecore_x_window_new(0, x, y, w, h);
             DBG("creating window: %x [%i, %i, %i, %i]", win, x, y, w, h);
             if (win)
               {
                  Ecore_X_Window_State state[] = { ECORE_X_WINDOW_STATE_SKIP_TASKBAR, ECORE_X_WINDOW_STATE_SKIP_PAGER };

                  ecore_x_netwm_window_state_set(win, state, 2);
                  ecore_x_window_hide(win);
                  xvsink = gst_element_factory_make("xvimagesink", NULL);
                  if (xvsink)
                    {
                       unsigned int pos[2];

#ifdef HAVE_X_OVERLAY_SET
                       gst_x_overlay_set_window_handle(GST_X_OVERLAY(xvsink), win);
#else
                       gst_x_overlay_set_xwindow_id(GST_X_OVERLAY(xvsink), win);
#endif
                       ev->win = win;

                       ecore_x_window_prop_card32_set(win, ECORE_X_ATOM_E_VIDEO_PARENT, &parent, 1);

                       pos[0] = x; pos[1] = y;
                       ecore_x_window_prop_card32_set(win, ECORE_X_ATOM_E_VIDEO_POSITION, pos, 2);
                    }
                  else
                    {
                       DBG("destroying win: %x", win);
                       ecore_x_window_free(win);
                    }
               }
          }
        evas_render_method_list_free(engines);
     }
#else
# warning "missing: ecore_x OR xoverlay"
#endif

   esink = gst_element_factory_make("emotion-sink", "sink");
   if (!esink)
     {
        ERR("Unable to create 'emotion-sink' GstElement.");
        goto unref_pipeline;
     }

   g_object_set(G_OBJECT(esink), "evas-object", obj, NULL);
   g_object_set(G_OBJECT(esink), "ev", ev, NULL);

   evas_object_image_pixels_get_callback_set(obj, NULL, NULL);
   evas_object_event_callback_add(obj, EVAS_CALLBACK_RESIZE, _image_resize, ev);

   /* We need queue to force each video sink to be in its own thread */
   queue = gst_element_factory_make("queue", "equeue");
   if (!queue)
     {
        ERR("Unable to create 'queue' GstElement.");
        goto unref_pipeline;
     }

   gst_bin_add_many(GST_BIN(bin), tee, queue, esink, NULL);
   gst_element_link_many(queue, esink, NULL);

   /* link both sink to GstTee */
   pad = gst_element_get_pad(queue, "sink");
   teepad = gst_element_get_request_pad(tee, "src%d");
   gst_pad_link(teepad, pad);
   gst_object_unref(pad);

   ev->eteepad = teepad;

   /* optional Xv branch: wrap queue+xvsink in a fake-EOS bin so that
    * unlinking the branch later does not starve the pipeline of EOS */
   if (xvsink)
     {
        GstElement *fakeeos;

        queue = gst_element_factory_make("queue", "xvqueue");
        fakeeos = GST_ELEMENT(GST_BIN(g_object_new(GST_TYPE_FAKEEOS_BIN, "name", "eosbin", NULL)));
        if (queue && fakeeos)
          {
             GstPad *queue_pad;

             gst_bin_add_many(GST_BIN(bin), fakeeos, NULL);

             gst_bin_add_many(GST_BIN(fakeeos), queue, xvsink, NULL);
             gst_element_link_many(queue, xvsink, NULL);
             queue_pad = gst_element_get_pad(queue, "sink");
             gst_element_add_pad(fakeeos, gst_ghost_pad_new("sink", queue_pad));

             pad = gst_element_get_pad(fakeeos, "sink");
             teepad = gst_element_get_request_pad(tee, "src%d");
             gst_pad_link(teepad, pad);

             xvsink = fakeeos;

             ev->xvteepad = teepad;
             ev->xvpad = pad;
          }
        else
          {
             if (fakeeos) gst_object_unref(fakeeos);
             if (queue) gst_object_unref(queue);
             gst_object_unref(xvsink);
             xvsink = NULL;
          }
     }

   /* expose the tee's sink pad as the bin's sink */
   teepad = gst_element_get_pad(tee, "sink");
   gst_element_add_pad(bin, gst_ghost_pad_new("sink", teepad));
   gst_object_unref(teepad);

#define GST_PLAY_FLAG_NATIVE_VIDEO (1 << 6)
#define GST_PLAY_FLAG_DOWNLOAD (1 << 7)
#define GST_PLAY_FLAG_AUDIO (1 << 1)
#define GST_PLAY_FLAG_NATIVE_AUDIO (1 << 5)

   if (launch)
     {
        g_object_set(G_OBJECT(playbin), "sink", bin, NULL);
     }
   else
     {
        g_object_get(G_OBJECT(playbin), "flags", &flags, NULL);
        g_object_set(G_OBJECT(playbin), "flags", flags | GST_PLAY_FLAG_NATIVE_VIDEO | GST_PLAY_FLAG_DOWNLOAD | GST_PLAY_FLAG_NATIVE_AUDIO, NULL);
        g_object_set(G_OBJECT(playbin), "video-sink", bin, NULL);
        g_object_set(G_OBJECT(playbin), "uri", uri, NULL);
     }

   /* NOTE(review): this pixels_get callback reset already happened
    * right after esink creation above — the second call looks
    * redundant; confirm before removing. */
   evas_object_image_pixels_get_callback_set(obj, NULL, NULL);

   ev->stream = EINA_TRUE;

   if (xvsink)
     {
        Evas_Video_Surface video;

        video.version = EVAS_VIDEO_SURFACE_VERSION;
        video.data = ev;
        video.parent = NULL;
        video.move = _video_move;
        video.resize = _video_resize;
        video.show = _video_show;
        video.hide = _video_hide;
        video.update_pixels = _video_update_pixels;

        evas_object_image_video_surface_set(obj, &video);
        ev->stream = EINA_FALSE;
     }

   eina_stringshare_replace(&ev->uri, uri);
   ev->pipeline = playbin;
   ev->sink = bin;
   ev->esink = esink;
   ev->xvsink = xvsink;
   ev->tee = tee;
   ev->threads = eina_list_append(ev->threads,
                                  ecore_thread_run(_emotion_gstreamer_pause,
                                                   _emotion_gstreamer_end,
                                                   _emotion_gstreamer_cancel,
                                                   ev));

   /** NOTE: you need to set: GST_DEBUG_DUMP_DOT_DIR=/tmp EMOTION_ENGINE=gstreamer to save the $EMOTION_GSTREAMER_DOT file in '/tmp' */
   /** then call dot -Tpng -oemotion_pipeline.png /tmp/$TIMESTAMP-$EMOTION_GSTREAMER_DOT.dot */
   if (getenv("EMOTION_GSTREAMER_DOT")) GST_DEBUG_BIN_TO_DOT_FILE_WITH_TS(GST_BIN(playbin), GST_DEBUG_GRAPH_SHOW_ALL, getenv("EMOTION_GSTREAMER_DOT"));

   return playbin;

 unref_pipeline:
   /* NOTE(review): depending on which goto reached here, xvsink, esink
    * and tee may be NULL — gst_object_unref() warns on NULL; confirm
    * whether per-pointer guards are needed. */
   gst_object_unref(xvsink);
   gst_object_unref(esink);
   gst_object_unref(tee);
   gst_object_unref(bin);
   gst_object_unref(playbin);
   return NULL;
}
diff --git a/src/modules/emotion/xine/emotion_xine.c b/src/modules/emotion/xine/emotion_xine.c
new file mode 100644
index 0000000000..2cc2eac5c8
--- /dev/null
+++ b/src/modules/emotion/xine/emotion_xine.c
@@ -0,0 +1,1723 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+int _emotion_xine_log_domain = -1;
+
+/* module api */
+static unsigned char em_init (Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt);
+static int em_shutdown (void *ef);
+static unsigned char em_file_open (const char *file, Evas_Object *obj, void *ef);
+static void em_file_close (void *ef);
+static void em_play (void *ef, double pos);
+static void em_stop (void *ef);
+static void em_size_get (void *ef, int *w, int *h);
+static void em_pos_set (void *ef, double pos);
+static double em_buffer_size_get (void *ef);
+static double em_len_get (void *ef);
+static int em_fps_num_get (void *ef);
+static int em_fps_den_get (void *ef);
+static double em_fps_get (void *ef);
+static double em_pos_get (void *ef);
+static void em_vis_set (void *ef, Emotion_Vis vis);
+static Emotion_Vis em_vis_get (void *ef);
+static Eina_Bool em_vis_supported (void *ef, Emotion_Vis vis);
+static double em_ratio_get (void *ef);
+static int em_seekable (void *ef);
+static void em_frame_done (void *ef);
+static Emotion_Format em_format_get (void *ef);
+static void em_video_data_size_get (void *ef, int *w, int *h);
+static int em_yuv_rows_get (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
+static int em_bgra_data_get (void *ef, unsigned char **bgra_data);
+static void em_event_feed (void *ef, int event);
+static void em_event_mouse_button_feed (void *ef, int button, int x, int y);
+static void em_event_mouse_move_feed (void *ef, int x, int y);
+static int em_video_channel_count (void *ef);
+static void em_video_channel_set (void *ef, int channel);
+static int em_video_channel_get (void *ef);
+static const char *em_video_channel_name_get (void *ef, int channel);
+static void em_video_channel_mute_set (void *ef, int mute);
+static int em_video_channel_mute_get (void *ef);
+static int em_audio_channel_count (void *ef);
+static void em_audio_channel_set (void *ef, int channel);
+static int em_audio_channel_get (void *ef);
+static const char *em_audio_channel_name_get (void *ef, int channel);
+static void em_audio_channel_mute_set (void *ef, int mute);
+static int em_audio_channel_mute_get (void *ef);
+static void em_audio_channel_volume_set(void *ef, double vol);
+static double em_audio_channel_volume_get(void *ef);
+static int em_spu_channel_count (void *ef);
+static void em_spu_channel_set (void *ef, int channel);
+static int em_spu_channel_get (void *ef);
+static const char *em_spu_channel_name_get (void *ef, int channel);
+static void em_spu_channel_mute_set (void *ef, int mute);
+static int em_spu_channel_mute_get (void *ef);
+static int em_chapter_count (void *ef);
+static void em_chapter_set (void *ef, int chapter);
+static int em_chapter_get (void *ef);
+static const char *em_chapter_name_get (void *ef, int chapter);
+static void em_speed_set (void *ef, double speed);
+static double em_speed_get (void *ef);
+static int em_eject (void *ef);
+static const char *em_meta_get (void *ef, int meta);
+
+/* internal util calls */
+static void *_em_slave (void *par);
+static void _em_slave_event (void *data, int type, void *arg);
+static Eina_Bool _em_fd_active (void *data, Ecore_Fd_Handler *fdh);
+static void _em_event (void *data, const xine_event_t *event);
+static void _em_module_event (void *data, int type);
+static Eina_Bool _em_fd_ev_active (void *data, Ecore_Fd_Handler *fdh);
+//static int _em_timer (void *data);
+static void *_em_get_pos_len_th(void *par);
+static void _em_get_pos_len (Emotion_Xine_Video *ev);
+
+extern plugin_info_t emotion_xine_plugin_info[];
+
+/* this is a slave controller thread for the xine module - libxine loves
+ * to deadlock, internally stall and otherwise have unpredictable behavior
+ * if we use the main process thread for many things - so a lot will be
+ * farmed off to this slave. its job is to handle opening, closing, file
+ * opening, recoder init etc. and all sorts of things can that often block.
+ * anything this thread needs to return, it will return via the event pipe.
+ */
+/* Slave thread main loop: blocks reading (video, event) pointer pairs
+ * from the slave-command pipe and runs the potentially blocking /
+ * deadlock-prone libxine calls here instead of on the main thread.
+ * Completion of each command is reported back to the main loop via
+ * _em_module_event(). Returns only after a shutdown (mtype 3) command,
+ * at which point it has freed 'ev' itself. */
+static void *
+_em_slave(void *par)
+{
+   Emotion_Xine_Video *ev;
+   void *buf[2];
+   int len;
+
+   ev = (Emotion_Xine_Video *)par;
+   while ((len = read(ev->fd_slave_read, buf, sizeof(buf))) > 0)
+     {
+        if (len == sizeof(buf))
+          {
+             Emotion_Xine_Event *eev;
+
+             ev = buf[0];
+             eev = buf[1];
+             switch (eev->mtype)
+               {
+                case 0: /* noop */
+                  break;
+                case 1: /* init */
+                    {
+                       ev->decoder = xine_new();
+                       xine_init(ev->decoder);
+                       xine_register_plugins(ev->decoder, emotion_xine_plugin_info);
+                       if (1)
+                         {
+                            xine_cfg_entry_t cf;
+                            if (xine_config_lookup_entry(ev->decoder, "input.dvd_use_readahead", &cf))
+                              {
+                                 cf.num_value = 1; // 0 or 1
+                                 xine_config_update_entry(ev->decoder, &cf);
+                              }
+                         }
+                       DBG("OPEN VIDEO PLUGIN...");
+                       if (!ev->opt_no_video)
+                         ev->video = xine_open_video_driver(ev->decoder, "emotion",
+                                                            XINE_VISUAL_TYPE_NONE, ev);
+                       DBG("RESULT: xine_open_video_driver() = %p", ev->video);
+                       // Let xine autodetect the best audio output driver
+                       if (!ev->opt_no_audio)
+                         ev->audio = xine_open_audio_driver(ev->decoder, NULL, ev);
+                       //   ev->audio = xine_open_audio_driver(ev->decoder, "oss", ev);
+                       // dont use alsa - alsa has oss emulation.
+                       //   ev->audio = xine_open_audio_driver(ev->decoder, "alsa", ev);
+                       //   ev->audio = xine_open_audio_driver(ev->decoder, "arts", ev);
+                       //   ev->audio = xine_open_audio_driver(ev->decoder, "esd", ev);
+                       ev->stream = xine_stream_new(ev->decoder, ev->audio, ev->video);
+                       ev->queue = xine_event_new_queue(ev->stream);
+                       xine_event_create_listener_thread(ev->queue, _em_event, ev);
+                       ev->opening = 0;
+                       ev->play_ok = 1;
+                       _em_module_event(ev, 1); /* event - open done */
+                    }
+                  break;
+                case 3: /* shutdown */
+                    {
+                       _em_module_event(ev, 3);
+                       DBG("shutdown stop");
+                       xine_stop(ev->stream);
+                       // pthread_mutex_lock(&(ev->get_pos_len_mutex));
+                       if (!ev->get_pos_thread_deleted)
+                         {
+                            DBG("closing get_pos thread, %p", ev);
+                            pthread_mutex_lock(&(ev->get_pos_len_mutex));
+                            pthread_cond_broadcast(&(ev->get_pos_len_cond));
+                            pthread_mutex_unlock(&(ev->get_pos_len_mutex));
+                            /* busy-wait until the get_pos/len thread drains its request */
+                            while (ev->get_poslen);
+                         }
+                       DBG("dispose %p", ev);
+                       xine_dispose(ev->stream);
+                       DBG("dispose evq %p", ev);
+                       xine_event_dispose_queue(ev->queue);
+                       DBG("close video drv %p", ev);
+                       if (ev->video) xine_close_video_driver(ev->decoder, ev->video);
+                       DBG("wait for vo to go");
+                       /* busy-wait until the video output lets go of its last frame */
+                       while (ev->have_vo);
+                       DBG("vo gone");
+                       DBG("close audio drv %p", ev);
+                       if (ev->audio) xine_close_audio_driver(ev->decoder, ev->audio);
+                       DBG("xine exit %p", ev);
+                       xine_exit(ev->decoder);
+                       DBG("DONE %p", ev);
+                       close(ev->fd_write);
+                       close(ev->fd_read);
+                       close(ev->fd_ev_write);
+                       close(ev->fd_ev_read);
+                       close(ev->fd_slave_write);
+                       close(ev->fd_slave_read);
+                       ev->closing = 0;
+                       if (eev->xine_event) free(eev->xine_event);
+                       free(eev);
+                       /* slave owns 'ev' at shutdown; free it and exit the thread */
+                       free(ev);
+                       return NULL;
+                    }
+                  break;
+                case 2: /* file open */
+                    {
+                       int pos_stream = 0;
+                       int pos_time = 0;
+                       int length_time = 0;
+                       uint32_t v;
+                       char *file;
+
+                       file = eev->xine_event;
+                       DBG("OPEN STREAM %s", file);
+                       if (xine_open(ev->stream, file))
+                         {
+                            if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
+                              {
+                                 if (length_time == 0)
+                                   {
+                                      /* no time info: track position as a 0..1 stream fraction */
+                                      ev->pos = (double)pos_stream / 65535;
+                                      ev->len = 1.0;
+                                      ev->no_time = 1;
+                                   }
+                                 else
+                                   {
+                                      ev->pos = 0.0;
+                                      ev->len = (double)length_time / 1000.0;
+                                   }
+                              }
+                            else
+                              {
+                                 ev->pos = 0.0;
+                                 ev->len = 1.0;
+                              }
+                            /* frame duration is in 90kHz clock ticks */
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_FRAME_DURATION);
+                            if (v > 0) ev->fps = 90000.0 / (double)v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_WIDTH);
+                            ev->w = v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HEIGHT);
+                            ev->h = v;
+                            v = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
+                            ev->ratio = (double)v / 10000.0;
+                            ev->just_loaded = 1;
+                            ev->get_poslen = 0;
+                            xine_set_param(ev->stream, XINE_PARAM_AUDIO_VOLUME, ev->volume * 100);
+                         }
+                       _em_module_event(ev, 2); /* event - open done */
+                    }
+                  break;
+                case 11: /* file close */
+                    {
+                       DBG("done %p", ev);
+                       em_frame_done(ev);
+                       DBG("stop %p", ev);
+                       xine_stop(ev->stream);
+                       DBG("close %p", ev);
+                       xine_close(ev->stream);
+                       DBG("close done %p", ev);
+                       _em_module_event(ev, 11);
+                    }
+                  break;
+                case 4: /* play */
+                    {
+                       double pos;
+                       int pos_stream, pos_time, length_time;
+
+                       pos = *((double *)eev->xine_event);
+                       /* resume from pause in place when position is unchanged */
+                       if ((xine_get_param(ev->stream, XINE_PARAM_SPEED) == XINE_SPEED_PAUSE) &&
+                           (pos == ev->pos) &&
+                           (!ev->just_loaded))
+                         {
+                            xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_NORMAL);
+                         }
+                       else
+                         {
+                            if (ev->no_time)
+                              xine_play(ev->stream, pos * 65535, 0);
+                            else
+                              xine_play(ev->stream, 0, pos * 1000);
+                         }
+                       ev->just_loaded = 0;
+
+                       if (xine_get_pos_length(ev->stream,
+                                               &pos_stream,
+                                               &pos_time,
+                                               &length_time))
+                         {
+                            if (length_time == 0)
+                              {
+                                 ev->pos = (double)pos_stream / 65535;
+                                 ev->len = 1.0;
+                                 ev->no_time = 1;
+                              }
+                            else
+                              {
+                                 ev->pos = (double)pos_time / 1000.0;
+                                 ev->len = (double)length_time / 1000.0;
+                              }
+                         }
+                       _em_module_event(ev, 4);
+                    }
+                  break;
+                case 5: /* stop */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+                       _em_module_event(ev, 5);
+                    }
+                  break;
+                case 6: /* seek */
+                    {
+                       double pos;
+
+                       pos = *((double *)eev->xine_event);
+                       if (ev->no_time)
+                         xine_play(ev->stream, pos * 65535, 0);
+                       else
+                         xine_play(ev->stream, 0, pos * 1000);
+                       /* stay paused if we were not playing */
+                       if (!ev->play)
+                         xine_set_param(ev->stream, XINE_PARAM_SPEED, XINE_SPEED_PAUSE);
+                       _em_module_event(ev, 6);
+                    }
+                  break;
+                case 7: /* eject */
+                    {
+                       xine_eject(ev->stream);
+                       _em_module_event(ev, 7);
+                    }
+                  break;
+                case 8: /* spu mute */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_IGNORE_SPU, ev->spu_mute);
+                       _em_module_event(ev, 8);
+                    }
+                  break;
+                case 9: /* spu channel */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_SPU_CHANNEL, ev->spu_channel);
+                       _em_module_event(ev, 9);
+                    }
+                  break;
+                case 10: /* vol */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_VOLUME, ev->volume * 100);
+                       _em_module_event(ev, 10);
+                    }
+                  break;
+                case 12: /* audio mute */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_MUTE, ev->audio_mute);
+                    }
+                  break;
+                case 13: /* audio channel (was mislabeled "audio mute") */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL, ev->audio_channel);
+                    }
+                  break;
+                case 14: /* video channel (was mislabeled "audio mute") */
+                    {
+                       xine_set_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL, ev->video_channel);
+                    }
+                  break;
+                default:
+                  break;
+               }
+             if (eev->xine_event) free(eev->xine_event);
+             free(eev);
+          }
+     }
+   return NULL;
+}
+/* Queue a command for the slave thread by writing a (video, event)
+ * pointer pair to the slave-command pipe. Ownership of 'arg' passes to
+ * the slave, which frees it after processing. Allocation or write
+ * failure silently drops the command (write failure is perror()'d). */
+static void
+_em_slave_event(void *data, int type, void *arg)
+{
+   Emotion_Xine_Video *v = data;
+   Emotion_Xine_Event *msg;
+   void *pair[2];
+
+   msg = calloc(1, sizeof(Emotion_Xine_Event));
+   if (!msg) return;
+   msg->mtype = type;
+   msg->type = -1;
+   msg->xine_event = arg;
+   pair[0] = data;
+   pair[1] = msg;
+   if (write(v->fd_slave_write, pair, sizeof(pair)) < 0) perror("write");
+}
+
+/* Module init: allocate the per-video state, wire up three pipes
+ * (decoded-frame pipe, event pipe, slave-command pipe), start the
+ * get_pos/len helper thread and the detached slave thread, then queue
+ * the slave "init" command (mtype 1). Stores the state in
+ * *emotion_video and returns 1 on success, 0 on bad args or OOM.
+ * NOTE(review): a failed pipe() leaves the matching fds at 0 and the
+ * handler NULL — presumably treated as "can't happen"; confirm. */
+static unsigned char
+em_init(Evas_Object *obj, void **emotion_video, Emotion_Module_Options *opt)
+{
+   Emotion_Xine_Video *ev;
+   int fds[2];
+
+   if (!emotion_video) return 0;
+
+   ev = calloc(1, sizeof(Emotion_Xine_Video));
+   if (!ev) return 0;
+   ev->obj = obj;
+
+   /* frame pipe: video output thread -> main loop (non-blocking read end) */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_read = fds[0];
+        ev->fd_write = fds[1];
+        fcntl(ev->fd_read, F_SETFL, O_NONBLOCK);
+        ev->fd_handler = ecore_main_fd_handler_add(ev->fd_read, ECORE_FD_READ,
+                                                   _em_fd_active, ev,
+                                                   NULL, NULL);
+        ecore_main_fd_handler_active_set(ev->fd_handler, ECORE_FD_READ);
+     }
+   /* event pipe: xine/slave threads -> main loop (non-blocking read end) */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_ev_read = fds[0];
+        ev->fd_ev_write = fds[1];
+        fcntl(ev->fd_ev_read, F_SETFL, O_NONBLOCK);
+        ev->fd_ev_handler = ecore_main_fd_handler_add(ev->fd_ev_read,
+                                                      ECORE_FD_READ, _em_fd_ev_active, ev, NULL, NULL);
+        ecore_main_fd_handler_active_set(ev->fd_ev_handler, ECORE_FD_READ);
+     }
+   /* slave-command pipe: main loop -> slave thread (non-blocking write end) */
+   if (pipe(fds) == 0)
+     {
+        ev->fd_slave_read = fds[0];
+        ev->fd_slave_write = fds[1];
+        fcntl(ev->fd_slave_write, F_SETFL, O_NONBLOCK);
+     }
+   ev->volume = 0.8;
+   ev->delete_me = 0;
+   ev->get_pos_thread_deleted = 0;
+   ev->opening = 1;
+   ev->play_ok = 0;
+
+   if (opt)
+     {
+        ev->opt_no_audio = opt->no_audio;
+        ev->opt_no_video = opt->no_video;
+     }
+
+   pthread_cond_init(&(ev->get_pos_len_cond), NULL);
+   pthread_mutex_init(&(ev->get_pos_len_mutex), NULL);
+   pthread_create(&ev->get_pos_len_th, NULL, _em_get_pos_len_th, ev);
+
+   pthread_create(&ev->slave_th, NULL, _em_slave, ev);
+   pthread_detach(ev->slave_th);
+   _em_slave_event(ev, 1, NULL); /* queue slave "init" */
+
+   ev->buffer = 1.0;
+
+   *emotion_video = ev;
+   return 1;
+}
+
+/* Begin asynchronous teardown: detach the main-loop fd handlers and the
+ * animator, then hand a shutdown command (mtype 3) to the slave thread,
+ * which disposes all xine objects and frees 'ev' itself.
+ * Fix: the original set ev->closing = 1 twice; once is enough. */
+static int
+em_shutdown(void *ef)
+{
+   Emotion_Xine_Video *ev;
+
+   ev = (Emotion_Xine_Video *)ef;
+   ev->closing = 1;
+   ev->delete_me = 1;
+   DBG("del fds %p", ev);
+   ecore_main_fd_handler_del(ev->fd_handler);
+   ev->fd_handler = NULL;
+   ecore_main_fd_handler_del(ev->fd_ev_handler);
+   ev->fd_ev_handler = NULL;
+   if (ev->anim)
+     {
+        ecore_animator_del(ev->anim);
+        ev->anim = NULL;
+     }
+
+   _em_slave_event(ev, 3, NULL);
+   DBG("done %p", ev);
+   return 1;
+}
+
+/* Ask the slave thread to open 'file'. The filename is duplicated so
+ * the slave can free it once the open completes. Returns 1 if the
+ * request was queued, 0 when there is no module state. */
+static unsigned char
+em_file_open(const char *file, Evas_Object *obj EINA_UNUSED, void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (!v) return 0;
+   _em_slave_event(v, 2, strdup(file));
+   return 1;
+}
+
+/* Ask the slave thread to stop and close the current stream. */
+static void
+em_file_close(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v) _em_slave_event(v, 11, NULL);
+}
+
+/* Start playback at 'pos' (seconds). The position is heap-allocated and
+ * handed to the slave thread, which frees it after use.
+ * Fix: the original dereferenced the malloc() result without a NULL
+ * check; on OOM we now drop the request instead of crashing. */
+static void
+em_play(void *ef, double pos)
+{
+   Emotion_Xine_Video *ev;
+   double *ppos;
+
+   ev = (Emotion_Xine_Video *)ef;
+   ev->play = 1;
+   ev->play_ok = 0;
+   ppos = malloc(sizeof(*ppos));
+   if (!ppos) return;
+   *ppos = pos;
+   _em_slave_event(ev, 4, ppos);
+}
+
+/* Ask the slave thread to pause playback. */
+static void
+em_stop(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->play = 0;
+   v->play_ok = 0;
+   _em_slave_event(v, 5, NULL);
+}
+
+/* Report the last known video dimensions (set at file-open time). */
+static void
+em_size_get(void *ef, int *w, int *h)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (w) *w = v->w;
+   if (h) *h = v->h;
+}
+
+/* Seek to 'pos' (seconds) via the slave thread; the heap-allocated
+ * position is freed by the slave after use.
+ * Fix: the original dereferenced the malloc() result without a NULL
+ * check; on OOM we now drop the request instead of crashing. */
+static void
+em_pos_set(void *ef, double pos)
+{
+   Emotion_Xine_Video *ev;
+   double *ppos;
+
+   ev = (Emotion_Xine_Video *)ef;
+   ppos = malloc(sizeof(*ppos));
+   if (!ppos) return;
+   *ppos = pos;
+   _em_slave_event(ev, 6, ppos);
+}
+
+/* Last known media length in seconds (1.0 for untimed streams). */
+static double
+em_len_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->len;
+}
+
+/* Last buffering progress value reported by xine. */
+static double
+em_buffer_size_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->buffer;
+}
+
+/* Frame-rate numerator, scaled by 10000 to pair with em_fps_den_get(). */
+static int
+em_fps_num_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return (int)(v->fps * 10000.0);
+}
+
+/* Fixed denominator matching em_fps_num_get()'s x10000 scaling. */
+static int
+em_fps_den_get(void *ef EINA_UNUSED)
+{
+   return 10000;
+}
+
+/* Frame rate derived from the stream's frame duration at open time. */
+static double
+em_fps_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->fps;
+}
+
+/* Current playback position in seconds; refreshes it first via the
+ * get_pos/len helper thread. */
+static double
+em_pos_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   _em_get_pos_len(v);
+   return v->pos;
+}
+
+/* Store the requested visualization mode (no-op when unchanged). */
+static void
+em_vis_set(void *ef, Emotion_Vis vis)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->vis != vis) v->vis = vis;
+}
+
+/* Currently stored visualization mode. */
+static Emotion_Vis
+em_vis_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->vis;
+}
+
+/* The xine backend supports no audio visualizations. */
+static Eina_Bool
+em_vis_supported(void *ef EINA_UNUSED, Emotion_Vis vis EINA_UNUSED)
+{
+   return EINA_FALSE;
+}
+
+/* Display aspect ratio reported by xine at open time. */
+static double
+em_ratio_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->ratio;
+}
+
+/* Non-zero when the stream has video and xine can decode it.
+ * Always 0 while opening or before the stream is ready. */
+static int
+em_video_handled(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return (xine_get_stream_info(v->stream, XINE_STREAM_INFO_HAS_VIDEO) &&
+           xine_get_stream_info(v->stream, XINE_STREAM_INFO_VIDEO_HANDLED));
+}
+
+/* Non-zero when the stream has audio and xine can decode it.
+ * Always 0 while opening or before the stream is ready. */
+static int
+em_audio_handled(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return (xine_get_stream_info(v->stream, XINE_STREAM_INFO_HAS_AUDIO) &&
+           xine_get_stream_info(v->stream, XINE_STREAM_INFO_AUDIO_HANDLED));
+}
+
+/* Non-zero when xine reports the stream as seekable (0 while opening). */
+static int
+em_seekable(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_stream_info(v->stream, XINE_STREAM_INFO_SEEKABLE);
+}
+
+/* Release the current frame: decrement the queued-frame count, invoke
+ * the frame's completion callback, and drop the reference. */
+static void
+em_frame_done(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+   Emotion_Xine_Video_Frame *frame = v->cur_frame;
+
+   if (!frame) return;
+   v->fq--;
+   if (frame->done_func) frame->done_func(frame->done_data);
+   v->cur_frame = NULL;
+}
+
+/* Pixel format of the current frame; defaults to YV12 with no frame. */
+static Emotion_Format
+em_format_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+   volatile Emotion_Xine_Video_Frame *frame = v->cur_frame;
+
+   return frame ? frame->format : EMOTION_FORMAT_YV12;
+}
+
+/* Dimensions of the current decoded frame (0x0 when no frame). */
+static void
+em_video_data_size_get(void *ef, int *w, int *h)
+{
+   Emotion_Xine_Video *v = ef;
+   volatile Emotion_Xine_Video_Frame *frame = v->cur_frame;
+
+   if (w) *w = frame ? frame->w : 0;
+   if (h) *h = frame ? frame->h : 0;
+}
+
+/* Fill per-row pointer tables for the current planar YV12 frame, using
+ * each plane's stride. Returns 1 on success, 0 when there is no frame
+ * or the frame carries no planar data. */
+static int
+em_yuv_rows_get(void *ef, int w EINA_UNUSED, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows)
+{
+   Emotion_Xine_Video *v = ef;
+   volatile Emotion_Xine_Video_Frame *frame = v->cur_frame;
+   int row;
+
+   if (!frame || !frame->y) return 0;
+   for (row = 0; row < h; row++)
+     yrows[row] = frame->y + (row * frame->y_stride);
+   for (row = 0; row < (h / 2); row++)
+     {
+        urows[row] = frame->u + (row * frame->u_stride);
+        vrows[row] = frame->v + (row * frame->v_stride);
+     }
+   return 1;
+}
+
+/* Expose the current frame's BGRA pixels. Returns 1 on success, 0 when
+ * there is no frame or the frame has no packed data. */
+static int
+em_bgra_data_get(void *ef, unsigned char **bgra_data)
+{
+   Emotion_Xine_Video *v = ef;
+   volatile Emotion_Xine_Video_Frame *frame = v->cur_frame;
+
+   if (!frame || !frame->bgra_data) return 0;
+   *bgra_data = frame->bgra_data;
+   return 1;
+}
+
+/* Translate an emotion input event into the matching xine input event
+ * and send it to the stream (DVD menu navigation, number keys, ...).
+ * Ignored while opening or before the stream is ready; unknown event
+ * ids are dropped.
+ * Fix: removed the unreachable "break;" after the default-case return. */
+static void
+em_event_feed(void *ef, int event)
+{
+   Emotion_Xine_Video *ev;
+   xine_event_t xine_event;
+
+   ev = (Emotion_Xine_Video *)ef;
+   if ((ev->opening) || (!ev->play_ok)) return;
+   xine_event.data_length = 0;
+   xine_event.data = NULL;
+   xine_event.stream = ev->stream;
+   gettimeofday(&xine_event.tv, NULL);
+   switch (event)
+     {
+      case EMOTION_EVENT_MENU1:
+        xine_event.type = XINE_EVENT_INPUT_MENU1;
+        break;
+      case EMOTION_EVENT_MENU2:
+        xine_event.type = XINE_EVENT_INPUT_MENU2;
+        break;
+      case EMOTION_EVENT_MENU3:
+        xine_event.type = XINE_EVENT_INPUT_MENU3;
+        break;
+      case EMOTION_EVENT_MENU4:
+        xine_event.type = XINE_EVENT_INPUT_MENU4;
+        break;
+      case EMOTION_EVENT_MENU5:
+        xine_event.type = XINE_EVENT_INPUT_MENU5;
+        break;
+      case EMOTION_EVENT_MENU6:
+        xine_event.type = XINE_EVENT_INPUT_MENU6;
+        break;
+      case EMOTION_EVENT_MENU7:
+        xine_event.type = XINE_EVENT_INPUT_MENU7;
+        break;
+      case EMOTION_EVENT_UP:
+        xine_event.type = XINE_EVENT_INPUT_UP;
+        break;
+      case EMOTION_EVENT_DOWN:
+        xine_event.type = XINE_EVENT_INPUT_DOWN;
+        break;
+      case EMOTION_EVENT_LEFT:
+        xine_event.type = XINE_EVENT_INPUT_LEFT;
+        break;
+      case EMOTION_EVENT_RIGHT:
+        xine_event.type = XINE_EVENT_INPUT_RIGHT;
+        break;
+      case EMOTION_EVENT_SELECT:
+        xine_event.type = XINE_EVENT_INPUT_SELECT;
+        break;
+      case EMOTION_EVENT_NEXT:
+        xine_event.type = XINE_EVENT_INPUT_NEXT;
+        break;
+      case EMOTION_EVENT_PREV:
+        xine_event.type = XINE_EVENT_INPUT_PREVIOUS;
+        break;
+      case EMOTION_EVENT_ANGLE_NEXT:
+        xine_event.type = XINE_EVENT_INPUT_ANGLE_NEXT;
+        break;
+      case EMOTION_EVENT_ANGLE_PREV:
+        xine_event.type = XINE_EVENT_INPUT_ANGLE_PREVIOUS;
+        break;
+      case EMOTION_EVENT_FORCE:
+        xine_event.type = XINE_EVENT_INPUT_BUTTON_FORCE;
+        break;
+      case EMOTION_EVENT_0:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_0;
+        break;
+      case EMOTION_EVENT_1:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_1;
+        break;
+      case EMOTION_EVENT_2:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_2;
+        break;
+      case EMOTION_EVENT_3:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_3;
+        break;
+      case EMOTION_EVENT_4:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_4;
+        break;
+      case EMOTION_EVENT_5:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_5;
+        break;
+      case EMOTION_EVENT_6:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_6;
+        break;
+      case EMOTION_EVENT_7:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_7;
+        break;
+      case EMOTION_EVENT_8:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_8;
+        break;
+      case EMOTION_EVENT_9:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_9;
+        break;
+      case EMOTION_EVENT_10:
+        xine_event.type = XINE_EVENT_INPUT_NUMBER_10_ADD;
+        break;
+      default:
+        return;
+     }
+   xine_event_send(ev->stream, &xine_event);
+}
+
+/* Forward a mouse button press to xine (DVD menu navigation).
+ * NOTE(review): the 'button' argument is ignored and xine always
+ * receives button 1 — presumably only left-click matters for DVD
+ * menus; confirm upstream intent before changing. */
+static void
+em_event_mouse_button_feed(void *ef, int button EINA_UNUSED, int x, int y)
+{
+   Emotion_Xine_Video *ev;
+   xine_event_t xine_event;
+   xine_input_data_t xine_input;
+
+   ev = (Emotion_Xine_Video *)ef;
+   /* ignore input until the stream is open and ready */
+   if ((ev->opening) || (!ev->play_ok)) return;
+   xine_event.stream = ev->stream;
+   gettimeofday(&xine_event.tv, NULL);
+   xine_event.type = XINE_EVENT_INPUT_MOUSE_BUTTON;
+   xine_input.button = 1;
+   xine_input.x = x;
+   xine_input.y = y;
+   xine_event.data = &xine_input;
+   xine_event.data_length = sizeof(xine_input);
+   xine_event_send(ev->stream, &xine_event);
+}
+
+/* Forward pointer motion to xine (DVD menu button highlighting).
+ * Ignored while opening or before the stream is ready. */
+static void
+em_event_mouse_move_feed(void *ef, int x, int y)
+{
+   Emotion_Xine_Video *v = ef;
+   xine_event_t xev;
+   xine_input_data_t input;
+
+   if ((v->opening) || (!v->play_ok)) return;
+   xev.stream = v->stream;
+   gettimeofday(&xev.tv, NULL);
+   xev.type = XINE_EVENT_INPUT_MOUSE_MOVE;
+   input.button = 0;
+   input.x = x;
+   input.y = y;
+   xev.data = &input;
+   xev.data_length = sizeof(input);
+   xine_event_send(v->stream, &xev);
+}
+
+/* Number of video channels; falls back to 1 when the stream has video
+ * but reports no channel count. Returns 0 while opening. */
+static int
+em_video_channel_count(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+   int count;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   count = xine_get_stream_info(v->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
+   if ((count < 1) &&
+       xine_get_stream_info(v->stream, XINE_STREAM_INFO_HAS_VIDEO)) return 1;
+   return count;
+}
+
+/* Select a video channel (clamped at 0); applied by the slave thread. */
+static void
+em_video_channel_set(void *ef, int channel)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->video_channel = (channel < 0) ? 0 : channel;
+   _em_slave_event(v, 14, NULL);
+}
+
+/* Currently selected video channel (0 while opening). */
+static int
+em_video_channel_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_param(v->stream, XINE_PARAM_VIDEO_CHANNEL);
+}
+
+/* xine provides no names for video channels. */
+static const char *
+em_video_channel_name_get(void *ef EINA_UNUSED, int channel EINA_UNUSED)
+{
+   return NULL;
+}
+
+/* Record video mute; honored by the frame pipe handler, which drops
+ * frames instead of displaying them. */
+static void
+em_video_channel_mute_set(void *ef, int mute)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->video_mute = mute;
+}
+
+/* Current video mute flag. */
+static int
+em_video_channel_mute_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->video_mute;
+}
+
+/* Number of audio channels reported by xine (0 while opening). */
+static int
+em_audio_channel_count(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_stream_info(v->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
+}
+
+/* Select a logical audio channel (-1 = auto, clamped below at -1);
+ * applied by the slave thread. */
+static void
+em_audio_channel_set(void *ef, int channel)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->audio_channel = (channel < -1) ? -1 : channel;
+   _em_slave_event(v, 13, NULL);
+}
+
+/* Currently selected logical audio channel (0 while opening). */
+static int
+em_audio_channel_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_param(v->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
+}
+
+/* Language name of an audio channel. NOTE: the result points at a
+ * static buffer and is only valid until the next call. */
+static const char *
+em_audio_channel_name_get(void *ef, int channel)
+{
+   Emotion_Xine_Video *v = ef;
+   static char lang[XINE_LANG_MAX + 1];
+
+   if (v->opening) return NULL;
+   lang[0] = 0;
+   return xine_get_audio_lang(v->stream, channel, lang) ? lang : NULL;
+}
+
+/* Record audio mute and ask the slave thread to apply it. */
+static void
+em_audio_channel_mute_set(void *ef, int mute)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->audio_mute = mute;
+   _em_slave_event(v, 12, NULL);
+}
+
+/* Current audio mute flag. */
+static int
+em_audio_channel_mute_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->audio_mute;
+}
+
+/* Set volume, clamped to [0,1]; the slave thread applies it to xine
+ * on a 0-100 scale. */
+static void
+em_audio_channel_volume_set(void *ef, double vol)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (vol < 0.0) vol = 0.0;
+   else if (vol > 1.0) vol = 1.0;
+   v->volume = vol;
+   _em_slave_event(v, 10, NULL);
+}
+
+/* Current volume in [0,1]. Queries xine (0-100 scale) and caches the
+ * result once the stream is ready; otherwise returns the cached value. */
+static double
+em_audio_channel_volume_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return v->volume;
+   v->volume = xine_get_param(v->stream, XINE_PARAM_AUDIO_VOLUME) / 100.0;
+   return v->volume;
+}
+
+/* Number of subtitle (SPU) channels reported by xine (0 while opening). */
+static int
+em_spu_channel_count(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_stream_info(v->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
+}
+
+/* Select a subtitle channel (clamped at 0); applied by the slave thread. */
+static void
+em_spu_channel_set(void *ef, int channel)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->spu_channel = (channel < 0) ? 0 : channel;
+   _em_slave_event(v, 9, NULL);
+}
+
+/* Currently selected subtitle channel (0 while opening). */
+static int
+em_spu_channel_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_param(v->stream, XINE_PARAM_SPU_CHANNEL);
+}
+
+/* Language name of a subtitle channel. NOTE: the result points at a
+ * static buffer and is only valid until the next call. */
+static const char *
+em_spu_channel_name_get(void *ef, int channel)
+{
+   Emotion_Xine_Video *v = ef;
+   static char lang[XINE_LANG_MAX + 1];
+
+   if (v->opening) return NULL;
+   lang[0] = 0;
+   return xine_get_spu_lang(v->stream, channel, lang) ? lang : NULL;
+}
+
+/* Record subtitle mute and ask the slave thread to apply it. */
+static void
+em_spu_channel_mute_set(void *ef, int mute)
+{
+   Emotion_Xine_Video *v = ef;
+
+   v->spu_mute = mute;
+   _em_slave_event(v, 8, NULL);
+}
+
+/* Current subtitle mute flag. */
+static int
+em_spu_channel_mute_get(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   return v->spu_mute;
+}
+
+/* xine only reports chapter presence, not a count: report 99 when
+ * chapters exist, 0 otherwise (and 0 while opening). */
+static int
+em_chapter_count(void *ef)
+{
+   Emotion_Xine_Video *v = ef;
+
+   if (v->opening || (!v->play_ok)) return 0;
+   return xine_get_stream_info(v->stream, XINE_STREAM_INFO_HAS_CHAPTERS) ? 99 : 0;
+}
+
+/* Chapter selection is not implemented for the xine backend. */
+static void
+em_chapter_set(void *ef EINA_UNUSED, int chapter EINA_UNUSED)
+{
+}
+
+/* Chapter query is not implemented; always chapter 0. */
+static int
+em_chapter_get(void *ef EINA_UNUSED)
+{
+   return 0;
+}
+
+/* Chapter names are not available from the xine backend. */
+static const char *
+em_chapter_name_get(void *ef EINA_UNUSED, int chapter EINA_UNUSED)
+{
+   return NULL;
+}
+
+/* Playback-speed control is not implemented for the xine backend. */
+static void
+em_speed_set(void *ef EINA_UNUSED, double speed EINA_UNUSED)
+{
+}
+
+/* Playback speed is fixed at 1.0 (normal speed). */
+static double
+em_speed_get(void *ef EINA_UNUSED)
+{
+   return 1.0;
+}
+
+/* Ask the slave thread to eject the medium; always reports success
+ * since the operation completes asynchronously. */
+static int
+em_eject(void *ef)
+{
+   _em_slave_event((Emotion_Xine_Video *)ef, 7, NULL);
+   return 1;
+}
+
+/* Map an emotion meta id to the matching xine meta-info string.
+ * Returns NULL while opening, before the stream is ready, or for
+ * unknown ids.
+ * Fix: removed the dead "break;" statements that followed every
+ * "return" inside the switch. */
+static const char *
+em_meta_get(void *ef, int meta)
+{
+   Emotion_Xine_Video *ev;
+
+   ev = (Emotion_Xine_Video *)ef;
+   if ((ev->opening) || (!ev->play_ok)) return NULL;
+   switch (meta)
+     {
+      case META_TRACK_TITLE:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_TITLE);
+      case META_TRACK_ARTIST:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_ARTIST);
+      case META_TRACK_GENRE:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_GENRE);
+      case META_TRACK_COMMENT:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_COMMENT);
+      case META_TRACK_ALBUM:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_ALBUM);
+      case META_TRACK_YEAR:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_YEAR);
+      case META_TRACK_DISCID:
+        return xine_get_meta_info(ev->stream, XINE_META_INFO_CDINDEX_DISCID);
+      default:
+        break;
+     }
+   return NULL;
+}
+
+/* Main-loop handler for the frame pipe: drains decoded-frame pointers
+ * pushed by the video output, publishes each as the current frame, and
+ * emits the matching emotion frame/resize/position signals. Muted
+ * video releases the frame immediately instead of displaying it.
+ * Fix: removed the unreachable "data = NULL;" statement that followed
+ * the return (the parameter is already marked EINA_UNUSED). */
+static Eina_Bool
+_em_fd_active(void *data EINA_UNUSED, Ecore_Fd_Handler *fdh)
+{
+   void *buf;
+   int fd, len;
+   Emotion_Xine_Video_Frame *fr;
+
+   fd = ecore_main_fd_handler_fd_get(fdh);
+   while ((len = read(fd, &buf, sizeof(buf))) > 0)
+     {
+        if (len == sizeof(buf))
+          {
+             Emotion_Xine_Video *ev;
+
+             fr = buf;
+             ev = _emotion_video_get(fr->obj);
+             if (ev)
+               {
+                  em_frame_done(ev);
+                  ev->cur_frame = fr;
+                  _em_get_pos_len(ev);
+                  if ((xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO)) &&
+                      (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED)))
+                    {
+                       if (ev->video_mute) em_frame_done(ev);
+                       else _emotion_frame_new(fr->obj);
+                    }
+                  _emotion_frame_resize(fr->obj, fr->w, fr->h, fr->ratio);
+                  _emotion_video_pos_update(fr->obj, ev->pos, ev->len);
+               }
+          }
+     }
+   return EINA_TRUE;
+}
+
+/* libxine event listener (runs on xine's listener thread): deep-copies
+ * the event payload and forwards a (video, event) pointer pair to the
+ * main loop through the event pipe. Allocation failure drops the
+ * event; a failed write is perror()'d. */
+static void
+_em_event(void *data, const xine_event_t *event)
+{
+   Emotion_Xine_Video *v = data;
+   Emotion_Xine_Event *msg;
+   void *pair[2];
+
+   msg = calloc(1, sizeof(Emotion_Xine_Event));
+   if (!msg) return;
+   msg->mtype = 0;
+   msg->type = event->type;
+   if (event->data)
+     {
+        msg->xine_event = malloc(event->data_length);
+        if (!msg->xine_event)
+          {
+             free(msg);
+             return;
+          }
+        memcpy(msg->xine_event, event->data, event->data_length);
+     }
+   pair[0] = data;
+   pair[1] = msg;
+   if (write(v->fd_ev_write, pair, sizeof(pair)) < 0) perror("write");
+}
+
+/* Send a module-level completion event (mtype != 0, no payload) to the
+ * main loop through the event pipe. */
+static void
+_em_module_event(void *data, int type)
+{
+   Emotion_Xine_Video *v = data;
+   Emotion_Xine_Event *msg;
+   void *pair[2];
+
+   msg = calloc(1, sizeof(Emotion_Xine_Event));
+   if (!msg) return;
+   msg->mtype = type;
+   msg->type = -1;
+   pair[0] = data;
+   pair[1] = msg;
+   if (write(v->fd_ev_write, pair, sizeof(pair)) < 0) perror("write");
+}
+
+/* Animator tick used for audio-only streams (no frames arriving to
+ * drive updates): refresh position/length each tick; keep running. */
+static Eina_Bool
+_em_audio_only_poller(void *data)
+{
+   _em_get_pos_len((Emotion_Xine_Video *)data);
+   return EINA_TRUE;
+}
+
+static Eina_Bool
+_em_fd_ev_active(void *data EINA_UNUSED, Ecore_Fd_Handler *fdh)
+{
+ int fd, len;
+ void *buf[2];
+
+ fd = ecore_main_fd_handler_fd_get(fdh);
+ while ((len = read(fd, buf, sizeof(buf))) > 0)
+ {
+ if (len == sizeof(buf))
+ {
+ Emotion_Xine_Video *ev;
+ Emotion_Xine_Event *eev;
+
+ ev = buf[0];
+ eev = buf[1];
+ if (eev->mtype != 0)
+ {
+ switch (eev->mtype)
+ {
+ case 1: /* init done */
+ ev->play_ok = 1;
+ break;
+ case 2: /* open done */
+ ev->play_ok = 1;
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ _emotion_open_done(ev->obj);
+ _emotion_frame_resize(ev->obj, ev->w, ev->h, ev->ratio);
+ break;
+ case 3: /* shutdown done */
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ ev->play_ok = 1;
+ break;
+ case 4: /* play done */
+ ev->play_ok = 1;
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ if ((!(xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_VIDEO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_HANDLED))) &&
+ (xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_AUDIO) &&
+ xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_HANDLED)))
+ ev->anim = ecore_animator_add(_em_audio_only_poller, ev);
+ _emotion_playback_started(ev->obj);
+ break;
+ case 5: /* stop done */
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ ev->play_ok = 1;
+ break;
+ case 6: /* seek done */
+ ev->play_ok = 1;
+ _emotion_seek_done(ev->obj);
+ _em_get_pos_len(ev);
+ _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+ break;
+ case 7: /* eject done */
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ ev->play_ok = 1;
+ break;
+ case 8: /* spu mute done */
+ ev->play_ok = 1;
+ break;
+ case 9: /* channel done */
+ ev->play_ok = 1;
+ break;
+ case 10: /* volume done */
+ ev->play_ok = 1;
+ break;
+ case 11: /* close done */
+ if (ev->anim)
+ {
+ ecore_animator_del(ev->anim);
+ ev->anim = NULL;
+ }
+ ev->play_ok = 1;
+ break;
+ case 15: /* get pos done */
+ if (ev->last_pos != ev->pos)
+ {
+ ev->last_pos = ev->pos;
+ _emotion_video_pos_update(ev->obj, ev->pos, ev->len);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ else
+ {
+ switch (eev->type)
+ {
+ case XINE_EVENT_UI_PLAYBACK_FINISHED:
+ {
+ ev->play = 0;
+ _emotion_decode_stop(ev->obj);
+ _emotion_playback_finished(ev->obj);
+ }
+ break;
+ case XINE_EVENT_UI_CHANNELS_CHANGED:
+ {
+ _emotion_channels_change(ev->obj);
+ }
+ break;
+ case XINE_EVENT_UI_SET_TITLE:
+ {
+ xine_ui_data_t *e;
+
+ e = (xine_ui_data_t *)eev->xine_event;
+ _emotion_title_set(ev->obj, e->str);
+ }
+ break;
+ case XINE_EVENT_FRAME_FORMAT_CHANGE:
+ break;
+ case XINE_EVENT_UI_MESSAGE:
+ {
+ WRN("UI Message [FIXME: break this out to emotion api]");
+ // e->type = error type(XINE_MSG_NO_ERROR, XINE_MSG_GENERAL_WARNING, XINE_MSG_UNKNOWN_HOST etc.)
+ // e->messages is a list of messages DOUBLE null terminated
+ }
+ break;
+ case XINE_EVENT_AUDIO_LEVEL:
+ {
+ _emotion_audio_level_change(ev->obj);
+ WRN("Audio Level [FIXME: break this out to emotion api]");
+ // e->left (0->100)
+ // e->right
+ // e->mute
+ }
+ break;
+ case XINE_EVENT_PROGRESS:
+ {
+ xine_progress_data_t *e;
+
+ e = (xine_progress_data_t *)eev->xine_event;
+ DBG("PROGRESS: %i", e->percent);
+ ev->buffer = e->percent;
+ _emotion_progress_set(ev->obj, (char *)e->description, (double)e->percent / 100.0);
+ }
+ break;
+ case XINE_EVENT_MRL_REFERENCE_EXT:
+ {
+ xine_mrl_reference_data_ext_t *e;
+
+ e = (xine_mrl_reference_data_ext_t *)eev->xine_event;
+ _emotion_file_ref_set(ev->obj, e->mrl, e->alternative);
+ }
+ break;
+ case XINE_EVENT_UI_NUM_BUTTONS:
+ {
+ xine_ui_data_t *e;
+
+ e = (xine_ui_data_t *)eev->xine_event;
+ _emotion_spu_button_num_set(ev->obj, e->num_buttons);
+ }
+ break;
+ case XINE_EVENT_SPU_BUTTON:
+ {
+ xine_spu_button_t *e;
+
+ e = (xine_spu_button_t *)eev->xine_event;
+ if (e->direction == 1)
+ _emotion_spu_button_set(ev->obj, e->button);
+ else
+ _emotion_spu_button_set(ev->obj, -1);
+ }
+ break;
+ case XINE_EVENT_DROPPED_FRAMES:
+ {
+ xine_dropped_frames_t *e;
+
+ e = (xine_dropped_frames_t *)eev->xine_event;
+ WRN("Dropped Frames (skipped %i) (discarded %i) [FIXME: break this out to the emotion api]", e->skipped_frames, e->discarded_frames);
+ // e->skipped_frames = % frames skipped * 10
+ // e->discarded_frames = % frames skipped * 10
+ }
+ break;
+ default:
+ // DBG("unknown event type %i", eev->type);
+ break;
+ }
+ }
+ if (eev->xine_event) free(eev->xine_event);
+ free(eev);
+ }
+ }
+ return EINA_TRUE;
+}
+
static void *
_em_get_pos_len_th(void *par)
{
   /* Worker thread: blocks on get_pos_len_cond and, when woken with
    * ev->get_poslen set, queries xine for the stream position/length
    * (a potentially blocking call) so the mainloop never stalls on it.
    * The result is published through ev->pos / ev->len / ev->no_time and
    * announced to the mainloop via the mtype==15 pipe event.
    * NOTE(review): the wait is not wrapped in a predicate loop; a spurious
    * wakeup simply falls through to the get_poslen check and loops again. */
   Emotion_Xine_Video *ev;

   ev = (Emotion_Xine_Video *)par;

   for (;;)
     {
        pthread_mutex_lock(&(ev->get_pos_len_mutex));
        pthread_cond_wait(&(ev->get_pos_len_cond), &(ev->get_pos_len_mutex));
        pthread_mutex_unlock(&(ev->get_pos_len_mutex));
        if (ev->get_poslen)
          {
             int pos_stream = 0;
             int pos_time = 0;
             int length_time = 0;

             if (xine_get_pos_length(ev->stream, &pos_stream, &pos_time, &length_time))
               {
                  if (length_time == 0)
                    {
                       /* no duration known (e.g. live stream): report the
                        * 16-bit stream position as a 0..1 fraction instead */
                       ev->pos = (double)pos_stream / 65535;
                       ev->len = 1.0;
                       ev->no_time = 1;
                    }
                  else
                    {
                       /* xine reports milliseconds; emotion wants seconds */
                       ev->pos = (double)pos_time / 1000.0;
                       ev->len = (double)length_time / 1000.0;
                       ev->no_time = 0;
                    }
               }
             ev->get_poslen = 0;
             _em_module_event(ev, 15); /* event - getpos done */
             //DBG("get pos %3.3f", ev->pos);
          }
        if (ev->delete_me)
          {
             /* owner requested teardown: flag it so shutdown can reap us */
             ev->get_pos_thread_deleted = 1;
             return NULL;
          }
     }
   return NULL;
}
+
static void
_em_get_pos_len(Emotion_Xine_Video *ev)
{
   /* Ask the _em_get_pos_len_th worker to refresh ev->pos / ev->len.
    * Non-blocking: raises the request flag then signals the condition;
    * the result arrives later via the mtype==15 pipe event.
    * The flag is set before taking the mutex so the worker is guaranteed
    * to see it once the broadcast wakes it. */
   if (!ev->play_ok) return;
   ev->get_poslen = 1;
   pthread_mutex_lock(&(ev->get_pos_len_mutex));
   pthread_cond_broadcast(&(ev->get_pos_len_cond));
   pthread_mutex_unlock(&(ev->get_pos_len_mutex));
}
+
/* Backend vtable handed to emotion core by module_open(): maps the generic
 * Emotion video-module API onto this xine backend's em_* implementations.
 * Entries left NULL (priority_set/get, handle) are optional features this
 * backend does not provide. Order must match Emotion_Video_Module. */
static Emotion_Video_Module em_module =
{
   em_init, /* init */
   em_shutdown, /* shutdown */
   em_file_open, /* file_open */
   em_file_close, /* file_close */
   em_play, /* play */
   em_stop, /* stop */
   em_size_get, /* size_get */
   em_pos_set, /* pos_set */
   em_len_get, /* len_get */
   em_buffer_size_get, /* buffer_size_get */
   em_fps_num_get, /* fps_num_get */
   em_fps_den_get, /* fps_den_get */
   em_fps_get, /* fps_get */
   em_pos_get, /* pos_get */
   em_vis_set, /* vis_set */
   em_vis_get, /* vis_get */
   em_vis_supported, /* vis_supported */
   em_ratio_get, /* ratio_get */
   em_video_handled, /* video_handled */
   em_audio_handled, /* audio_handled */
   em_seekable, /* seekable */
   em_frame_done, /* frame_done */
   em_format_get, /* format_get */
   em_video_data_size_get, /* video_data_size_get */
   em_yuv_rows_get, /* yuv_rows_get */
   em_bgra_data_get, /* bgra_data_get */
   em_event_feed, /* event_feed */
   em_event_mouse_button_feed, /* event_mouse_button_feed */
   em_event_mouse_move_feed, /* event_mouse_move_feed */
   em_video_channel_count, /* video_channel_count */
   em_video_channel_set, /* video_channel_set */
   em_video_channel_get, /* video_channel_get */
   em_video_channel_name_get, /* video_channel_name_get */
   em_video_channel_mute_set, /* video_channel_mute_set */
   em_video_channel_mute_get, /* video_channel_mute_get */
   em_audio_channel_count, /* audio_channel_count */
   em_audio_channel_set, /* audio_channel_set */
   em_audio_channel_get, /* audio_channel_get */
   em_audio_channel_name_get, /* audio_channel_name_get */
   em_audio_channel_mute_set, /* audio_channel_mute_set */
   em_audio_channel_mute_get, /* audio_channel_mute_get */
   em_audio_channel_volume_set, /* audio_channel_volume_set */
   em_audio_channel_volume_get, /* audio_channel_volume_get */
   em_spu_channel_count, /* spu_channel_count */
   em_spu_channel_set, /* spu_channel_set */
   em_spu_channel_get, /* spu_channel_get */
   em_spu_channel_name_get, /* spu_channel_name_get */
   em_spu_channel_mute_set, /* spu_channel_mute_set */
   em_spu_channel_mute_get, /* spu_channel_mute_get */
   em_chapter_count, /* chapter_count */
   em_chapter_set, /* chapter_set */
   em_chapter_get, /* chapter_get */
   em_chapter_name_get, /* chapter_name_get */
   em_speed_set, /* speed_set */
   em_speed_get, /* speed_get */
   em_eject, /* eject */
   em_meta_get, /* meta_get */
   NULL, /* priority_set */
   NULL, /* priority_get */
   NULL /* handle */
};
+
+static Eina_Bool
+module_open(Evas_Object *obj, const Emotion_Video_Module **module, void **video, Emotion_Module_Options *opt)
+{
+ if (!module)
+ return EINA_FALSE;
+
+ if (_emotion_xine_log_domain < 0)
+ {
+ eina_threads_init();
+ eina_log_threads_enable();
+ _emotion_xine_log_domain = eina_log_domain_register
+ ("emotion-xine", EINA_COLOR_LIGHTCYAN);
+ if (_emotion_xine_log_domain < 0)
+ {
+ EINA_LOG_CRIT("Could not register log domain 'emotion-xine'");
+ return EINA_FALSE;
+ }
+ }
+
+ if (!em_module.init(obj, video, opt))
+ return EINA_FALSE;
+
+ *module = &em_module;
+ return EINA_TRUE;
+}
+
static void
module_close(Emotion_Video_Module *module EINA_UNUSED, void *video)
{
   /* Tear down the per-object backend state created by module_open(). */
   em_module.shutdown(video);
}
+
Eina_Bool
xine_module_init(void)
{
   /* Register this backend with emotion's module registry as "xine". */
   return _emotion_module_register("xine", module_open, module_close);
}
+
void
xine_module_shutdown(void)
{
   /* Remove the "xine" backend from emotion's module registry. */
   _emotion_module_unregister("xine");
}
+
#ifndef EMOTION_STATIC_BUILD_XINE

/* When built as a shared module, eina invokes init/shutdown on load/unload;
 * static builds are expected to call them from emotion core instead. */
EINA_MODULE_INIT(xine_module_init);
EINA_MODULE_SHUTDOWN(xine_module_shutdown);

#endif
+
#if 0
/* Disabled debug helper kept for reference: dumps every stream-info,
 * meta-info and channel parameter xine exposes for ev->stream to the
 * debug log, including per-channel audio/SPU language codes.
 * NOTE(review): xine_get_meta_info() returns const strings in modern
 * xine-lib; the char* locals would need constifying before re-enabling. */
void
em_debug(Emotion_Xine_Video *ev)
{
   int has_chapters = 0;
   int max_spu = 0;
   int max_audio = 0;
   int video_channels = 0;
   int video_streams = 0;
   int video_seekable = 0;
   char *title;
   char *comment;
   char *artist;
   char *genre;
   char *album;
   char *year;
   char *cdindex_discid;
   int video_channel = 0;
   int audio_channel = 0;
   int spu_channel = 0;
   int video_ratio = 0;
   int audio_mode = 0;

// return;
   has_chapters = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_HAS_CHAPTERS);
   max_spu = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_SPU_CHANNEL);
   max_audio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_MAX_AUDIO_CHANNEL);
   video_channels = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_CHANNELS);
   video_streams = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_STREAMS);
   video_seekable = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_SEEKABLE);
   title = xine_get_meta_info(ev->stream, XINE_META_INFO_TITLE);
   comment = xine_get_meta_info(ev->stream, XINE_META_INFO_COMMENT);
   artist = xine_get_meta_info(ev->stream, XINE_META_INFO_ARTIST);
   genre = xine_get_meta_info(ev->stream, XINE_META_INFO_GENRE);
   album = xine_get_meta_info(ev->stream, XINE_META_INFO_ALBUM);
   year = xine_get_meta_info(ev->stream, XINE_META_INFO_YEAR);
   cdindex_discid = xine_get_meta_info(ev->stream, XINE_META_INFO_CDINDEX_DISCID);
   video_channel = xine_get_param(ev->stream, XINE_PARAM_VIDEO_CHANNEL);
   audio_channel = xine_get_param(ev->stream, XINE_PARAM_AUDIO_CHANNEL_LOGICAL);
   spu_channel = xine_get_param(ev->stream, XINE_PARAM_SPU_CHANNEL);
   video_ratio = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_VIDEO_RATIO);
   audio_mode = xine_get_stream_info(ev->stream, XINE_STREAM_INFO_AUDIO_MODE);
   DBG("has_chapters = %i", has_chapters);
   DBG("max_spu = %i", max_spu);
   DBG("max_audio = %i", max_audio);
   DBG("video_channels = %i", video_channels);
   DBG("video_streams = %i", video_streams);
   DBG("video_seekable = %i", video_seekable);
   DBG("title = %s", title);
   DBG("comment = %s", comment);
   DBG("artist = %s", artist);
   DBG("genre = %s", genre);
   DBG("album = %s", album);
   DBG("year = %s", year);
   DBG("cdindex_discid = %s", cdindex_discid);
   DBG("video_channel = %i", video_channel);
   DBG("audio_channel = %i", audio_channel);
   DBG("spu_channels = %i", spu_channel);
   DBG("video_ratio = %i", video_ratio);
   DBG("audio_mode = %i", audio_mode);
   {
      int i;

      /* language code for each audio and subtitle channel, if any */
      for (i = 0; i <= max_audio; i++)
        {
           char lang[XINE_LANG_MAX + 1];
           char buf[128] = "NONE";

           lang[0] = 0;
           if (xine_get_audio_lang(ev->stream, i, lang))
             eina_strlcpy(buf, lang, sizeof(buf));
           DBG("  AUDIO %i = %s", i, buf);
        }
      for (i = 0; i <= max_spu; i++)
        {
           char lang[XINE_LANG_MAX + 1];
           char buf[128] = "NONE";

           lang[0] = 0;
           if (xine_get_spu_lang(ev->stream, i, lang))
             eina_strlcpy(buf, lang, sizeof(buf));
           DBG("  SPU %i = %s", i, buf);
        }
   }
}
#endif
diff --git a/src/modules/emotion/xine/emotion_xine.h b/src/modules/emotion/xine/emotion_xine.h
new file mode 100644
index 0000000000..c1cae2e5a0
--- /dev/null
+++ b/src/modules/emotion/xine/emotion_xine.h
@@ -0,0 +1,98 @@
+#ifndef EMOTION_XINE_H
+#define EMOTION_XINE_H
+
+#include <xine.h>
+#include <xine/xine_plugin.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+typedef struct _Emotion_Xine_Video Emotion_Xine_Video;
+typedef struct _Emotion_Xine_Video_Frame Emotion_Xine_Video_Frame;
+typedef struct _Emotion_Xine_Event Emotion_Xine_Event;
+
/* Per-Evas-object backend state shared between the mainloop, the xine
 * decoder callbacks and the helper threads. Fields touched from more than
 * one thread are marked volatile (legacy style; no atomics were used). */
struct _Emotion_Xine_Video
{
   xine_t *decoder;                   /* xine engine instance */
   xine_video_port_t *video;          /* video output port (our vo driver) */
   xine_audio_port_t *audio;          /* audio output port */
   xine_stream_t *stream;             /* currently open stream */
   xine_event_queue_t *queue;         /* stream event queue */
   volatile double len;               /* length in seconds (or 1.0 if unknown) */
   volatile double pos;               /* position in seconds (or 0..1 fraction) */
   volatile double last_pos;          /* last position reported to emotion */
   volatile double volume;
   volatile double buffer;            /* buffering progress, percent */
   double fps;
   double ratio;
   int w, h;                          /* frame size in pixels */
   Evas_Object *obj;                  /* owning emotion object */
   volatile Emotion_Xine_Video_Frame *cur_frame; /* frame being shown */
   volatile int get_poslen;           /* request flag for _em_get_pos_len_th */
   volatile int spu_channel;
   volatile int audio_channel;
   volatile int video_channel;
   volatile int fq;                   /* frames queued in the frame pipe */
   Emotion_Vis vis;
   int fd_read;                       /* frame pipe (decoder -> mainloop) */
   int fd_write;
   Ecore_Fd_Handler *fd_handler;
   int fd_ev_read;                    /* event pipe (decoder -> mainloop) */
   int fd_ev_write;
   Ecore_Fd_Handler *fd_ev_handler;
   Ecore_Animator *anim;              /* poller used for audio-only streams */
   unsigned char play : 1;
   unsigned char just_loaded : 1;
   unsigned char video_mute : 1;
   unsigned char audio_mute : 1;
   unsigned char spu_mute : 1;
   Eina_Bool opt_no_video : 1;        /* module option: disable video */
   Eina_Bool opt_no_audio : 1;        /* module option: disable audio */
   volatile unsigned char delete_me : 1;  /* teardown requested */
   volatile unsigned char no_time : 1;    /* stream has no usable timeline */
   volatile unsigned char opening : 1;
   volatile unsigned char closing : 1;
   volatile unsigned char have_vo : 1;    /* vo driver currently attached */
   volatile unsigned char play_ok : 1;    /* last async command completed */

   /* worker thread that services blocking xine_get_pos_length() calls */
   pthread_t get_pos_len_th;
   pthread_cond_t get_pos_len_cond;
   pthread_mutex_t get_pos_len_mutex;

   /* slave thread that executes open/play/seek commands asynchronously */
   pthread_t slave_th;
   int fd_slave_read;
   int fd_slave_write;

   unsigned char get_pos_thread_deleted : 1; /* worker has exited */
};
+
/* One decoded picture as passed over the frame pipe to the mainloop.
 * Either the y/u/v planes (YV12) or bgra_data (BGRA) are set, never both. */
struct _Emotion_Xine_Video_Frame
{
   int w, h;                      /* pixel dimensions */
   double ratio;                  /* display aspect ratio */
   Emotion_Format format;
   unsigned char *y, *u, *v;      /* YV12 planes (NULL for BGRA frames) */
   unsigned char *bgra_data;      /* packed BGRA (NULL for YV12 frames) */
   int y_stride, u_stride, v_stride; /* bytes per row of each plane */
   Evas_Object *obj;
   double timestamp;              /* presentation time in seconds */
   void (*done_func)(void *data); /* called when the mainloop is done with it */
   void *done_data;
   void *frame;                   /* backing vo_frame, owned by xine */
};
+
/* Event record sent over the event pipe to the mainloop. mtype != 0 marks
 * an internal module event (init/open/seek/... done); mtype == 0 means a
 * raw xine event whose payload is carried in xine_event. */
struct _Emotion_Xine_Event
{
   int type;        /* xine event type (when mtype == 0) */
   void *xine_event; /* malloc'd copy of the xine event payload, or NULL */
   int mtype;       /* module event code; 0 for pass-through xine events */
};
+
/* Shared eina log domain for this backend (registered in module_open). */
extern int _emotion_xine_log_domain;
#define DBG(...) EINA_LOG_DOM_DBG(_emotion_xine_log_domain, __VA_ARGS__)
#define INF(...) EINA_LOG_DOM_INFO(_emotion_xine_log_domain, __VA_ARGS__)
#define WRN(...) EINA_LOG_DOM_WARN(_emotion_xine_log_domain, __VA_ARGS__)
#define ERR(...) EINA_LOG_DOM_ERR(_emotion_xine_log_domain, __VA_ARGS__)
#define CRITICAL(...) EINA_LOG_DOM_CRIT(_emotion_xine_log_domain, __VA_ARGS__)
+
+#endif
diff --git a/src/modules/emotion/xine/emotion_xine_vo_out.c b/src/modules/emotion/xine/emotion_xine_vo_out.c
new file mode 100644
index 0000000000..e6370279fc
--- /dev/null
+++ b/src/modules/emotion/xine/emotion_xine_vo_out.c
@@ -0,0 +1,767 @@
+/***************************************************************************/
+/*** emotion xine display engine ***/
+/***************************************************************************/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <Eina.h>
+#include <Evas.h>
+#include <Ecore.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+#include "emotion_xine.h"
+
+#include <xine.h>
+#include <xine/video_out.h>
+#include <xine/xine_internal.h>
+#include <xine/xineutils.h>
+#include <xine/vo_scale.h>
+
/* Blend src over dst with a 4-bit opacity o (0x0..0xf): the result is the
 * opacity-weighted average of the two bytes. All arguments are fully
 * parenthesized so expressions like BLEND_BYTE(a, b, x + y) expand
 * correctly (previously `o` was left bare, miscompiling such calls). */
#define BLEND_BYTE(dst, src, o) (((src) * (o) + ((dst) * (0xf - (o)))) / 0xf)
+
+/***************************************************************************/
+typedef struct _Emotion_Frame Emotion_Frame;
+typedef struct _Emotion_Driver Emotion_Driver;
+typedef struct _Emotion_Class Emotion_Class;
+typedef struct _Emotion_Lut Emotion_Lut;
+
/* Our vo_frame wrapper: the xine frame header plus the Emotion-side view
 * of the same pixels that gets sent over the frame pipe. */
struct _Emotion_Frame
{
   vo_frame_t vo_frame;  /* must stay first: cast to/from vo_frame_t */
   int width;
   int height;
   double ratio;
   int format;           /* XINE_IMGFMT_YV12 or XINE_IMGFMT_YUY2 */
   xine_t *xine;

   Emotion_Xine_Video_Frame frame; /* view handed to the mainloop */
   unsigned char in_use : 1;       /* queued to the mainloop, not yet freed */
};
+
/* Per-stream vo driver instance created by _emotion_open(). */
struct _Emotion_Driver
{
   vo_driver_t vo_driver; /* must stay first: cast to/from vo_driver_t */
   config_values_t *config;
   int ratio;             /* current XINE_VO_ASPECT_* value */
   xine_t *xine;
   Emotion_Xine_Video *ev; /* owning backend state (the "visual") */
};
+
/* Driver class registered with xine; factory for _Emotion_Driver. */
struct _Emotion_Class
{
   video_driver_class_t driver_class; /* must stay first */
   config_values_t *config;
   xine_t *xine;
};
+
/* One overlay palette entry viewed as packed bytes (cb, cr, y, padding).
 * Layout must match xine's CLUT word order, hence the packed attribute. */
struct _Emotion_Lut
{
   uint8_t cb : 8;
   uint8_t cr : 8;
   uint8_t y : 8;
   uint8_t foo : 8; /* unused padding byte */
} __attribute__ ((packed));
+
+typedef void (*done_func_type)(void *data);
+
+/***************************************************************************/
+static void *_emotion_class_init (xine_t *xine, void *visual);
+static void _emotion_class_dispose (video_driver_class_t *driver_class);
+static char *_emotion_class_identifier_get (video_driver_class_t *driver_class);
+static char *_emotion_class_description_get (video_driver_class_t *driver_class);
+
+static vo_driver_t *_emotion_open (video_driver_class_t *driver_class, const void *visual);
+static void _emotion_dispose (vo_driver_t *vo_driver);
+
+static int _emotion_redraw (vo_driver_t *vo_driver);
+
+static uint32_t _emotion_capabilities_get (vo_driver_t *vo_driver);
+static int _emotion_gui_data_exchange (vo_driver_t *vo_driver, int data_type, void *data);
+
+static int _emotion_property_set (vo_driver_t *vo_driver, int property, int value);
+static int _emotion_property_get (vo_driver_t *vo_driver, int property);
+static void _emotion_property_min_max_get (vo_driver_t *vo_driver, int property, int *min, int *max);
+
+static vo_frame_t *_emotion_frame_alloc (vo_driver_t *vo_driver);
+static void _emotion_frame_dispose (vo_frame_t *vo_frame);
+static void _emotion_frame_format_update (vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags);
+static void _emotion_frame_display (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_frame_field (vo_frame_t *vo_frame, int which_field);
+
+static void _emotion_frame_data_free (Emotion_Frame *fr);
+static void _emotion_frame_data_unlock (Emotion_Frame *fr);
+
+static void _emotion_overlay_begin (vo_driver_t *vo_driver, vo_frame_t *vo_frame, int changed);
+static void _emotion_overlay_end (vo_driver_t *vo_driver, vo_frame_t *vo_frame);
+static void _emotion_overlay_blend (vo_driver_t *vo_driver, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay);
+
+static void _emotion_overlay_mem_blend_8 (uint8_t *mem, uint8_t val, uint8_t o, size_t sz);
+static void _emotion_overlay_blend_yuv (uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3]);
+
+static void _emotion_yuy2_to_bgra32 (int width, int height, unsigned char *src, unsigned char *dst);
+
+/***************************************************************************/
/* Static capabilities record for the plugin catalog below. */
static vo_info_t _emotion_info =
{
   1,                    /* priority */
   XINE_VISUAL_TYPE_NONE /* visual type */
};
+
/* Plugin catalog xine scans at load time. The driver is registered under
 * two video-out API versions (21 and 22) — presumably to load against both
 * xine-lib 1.1 and 1.2 series; TODO confirm against the xine headers. */
plugin_info_t emotion_xine_plugin_info[] =
{
   { PLUGIN_VIDEO_OUT, 21, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
   { PLUGIN_VIDEO_OUT, 22, "emotion", XINE_VERSION_CODE, &_emotion_info, _emotion_class_init },
   { PLUGIN_NONE, 0, "", 0, NULL, NULL }
};
+
+/***************************************************************************/
+static void *
+_emotion_class_init(xine_t *xine, void *visual EINA_UNUSED)
+{
+ Emotion_Class *cl;
+
+// DBG("");
+ cl = (Emotion_Class *) malloc(sizeof(Emotion_Class));
+ if (!cl) return NULL;
+ cl->driver_class.open_plugin = _emotion_open;
+#if XINE_MAJOR_VERSION < 1 || (XINE_MAJOR_VERSION == 1 && XINE_MINOR_VERSION < 2)
+ cl->driver_class.get_identifier = _emotion_class_identifier_get;
+ cl->driver_class.get_description = _emotion_class_description_get;
+#else
+ cl->driver_class.identifier = _emotion_class_identifier_get(NULL);
+ cl->driver_class.description = _emotion_class_description_get(NULL);
+#endif
+ cl->driver_class.dispose = _emotion_class_dispose;
+ cl->config = xine->config;
+ cl->xine = xine;
+
+ return cl;
+}
+
+static void
+_emotion_class_dispose(video_driver_class_t *driver_class)
+{
+ Emotion_Class *cl;
+
+ cl = (Emotion_Class *)driver_class;
+ free(cl);
+}
+
static char *
_emotion_class_identifier_get(video_driver_class_t *driver_class EINA_UNUSED)
{
   /* Short id xine uses to refer to this vo driver. NOTE(review): the
    * pre-1.2 xine API wants a non-const char *, hence the literal is
    * returned through a non-const pointer on purpose. */
   return "emotion";
}
+
static char *
_emotion_class_description_get(video_driver_class_t *driver_class EINA_UNUSED)
{
   /* Human-readable driver description shown by xine tooling. */
   return "Emotion xine video output plugin";
}
+
+/***************************************************************************/
+static vo_driver_t *
+_emotion_open(video_driver_class_t *driver_class, const void *visual)
+{
+ Emotion_Class *cl;
+ Emotion_Driver *dv;
+
+ cl = (Emotion_Class *)driver_class;
+ /* visual here is the data ptr passed to xine_open_video_driver() */
+// DBG("");
+ dv = (Emotion_Driver *)malloc(sizeof(Emotion_Driver));
+ if (!dv) return NULL;
+
+ dv->config = cl->config;
+ dv->xine = cl->xine;
+ dv->ratio = XINE_VO_ASPECT_AUTO;
+ dv->vo_driver.get_capabilities = _emotion_capabilities_get;
+ dv->vo_driver.alloc_frame = _emotion_frame_alloc;
+ dv->vo_driver.update_frame_format = _emotion_frame_format_update;
+ dv->vo_driver.overlay_begin = _emotion_overlay_begin;
+ dv->vo_driver.overlay_blend = _emotion_overlay_blend;
+ dv->vo_driver.overlay_end = _emotion_overlay_end;
+ dv->vo_driver.display_frame = _emotion_frame_display;
+ dv->vo_driver.get_property = _emotion_property_get;
+ dv->vo_driver.set_property = _emotion_property_set;
+ dv->vo_driver.get_property_min_max = _emotion_property_min_max_get;
+ dv->vo_driver.gui_data_exchange = _emotion_gui_data_exchange;
+ dv->vo_driver.dispose = _emotion_dispose;
+ dv->vo_driver.redraw_needed = _emotion_redraw;
+ dv->ev = (Emotion_Xine_Video *)visual;
+ dv->ev->have_vo = 1;
+ DBG("vo_driver = %p", &dv->vo_driver);
+ return &dv->vo_driver;
+}
+
+static void
+_emotion_dispose(vo_driver_t *vo_driver)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+ dv->ev->have_vo = 0;
+ DBG("vo_driver = %p", dv);
+ free(dv);
+}
+
+/***************************************************************************/
static int
_emotion_redraw(vo_driver_t *vo_driver EINA_UNUSED)
{
   /* Nothing to redraw: frames are pushed to the mainloop as they arrive. */
// DBG("");
   return 0;
}
+
+/***************************************************************************/
static uint32_t
_emotion_capabilities_get(vo_driver_t *vo_driver EINA_UNUSED)
{
   /* Accept planar YV12 and packed YUY2 frames from decoders. */
// DBG("");
   return VO_CAP_YV12 | VO_CAP_YUY2;
}
+
+/***************************************************************************/
+static int
+_emotion_gui_data_exchange(vo_driver_t *vo_driver EINA_UNUSED, int data_type, void *data EINA_UNUSED)
+{
+// DBG("");
+ switch (data_type)
+ {
+ case XINE_GUI_SEND_COMPLETION_EVENT:
+ break;
+ case XINE_GUI_SEND_DRAWABLE_CHANGED:
+ break;
+ case XINE_GUI_SEND_EXPOSE_EVENT:
+ break;
+ case XINE_GUI_SEND_TRANSLATE_GUI_TO_VIDEO:
+ break;
+ case XINE_GUI_SEND_VIDEOWIN_VISIBLE:
+ break;
+ case XINE_GUI_SEND_SELECT_VISUAL:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/***************************************************************************/
+static int
+_emotion_property_set(vo_driver_t *vo_driver, int property, int value)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// DBG("");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ if (value >= XINE_VO_ASPECT_NUM_RATIOS)
+ value = XINE_VO_ASPECT_AUTO;
+// DBG("DRIVER RATIO SET %i!", value);
+ dv->ratio = value;
+ break;
+ default:
+ break;
+ }
+ return value;
+}
+
+static int
+_emotion_property_get(vo_driver_t *vo_driver, int property)
+{
+ Emotion_Driver *dv;
+
+ dv = (Emotion_Driver *)vo_driver;
+// DBG("");
+ switch (property)
+ {
+ case VO_PROP_ASPECT_RATIO:
+ return dv->ratio;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void
+_emotion_property_min_max_get(vo_driver_t *vo_driver EINA_UNUSED, int property EINA_UNUSED, int *min, int *max)
+{
+// DBG("");
+ *min = 0;
+ *max = 0;
+}
+
+/***************************************************************************/
+static vo_frame_t *
+_emotion_frame_alloc(vo_driver_t *vo_driver EINA_UNUSED)
+{
+ Emotion_Frame *fr;
+
+// DBG("");
+ fr = (Emotion_Frame *)calloc(1, sizeof(Emotion_Frame));
+ if (!fr) return NULL;
+
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+
+ fr->vo_frame.proc_slice = NULL;
+ fr->vo_frame.proc_frame = NULL;
+ fr->vo_frame.field = _emotion_frame_field;
+ fr->vo_frame.dispose = _emotion_frame_dispose;
+ fr->vo_frame.driver = vo_driver;
+
+ return (vo_frame_t *)fr;
+}
+
+static void
+_emotion_frame_dispose(vo_frame_t *vo_frame)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// DBG("");
+ _emotion_frame_data_free(fr);
+ free(fr);
+}
+
/* (Re)allocate a frame's pixel storage when xine announces a new size or
 * format. YV12 frames get one allocation holding all three planes; YUY2
 * frames get a packed source buffer plus a BGRA destination buffer that
 * _emotion_frame_display() converts into.
 * NOTE(review): the malloc results are only validated at the end — the
 * base[1]/base[2] plane pointers are computed from base[0] before the NULL
 * check, which is pointer arithmetic on NULL if the allocation failed;
 * worth tightening if this ever gets touched again. */
static void
_emotion_frame_format_update(vo_driver_t *vo_driver, vo_frame_t *vo_frame, uint32_t width, uint32_t height, double ratio, int format, int flags EINA_UNUSED)
{
   Emotion_Driver *dv;
   Emotion_Frame *fr;

   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;

   /* only reallocate when geometry/format changed or nothing is allocated */
   if ((fr->width != (int)width) || (fr->height != (int)height) ||
       (fr->format != format) || (!fr->vo_frame.base[0]))
     {
// DBG("");
        _emotion_frame_data_free(fr);

        fr->width  = width;
        fr->height = height;
        fr->format = format;

        switch (format)
          {
           case XINE_IMGFMT_YV12:
             {
                int y_size, uv_size;

                /* rows padded to xine's expected alignment; u/v are
                 * quarter-size planes sharing one allocation with y */
                fr->frame.format = EMOTION_FORMAT_YV12;
                fr->vo_frame.pitches[0] = 8 * ((width + 7) / 8);
                fr->vo_frame.pitches[1] = 8 * ((width + 15) / 16);
                fr->vo_frame.pitches[2] = 8 * ((width + 15) / 16);

                y_size = fr->vo_frame.pitches[0] * height;
                uv_size = fr->vo_frame.pitches[1] * ((height + 1) / 2);

                /* base[2] (v) sits right after y; base[1] (u) after v */
                fr->vo_frame.base[0] = malloc(y_size + (2 * uv_size));
                fr->vo_frame.base[1] = fr->vo_frame.base[0] + y_size + uv_size;
                fr->vo_frame.base[2] = fr->vo_frame.base[0] + y_size;
                fr->frame.w = fr->width;
                fr->frame.h = fr->height;
                fr->frame.ratio = fr->vo_frame.ratio;
                fr->frame.y = fr->vo_frame.base[0];
                fr->frame.u = fr->vo_frame.base[1];
                fr->frame.v = fr->vo_frame.base[2];
                fr->frame.bgra_data = NULL;
                fr->frame.y_stride = fr->vo_frame.pitches[0];
                fr->frame.u_stride = fr->vo_frame.pitches[1];
                fr->frame.v_stride = fr->vo_frame.pitches[2];
                fr->frame.obj = dv->ev->obj;
             }
             break;
           case XINE_IMGFMT_YUY2:
             {
                /* packed 2 bytes/pixel source + 4 bytes/pixel BGRA target */
                fr->frame.format = EMOTION_FORMAT_BGRA;
                fr->vo_frame.pitches[0] = 8 * ((width + 3) / 4);
                fr->vo_frame.pitches[1] = 0;
                fr->vo_frame.pitches[2] = 0;

                fr->vo_frame.base[0] = malloc(fr->vo_frame.pitches[0] * height);
                fr->vo_frame.base[1] = NULL;
                fr->vo_frame.base[2] = NULL;

                fr->frame.w = fr->width;
                fr->frame.h = fr->height;
                fr->frame.ratio = fr->vo_frame.ratio;
                fr->frame.y = NULL;
                fr->frame.u = NULL;
                fr->frame.v = NULL;
                fr->frame.bgra_data = malloc(fr->width * fr->height * 4);
                fr->frame.y_stride = 0;
                fr->frame.u_stride = 0;
                fr->frame.v_stride = 0;
                fr->frame.obj = dv->ev->obj;
             }
             break;
           default:
             break;
          }
        /* if any needed buffer failed to allocate, drop them all so the
         * frame is consistently "empty" rather than half-allocated */
        if (((format == XINE_IMGFMT_YV12)
             && ((!fr->vo_frame.base[0])
                 || (!fr->vo_frame.base[1])
                 || (!fr->vo_frame.base[2])))
            || ((format == XINE_IMGFMT_YUY2)
                && ((!fr->vo_frame.base[0])
                    || (!fr->frame.bgra_data))))
          {
             _emotion_frame_data_free(fr);
          }
     }
   fr->frame.ratio = fr->vo_frame.ratio;
   fr->ratio = ratio;
}
+
/* xine display hook: called on the decoder thread when a frame is due.
 * Converts YUY2 to BGRA in place if needed, then writes a pointer to the
 * frame's Emotion view into the frame pipe for the mainloop to pick up.
 * The vo_frame is NOT released here — the mainloop does that through
 * done_func (_emotion_frame_data_unlock) once it has consumed the pixels. */
static void
_emotion_frame_display(vo_driver_t *vo_driver, vo_frame_t *vo_frame)
{
   Emotion_Driver *dv;
   Emotion_Frame *fr;

   dv = (Emotion_Driver *)vo_driver;
   fr = (Emotion_Frame *)vo_frame;
// DBG("fq %i %p", dv->ev->fq, dv->ev);
// if my frame queue is too deep ( > 4 frames) simply block and wait for them
// to drain
// while (dv->ev->fq > 4) usleep(1);
   if (dv->ev)
     {
        void *buf;

        /* stream teardown in progress: drop the frame silently */
        if (dv->ev->closing) return;
        if (fr->format == XINE_IMGFMT_YUY2)
          {
             _emotion_yuy2_to_bgra32(fr->width, fr->height, fr->vo_frame.base[0], fr->frame.bgra_data);
          }

        buf = &(fr->frame);
        /* vpts ticks at 90kHz; convert to seconds */
        fr->frame.timestamp = (double)fr->vo_frame.vpts / 90000.0;
        fr->frame.done_func = (done_func_type)_emotion_frame_data_unlock;
        fr->frame.done_data = fr;
// DBG("FRAME FOR %p", dv->ev);
        if (write(dv->ev->fd_write, &buf, sizeof(void *)) < 0) perror("write");
// DBG("-- FRAME DEC %p == %i", fr->frame.obj, ret);
        fr->in_use = 1;
        dv->ev->fq++;
     }
   /* hmm - must find a way to sanely copy data out... FIXME problem */
// fr->vo_frame.free(&fr->vo_frame);
}
+
static void
_emotion_frame_field(vo_frame_t *vo_frame EINA_UNUSED, int which_field EINA_UNUSED)
{
   /* Field-based (interlaced) rendering is not supported; intentionally empty. */
// DBG("");
}
+
+/***************************************************************************/
+static void
+_emotion_frame_data_free(Emotion_Frame *fr)
+{
+ if (fr->vo_frame.base[0])
+ {
+ free(fr->vo_frame.base[0]);
+ fr->vo_frame.base[0] = NULL;
+ fr->vo_frame.base[1] = NULL;
+ fr->vo_frame.base[2] = NULL;
+ fr->frame.y = fr->vo_frame.base[0];
+ fr->frame.u = fr->vo_frame.base[1];
+ fr->frame.v = fr->vo_frame.base[2];
+ }
+ if (fr->frame.bgra_data)
+ {
+ free(fr->frame.bgra_data);
+ fr->frame.bgra_data = NULL;
+ }
+}
+
+static void
+_emotion_frame_data_unlock(Emotion_Frame *fr)
+{
+// DBG("");
+ if (fr->in_use)
+ {
+ fr->vo_frame.free(&fr->vo_frame);
+ fr->in_use = 0;
+ }
+}
+
+/***************************************************************************/
static void
_emotion_overlay_begin(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame EINA_UNUSED, int changed EINA_UNUSED)
{
   /* No per-pass overlay setup needed; blending happens in overlay_blend. */
// DBG("");
}
+
static void
_emotion_overlay_end(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame EINA_UNUSED)
{
   /* No per-pass overlay teardown needed; intentionally empty. */
// DBG("");
}
+
+static void
+_emotion_overlay_blend(vo_driver_t *vo_driver EINA_UNUSED, vo_frame_t *vo_frame, vo_overlay_t *vo_overlay EINA_UNUSED)
+{
+ Emotion_Frame *fr;
+
+ fr = (Emotion_Frame *)vo_frame;
+// DBG("");
+ _emotion_overlay_blend_yuv(fr->vo_frame.base, vo_overlay,
+ fr->width, fr->height,
+ fr->vo_frame.pitches);
+}
+
/* Blend `val` over each of the `sz` bytes at `mem` with 4-bit opacity `o`. */
static void
_emotion_overlay_mem_blend_8(uint8_t *mem, uint8_t val, uint8_t o, size_t sz)
{
   size_t i;

   for (i = 0; i < sz; i++)
     mem[i] = BLEND_BYTE(mem[i], val, o);
}
+
/* Blend an RLE-encoded overlay into a YV12 frame. The overlay has two
 * palettes: the normal one (color/trans) and a highlight one
 * (hili_color/hili_trans) used inside the clip rectangle
 * [hili_left..hili_right] x [hili_top..hili_bottom] (DVD menu buttons).
 * Each RLE run is split at the clip-band boundaries and blended with the
 * palette of the band it falls in; chroma is touched on odd rows only
 * since the u/v planes are vertically subsampled by 2.
 * NOTE(review): dst_cr is derived from dst_base[2] but strides with
 * dst_pitches[1] (and dst_cb the reverse) — this matches only because both
 * chroma pitches are allocated equal in _emotion_frame_format_update;
 * confirm against xine vo conventions before changing either. */
static void _emotion_overlay_blend_yuv(uint8_t *dst_base[3], vo_overlay_t * img_overl, int dst_width, int dst_height, int dst_pitches[3])
{
   Emotion_Lut *my_clut;
   uint8_t *my_trans;
   int src_width;
   int src_height;
   rle_elem_t *rle;
   rle_elem_t *rle_limit;
   int x_off;
   int y_off;
   int ymask, xmask;
   int rle_this_bite;
   int rle_remainder;
   int rlelen;
   int x, y;
   int hili_right;
   uint8_t clr = 0;

   src_width = img_overl->width;
   src_height = img_overl->height;
   rle = img_overl->rle;
   rle_limit = rle + img_overl->num_rle;
   x_off = img_overl->x;
   y_off = img_overl->y;

   if (!rle) return;

   uint8_t *dst_y = dst_base[0] + dst_pitches[0] * y_off + x_off;
   uint8_t *dst_cr = dst_base[2] + (y_off / 2) * dst_pitches[1] + (x_off / 2) + 1;
   uint8_t *dst_cb = dst_base[1] + (y_off / 2) * dst_pitches[2] + (x_off / 2) + 1;
   my_clut = (Emotion_Lut *) img_overl->hili_color;
   my_trans = img_overl->hili_trans;

   /* avoid wraping overlay if drawing to small image */
   if( (x_off + img_overl->hili_right) < dst_width )
     hili_right = img_overl->hili_right;
   else
     hili_right = dst_width - 1 - x_off;

   /* avoid buffer overflow */
   if( (src_height + y_off) >= dst_height )
     src_height = dst_height - 1 - y_off;

   rlelen=rle_remainder=0;
   for (y = 0; y < src_height; y++)
     {
        /* ymask != 0: this row is entirely outside the highlight band */
        ymask = ((img_overl->hili_top > y) || (img_overl->hili_bottom < y));
        xmask = 0;

        for (x = 0; x < src_width;)
          {
             uint16_t o;

             /* pull the next RLE run when the previous one is exhausted */
             if (rlelen == 0)
               {
                  rle_remainder = rlelen = rle->len;
                  clr = rle->color;
                  rle++;
               }
             if (rle_remainder == 0)
               {
                  rle_remainder = rlelen;
               }
             if ((rle_remainder + x) > src_width)
               {
                  /* Do something for long rlelengths */
                  rle_remainder = src_width - x;
               }

             if (ymask == 0)
               {
                  if (x <= img_overl->hili_left)
                    {
                       /* Starts outside clip area */
                       if ((x + rle_remainder - 1) > img_overl->hili_left )
                         {
                            /* Cutting needed, starts outside, ends inside */
                            rle_this_bite = (img_overl->hili_left - x + 1);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
                  else if (x < hili_right)
                    {
                       /* Starts inside clip area */
                       if ((x + rle_remainder) > hili_right )
                         {
                            /* Cutting needed, starts inside, ends outside */
                            rle_this_bite = (hili_right - x);
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                       else
                         {
                            /* no cutting needed, starts inside, ends inside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->hili_color;
                            my_trans = img_overl->hili_trans;
                            xmask++;
                         }
                    }
                  else if (x >= hili_right)
                    {
                       /* Starts outside clip area, ends outsite clip area */
                       if ((x + rle_remainder ) > src_width )
                         {
                            /* Cutting needed, starts outside, ends at right edge */
                            /* It should never reach here due to the earlier test of src_width */
                            rle_this_bite = (src_width - x );
                            rle_remainder -= rle_this_bite;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                       else
                         {
                            /* no cutting needed, starts outside, ends outside */
                            rle_this_bite = rle_remainder;
                            rle_remainder = 0;
                            rlelen -= rle_this_bite;
                            my_clut = (Emotion_Lut *) img_overl->color;
                            my_trans = img_overl->trans;
                            xmask = 0;
                         }
                    }
               }
             else
               {
                  /* Outside clip are due to y */
                  /* no cutting needed, starts outside, ends outside */
                  rle_this_bite = rle_remainder;
                  rle_remainder = 0;
                  rlelen -= rle_this_bite;
                  my_clut = (Emotion_Lut *) img_overl->color;
                  my_trans = img_overl->trans;
                  xmask = 0;
               }
             o = my_trans[clr];
             if (o)
               {
                  if (o >= 15)
                    {
                       /* fully opaque run: plain memset is cheaper than blending */
                       memset(dst_y + x, my_clut[clr].y, rle_this_bite);
                       if (y & 1)
                         {
                            memset(dst_cr + (x >> 1), my_clut[clr].cr, (rle_this_bite+1) >> 1);
                            memset(dst_cb + (x >> 1), my_clut[clr].cb, (rle_this_bite+1) >> 1);
                         }
                    }
                  else
                    {
                       _emotion_overlay_mem_blend_8(dst_y + x, my_clut[clr].y, o, rle_this_bite);
                       if (y & 1)
                         {
                            /* Blending cr and cb should use a different function, with pre -128 to each sample */
                            _emotion_overlay_mem_blend_8(dst_cr + (x >> 1), my_clut[clr].cr, o, (rle_this_bite+1) >> 1);
                            _emotion_overlay_mem_blend_8(dst_cb + (x >> 1), my_clut[clr].cb, o, (rle_this_bite+1) >> 1);
                         }
                    }
               }
             x += rle_this_bite;
             if (rle >= rle_limit)
               {
                  break;
               }
          }
        if (rle >= rle_limit)
          {
             break;
          }

        dst_y += dst_pitches[0];

        /* chroma planes advance every second luma row (4:2:0 subsampling) */
        if (y & 1)
          {
             dst_cr += dst_pitches[2];
             dst_cb += dst_pitches[1];
          }
     }
}
+
+//TODO: Really need to improve this converter!
+#define LIMIT(x) ((x) > 0xff ? 0xff : ((x) < 0 ? 0 : (x)))
+
+/* Convert a packed YUY2 (YUYV 4:2:2) buffer to BGRA32.
+ * Each 4-byte YUY2 macropixel (Y0 U Y1 V) covers two horizontally
+ * adjacent pixels that share one chroma pair, so u/v advance once per
+ * two pixels.  Coefficients are the usual BT.601 limited-range
+ * (16..235) to full-range expansion.
+ * NOTE(review): alpha is written as 0, as in the original converter —
+ * confirm callers treat this surface as opaque before relying on it.
+ * YUY2 requires an even width; behavior for odd widths is undefined. */
+static void
+_emotion_yuy2_to_bgra32(int width, int height, unsigned char *src, unsigned char *dst)
+{
+   int row, col;
+   unsigned char *y, *u, *v;
+
+   y = src;
+   u = src + 1;
+   v = src + 3;
+   /* Iterate in scanline order: rows (height) outside, pixels (width)
+    * inside.  The previous version had the bounds swapped (width outer,
+    * height inner); since the walk is linear that produced identical
+    * output for even heights, but drifted the chroma phase by one
+    * sample per column whenever the inner count was odd. */
+   for (row = 0; row < height; row++)
+     {
+        for (col = 0; col < width; col++)
+          {
+             *dst++ = LIMIT(1.164 * (*y - 16) + 2.018 * (*u - 128));
+             *dst++ = LIMIT(1.164 * (*y - 16) - 0.813 * (*v - 128) - 0.391 * (*u - 128));
+             *dst++ = LIMIT(1.164 * (*y - 16) + 1.596 * (*v - 128));
+             *dst++ = 0;
+
+             y += 2;
+             /* chroma is shared by each horizontal pixel pair */
+             if (col % 2 == 1)
+               {
+                  u += 4;
+                  v += 4;
+               }
+          }
+     }
+}
diff --git a/src/tests/emotion/data/bpause.png b/src/tests/emotion/data/bpause.png
new file mode 100644
index 0000000000..e827545157
--- /dev/null
+++ b/src/tests/emotion/data/bpause.png
Binary files differ
diff --git a/src/tests/emotion/data/bplay.png b/src/tests/emotion/data/bplay.png
new file mode 100644
index 0000000000..91644c6ef1
--- /dev/null
+++ b/src/tests/emotion/data/bplay.png
Binary files differ
diff --git a/src/tests/emotion/data/bstop.png b/src/tests/emotion/data/bstop.png
new file mode 100644
index 0000000000..a7be66d288
--- /dev/null
+++ b/src/tests/emotion/data/bstop.png
Binary files differ
diff --git a/src/tests/emotion/data/e_logo.png b/src/tests/emotion/data/e_logo.png
new file mode 100644
index 0000000000..0b79b69ab3
--- /dev/null
+++ b/src/tests/emotion/data/e_logo.png
Binary files differ
diff --git a/src/tests/emotion/data/fr1.png b/src/tests/emotion/data/fr1.png
new file mode 100644
index 0000000000..11670d79a5
--- /dev/null
+++ b/src/tests/emotion/data/fr1.png
Binary files differ
diff --git a/src/tests/emotion/data/fr2.png b/src/tests/emotion/data/fr2.png
new file mode 100644
index 0000000000..0b14911cfb
--- /dev/null
+++ b/src/tests/emotion/data/fr2.png
Binary files differ
diff --git a/src/tests/emotion/data/fr3.png b/src/tests/emotion/data/fr3.png
new file mode 100644
index 0000000000..8e524fd1cf
--- /dev/null
+++ b/src/tests/emotion/data/fr3.png
Binary files differ
diff --git a/src/tests/emotion/data/fr4.png b/src/tests/emotion/data/fr4.png
new file mode 100644
index 0000000000..fcbb28857b
--- /dev/null
+++ b/src/tests/emotion/data/fr4.png
Binary files differ
diff --git a/src/tests/emotion/data/fr5.png b/src/tests/emotion/data/fr5.png
new file mode 100644
index 0000000000..c4819d2777
--- /dev/null
+++ b/src/tests/emotion/data/fr5.png
Binary files differ
diff --git a/src/tests/emotion/data/fr6.png b/src/tests/emotion/data/fr6.png
new file mode 100644
index 0000000000..e674bf4d9f
--- /dev/null
+++ b/src/tests/emotion/data/fr6.png
Binary files differ
diff --git a/src/tests/emotion/data/fr7.png b/src/tests/emotion/data/fr7.png
new file mode 100644
index 0000000000..10476aa5ad
--- /dev/null
+++ b/src/tests/emotion/data/fr7.png
Binary files differ
diff --git a/src/tests/emotion/data/h_slider.png b/src/tests/emotion/data/h_slider.png
new file mode 100644
index 0000000000..688565aed2
--- /dev/null
+++ b/src/tests/emotion/data/h_slider.png
Binary files differ
diff --git a/src/tests/emotion/data/icon.edc b/src/tests/emotion/data/icon.edc
new file mode 100644
index 0000000000..0a3ea3dc7a
--- /dev/null
+++ b/src/tests/emotion/data/icon.edc
@@ -0,0 +1,14 @@
+collections {
+ images.image: "e_logo.png" COMP;
+ group { name: "icon";
+ min: 20 10;
+ parts {
+ part { name: "icon"; type: IMAGE;
+ mouse_events: 0;
+ description { state: "default" 0.0;
+ image.normal: "e_logo.png";
+ }
+ }
+ }
+ }
+}
diff --git a/src/tests/emotion/data/knob.png b/src/tests/emotion/data/knob.png
new file mode 100644
index 0000000000..f39f738a5c
--- /dev/null
+++ b/src/tests/emotion/data/knob.png
Binary files differ
diff --git a/src/tests/emotion/data/orb.png b/src/tests/emotion/data/orb.png
new file mode 100644
index 0000000000..9ab08a3f85
--- /dev/null
+++ b/src/tests/emotion/data/orb.png
Binary files differ
diff --git a/src/tests/emotion/data/pnl.png b/src/tests/emotion/data/pnl.png
new file mode 100644
index 0000000000..28f1915de5
--- /dev/null
+++ b/src/tests/emotion/data/pnl.png
Binary files differ
diff --git a/src/tests/emotion/data/sl.png b/src/tests/emotion/data/sl.png
new file mode 100644
index 0000000000..119d11f91b
--- /dev/null
+++ b/src/tests/emotion/data/sl.png
Binary files differ
diff --git a/src/tests/emotion/data/theme.edc b/src/tests/emotion/data/theme.edc
new file mode 100644
index 0000000000..63e732dc5a
--- /dev/null
+++ b/src/tests/emotion/data/theme.edc
@@ -0,0 +1,1667 @@
+images {
+ image: "tiles.png" COMP;
+ image: "window_inner_shadow.png" LOSSY 70;
+/* image: "e_logo.png" COMP;*/
+
+ image: "h_slider.png" COMP;
+/************/
+ image: "video_frame_left.png" COMP;
+ image: "video_frame_right.png" COMP;
+ image: "video_frame_top.png" COMP;
+ image: "video_frame_bottom.png" COMP;
+
+ image: "knob.png" COMP;
+
+ image: "fr1.png" COMP;
+ image: "fr2.png" COMP;
+ image: "fr3.png" COMP;
+ image: "fr4.png" COMP;
+ image: "fr5.png" COMP;
+ image: "fr6.png" COMP;
+ image: "fr7.png" COMP;
+ image: "sl.png" COMP;
+ image: "orb.png" COMP;
+ image: "whb.png" COMP;
+ image: "bpause.png" COMP;
+ image: "bplay.png" COMP;
+ image: "bstop.png" COMP;
+ image: "pnl.png" COMP;
+}
+
+collections {
+ group {
+ name: "background";
+ parts {
+ part {
+ name: "bg";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ color_class: "background";
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ fill {
+ smooth: 0;
+ origin {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ size {
+ relative: 0.0 0.0;
+ offset: 128 128;
+ }
+ }
+ image {
+ normal: "tiles.png";
+ }
+ }
+ }
+/*
+ part {
+ name: "logo";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ min: 120 140;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 48 48;
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: 140 140;
+ }
+ image {
+ normal: "e_logo.png";
+ }
+ }
+ }
+ */
+ part {
+ name: "shadow";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ normal: "window_inner_shadow.png";
+ }
+ }
+ }
+ }
+ }
+#if 0
+ group {
+ name: "video_controller";
+ parts {
+ // need swallow parts:
+ // "video_swallow"
+ //
+ // need txt parts:
+ // "video_speed_txt"
+ // "video_progress_txt"
+ //
+ // need dragables:
+ // "video_progress" horizontal
+ // "video_speed" vertical
+ part {
+ name: "video_swallow";
+ mouse_events: 0;
+ type: SWALLOW;
+ clip_to: "vclip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ }
+ }
+ part {
+ name: "vclip";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 255;
+ }
+ description {
+ state: "faded" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 180;
+ }
+ description {
+ state: "dim" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 255 255 255 80;
+ }
+ }
+ part {
+ name: "fr_c1";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: 34 34;
+ }
+ image {
+ normal: "fr1.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c2";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: -35 0;
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -1 34;
+ }
+ image {
+ normal: "fr6.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c3";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 -35;
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: 34 -1;
+ }
+ image {
+ normal: "fr3.png";
+ }
+ }
+ }
+ part {
+ name: "fr_c4";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -35 -35;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: 9 9;
+ }
+ image {
+ normal: "fr5.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s1";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 0;
+ to: "fr_c1";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -1 -1;
+ to: "fr_c3";
+ }
+ image {
+ normal: "fr2.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s2";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: 0 0;
+ to: "fr_c3";
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: -1 -11;
+ to: "fr_c4";
+ }
+ image {
+ normal: "fr4.png";
+ }
+ }
+ }
+ part {
+ name: "fr_s3";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 1.0;
+ offset: 0 0;
+ to: "fr_c2";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -11 -1;
+ to: "fr_c4";
+ }
+ image {
+ normal: "fr2.png";
+ }
+ }
+ }
+ part {
+ name: "fr_t";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: 0 0;
+ to: "fr_c1";
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: -1 -1;
+ to: "fr_c2";
+ }
+ image {
+ border: 50 50 0 0;
+ normal: "fr7.png";
+ }
+ }
+ }
+ part {
+ name: "panel_bg";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ to: "panel_clip";
+ }
+ image {
+ normal: "pnl.png";
+ }
+ fill {
+ smooth: 1;
+ origin {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ size {
+ relative: 0.0 1.0;
+ offset: 32 0;
+ }
+ }
+ }
+ }
+ part {
+ name: "prog_container";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 4 4;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 0.0;
+ offset: -5 9;
+ to: "panel_clip";
+ }
+ image {
+ border: 2 2 2 2;
+ normal: "whb.png";
+ }
+ }
+ }
+ part {
+ name: "prog_done";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ to: "prog_container";
+ }
+ rel2 {
+ relative: 0.5 1.0;
+ offset: 0 -1;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ image {
+ border: 2 2 2 2;
+ normal: "orb.png";
+ }
+ }
+ }
+ part {
+ name: "video_progress";
+ type: RECT;
+ clip_to: "panel_clip";
+ mouse_events: 1;
+ dragable {
+ x: 1 1 0;
+ y: 0 0 0;
+ confine: "prog_container";
+ }
+ description {
+ state: "default" 0.0;
+ min: 10 5;
+ align: 0.5 0.5;
+ rel1 {
+ to: "prog_container";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "prog_container";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ color: 0 255 0 50;
+ }
+ }
+ part {
+ name: "video_progress_img";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 9 16;
+ align: 0.5 1.0;
+ fixed: 1 1;
+ rel1 {
+ relative: 0.5 0.0;
+ offset: 0 0;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ rel2 {
+ relative: 0.5 0.0;
+ offset: 0 0;
+ to_x: "video_progress";
+ to_y: "prog_container";
+ }
+ image {
+ normal: "sl.png";
+ }
+ }
+ }
+ part {
+ name: "b_stop";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 0.0;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -5 -5;
+ to: "panel_clip";
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -5 -5;
+ to: "panel_clip";
+ }
+ image {
+ normal: "bstop.png";
+ }
+ }
+ }
+ part {
+ name: "b_play";
+ mouse_events: 0;
+ clip_to: "panel_clip";
+ description {
+ state: "default" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bstop.png";
+ }
+ }
+ description {
+ state: "play" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bplay.png";
+ }
+ }
+ description {
+ state: "pause" 0.0;
+ min: 22 22;
+ max: 22 22;
+ align: 0.0 1.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ rel2 {
+ relative: 0.0 0.0;
+ offset: -1 0;
+ to: "b_stop";
+ }
+ image {
+ normal: "bpause.png";
+ }
+ }
+ }
+ part {
+ name: "panel_clip";
+ mouse_events: 0;
+ type: RECT;
+ clip_to: "vclip";
+ description {
+ visible: 0;
+ state: "default" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 0;
+ }
+ description {
+ visible: 1;
+ state: "shown" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 255;
+ }
+ }
+ part {
+ name: "video_progress_txt";
+ type: TEXT;
+ mouse_events: 0;
+ effect: OUTLINE;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "panel";
+ relative: 0.0 1.0;
+ offset: 0 -10;
+ }
+ rel2 {
+ to: "panel";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 255 255 255;
+ color2: 0 0 0 255;
+ text {
+ text: "Video Progress";
+ font: "Sans";
+ size: 6;
+ align: 0.0 1.0;
+ };
+ }
+ }
+ part {
+ name: "panel";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c3";
+ relative: 0.0 0.0;
+ offset: 20 -46;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 14 14;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "panel2";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c1";
+ relative: 0.0 0.0;
+ offset: 20 20;
+ }
+ rel2 {
+ to: "fr_c2";
+ relative: 1.0 0.0;
+ offset: -1 -1;
+ to: "panel";
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_resizer";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_c4";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "fr_c4";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_mover";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "fr_t";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "fr_t";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ }
+ programs {
+ program {
+ name: "video_move_start";
+ signal: "mouse,down,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "start";
+ }
+ program {
+ name: "video_move_stop";
+ signal: "mouse,up,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "stop";
+ }
+ program {
+ name: "video_resize_start";
+ signal: "mouse,down,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "start";
+ }
+ program {
+ name: "video_resize_stop";
+ signal: "mouse,up,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "stop";
+ }
+ program {
+ name: "panel_show";
+ signal: "mouse,in";
+ source: "panel";
+ action: STATE_SET "shown" 0.0;
+ transition: LINEAR 1.0;
+ target: "panel_clip";
+ }
+ program {
+ name: "panel_hide";
+ signal: "mouse,in";
+ source: "panel2";
+ action: STATE_SET "default" 0.0;
+ transition: LINEAR 1.0;
+ target: "panel_clip";
+ }
+ }
+ }
+#else
+ group {
+ name: "video_controller";
+ parts {
+ // need swallow parts:
+ // "video_swallow"
+ //
+ // need txt parts:
+ // "video_speed_txt"
+ // "video_progress_txt"
+ //
+ // need dragables:
+ // "video_progress" horizontal
+ // "video_speed" vertical
+ part {
+ name: "video_swallow";
+ mouse_events: 0;
+ type: SWALLOW;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: -8 23;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 7 -25;
+ }
+ }
+ }
+ part {
+ name: "video_frame_left";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: 32 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_left.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_right";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: -32 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_right.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_top";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 0.0;
+ offset: -1 31;
+ }
+ image {
+ normal: "video_frame_top.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_bottom";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -32;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ normal: "video_frame_bottom.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_txt";
+ type: TEXT;
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.0;
+ offset: 0 8;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 1.0;
+ offset: -1 -13;
+ }
+ color: 0 0 0 255;
+ text {
+ text: "Video Speed";
+ font: "Sans";
+ size: 6;
+ align: 1.0 0.5;
+ };
+ }
+ }
+ part {
+ name: "video_progress_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 1 18;
+ align: 0.5 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -25;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -25;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress";
+ type: RECT;
+ mouse_events: 1;
+ dragable {
+ x: 1 1 0;
+ y: 0 0 0;
+ confine: "video_progress_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 34 18;
+ rel1 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress_img";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 42 26;
+ rel1 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "h_slider.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 24;
+ }
+ rel2 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 48 -49;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_speed";
+ mouse_events: 1;
+ dragable {
+ x: 0 0 0;
+ y: -1 1 0;
+ confine: "video_speed_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_play";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_pause";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_stop";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_mover";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_top";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_top";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 20 20 20;
+ }
+ }
+ part {
+ name: "video_resizer";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 0 -31;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 20 20 255 20;
+ }
+ }
+ part {
+ name: "video_progress_txt";
+ type: TEXT;
+ mouse_events: 0;
+ effect: OUTLINE;
+ description {
+ state: "default" 0.0;
+ align: 1.0 1.0;
+ fixed: 1 1;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ color: 255 255 255 255;
+ color2: 0 0 0 255;
+ text {
+ text: "XX:XX:XX / XX:XX:XX";
+ font: "Sans";
+ size: 10;
+ align: 1.0 1.0;
+ min: 1 1;
+ };
+ }
+ }
+ }
+ programs {
+ // emit signals:
+ // "video_control" "play"
+ // "video_control" "pause"
+ // "video_control" "stop"
+ // "drag" "video_progress"
+ // "drag" "video_speed"
+ //
+ // get signals:
+ // "video_state" "play"
+ // "video_state" "pause"
+ // "video_state" "stop"
+ program {
+ name: "video_play";
+ signal: "mouse,down,1";
+ source: "video_play";
+ action: SIGNAL_EMIT "video_control" "play";
+ }
+ program {
+ name: "video_pause";
+ signal: "mouse,down,1";
+ source: "video_pause";
+ action: SIGNAL_EMIT "video_control" "pause";
+ }
+ program {
+ name: "video_stop";
+ signal: "mouse,down,1";
+ source: "video_stop";
+ action: SIGNAL_EMIT "video_control" "stop";
+ }
+ program {
+ name: "video_move_start";
+ signal: "mouse,down,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "start";
+ }
+ program {
+ name: "video_move_stop";
+ signal: "mouse,up,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "stop";
+ }
+ program {
+ name: "video_resize_start";
+ signal: "mouse,down,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "start";
+ }
+ program {
+ name: "video_resize_stop";
+ signal: "mouse,up,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "stop";
+ }
+ }
+ }
+
+ group {
+ name: "video_controller/reflex";
+ parts {
+ // need swallow parts:
+ // "video_swallow"
+ //
+ // need txt parts:
+ // "video_speed_txt"
+ // "video_progress_txt"
+ //
+ // need dragables:
+ // "video_progress" horizontal
+ // "video_speed" vertical
+ part {
+ name: "video_swallow";
+ mouse_events: 0;
+ type: SWALLOW;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: -8 23;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 7 -25;
+ }
+ }
+ }
+
+ part {
+ name: "swallow_center";
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ visible: 0;
+ rel1 {
+ to: "video_swallow";
+ relative: 0.5 1.0;
+ }
+ rel2 {
+ to: "video_swallow";
+ relative: 0.5 1.0;
+ }
+ }
+ }
+ part {
+ name: "swallow_mirror";
+ type: PROXY;
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ source: "video_swallow";
+ rel1 {
+ to: "video_swallow";
+ }
+ rel2 {
+ to: "video_swallow";
+ }
+ perspective {
+ zplane: 0;
+ focal: 1000;
+ }
+ color: 255 255 255 96;
+ map {
+ on: 1;
+ smooth: 0;
+ alpha: 1;
+ rotation {
+ center: "swallow_center";
+ x: 100;
+ }
+ }
+ }
+ }
+
+ part {
+ name: "video_frame_left";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ relative: 0.0 1.0;
+ offset: 32 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_left.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_right";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ relative: 1.0 0.0;
+ offset: -32 0;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ border: 0 0 33 33;
+ normal: "video_frame_right.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_top";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 0.0;
+ offset: -1 31;
+ }
+ image {
+ normal: "video_frame_top.png";
+ }
+ }
+ }
+ part {
+ name: "video_frame_bottom";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -32;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -1;
+ }
+ image {
+ normal: "video_frame_bottom.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_txt";
+ type: TEXT;
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.0;
+ offset: 0 8;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 1.0;
+ offset: -1 -13;
+ }
+ color: 0 0 0 255;
+ text {
+ text: "Video Speed";
+ font: "Sans";
+ size: 6;
+ align: 1.0 0.5;
+ };
+ }
+ }
+ part {
+ name: "video_progress_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 1 18;
+ align: 0.5 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 0 -25;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: -1 -25;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress";
+ type: RECT;
+ mouse_events: 1;
+ dragable {
+ x: 1 1 0;
+ y: 0 0 0;
+ confine: "video_progress_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 34 18;
+ rel1 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_progress_img";
+ mouse_events: 0;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 42 26;
+ rel1 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_progress";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "h_slider.png";
+ }
+ }
+ }
+ part {
+ name: "video_speed_confine";
+ mouse_events: 0;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_left";
+ relative: 1.0 0.0;
+ offset: 0 24;
+ }
+ rel2 {
+ to: "video_frame_left";
+ relative: 1.0 1.0;
+ offset: 48 -49;
+ }
+ color: 0 0 0 0;
+ }
+ }
+ part {
+ name: "video_speed";
+ mouse_events: 1;
+ dragable {
+ x: 0 0 0;
+ y: -1 1 0;
+ confine: "video_speed_confine";
+ }
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_speed_confine";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_play";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_pause";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 0.5 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_stop";
+ mouse_events: 1;
+ description {
+ state: "default" 0.0;
+ fixed: 1 1;
+ min: 24 24;
+ rel1 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_bottom";
+ relative: 1.0 0.5;
+ offset: 0 0;
+ }
+ image {
+ normal: "knob.png";
+ }
+ }
+ }
+ part {
+ name: "video_mover";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_top";
+ relative: 0.0 0.0;
+ offset: 0 0;
+ }
+ rel2 {
+ to: "video_frame_top";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 255 20 20 20;
+ }
+ }
+ part {
+ name: "video_resizer";
+ mouse_events: 1;
+ type: RECT;
+ description {
+ state: "default" 0.0;
+ rel1 {
+ to: "video_frame_right";
+ relative: 0.0 1.0;
+ offset: 0 -31;
+ }
+ rel2 {
+ to: "video_frame_right";
+ relative: 1.0 1.0;
+ offset: -1 -1;
+ }
+ color: 20 20 255 20;
+ }
+ }
+ part {
+ name: "video_progress_txt";
+ type: TEXT;
+ mouse_events: 0;
+ effect: OUTLINE;
+ description {
+ state: "default" 0.0;
+ align: 1.0 1.0;
+ fixed: 1 1;
+ rel1 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ rel2 {
+ relative: 1.0 1.0;
+ offset: -2 -2;
+ }
+ color: 255 255 255 255;
+ color2: 0 0 0 255;
+ text {
+ text: "XX:XX:XX / XX:XX:XX";
+ font: "Sans";
+ size: 10;
+ align: 1.0 1.0;
+ min: 1 1;
+ };
+ }
+ }
+ }
+ programs {
+ // emit signals:
+ // "video_control" "play"
+ // "video_control" "pause"
+ // "video_control" "stop"
+ // "drag" "video_progress"
+ // "drag" "video_speed"
+ //
+ // get signals:
+ // "video_state" "play"
+ // "video_state" "pause"
+ // "video_state" "stop"
+ program {
+ name: "video_play";
+ signal: "mouse,down,1";
+ source: "video_play";
+ action: SIGNAL_EMIT "video_control" "play";
+ }
+ program {
+ name: "video_pause";
+ signal: "mouse,down,1";
+ source: "video_pause";
+ action: SIGNAL_EMIT "video_control" "pause";
+ }
+ program {
+ name: "video_stop";
+ signal: "mouse,down,1";
+ source: "video_stop";
+ action: SIGNAL_EMIT "video_control" "stop";
+ }
+ program {
+ name: "video_move_start";
+ signal: "mouse,down,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "start";
+ }
+ program {
+ name: "video_move_stop";
+ signal: "mouse,up,*";
+ source: "video_mover";
+ action: SIGNAL_EMIT "frame_move" "stop";
+ }
+ program {
+ name: "video_resize_start";
+ signal: "mouse,down,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "start";
+ }
+ program {
+ name: "video_resize_stop";
+ signal: "mouse,up,*";
+ source: "video_resizer";
+ action: SIGNAL_EMIT "frame_resize" "stop";
+ }
+ }
+ }
+#endif
+}
diff --git a/src/tests/emotion/data/tiles.png b/src/tests/emotion/data/tiles.png
new file mode 100644
index 0000000000..7e92bd15fa
--- /dev/null
+++ b/src/tests/emotion/data/tiles.png
Binary files differ
diff --git a/src/tests/emotion/data/video_frame_bottom.png b/src/tests/emotion/data/video_frame_bottom.png
new file mode 100644
index 0000000000..97eb5dd5a6
--- /dev/null
+++ b/src/tests/emotion/data/video_frame_bottom.png
Binary files differ
diff --git a/src/tests/emotion/data/video_frame_left.png b/src/tests/emotion/data/video_frame_left.png
new file mode 100644
index 0000000000..3d27d84d01
--- /dev/null
+++ b/src/tests/emotion/data/video_frame_left.png
Binary files differ
diff --git a/src/tests/emotion/data/video_frame_right.png b/src/tests/emotion/data/video_frame_right.png
new file mode 100644
index 0000000000..1cefbafcef
--- /dev/null
+++ b/src/tests/emotion/data/video_frame_right.png
Binary files differ
diff --git a/src/tests/emotion/data/video_frame_top.png b/src/tests/emotion/data/video_frame_top.png
new file mode 100644
index 0000000000..9317a61478
--- /dev/null
+++ b/src/tests/emotion/data/video_frame_top.png
Binary files differ
diff --git a/src/tests/emotion/data/whb.png b/src/tests/emotion/data/whb.png
new file mode 100644
index 0000000000..5f141d4b6d
--- /dev/null
+++ b/src/tests/emotion/data/whb.png
Binary files differ
diff --git a/src/tests/emotion/data/window_inner_shadow.png b/src/tests/emotion/data/window_inner_shadow.png
new file mode 100644
index 0000000000..93410708e5
--- /dev/null
+++ b/src/tests/emotion/data/window_inner_shadow.png
Binary files differ
diff --git a/src/tests/emotion/emotion_test_main.c b/src/tests/emotion/emotion_test_main.c
new file mode 100644
index 0000000000..5bf3d3ab8a
--- /dev/null
+++ b/src/tests/emotion/emotion_test_main.c
@@ -0,0 +1,748 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <Evas.h>
+#include <Ecore.h>
+#include <Ecore_Getopt.h>
+#include <Ecore_Evas.h>
+#include <Edje.h>
+
+#include "Emotion.h"
+
+/* Command-line definition for emotion_test.
+ *
+ * BUGFIX: the original table reused the short options 'v' (vis vs
+ * verbose) and 'R' (reflex vs copyright); Ecore_Getopt matches the
+ * first entry for a short option, so "verbose" and "copyright" were
+ * unreachable via their short form.  They now use the unique letters
+ * 'w' and 'C'; all long options are unchanged. */
+static const Ecore_Getopt options = {
+   "emotion_test",
+   "%prog [options] <filename>",
+   "1.0.0",
+   "(C) 2011 Enlightenment",
+   "BSD\nThis is a 3 clause bsd bla bla",
+   "a simple test program for emotion.",
+   1,
+   {
+      ECORE_GETOPT_STORE_STR('e', "engine", "ecore-evas engine to use"),
+      ECORE_GETOPT_CALLBACK_NOARGS('E', "list-engines", "list ecore-evas engines",
+                                   ecore_getopt_callback_ecore_evas_list_engines, NULL),
+      ECORE_GETOPT_CALLBACK_ARGS('g', "geometry", "geometry to use in x:y:w:h form.", "X:Y:W:H",
+                                 ecore_getopt_callback_geometry_parse, NULL),
+      ECORE_GETOPT_STORE_STR('b', "backend", "backend to use"),
+      ECORE_GETOPT_STORE_INT('v', "vis", "visualization type"),
+      ECORE_GETOPT_COUNT('w', "verbose", "be more verbose"),
+      ECORE_GETOPT_STORE_TRUE('R', "reflex", "show video reflex effect"),
+      ECORE_GETOPT_VERSION('V', "version"),
+      ECORE_GETOPT_COPYRIGHT('C', "copyright"),
+      ECORE_GETOPT_LICENSE('L', "license"),
+      ECORE_GETOPT_HELP('h', "help"),
+      ECORE_GETOPT_SENTINEL
+   }
+};
+
+typedef struct _Frame_Data Frame_Data;
+
+/* Per-controller drag state, attached to each video controller edje
+ * object under the "frame_data" data key (see init_video_object()). */
+struct _Frame_Data
+{
+   unsigned char moving : 1;   /* 1 while the frame is being dragged */
+   unsigned char resizing : 1; /* 1 while the frame is being resized */
+   int button;                 /* apparently unused in this file -- TODO confirm */
+   Evas_Coord x, y;            /* last recorded pointer canvas position */
+};
+
+/* Forward declarations for the window and background callbacks. */
+static void main_resize(Ecore_Evas *ee);
+static Eina_Bool main_signal_exit(void *data, int ev_type, void *ev);
+static void main_delete_request(Ecore_Evas *ee);
+
+static void bg_setup(void);
+static void bg_resize(Evas_Coord w, Evas_Coord h);
+static void bg_key_down(void *data, Evas * e, Evas_Object * obj, void *event_info);
+
+static Evas_Object *o_bg = NULL;      /* background edje object */
+
+static double start_time = 0.0;       /* recorded at startup; not read elsewhere in this file */
+static Ecore_Evas *ecore_evas = NULL; /* the single output window */
+static Evas *evas = NULL;             /* canvas of ecore_evas */
+static int startw = 800;              /* initial window width */
+static int starth = 600;              /* initial window height */
+
+static Eina_List *video_objs = NULL;  /* list of emotion Evas_Object* currently alive */
+static Emotion_Vis vis = EMOTION_VIS_NONE; /* visualization applied to new videos */
+static unsigned char reflex = 0;      /* set by --reflex; selects the reflex theme group */
+
+/* Ecore-Evas resize callback: keep the background sized to the
+ * current output viewport. */
+static void
+main_resize(Ecore_Evas *ee)
+{
+   Evas_Coord vw, vh;
+
+   evas_output_viewport_get(ecore_evas_get(ee), NULL, NULL, &vw, &vh);
+   bg_resize(vw, vh);
+}
+
+/* Exit-signal handler: stop the main loop, then tear down every video
+ * object, saving its playback position first. */
+static Eina_Bool
+main_signal_exit(void *data EINA_UNUSED, int ev_type EINA_UNUSED, void *ev EINA_UNUSED)
+{
+   Evas_Object *vid;
+
+   ecore_main_loop_quit();
+   EINA_LIST_FREE(video_objs, vid)
+     {
+        emotion_object_last_position_save(vid);
+        evas_object_del(vid);
+     }
+   return EINA_TRUE;
+}
+
+/* Window close button: just leave the main loop. */
+static void
+main_delete_request(Ecore_Evas *ee EINA_UNUSED)
+{
+   ecore_main_loop_quit();
+}
+
+/* Create the themed background, give it keyboard focus, hook the
+ * global key handler on it and remember it in o_bg.
+ *
+ * FIX: defined as static to match its forward declaration above --
+ * the definition was missing the storage-class specifier. */
+static void
+bg_setup(void)
+{
+   Evas_Object *o;
+
+   o = edje_object_add(evas);
+   edje_object_file_set(o, PACKAGE_DATA_DIR"/data/theme.edj", "background");
+   evas_object_move(o, 0, 0);
+   evas_object_resize(o, startw, starth);
+   evas_object_layer_set(o, -999); /* keep it below every video frame */
+   evas_object_show(o);
+   evas_object_focus_set(o, 1);    /* receives all key events */
+   evas_object_event_callback_add(o, EVAS_CALLBACK_KEY_DOWN, bg_key_down, NULL);
+   o_bg = o;
+}
+
+/* Resize the background object to the new canvas size.
+ * FIX: defined as static to match its forward declaration above. */
+static void
+bg_resize(Evas_Coord w, Evas_Coord h)
+{
+   evas_object_resize(o_bg, w, h);
+}
+
+/* Forward a simple emotion event (menu navigation, digits, ...) to
+ * every video object currently alive. */
+static void
+broadcast_event(Emotion_Event ev)
+{
+   Eina_List *node;
+   Evas_Object *vid;
+
+   EINA_LIST_FOREACH(video_objs, node, vid)
+     emotion_object_event_simple_send(vid, ev);
+}
+
+/* Global keyboard shortcuts, installed on the background object.
+ *
+ * Navigation keys and digits are broadcast to every video object as
+ * DVD-menu style emotion events; single letters toggle per-object or
+ * per-window state (mute, fullscreen, shaping, ...).  Unrecognized
+ * keys are just printed. */
+static void
+bg_key_down(void *data EINA_UNUSED, Evas *e EINA_UNUSED, Evas_Object *obj EINA_UNUSED, void *event_info)
+{
+   Evas_Event_Key_Down *ev = event_info;
+   Eina_List *l;
+   Evas_Object *o;
+
+   /* quit */
+   if (!strcmp(ev->keyname, "Escape"))
+     ecore_main_loop_quit();
+   /* DVD-style navigation events, broadcast to all videos */
+   else if (!strcmp(ev->keyname, "Up"))
+     broadcast_event(EMOTION_EVENT_UP);
+   else if (!strcmp(ev->keyname, "Down"))
+     broadcast_event(EMOTION_EVENT_DOWN);
+   else if (!strcmp(ev->keyname, "Left"))
+     broadcast_event(EMOTION_EVENT_LEFT);
+   else if (!strcmp(ev->keyname, "Right"))
+     broadcast_event(EMOTION_EVENT_RIGHT);
+   else if (!strcmp(ev->keyname, "Return"))
+     broadcast_event(EMOTION_EVENT_SELECT);
+   else if (!strcmp(ev->keyname, "m"))
+     broadcast_event(EMOTION_EVENT_MENU1);
+   else if (!strcmp(ev->keyname, "Prior"))
+     broadcast_event(EMOTION_EVENT_PREV);
+   else if (!strcmp(ev->keyname, "Next"))
+     broadcast_event(EMOTION_EVENT_NEXT);
+   else if (!strcmp(ev->keyname, "0"))
+     broadcast_event(EMOTION_EVENT_0);
+   else if (!strcmp(ev->keyname, "1"))
+     broadcast_event(EMOTION_EVENT_1);
+   else if (!strcmp(ev->keyname, "2"))
+     broadcast_event(EMOTION_EVENT_2);
+   else if (!strcmp(ev->keyname, "3"))
+     broadcast_event(EMOTION_EVENT_3);
+   else if (!strcmp(ev->keyname, "4"))
+     broadcast_event(EMOTION_EVENT_4);
+   else if (!strcmp(ev->keyname, "5"))
+     broadcast_event(EMOTION_EVENT_5);
+   else if (!strcmp(ev->keyname, "6"))
+     broadcast_event(EMOTION_EVENT_6);
+   else if (!strcmp(ev->keyname, "7"))
+     broadcast_event(EMOTION_EVENT_7);
+   else if (!strcmp(ev->keyname, "8"))
+     broadcast_event(EMOTION_EVENT_8);
+   else if (!strcmp(ev->keyname, "9"))
+     broadcast_event(EMOTION_EVENT_9);
+   else if (!strcmp(ev->keyname, "-"))
+     broadcast_event(EMOTION_EVENT_10);
+   /* '[' / ']': audio volume down/up by 0.1 on every video */
+   else if (!strcmp(ev->keyname, "bracketleft"))
+     {
+        EINA_LIST_FOREACH(video_objs, l, o)
+          emotion_object_audio_volume_set(o, emotion_object_audio_volume_get(o) - 0.1);
+     }
+   else if (!strcmp(ev->keyname, "bracketright"))
+     {
+        EINA_LIST_FOREACH(video_objs, l, o)
+          emotion_object_audio_volume_set(o, emotion_object_audio_volume_get(o) + 0.1);
+     }
+   /* 'v': toggle video mute on every video */
+   else if (!strcmp(ev->keyname, "v"))
+     {
+        EINA_LIST_FOREACH(video_objs, l, o)
+          {
+             if (emotion_object_video_mute_get(o))
+               emotion_object_video_mute_set(o, 0);
+             else
+               emotion_object_video_mute_set(o, 1);
+          }
+     }
+   /* 'a': toggle audio mute on every video */
+   else if (!strcmp(ev->keyname, "a"))
+     {
+        EINA_LIST_FOREACH(video_objs, l, o)
+          {
+             if (emotion_object_audio_mute_get(o))
+               {
+                  emotion_object_audio_mute_set(o, 0);
+                  printf("unmute\n");
+               }
+             else
+               {
+                  emotion_object_audio_mute_set(o, 1);
+                  printf("mute\n");
+               }
+          }
+     }
+   /* 'i': dump stream information for every video */
+   else if (!strcmp(ev->keyname, "i"))
+     {
+        EINA_LIST_FOREACH(video_objs, l, o)
+          {
+             printf("audio channels: %i\n", emotion_object_audio_channel_count(o));
+             printf("video channels: %i\n", emotion_object_video_channel_count(o));
+             printf("spu channels: %i\n", emotion_object_spu_channel_count(o));
+             printf("seekable: %i\n", emotion_object_seekable_get(o));
+          }
+     }
+   /* 'f'/'d'/'s'/'b': window toggles */
+   else if (!strcmp(ev->keyname, "f"))
+     {
+        if (!ecore_evas_fullscreen_get(ecore_evas))
+          ecore_evas_fullscreen_set(ecore_evas, 1);
+        else
+          ecore_evas_fullscreen_set(ecore_evas, 0);
+     }
+   else if (!strcmp(ev->keyname, "d"))
+     {
+        if (!ecore_evas_avoid_damage_get(ecore_evas))
+          ecore_evas_avoid_damage_set(ecore_evas, 1);
+        else
+          ecore_evas_avoid_damage_set(ecore_evas, 0);
+     }
+   else if (!strcmp(ev->keyname, "s"))
+     {
+        /* shaped window: the background is hidden so the shape follows
+         * only the video frames */
+        if (!ecore_evas_shaped_get(ecore_evas))
+          {
+             ecore_evas_shaped_set(ecore_evas, 1);
+             evas_object_hide(o_bg);
+          }
+        else
+          {
+             ecore_evas_shaped_set(ecore_evas, 0);
+             evas_object_show(o_bg);
+          }
+     }
+   else if (!strcmp(ev->keyname, "b"))
+     {
+        if (!ecore_evas_borderless_get(ecore_evas))
+          ecore_evas_borderless_set(ecore_evas, 1);
+        else
+          ecore_evas_borderless_set(ecore_evas, 0);
+     }
+   /* 'q': quit, deleting all videos first */
+   else if (!strcmp(ev->keyname, "q"))
+     {
+        ecore_main_loop_quit();
+        while (video_objs)
+          {
+             printf("del obj!\n");
+             evas_object_del(video_objs->data);
+             video_objs = eina_list_remove_list(video_objs, video_objs);
+             printf("done\n");
+          }
+     }
+   /* 'z': cycle through visualization modes */
+   else if (!strcmp(ev->keyname, "z"))
+     {
+        vis = (vis + 1) % EMOTION_VIS_LAST;
+        printf("new visualization: %d\n", vis);
+
+        EINA_LIST_FOREACH(video_objs, l, o)
+          {
+             Eina_Bool supported;
+
+             supported = emotion_object_vis_supported(o, vis);
+             if (supported)
+               emotion_object_vis_set(o, vis);
+             else
+               {
+                  const char *file;
+
+                  file = emotion_object_file_get(o);
+                  printf("object %p (%s) does not support visualization %d\n",
+                         o, file, vis);
+               }
+          }
+     }
+   else
+     {
+        printf("UNHANDLED: %s\n", ev->keyname);
+     }
+}
+
+/* Refresh the progress drag and the time readout of the controller
+ * @edje from the current position/length of the emotion object @obj.
+ *
+ * BUGFIX: seconds were computed without subtracting the hours (ls and
+ * ps lacked the lh*3600 / ph*3600 term), which broke the display for
+ * media longer than one hour, and the hundredths term pf went negative
+ * for positions past one hour. */
+static void
+video_obj_time_changed(Evas_Object *obj, Evas_Object *edje)
+{
+   double pos, len, scale;
+   char buf[256];
+   int ph, pm, ps, pf, lh, lm, ls;
+
+   pos = emotion_object_position_get(obj);
+   len = emotion_object_play_length_get(obj);
+   scale = (len > 0.0) ? pos / len : 0.0;
+   edje_object_part_drag_value_set(edje, "video_progress", scale, 0.0);
+   lh = len / 3600;
+   lm = len / 60 - (lh * 60);
+   ls = len - (lh * 3600) - (lm * 60);
+   ph = pos / 3600;
+   pm = pos / 60 - (ph * 60);
+   ps = pos - (ph * 3600) - (pm * 60);
+   pf = (pos - (int)pos) * 100; /* hundredths of a second */
+   snprintf(buf, sizeof(buf), "%i:%02i:%02i.%02i / %i:%02i:%02i",
+            ph, pm, ps, pf, lh, lm, ls);
+   edje_object_part_text_set(edje, "video_progress_txt", buf);
+}
+
+/* "frame_decode" smart callback: refresh the time display; the dead
+ * branch below can be flipped to 1 to print the decode frame rate. */
+static void
+video_obj_frame_decode_cb(void *data, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   video_obj_time_changed(obj, data);
+
+   if (0) /* debug toggle: FPS printout */
+     {
+        static double last = 0.0;
+        double now;
+
+        now = ecore_time_get();
+        printf("FPS: %3.3f\n", 1.0 / (now - last));
+        last = now;
+     }
+}
+
+/* "frame_resize" smart callback: the video's native size changed.
+ * Temporarily pin the swallow's min size to the (aspect-corrected)
+ * native video size, let edje compute the controller's minimum, resize
+ * the controller to it, then clear the min hint and re-swallow so the
+ * frame can still be resized freely afterwards. */
+static void
+video_obj_frame_resize_cb(void *data, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   Evas_Object *oe;
+   int iw, ih;
+   Evas_Coord w, h;
+   double ratio;
+
+   oe = data;
+   emotion_object_size_get(obj, &iw, &ih);
+   ratio = emotion_object_ratio_get(obj);
+   printf("HANDLE %ix%i @ %3.3f\n", iw, ih, ratio);
+   /* a positive display aspect overrides the pixel width (rounded) */
+   if (ratio > 0.0) iw = (ih * ratio) + 0.5;
+   edje_extern_object_min_size_set(obj, iw, ih);
+   edje_object_part_swallow(oe, "video_swallow", obj);
+   edje_object_size_min_calc(oe, &w, &h);
+   evas_object_resize(oe, w, h);
+   edje_extern_object_min_size_set(obj, 0, 0);
+   edje_object_part_swallow(oe, "video_swallow", obj);
+}
+
+/* "length_change" smart callback: just refresh the time display. */
+static void
+video_obj_length_change_cb(void *data, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   video_obj_time_changed(obj, data);
+}
+
+/* "position_update" smart callback (seek/jump): refresh the time
+ * display. */
+static void
+video_obj_position_update_cb(void *data, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   video_obj_time_changed(obj, data);
+}
+
+/* "decode_stop" smart callback: loop -- rewind and restart playback. */
+static void
+video_obj_stopped_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("video stopped!\n");
+   emotion_object_position_set(obj, 0.0);
+   emotion_object_play_set(obj, 1);
+}
+
+/* "channels_change" smart callback: dump the new channel counts. */
+static void
+video_obj_channels_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("channels changed: [AUD %i][VID %i][SPU %i]\n",
+          emotion_object_audio_channel_count(obj),
+          emotion_object_video_channel_count(obj),
+          emotion_object_spu_channel_count(obj));
+}
+
+/* "title_change" smart callback: print the new title. */
+static void
+video_obj_title_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("video title to: \"%s\"\n", emotion_object_title_get(obj));
+}
+
+/* "progress_change" smart callback: print info string and status. */
+static void
+video_obj_progress_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("progress: \"%s\" %3.3f\n",
+          emotion_object_progress_info_get(obj),
+          emotion_object_progress_status_get(obj));
+}
+
+/* "ref_change" smart callback: print the new file reference. */
+static void
+video_obj_ref_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("video ref to: \"%s\" %i\n",
+          emotion_object_ref_file_get(obj),
+          emotion_object_ref_num_get(obj));
+}
+
+/* "button_num_change" smart callback: print the SPU button count. */
+static void
+video_obj_button_num_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("video spu buttons to: %i\n",
+          emotion_object_spu_button_count_get(obj));
+}
+
+/* "button_change" smart callback: print the selected SPU button. */
+static void
+video_obj_button_cb(void *data EINA_UNUSED, Evas_Object *obj, void *event_info EINA_UNUSED)
+{
+   printf("video selected spu button: %i\n",
+          emotion_object_spu_button_get(obj));
+}
+
+
+
+/* edje "video_control,play": resume playback and reflect the state in
+ * the theme. */
+static void
+video_obj_signal_play_cb(void *data, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Evas_Object *vid = data;
+
+   emotion_object_play_set(vid, 1);
+   edje_object_signal_emit(o, "video_state", "play");
+}
+
+/* edje "video_control,pause": pause playback and reflect the state in
+ * the theme. */
+static void
+video_obj_signal_pause_cb(void *data, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Evas_Object *vid = data;
+
+   emotion_object_play_set(vid, 0);
+   edje_object_signal_emit(o, "video_state", "pause");
+}
+
+/* edje "video_control,stop": stop playback, rewind and reflect the
+ * state in the theme. */
+static void
+video_obj_signal_stop_cb(void *data, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Evas_Object *vid = data;
+
+   emotion_object_play_set(vid, 0);
+   emotion_object_position_set(vid, 0);
+   edje_object_signal_emit(o, "video_state", "stop");
+}
+
+/* edje "drag" on video_progress: seek to the dragged fraction of the
+ * play length. */
+static void
+video_obj_signal_jump_cb(void *data, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Evas_Object *vid = data;
+   double total;
+   double dx, dy;
+
+   edje_object_part_drag_value_get(o, source, &dx, &dy);
+   total = emotion_object_play_length_get(vid);
+   emotion_object_position_set(vid, dx * total);
+}
+
+/* edje "drag" on video_speed: map the vertical drag (0..1) to a 0..255
+ * level applied as the video's color/alpha, and echo it as text. */
+static void
+video_obj_signal_speed_cb(void *data, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Evas_Object *vid = data;
+   double level;
+   double dx, dy;
+   char buf[256];
+
+   edje_object_part_drag_value_get(o, source, &dx, &dy);
+   level = 255 * dy;
+   evas_object_color_set(vid, level, level, level, level);
+   snprintf(buf, sizeof(buf), "%.0f", level);
+   edje_object_part_text_set(o, "video_speed_txt", buf);
+}
+
+/* edje "frame_move,start": begin dragging the controller frame --
+ * remember the grab point and raise the frame. */
+static void
+video_obj_signal_frame_move_start_cb(void *data EINA_UNUSED, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Frame_Data *fd = evas_object_data_get(o, "frame_data");
+   Evas_Coord px, py;
+
+   fd->moving = 1;
+   evas_pointer_canvas_xy_get(evas_object_evas_get(o), &px, &py);
+   fd->x = px;
+   fd->y = py;
+   evas_object_raise(o);
+}
+
+/* edje "frame_move,stop": dragging finished. */
+static void
+video_obj_signal_frame_move_stop_cb(void *data EINA_UNUSED, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Frame_Data *fd = evas_object_data_get(o, "frame_data");
+
+   fd->moving = 0;
+}
+
+/* edje "frame_resize,start": begin resizing the controller frame --
+ * remember the grab point and raise the frame. */
+static void
+video_obj_signal_frame_resize_start_cb(void *data EINA_UNUSED, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Frame_Data *fd = evas_object_data_get(o, "frame_data");
+   Evas_Coord px, py;
+
+   fd->resizing = 1;
+   evas_pointer_canvas_xy_get(evas_object_evas_get(o), &px, &py);
+   fd->x = px;
+   fd->y = py;
+   evas_object_raise(o);
+}
+
+/* edje "frame_resize,stop": resizing finished. */
+static void
+video_obj_signal_frame_resize_stop_cb(void *data EINA_UNUSED, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Frame_Data *fd = evas_object_data_get(o, "frame_data");
+
+   fd->resizing = 0;
+}
+
+/* edje "mouse,move" on the controller: while a drag or resize is
+ * active, apply the pointer delta to the frame geometry and record the
+ * new pointer position. */
+static void
+video_obj_signal_frame_move_cb(void *data EINA_UNUSED, Evas_Object *o, const char *emission EINA_UNUSED, const char *source EINA_UNUSED)
+{
+   Frame_Data *fd = evas_object_data_get(o, "frame_data");
+   Evas_Coord px, py;
+
+   if (!fd->moving && !fd->resizing) return;
+
+   evas_pointer_canvas_xy_get(evas_object_evas_get(o), &px, &py);
+   if (fd->moving)
+     {
+        Evas_Coord ox, oy;
+
+        evas_object_geometry_get(o, &ox, &oy, NULL, NULL);
+        evas_object_move(o, ox + (px - fd->x), oy + (py - fd->y));
+     }
+   else
+     {
+        Evas_Coord ow, oh;
+
+        evas_object_geometry_get(o, NULL, NULL, &ow, &oh);
+        evas_object_resize(o, ow + (px - fd->x), oh + (py - fd->y));
+     }
+   fd->x = px;
+   fd->y = py;
+}
+
+
+/* Build one playing video: an emotion object for @filename (engine
+ * @module_filename, NULL selects the default) swallowed into a themed
+ * edje "video_controller" frame with all its callbacks wired up.
+ *
+ * Fixes over the original:
+ *  - the emotion object is deleted (not leaked) when engine init or
+ *    file set fails;
+ *  - the Frame_Data calloc() is checked;
+ *  - the initial play-state signal is emitted on the edje controller
+ *    (oe); emitting it on the emotion object was a no-op. */
+static void
+init_video_object(const char *module_filename, const char *filename)
+{
+   Evas_Object *o, *oe;
+   int iw, ih;
+   Evas_Coord w, h;
+   Frame_Data *fd;
+
+/* basic video object setup */
+   o = emotion_object_add(evas);
+   if (!emotion_object_init(o, module_filename))
+     {
+        evas_object_del(o);
+        return;
+     }
+   emotion_object_vis_set(o, vis);
+   if (!emotion_object_file_set(o, filename))
+     {
+        evas_object_del(o);
+        return;
+     }
+   emotion_object_last_position_load(o);
+   emotion_object_play_set(o, 1);
+   evas_object_move(o, 0, 0);
+   evas_object_resize(o, 320, 240);
+   emotion_object_smooth_scale_set(o, 1);
+   evas_object_show(o);
+/* end basic video setup. all the rest here is just to be fancy */
+
+   video_objs = eina_list_append(video_objs, o);
+
+   emotion_object_size_get(o, &iw, &ih);
+   w = iw; h = ih;
+
+   fd = calloc(1, sizeof(Frame_Data));
+   if (!fd)
+     {
+        video_objs = eina_list_remove(video_objs, o);
+        evas_object_del(o);
+        return;
+     }
+
+   oe = edje_object_add(evas);
+   evas_object_data_set(oe, "frame_data", fd);
+   if (reflex)
+     edje_object_file_set(oe, PACKAGE_DATA_DIR"/data/theme.edj", "video_controller/reflex");
+   else
+     edje_object_file_set(oe, PACKAGE_DATA_DIR"/data/theme.edj", "video_controller");
+   /* pin the swallow min size to the native video size so edje can
+    * compute the controller's minimum, then clear it again below */
+   edje_extern_object_min_size_set(o, w, h);
+   edje_object_part_swallow(oe, "video_swallow", o);
+   edje_object_size_min_calc(oe, &w, &h);
+// evas_object_move(oe, rand() % (int)(startw - w), rand() % (int)(starth - h));
+   evas_object_move(oe, 0, 0);
+   evas_object_resize(oe, w, h);
+   edje_extern_object_min_size_set(o, 0, 0);
+   edje_object_part_swallow(oe, "video_swallow", o);
+
+   evas_object_smart_callback_add(o, "frame_decode", video_obj_frame_decode_cb, oe);
+   evas_object_smart_callback_add(o, "frame_resize", video_obj_frame_resize_cb, oe);
+   evas_object_smart_callback_add(o, "length_change", video_obj_length_change_cb, oe);
+   evas_object_smart_callback_add(o, "position_update", video_obj_position_update_cb, oe);
+
+   evas_object_smart_callback_add(o, "decode_stop", video_obj_stopped_cb, oe);
+   evas_object_smart_callback_add(o, "channels_change", video_obj_channels_cb, oe);
+   evas_object_smart_callback_add(o, "title_change", video_obj_title_cb, oe);
+   evas_object_smart_callback_add(o, "progress_change", video_obj_progress_cb, oe);
+   evas_object_smart_callback_add(o, "ref_change", video_obj_ref_cb, oe);
+   evas_object_smart_callback_add(o, "button_num_change", video_obj_button_num_cb, oe);
+   evas_object_smart_callback_add(o, "button_change", video_obj_button_cb, oe);
+
+   edje_object_signal_callback_add(oe, "video_control", "play", video_obj_signal_play_cb, o);
+   edje_object_signal_callback_add(oe, "video_control", "pause", video_obj_signal_pause_cb, o);
+   edje_object_signal_callback_add(oe, "video_control", "stop", video_obj_signal_stop_cb, o);
+   edje_object_signal_callback_add(oe, "drag", "video_progress", video_obj_signal_jump_cb, o);
+   edje_object_signal_callback_add(oe, "drag", "video_speed", video_obj_signal_speed_cb, o);
+
+   edje_object_signal_callback_add(oe, "frame_move", "start", video_obj_signal_frame_move_start_cb, oe);
+   edje_object_signal_callback_add(oe, "frame_move", "stop", video_obj_signal_frame_move_stop_cb, oe);
+   edje_object_signal_callback_add(oe, "frame_resize", "start", video_obj_signal_frame_resize_start_cb, oe);
+   edje_object_signal_callback_add(oe, "frame_resize", "stop", video_obj_signal_frame_resize_stop_cb, oe);
+   edje_object_signal_callback_add(oe, "mouse,move", "*", video_obj_signal_frame_move_cb, oe);
+
+   edje_object_part_drag_value_set(oe, "video_speed", 0.0, 1.0);
+   edje_object_part_text_set(oe, "video_speed_txt", "1.0");
+
+   /* BUGFIX: was emitted on the emotion object 'o', which has no edje
+    * programs -- the controller theme never saw the initial state. */
+   edje_object_signal_emit(oe, "video_state", "play");
+
+   evas_object_show(oe);
+}
+
+/* Animator tick: push the current time of every video into its
+ * controller (the smart parent is the edje frame swallowing it).
+ * Keeps the animator alive while at least one video exists. */
+static Eina_Bool
+check_positions(void *data EINA_UNUSED)
+{
+   const Eina_List *node;
+   Evas_Object *vid;
+
+   EINA_LIST_FOREACH(video_objs, node, vid)
+     video_obj_time_changed(vid, evas_object_smart_parent_get(vid));
+
+   return !!video_objs;
+}
+
+/* Entry point: parse the command line, create the output window, the
+ * themed background and one video controller per file argument, then
+ * run the main loop.  Returns 0 on success, -1 on setup failure (or
+ * after --help / --list-engines, as before). */
+int
+main(int argc, char **argv)
+{
+   int args;
+   Eina_Rectangle geometry = {0, 0, startw, starth};
+   char *engine = NULL;
+   char *backend = NULL;
+   int verbose = 0;
+   int visual = EMOTION_VIS_NONE;
+   unsigned char help = 0;
+   unsigned char engines_listed = 0;
+   Ecore_Getopt_Value values[] = {
+     ECORE_GETOPT_VALUE_STR(engine),
+     ECORE_GETOPT_VALUE_BOOL(engines_listed),
+     ECORE_GETOPT_VALUE_PTR_CAST(geometry),
+     ECORE_GETOPT_VALUE_STR(backend),
+     ECORE_GETOPT_VALUE_INT(visual),
+     ECORE_GETOPT_VALUE_INT(verbose),
+     ECORE_GETOPT_VALUE_BOOL(reflex),
+     ECORE_GETOPT_VALUE_NONE,
+     ECORE_GETOPT_VALUE_NONE,
+     ECORE_GETOPT_VALUE_NONE,
+     ECORE_GETOPT_VALUE_BOOL(help),
+     ECORE_GETOPT_VALUE_NONE
+   };
+
+   if (!ecore_evas_init())
+     return -1;
+   if (!edje_init())
+     goto shutdown_ecore_evas;
+
+   start_time = ecore_time_get();
+   ecore_event_handler_add(ECORE_EVENT_SIGNAL_EXIT, main_signal_exit, NULL);
+   edje_frametime_set(1.0 / 30.0);
+
+   ecore_app_args_set(argc, (const char **)argv);
+   args = ecore_getopt_parse(&options, values, argc, argv);
+   if (args < 0) goto shutdown_edje;
+   else if (help) goto shutdown_edje;
+   else if (engines_listed) goto shutdown_edje;
+   else if (args == argc)
+     {
+        printf("must provide at least one file to play!\n");
+        goto shutdown_edje;
+     }
+
+   /* BUGFIX: --vis was parsed into "visual" but never copied into the
+    * global "vis" actually used when creating the video objects. */
+   if ((visual >= EMOTION_VIS_NONE) && (visual < EMOTION_VIS_LAST))
+     vis = visual;
+
+   /* fall back to a sane window size when geometry omitted a dimension */
+   if (geometry.w == 0) geometry.w = 320;
+   if (geometry.h == 0) geometry.h = 240;
+
+   printf("evas engine: %s\n", engine ? engine : "<auto>");
+   printf("emotion backend: %s\n", backend ? backend : "<auto>");
+   printf("vis: %d\n", vis);
+   printf("geometry: %d %d %dx%d\n", geometry.x, geometry.y, geometry.w, geometry.h);
+
+   ecore_evas = ecore_evas_new
+     (engine, geometry.x, geometry.y, geometry.w, geometry.h, NULL);
+   if (!ecore_evas)
+     goto shutdown_edje;
+
+// ecore_evas_alpha_set(ecore_evas, EINA_TRUE);
+
+   ecore_evas_callback_delete_request_set(ecore_evas, main_delete_request);
+   ecore_evas_callback_resize_set(ecore_evas, main_resize);
+   ecore_evas_title_set(ecore_evas, "Evas Media Test Program");
+   ecore_evas_name_class_set(ecore_evas, "evas_media_test", "main");
+   ecore_evas_show(ecore_evas);
+   evas = ecore_evas_get(ecore_evas);
+   evas_image_cache_set(evas, 8 * 1024 * 1024);
+   evas_font_cache_set(evas, 1 * 1024 * 1024);
+   evas_font_path_append(evas, PACKAGE_DATA_DIR"/data/fonts");
+
+   emotion_init();
+
+   bg_setup();
+
+   /* remaining non-option arguments are media files to play */
+   for (; args < argc; args++)
+     init_video_object(backend, argv[args]);
+
+   ecore_animator_add(check_positions, NULL);
+
+   ecore_main_loop_begin();
+
+   main_signal_exit(NULL, 0, NULL);
+
+   emotion_shutdown();
+   ecore_evas_free(ecore_evas);
+   ecore_evas_shutdown();
+   edje_shutdown();
+
+   return 0;
+
+ shutdown_edje:
+   edje_shutdown();
+ shutdown_ecore_evas:
+   ecore_evas_shutdown();
+
+   return -1;
+}