summaryrefslogtreecommitdiff
path: root/src/lib
diff options
context:
space:
mode:
authorGustavo Sverzut Barbieri <barbieri@gmail.com>2013-01-10 03:43:32 +0000
committerGustavo Sverzut Barbieri <barbieri@gmail.com>2013-01-10 03:43:32 +0000
commitdfb84c1657bfb14a5236b881193b81f4c0b8a69b (patch)
treeb51b210fc88a21eec8e5907b8bbfe12ebc669f90 /src/lib
parent532284dbbe4259a9f2291f44d3eff376849e8031 (diff)
downloadefl-dfb84c1657bfb14a5236b881193b81f4c0b8a69b.tar.gz
efl: merge emotion.
this one was quite a huge work, but hopefully it's correct. NOTES: * removed vlc generic module, it should go into a separate package. * gstreamer is enabled by default (see --disable-gstreamer) * xine is disabled by default (see --enable-gstreamer) * generic is always built statically if supported * gstreamer and xine can't be configured as static (just lacks command line options, build system supports it) * v4l2 is enabled by default on linux if eeze is built (see --disable-v4l2) * emotion_test moved to src/tests/emotion and depends on EFL_ENABLE_TESTS (--with-tests), but is still installed if enabled. TODO (need your help!): * fix warnings with gstreamer and xine engine * call engine shutdown functions if building as static * remove direct usage of PACKAGE_*_DIR and use eina_prefix * add eina_prefix checkme file as evas and others * add support for $EFL_RUN_IN_TREE * create separate package for emotion_generic_modules * check docs hierarchy (doxygen is segv'in here) SVN revision: 82501
Diffstat (limited to 'src/lib')
-rw-r--r--src/lib/emotion/Emotion.h1332
-rw-r--r--src/lib/emotion/emotion_main.c464
-rw-r--r--src/lib/emotion/emotion_private.h137
-rw-r--r--src/lib/emotion/emotion_smart.c2133
4 files changed, 4066 insertions, 0 deletions
diff --git a/src/lib/emotion/Emotion.h b/src/lib/emotion/Emotion.h
new file mode 100644
index 0000000000..454ee0f42e
--- /dev/null
+++ b/src/lib/emotion/Emotion.h
@@ -0,0 +1,1332 @@
+#ifndef EMOTION_H
+#define EMOTION_H
+
+/**
+ * @file
+ * @brief Emotion Media Library
+ *
+ * These routines are used for Emotion.
+ */
+
+/**
+ *
+ * @page emotion_main Emotion
+ *
+ * @date 2003 (created)
+ *
+ * @section toc Table of Contents
+ *
+ * @li @ref emotion_main_intro
+ * @li @ref emotion_main_work
+ * @li @ref emotion_main_compiling
+ * @li @ref emotion_main_next_steps
+ * @li @ref emotion_main_intro_example
+ *
+ * @section emotion_main_intro Introduction
+ *
+ * A media object library for Evas and Ecore.
+ *
+ * Emotion is a library that allows playing audio and video files, using one of
+ * its backends (gstreamer, xine or generic shm player).
+ *
+ * It is integrated into Ecore through its mainloop, and is transparent to the
+ * user of the library how the decoding of audio and video is being done. Once
+ * the objects are created, the user can set callbacks to the specific events
+ * and set options to this object, all in the main loop (no threads are needed).
+ *
+ * Emotion is also integrated with Evas. The emotion object returned by
+ * emotion_object_add() is an Evas smart object, so it can be manipulated with
+ * default Evas object functions. Callbacks can be added to the signals emitted
+ * by this object with evas_object_smart_callback_add().
+ *
+ * @section emotion_main_work How does Emotion work?
+ *
+ * The Emotion library uses Evas smart objects to allow you to manipulate the
+ * created object as any other Evas object, and to connect to its signals,
+ * handling them when needed. It's also possible to swallow Emotion objects
+ * inside Edje themes, and expect it to behave as a normal image or rectangle
+ * when regarding to its dimensions.
+ *
+ * @section emotion_main_compiling How to compile
+ *
+ * Emotion is a library your application links to. The procedure for this is
+ * very simple. You simply have to compile your application with the
+ * appropriate compiler flags that the @c pkg-config script outputs. For
+ * example:
+ *
+ * Compiling C or C++ files into object files:
+ *
+ * @verbatim
+ gcc -c -o main.o main.c `pkg-config --cflags emotion`
+ @endverbatim
+ *
+ * Linking object files into a binary executable:
+ *
+ * @verbatim
+ gcc -o my_application main.o `pkg-config --libs emotion`
+ @endverbatim
+ *
+ * See @ref pkgconfig
+ *
+ * @section emotion_main_next_steps Next Steps
+ *
+ * After you understood what Emotion is and installed it in your
+ * system you should proceed understanding the programming
+ * interface. We'd recommend you to take a while to learn @ref Ecore and
+ * @ref Evas to get started.
+ *
+ * Recommended reading:
+ *
+ * @li @ref Emotion_Init to initialize the library.
+ * @li @ref Emotion_Video to control video parameters.
+ * @li @ref Emotion_Audio to control audio parameters.
+ * @li @ref Emotion_Play to control playback.
+ * @li @ref Emotion_Webcam to show cameras.
+ * @li @ref Emotion_API for general programming interface.
+ *
+ * @section emotion_main_intro_example Introductory Example
+ *
+ * @include emotion_basic_example.c
+ *
+ * More examples can be found at @ref emotion_examples.
+ */
+
+#include <Evas.h>
+
+#ifdef EAPI
+# undef EAPI
+#endif
+
+#ifdef _WIN32
+# ifdef EFL_EMOTION_BUILD
+# ifdef DLL_EXPORT
+# define EAPI __declspec(dllexport)
+# else
+# define EAPI
+# endif /* ! DLL_EXPORT */
+# else
+# define EAPI __declspec(dllimport)
+# endif /* ! EFL_EMOTION_BUILD */
+#else
+# ifdef __GNUC__
+# if __GNUC__ >= 4
+# define EAPI __attribute__ ((visibility("default")))
+# else
+# define EAPI
+# endif
+# else
+# define EAPI
+# endif
+#endif /* ! _WIN32 */
+
+/**
+ * @file Emotion.h
+ * @brief The file that provides Emotion the API, with functions available for
+ * play, seek, change volume, etc.
+ */
+
+enum _Emotion_Module
+{
+ EMOTION_MODULE_XINE,
+ EMOTION_MODULE_GSTREAMER
+};
+
+enum _Emotion_Event
+{
+ EMOTION_EVENT_MENU1, // Escape Menu
+ EMOTION_EVENT_MENU2, // Title Menu
+ EMOTION_EVENT_MENU3, // Root Menu
+ EMOTION_EVENT_MENU4, // Subpicture Menu
+ EMOTION_EVENT_MENU5, // Audio Menu
+ EMOTION_EVENT_MENU6, // Angle Menu
+ EMOTION_EVENT_MENU7, // Part Menu
+ EMOTION_EVENT_UP,
+ EMOTION_EVENT_DOWN,
+ EMOTION_EVENT_LEFT,
+ EMOTION_EVENT_RIGHT,
+ EMOTION_EVENT_SELECT,
+ EMOTION_EVENT_NEXT,
+ EMOTION_EVENT_PREV,
+ EMOTION_EVENT_ANGLE_NEXT,
+ EMOTION_EVENT_ANGLE_PREV,
+ EMOTION_EVENT_FORCE,
+ EMOTION_EVENT_0,
+ EMOTION_EVENT_1,
+ EMOTION_EVENT_2,
+ EMOTION_EVENT_3,
+ EMOTION_EVENT_4,
+ EMOTION_EVENT_5,
+ EMOTION_EVENT_6,
+ EMOTION_EVENT_7,
+ EMOTION_EVENT_8,
+ EMOTION_EVENT_9,
+ EMOTION_EVENT_10
+};
+
+/**
+ * @enum _Emotion_Meta_Info
+ *
+ * Used for retrieving information about the media file being played.
+ *
+ * @see emotion_object_meta_info_get()
+ *
+ * @ingroup Emotion_Info
+ */
+enum _Emotion_Meta_Info
+{
+ EMOTION_META_INFO_TRACK_TITLE, /**< track title */
+ EMOTION_META_INFO_TRACK_ARTIST, /**< artist name */
+ EMOTION_META_INFO_TRACK_ALBUM, /**< album name */
+ EMOTION_META_INFO_TRACK_YEAR, /**< track year */
+ EMOTION_META_INFO_TRACK_GENRE, /**< track genre */
+ EMOTION_META_INFO_TRACK_COMMENT, /**< track comments */
+ EMOTION_META_INFO_TRACK_DISC_ID, /**< track disc ID */
+ EMOTION_META_INFO_TRACK_COUNT /**< track count - number of the track in the album */
+};
+
+/**
+ * @enum _Emotion_Vis
+ *
+ * Used for displaying a visualization on the emotion object.
+ *
+ * @see emotion_object_vis_set()
+ *
+ * @ingroup Emotion_Visualization
+ */
+enum _Emotion_Vis
+{
+ EMOTION_VIS_NONE, /**< no visualization set */
+ EMOTION_VIS_GOOM, /**< goom */
+ EMOTION_VIS_LIBVISUAL_BUMPSCOPE, /**< bumpscope */
+ EMOTION_VIS_LIBVISUAL_CORONA, /**< corona */
+ EMOTION_VIS_LIBVISUAL_DANCING_PARTICLES, /**< dancing particles */
+ EMOTION_VIS_LIBVISUAL_GDKPIXBUF, /**< gdkpixbuf */
+ EMOTION_VIS_LIBVISUAL_G_FORCE, /**< G force */
+ EMOTION_VIS_LIBVISUAL_GOOM, /**< goom */
+ EMOTION_VIS_LIBVISUAL_INFINITE, /**< infinite */
+ EMOTION_VIS_LIBVISUAL_JAKDAW, /**< jakdaw */
+ EMOTION_VIS_LIBVISUAL_JESS, /**< jess */
+ EMOTION_VIS_LIBVISUAL_LV_ANALYSER, /**< lv analyser */
+ EMOTION_VIS_LIBVISUAL_LV_FLOWER, /**< lv flower */
+ EMOTION_VIS_LIBVISUAL_LV_GLTEST, /**< lv gltest */
+ EMOTION_VIS_LIBVISUAL_LV_SCOPE, /**< lv scope */
+ EMOTION_VIS_LIBVISUAL_MADSPIN, /**< madspin */
+ EMOTION_VIS_LIBVISUAL_NEBULUS, /**< nebulus */
+ EMOTION_VIS_LIBVISUAL_OINKSIE, /**< oinksie */
+ EMOTION_VIS_LIBVISUAL_PLASMA, /**< plasma */
+ EMOTION_VIS_LAST /* sentinel */
+};
+
+/**
+ * @enum Emotion_Suspend
+ *
+ * Used for emotion pipeline resource management.
+ *
+ * @see emotion_object_suspend_set()
+ * @see emotion_object_suspend_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+typedef enum
+{
+ EMOTION_WAKEUP, /**< pipeline is up and running */
+ EMOTION_SLEEP, /**< turn off hardware resource usage like overlay */
+ EMOTION_DEEP_SLEEP, /**< destroy the pipeline, but keep full resolution pixels output around */
+ EMOTION_HIBERNATE /**< destroy the pipeline, and keep half resolution or object resolution if lower */
+} Emotion_Suspend;
+
+/**
+ * @enum _Emotion_Aspect
+ * Defines the aspect ratio option.
+ */
+enum _Emotion_Aspect
+{
+ EMOTION_ASPECT_KEEP_NONE, /**< ignore video aspect ratio */
+ EMOTION_ASPECT_KEEP_WIDTH, /**< respect video aspect, fitting its width inside the object width */
+ EMOTION_ASPECT_KEEP_HEIGHT, /**< respect video aspect, fitting its height inside the object height */
+ EMOTION_ASPECT_KEEP_BOTH, /**< respect video aspect, fitting it inside the object area */
+ EMOTION_ASPECT_CROP, /**< respect video aspect, cropping exceeding area */
+ EMOTION_ASPECT_CUSTOM, /**< use custom borders/crop for the video */
+};
+
+typedef enum _Emotion_Module Emotion_Module;
+typedef enum _Emotion_Event Emotion_Event;
+typedef enum _Emotion_Meta_Info Emotion_Meta_Info; /**< Meta info type to be retrieved. */
+typedef enum _Emotion_Vis Emotion_Vis; /**< Type of visualization. */
+typedef enum _Emotion_Aspect Emotion_Aspect; /**< Aspect ratio option. */
+
+#define EMOTION_CHANNEL_AUTO -1
+#define EMOTION_CHANNEL_DEFAULT 0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EMOTION_VERSION_MAJOR 1
+#define EMOTION_VERSION_MINOR 8
+
+ typedef struct _Emotion_Version
+ {
+ int major;
+ int minor;
+ int micro;
+ int revision;
+ } Emotion_Version;
+
+ EAPI extern Emotion_Version *emotion_version;
+
+/* api calls available */
+
+/**
+ * @brief How to create, initialize, manipulate and connect to signals of an
+ * Emotion object.
+ * @defgroup Emotion_API API available for manipulating Emotion object.
+ * @ingroup Emotion
+ *
+ * @{
+ *
+ * Emotion provides an Evas smart object that allows to play, control and
+ * display a video or audio file. The API is synchronous but not everything
+ * happens immediately. There are also some signals to report changed states.
+ *
+ * Basically, once the object is created and initialized, a file will be set to
+ * it, and then it can be resized, moved, and controlled by other Evas object
+ * functions.
+ *
+ * However, the decoding of the music and video occurs not in the Ecore main
+ * loop, but usually in another thread (this depends on the module being used).
+ * The synchronization between this other thread and the main loop is not visible
+ * to the end user of the library. The user can just register callbacks to the
+ * available signals to receive information about the changed states, and can
+ * call other functions from the API to request more changes on the current
+ * loaded file.
+ *
+ * There will be a delay between an API being called and it being really
+ * executed, since this request will be done in the main thread, and it needs to
+ * be sent to the decoding thread. For this reason, always call functions like
+ * emotion_object_size_get() or emotion_object_length_get() after some signal
+ * being sent, like "playback_started" or "open_done". @ref
+ * emotion_signals_example.c "This example demonstrates this behavior".
+ *
+ * @section signals Available signals
+ * The Evas_Object returned by emotion_object_add() has a number of signals that
+ * can be listened to using evas' smart callbacks mechanism. All signals have
+ * NULL as event info. The following is a list of interesting signals:
+ * @li "playback_started" - Emitted when the playback starts
+ * @li "playback_finished" - Emitted when the playback finishes
+ * @li "frame_decode" - Emitted every time a frame is decoded
+ * @li "open_done" - Emitted when the media file is opened
+ * @li "position_update" - Emitted when emotion_object_position_set is called
+ * @li "decode_stop" - Emitted after the last frame is decoded
+ *
+ * @section Examples
+ *
+ * The following examples exemplify the emotion usage. There's also the
+ * emotion_test binary that is distributed with this library and cover the
+ * entire API, but since it is too long and repetitive to be explained, its code
+ * is just displayed as another example.
+ *
+ * @li @ref emotion_basic_example_c
+ * @li @ref emotion_signals_example.c "Emotion signals"
+ * @li @ref emotion_test_main.c "emotion_test - full API usage"
+ *
+ */
+
+/**
+ * @defgroup Emotion_Init Creation and initialization functions
+ */
+
+/**
+ * @defgroup Emotion_Audio Audio control functions
+ */
+
+/**
+ * @defgroup Emotion_Video Video control functions
+ */
+
+/**
+ * @defgroup Emotion_Visualization Visualization control functions
+ */
+
+/**
+ * @defgroup Emotion_Info Miscellaneous information retrieval functions
+ */
+
+/**
+ * @defgroup Emotion_Ressource Video resource management
+ */
+
+EAPI Eina_Bool emotion_init(void);
+EAPI Eina_Bool emotion_shutdown(void);
+
+/**
+ * @brief Add an emotion object to the canvas.
+ *
+ * @param evas The canvas where the object will be added to.
+ * @return The emotion object just created.
+ *
+ * This function creates an emotion object and adds it to the specified @p evas.
+ * The returned object can be manipulated as any other Evas object, using the
+ * default object manipulation functions - evas_object_*.
+ *
+ * After creating the object with this function, it's still necessary to
+ * initialize it with emotion_object_init(), and if an audio file is going to be
+ * played with this object instead of a video, use
+ * emotion_object_video_mute_set().
+ *
+ * The next step is to open the desired file with emotion_object_file_set(), and
+ * start playing it with emotion_object_play_set().
+ *
+ * @see emotion_object_init()
+ * @see emotion_object_video_mute_set()
+ * @see emotion_object_file_set()
+ * @see emotion_object_play_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Evas_Object *emotion_object_add (Evas *evas);
+
+/**
+ * @brief Set the specified option for the current module.
+ *
+ * @param obj The emotion object which the option is being set to.
+ * @param opt The option that is being set. Currently supported options: "video"
+ * and "audio".
+ * @param val The value of the option. Currently only supports "off" (?!?!?!)
+ *
+ * This function allows one to mute the video or audio of the emotion object.
+ *
+ * @note Please don't use this function, consider using
+ * emotion_object_audio_mute_set() and emotion_object_video_mute_set() instead.
+ *
+ * @see emotion_object_audio_mute_set()
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI void emotion_object_module_option_set (Evas_Object *obj, const char *opt, const char *val);
+
+/**
+ * @brief Initializes an emotion object with the specified module.
+ *
+ * @param obj The emotion object to be initialized.
+ * @param module_filename The name of the module to be used (gstreamer or xine).
+ * @return @c EINA_TRUE if the specified module was successfully initialized for
+ * this object, @c EINA_FALSE otherwise.
+ *
+ * This function is required after creating the emotion object, in order to
+ * specify which module will be used with this object. Different objects can
+ * use different modules to play a media file. The current supported modules are
+ * @b gstreamer and @b xine.
+ *
+ * To use any of them, you need to make sure that support for them was compiled
+ * correctly.
+ *
+ * @note It's possible to disable the build of a module with
+ * --disable-module_name.
+ *
+ * @see emotion_object_add()
+ * @see emotion_object_file_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Eina_Bool emotion_object_init (Evas_Object *obj, const char *module_filename);
+
+/**
+ * @brief Set borders for the emotion object.
+ *
+ * @param obj The emotion object where borders are being set.
+ * @param l The left border.
+ * @param r The right border.
+ * @param t The top border.
+ * @param b The bottom border.
+ *
+ * This function sets borders for the emotion video object (just when a video is
+ * present). When positive values are given to one of the parameters, a border
+ * will be added to the respective position of the object, representing that
+ * size on the original video size. However, if the video is scaled up or down
+ * (i.e. the emotion object size is different from the video size), the borders
+ * will be scaled respectively too.
+ *
+ * If a negative value is given to one of the parameters, instead of a border,
+ * that respective side of the video will be cropped.
+ *
+ * It's possible to set a color for the added borders (default is transparent)
+ * with emotion_object_bg_color_set(). By default, an Emotion object doesn't
+ * have any border.
+ *
+ * @see emotion_object_border_get()
+ * @see emotion_object_bg_color_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_border_set(Evas_Object *obj, int l, int r, int t, int b);
+
+/**
+ * @brief Get the borders set for the emotion object.
+ *
+ * @param obj The emotion object from which the borders are being retrieved.
+ * @param l The left border.
+ * @param r The right border.
+ * @param t The top border.
+ * @param b The bottom border.
+ *
+ * @see emotion_object_border_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_border_get(const Evas_Object *obj, int *l, int *r, int *t, int *b);
+
+/**
+ * @brief Set a color for the background rectangle of this emotion object.
+ *
+ * @param obj The emotion object where the background color is being set.
+ * @param r Red component of the color.
+ * @param g Green component of the color.
+ * @param b Blue component of the color.
+ * @param a Alpha channel of the color.
+ *
+ * This is useful when a border is added to any side of the Emotion object. The
+ * area between the edge of the video and the edge of the object will be filled
+ * with the specified color.
+ *
+ * The default color is 0, 0, 0, 0 (transparent).
+ *
+ * @see emotion_object_bg_color_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_bg_color_set(Evas_Object *obj, int r, int g, int b, int a);
+
+/**
+ * @brief Get the background color set for the emotion object.
+ *
+ * @param obj The emotion object from which the background color is being retrieved.
+ * @param r Red component of the color.
+ * @param g Green component of the color.
+ * @param b Blue component of the color.
+ * @param a Alpha channel of the color.
+ *
+ * @see emotion_object_bg_color_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_bg_color_get(const Evas_Object *obj, int *r, int *g, int *b, int *a);
+
+/**
+ * @brief Set whether emotion should keep the aspect ratio of the video.
+ *
+ * @param obj The emotion object where to set the aspect.
+ * @param a The aspect ratio policy.
+ *
+ * Instead of manually calculating the required border to set with
+ * emotion_object_border_set(), and using this to fix the aspect ratio of the
+ * video when the emotion object has a different aspect, it's possible to just
+ * set the policy to be used.
+ *
+ * The options are:
+ *
+ * - @b #EMOTION_ASPECT_KEEP_NONE - ignore the video aspect ratio, and reset any
+ * border set to 0, stretching the video inside the emotion object area. This
+ * option is similar to EVAS_ASPECT_CONTROL_NONE size hint.
+ * - @b #EMOTION_ASPECT_KEEP_WIDTH - respect the video aspect ratio, fitting the
+ * video width inside the object width. This option is similar to
+ * EVAS_ASPECT_CONTROL_HORIZONTAL size hint.
+ * - @b #EMOTION_ASPECT_KEEP_HEIGHT - respect the video aspect ratio, fitting
+ * the video height inside the object height. This option is similar to
+ * EVAS_ASPECT_CONTROL_VERTICAL size hint.
+ * - @b #EMOTION_ASPECT_KEEP_BOTH - respect the video aspect ratio, fitting both
+ * its width and height inside the object area. This option is similar to
+ * EVAS_ASPECT_CONTROL_BOTH size hint. It's the effect called letterboxing.
+ * - @b #EMOTION_ASPECT_CROP - respect the video aspect ratio, fitting the width
+ * or height inside the object area, and cropping the exceeding areas of the
+ * video in height or width. It's the effect called pan-and-scan.
+ * - @b #EMOTION_ASPECT_CUSTOM - ignore the video aspect ratio, and use the
+ * current set from emotion_object_border_set().
+ *
+ * @note Calling this function with any value except #EMOTION_ASPECT_CUSTOM will
+ * invalidate borders set with emotion_object_border_set().
+ *
+ * @note Calling emotion_object_border_set() will automatically set the aspect
+ * policy to #EMOTION_ASPECT_CUSTOM.
+ *
+ * @see emotion_object_border_set()
+ * @see emotion_object_keep_aspect_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_keep_aspect_set(Evas_Object *obj, Emotion_Aspect a);
+
+/**
+ * @brief Get the current emotion aspect ratio policy.
+ *
+ * @param obj The emotion object from which we are fetching the aspect ratio
+ * policy.
+ * @return The current aspect ratio policy.
+ *
+ * @see emotion_object_keep_aspect_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Emotion_Aspect emotion_object_keep_aspect_get(const Evas_Object *obj);
+
+/**
+ * @brief Set the file to be played in the Emotion object.
+ *
+ * @param obj The emotion object where the file is being loaded.
+ * @param filename Path to the file to be loaded. It can be absolute or relative
+ * path.
+ * @return EINA_TRUE if the new file could be loaded successfully, and
+ * EINA_FALSE if the file could not be loaded. This happens when the filename
+ * could not be found, when the module couldn't open the file, when no module is
+ * initialized in this object, or when the @p filename is the same as the
+ * one previously set.
+ *
+ * This function sets the file to be used with this emotion object. If the
+ * object already has another file set, this file will be unset and unloaded,
+ * and the new file will be loaded to this emotion object. The seek position
+ * will be set to 0, and the emotion object will be paused, instead of playing.
+ *
+ * If there was already a filename set, and it's the same as the one being set
+ * now, this function does nothing and returns EINA_FALSE.
+ *
+ * Use @c NULL as argument to @p filename if you want to unload the current file
+ * but don't want to load anything else.
+ *
+ * @see emotion_object_init()
+ * @see emotion_object_play_set()
+ * @see emotion_object_file_get()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI Eina_Bool emotion_object_file_set (Evas_Object *obj, const char *filename);
+
+/**
+ * @brief Get the filename of the file associated with the emotion object.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The path to the file loaded into this emotion object.
+ *
+ * This function returns the path of the file loaded in this emotion object. If
+ * no object is loaded, it will return @c NULL.
+ *
+ * @note Don't free or change the string returned by this function in any way.
+ * If you want to unset it, use @c emotion_object_file_set(obj, NULL).
+ *
+ * @see emotion_object_file_set()
+ *
+ * @ingroup Emotion_Init
+ */
+EAPI const char *emotion_object_file_get (const Evas_Object *obj);
+/**
+ * @defgroup Emotion_Play Play control functions
+ * @ingroup Emotion
+ *
+ * @{
+ */
+/**
+ *
+ * @brief Set play/pause state of the media file.
+ *
+ * @param obj The emotion object whose state will be changed.
+ * @param play EINA_TRUE to play, EINA_FALSE to pause.
+ *
+ * This function sets the currently playing status of the video. Using this
+ * function to play or pause the video doesn't alter its current position.
+ */
+EAPI void emotion_object_play_set (Evas_Object *obj, Eina_Bool play);
+/**
+ * @brief Get play/pause state of the media file.
+ *
+ * @param obj The emotion object from which the state will be retrieved.
+ * @return EINA_TRUE if playing. EINA_FALSE if not playing.
+ */
+EAPI Eina_Bool emotion_object_play_get (const Evas_Object *obj);
+/**
+ * @brief Set the position in the media file.
+ *
+ * @param obj The emotion object whose position will be changed.
+ * @param sec The position(in seconds) to which the media file will be set.
+ *
+ * This function sets the current position of the media file to @p sec, this
+ * only works on seekable streams. Setting the position doesn't change the
+ * playing state of the media file.
+ *
+ * @see emotion_object_seekable_get
+ */
+EAPI void emotion_object_position_set (Evas_Object *obj, double sec);
+/**
+ * @brief Get the position in the media file.
+ *
+ * @param obj The emotion object from which the position will be retrieved.
+ * @return The position of the media file.
+ *
+ * The position is returned as the number of seconds since the beginning of the
+ * media file.
+ */
+EAPI double emotion_object_position_get (const Evas_Object *obj);
+
+/**
+ * @brief Get the percentual size of the buffering cache.
+ *
+ * @param obj The emotion object from which the buffer size will be retrieved.
+ * @return The buffer percent size, ranging from 0.0 to 1.0
+ *
+ * The buffer size is returned as a number between 0.0 and 1.0, 0.0 means
+ * the buffer is empty, 1.0 means full.
+ * If no buffering is in progress 1.0 is returned. In all other cases (maybe
+ * the backend doesn't support buffering) 1.0 is returned, thus you can always
+ * check for buffer_size < 1.0 to know if buffering is in progress.
+ *
+ * @warning Generic backend doesn't implement this (will return 1.0).
+ */
+EAPI double emotion_object_buffer_size_get (const Evas_Object *obj);
+
+/**
+ * @brief Get whether the media file is seekable.
+ *
+ * @param obj The emotion object from which the seekable status will be
+ * retrieved.
+ * @return EINA_TRUE if the media file is seekable, EINA_FALSE otherwise.
+ */
+EAPI Eina_Bool emotion_object_seekable_get (const Evas_Object *obj);
+/**
+ * @brief Get the length of play for the media file.
+ *
+ * @param obj The emotion object from which the length will be retrieved.
+ * @return The length of the media file in seconds.
+ *
+ * This function returns the length of the media file in seconds.
+ *
+ * @warning This will return 0 if called before the "length_change" signal has
+ * been emitted.
+ */
+EAPI double emotion_object_play_length_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the play speed of the media file.
+ *
+ * @param obj The emotion object whose speed will be set.
+ * @param speed The speed to be set in the range [0,infinity)
+ *
+ * This function sets the speed with which the media file will be played. 1.0
+ * represents the normal speed, 2 double speed, 0.5 half speed and so on.
+ *
+ * @warning The only backend that implements this is the experimental VLC
+ * backend.
+ */
+EAPI void emotion_object_play_speed_set (Evas_Object *obj, double speed);
+/**
+ * @brief Get the play speed of the media file.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The current speed of the media file.
+ *
+ * @see emotion_object_play_speed_set
+ */
+EAPI double emotion_object_play_speed_get (const Evas_Object *obj);
+/**
+ * @brief Get how much of the file has been played.
+ *
+ * @param obj The emotion object from which the filename will be retrieved.
+ * @return The progress of the media file.
+ *
+ * @warning Don't change or free the returned string.
+ * @warning gstreamer xine backends don't implement this (will return NULL).
+ */
+EAPI const char *emotion_object_progress_info_get (const Evas_Object *obj);
+/**
+ * @brief Get how much of the file has been played.
+ *
+ * @param obj The emotion object from which the filename will be retrieved
+ * @return The progress of the media file.
+ *
+ * This function gets the progress in playing the file, the return value is in
+ * the [0, 1] range.
+ *
+ * @warning gstreamer xine backends don't implement this (will return 0).
+ */
+EAPI double emotion_object_progress_status_get (const Evas_Object *obj);
+/**
+ * @}
+ */
+EAPI Eina_Bool emotion_object_video_handled_get (const Evas_Object *obj);
+EAPI Eina_Bool emotion_object_audio_handled_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve the video aspect ratio of the media file loaded.
+ *
+ * @param obj The emotion object which the video aspect ratio will be retrieved
+ * from.
+ * @return The video aspect ratio of the file loaded.
+ *
+ * This function returns the video aspect ratio (width / height) of the file
+ * loaded. It can be used to adapt the size of the emotion object in the canvas,
+ * so the aspect won't be changed (by wrongly resizing the object). Or to crop
+ * the video correctly, if necessary.
+ *
+ * The described behavior can be applied like following. Consider a given
+ * emotion object that we want to position inside an area, which we will
+ * represent by @c w and @c h. Since we want to position this object either
+ * stretching, or filling the entire area but overflowing the video, or just
+ * adjust the video to fit inside the area without keeping the aspect ratio, we
+ * must compare the video aspect ratio with the area aspect ratio:
+ * @code
+ * int w = 200, h = 300; // an arbitrary value which represents the area where
+ * // the video would be placed
+ * int vw, vh;
+ * double r, vr = emotion_object_ratio_get(obj);
+ * r = (double)w / h;
+ * @endcode
+ *
+ * Now, if we want to make the video fit inside the area, the following code
+ * would do it:
+ * @code
+ * if (vr > r) // the video is wider than the area
+ * {
+ * vw = w;
+ * vh = w / vr;
+ * }
+ * else // the video is taller than the area
+ * {
+ * vh = h;
+ * vw = h * vr;
+ * }
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * And for keeping the aspect ratio but making the video fill the entire area,
+ * overflowing the content which can't fit inside it, we would do:
+ * @code
+ * if (vr > r) // the video is wider than the area
+ * {
+ * vh = h;
+ * vw = h * vr;
+ * }
+ * else // the video is taller than the area
+ * {
+ * vw = w;
+ * vh = w / vr;
+ * }
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * Finally, by just resizing the video to the video area, we would have the
+ * video stretched:
+ * @code
+ * vw = w;
+ * vh = h;
+ * evas_object_resize(obj, vw, vh);
+ * @endcode
+ *
+ * The following diagram exemplifies what would happen to the video,
+ * respectively, in each case:
+ *
+ * @image html emotion_ratio.png
+ * @image latex emotion_ratio.eps width=\textwidth
+ *
+ * @note This function returns the aspect ratio that the video @b should be, but
+ * sometimes the reported size from emotion_object_size_get() represents a
+ * different aspect ratio. You can safely resize the video to respect the aspect
+ * ratio returned by @b this function.
+ *
+ * @see emotion_object_size_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI double emotion_object_ratio_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve the video size of the loaded file.
+ *
+ * @param obj The object from which we are retrieving the video size.
+ * @param iw A pointer to a variable where the width will be stored.
+ * @param ih A pointer to a variable where the height will be stored.
+ *
+ * This function returns the reported size of the loaded video file. If a file
+ * that doesn't contain a video channel is loaded, then this size can be
+ * ignored.
+ *
+ * The value reported by this function should be consistent with the aspect
+ * ratio returned by emotion_object_ratio_get(), but sometimes the information
+ * stored in the file is wrong. So use the ratio size reported by
+ * emotion_object_ratio_get(), since it is more likely going to be accurate.
+ *
+ * @note Use @c NULL for @p iw or @p ih if you don't need one of these values.
+ *
+ * @see emotion_object_ratio_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_size_get (const Evas_Object *obj, int *iw, int *ih);
+
+/**
 * @brief Sets whether to use a high-quality image scaling algorithm
 * for the given video object.
+ *
+ * When enabled, a higher quality video scaling algorithm is used when
+ * scaling videos to sizes other than the source video. This gives
+ * better results but is more computationally expensive.
+ *
+ * @param obj The given video object.
+ * @param smooth Whether to use smooth scale or not.
+ *
+ * @see emotion_object_smooth_scale_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_smooth_scale_set (Evas_Object *obj, Eina_Bool smooth);
+
+/**
+ * @brief Gets whether the high-quality image scaling algorithm
+ * of the given video object is used.
+ *
+ * @param obj The given video object.
+ * @return Whether the smooth scale is used or not.
+ *
+ * @see emotion_object_smooth_scale_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Eina_Bool emotion_object_smooth_scale_get (const Evas_Object *obj);
+EAPI void emotion_object_event_simple_send (Evas_Object *obj, Emotion_Event ev);
+
+/**
+ * @brief Set the audio volume.
+ *
+ * @param obj The object where the volume is being set.
+ * @param vol The new volume parameter. Range is from 0.0 to 1.0.
+ *
+ * Sets the audio volume of the stream being played. This has nothing to do with
+ * the system volume. This volume will be multiplied by the system volume. e.g.:
+ * if the current volume level is 0.5, and the system volume is 50%, it will be
+ * 0.5 * 0.5 = 0.25.
+ *
+ * The default value depends on the module used. This value doesn't get changed
+ * when another file is loaded.
+ *
+ * @see emotion_object_audio_volume_get()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI void emotion_object_audio_volume_set (Evas_Object *obj, double vol);
+
+/**
+ * @brief Get the audio volume.
+ *
+ * @param obj The object from which we are retrieving the volume.
+ * @return The current audio volume level for this object.
+ *
+ * Get the current value for the audio volume level. Range is from 0.0 to 1.0.
+ * This volume is set with emotion_object_audio_volume_set().
+ *
+ * @see emotion_object_audio_volume_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI double emotion_object_audio_volume_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the mute audio option for this object.
+ *
+ * @param obj The object which we are setting the mute audio option.
+ * @param mute Whether the audio should be muted (@c EINA_TRUE) or not (@c
+ * EINA_FALSE).
+ *
+ * This function sets the mute audio option for this emotion object. The current
+ * module used for this object can use this to avoid decoding the audio portion
+ * of the loaded media file.
+ *
+ * @see emotion_object_audio_mute_get()
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI void emotion_object_audio_mute_set (Evas_Object *obj, Eina_Bool mute);
+
+/**
+ * @brief Get the mute audio option of this object.
+ *
+ * @param obj The object which we are retrieving the mute audio option from.
+ * @return Whether the audio is muted (@c EINA_TRUE) or not (@c EINA_FALSE).
+ *
+ * This function return the mute audio option from this emotion object. It can
+ * be set with emotion_object_audio_mute_set().
+ *
+ * @see emotion_object_audio_mute_set()
+ *
+ * @ingroup Emotion_Audio
+ */
+EAPI Eina_Bool emotion_object_audio_mute_get (const Evas_Object *obj);
+EAPI int emotion_object_audio_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_audio_channel_name_get(const Evas_Object *obj, int channel);
+EAPI void emotion_object_audio_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_audio_channel_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the mute video option for this object.
+ *
+ * @param obj The object which we are setting the mute video option.
+ * @param mute Whether the video should be muted (@c EINA_TRUE) or not (@c
+ * EINA_FALSE).
+ *
+ * This function sets the mute video option for this emotion object. The
+ * current module used for this object can use this information to avoid
+ * decoding the video portion of the loaded media file.
+ *
+ * @see emotion_object_video_mute_get()
+ * @see emotion_object_audio_mute_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI void emotion_object_video_mute_set (Evas_Object *obj, Eina_Bool mute);
+
+/**
+ * @brief Get the mute video option of this object.
+ *
+ * @param obj The object which we are retrieving the mute video option from.
+ * @return Whether the video is muted (@c EINA_TRUE) or not (@c EINA_FALSE).
+ *
+ * This function returns the mute video option from this emotion object. It can
+ * be set with emotion_object_video_mute_set().
+ *
+ * @see emotion_object_video_mute_set()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI Eina_Bool emotion_object_video_mute_get (const Evas_Object *obj);
+
+/**
+ * @brief Set the video's subtitle file path.
+ *
+ * @param obj The object which we are setting a subtitle file path.
+ * @param filepath The subtitle file path.
+ *
+ * This function sets a video's subtitle file path(i.e an .srt file) for
+ * supported subtitle formats consult the backend's documentation.
+ *
+ * @see emotion_object_video_subtitle_file_get().
+ *
+ * @ingroup Emotion_Video
+ * @since 1.8
+ */
+EAPI void emotion_object_video_subtitle_file_set (Evas_Object *obj, const char *filepath);
+
+/**
+ * @brief Get the video's subtitle file path.
+ *
+ * @param obj The object which we are retrieving the subtitle file path from.
+ * @return The video's subtitle file path previously set, NULL otherwise.
+ *
+ * This function returns the video's subtitle file path, if not previously set
+ * or in error NULL is returned.
+ *
+ * @see emotion_object_video_subtitle_file_set().
+ *
+ * @ingroup Emotion_Video
+ * @since 1.8
+ */
+EAPI const char *emotion_object_video_subtitle_file_get (const Evas_Object *obj);
+
+/**
 * @brief Get the number of available video channels
 *
 * @param obj The object which we are retrieving the channel count from
 * @return the number of available channels.
+ *
+ * @see emotion_object_video_channel_name_get()
+ *
+ * @ingroup Emotion_Video
+ */
+EAPI int emotion_object_video_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_video_channel_name_get(const Evas_Object *obj, int channel);
+EAPI void emotion_object_video_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_video_channel_get (const Evas_Object *obj);
+EAPI void emotion_object_spu_mute_set (Evas_Object *obj, Eina_Bool mute);
+EAPI Eina_Bool emotion_object_spu_mute_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_channel_count (const Evas_Object *obj);
+EAPI const char *emotion_object_spu_channel_name_get (const Evas_Object *obj, int channel);
+EAPI void emotion_object_spu_channel_set (Evas_Object *obj, int channel);
+EAPI int emotion_object_spu_channel_get (const Evas_Object *obj);
+EAPI int emotion_object_chapter_count (const Evas_Object *obj);
+EAPI void emotion_object_chapter_set (Evas_Object *obj, int chapter);
+EAPI int emotion_object_chapter_get (const Evas_Object *obj);
+EAPI const char *emotion_object_chapter_name_get (const Evas_Object *obj, int chapter);
+EAPI void emotion_object_eject (Evas_Object *obj);
+
+/**
+ * @brief Get the dvd title from this emotion object.
+ *
+ * @param obj The object which the title will be retrieved from.
+ * @return A string containing the title.
+ *
+ * This function is only useful when playing a DVD.
+ *
+ * @note Don't change or free the string returned by this function.
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI const char *emotion_object_title_get (const Evas_Object *obj);
+EAPI const char *emotion_object_ref_file_get (const Evas_Object *obj);
+EAPI int emotion_object_ref_num_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_button_count_get (const Evas_Object *obj);
+EAPI int emotion_object_spu_button_get (const Evas_Object *obj);
+
+/**
+ * @brief Retrieve meta information from this file being played.
+ *
+ * @param obj The object which the meta info will be extracted from.
+ * @param meta The type of meta information that will be extracted.
+ *
+ * This function retrieves information about the file loaded. It can retrieve
+ * the track title, artist name, album name, etc. See @ref Emotion_Meta_Info
+ * for all the possibilities.
+ *
 * The meta info may not be available on all types of files. It will return @c
 * NULL if the file doesn't have meta info, or if this specific field is
 * empty.
+ *
+ * @note Don't change or free the string returned by this function.
+ *
+ * @see Emotion_Meta_Info
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI const char *emotion_object_meta_info_get (const Evas_Object *obj, Emotion_Meta_Info meta);
+
+/**
+ * @brief Set the visualization to be used with this object.
+ *
+ * @param obj The object where the visualization will be set on.
+ * @param visualization The type of visualization to be used.
+ *
+ * The @p visualization specified will be played instead of a video. This is
+ * commonly used to display a visualization for audio only files (musics).
+ *
+ * The available visualizations are @ref Emotion_Vis.
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_get()
+ * @see emotion_object_vis_supported()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI void emotion_object_vis_set (Evas_Object *obj, Emotion_Vis visualization);
+
+/**
+ * @brief Get the type of visualization in use by this emotion object.
+ *
+ * @param obj The emotion object which the visualization is being retrieved
+ * from.
+ * @return The type of visualization in use by this object.
+ *
+ * The type of visualization can be set by emotion_object_vis_set().
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_set()
+ * @see emotion_object_vis_supported()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI Emotion_Vis emotion_object_vis_get (const Evas_Object *obj);
+
+/**
+ * @brief Query whether a type of visualization is supported by this object.
+ *
+ * @param obj The object which the query is being ran on.
+ * @param visualization The type of visualization that is being queried.
+ * @return EINA_TRUE if the visualization is supported, EINA_FALSE otherwise.
+ *
+ * This can be used to check if a visualization is supported. e.g.: one wants to
+ * display a list of available visualizations for a specific object.
+ *
+ * @see Emotion_Vis
+ * @see emotion_object_vis_set()
+ * @see emotion_object_vis_get()
+ *
+ * @ingroup Emotion_Visualization
+ */
+EAPI Eina_Bool emotion_object_vis_supported (const Evas_Object *obj, Emotion_Vis visualization);
+
+/**
 * @brief Raise the priority of an object so it will have privileged access to hardware resources.
 *
 * @param obj The object which the query is being run on.
 * @param priority EINA_TRUE means give me privileged access to the hardware resource.
 *
 * Hardware has a few dedicated pipelines that process video at no cost for the CPU.
 * Especially on SoCs, you mostly have one (on mobile phone SoCs) or two (on Set Top
 * Box SoCs) when Picture in Picture is needed. And most applications have just a few
 * video streams that really deserve high frame rate, high quality output. That's what
 * this call is for.
 *
 * Please note that if Emotion can't acquire a privileged hardware resource, it will
 * fall back to the no-priority path. This works on a first-come, first-served basis.
+ *
+ * @see emotion_object_priority_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI void emotion_object_priority_set(Evas_Object *obj, Eina_Bool priority);
+
+/**
+ * @brief Get the actual priority of an object.
+ *
+ * @param obj The object which the query is being ran on.
+ * @return EINA_TRUE if the object has a priority access to the hardware.
+ *
 * This actually returns the priority status of an object. If it failed to get
 * privileged access to the hardware, it will return EINA_FALSE.
 *
 * @see emotion_object_priority_set()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI Eina_Bool emotion_object_priority_get(const Evas_Object *obj);
+
+/**
+ * @brief Change the state of an object pipeline.
+ *
+ * @param obj The object which the query is being ran on.
+ * @param state The new state for the object.
+ *
+ * Changing the state of a pipeline should help preserve the battery of an embedded device.
+ * But it will only work sanely if the pipeline is not playing at the time you change its
+ * state. Depending on the engine all state may be not implemented.
+ *
+ * @see Emotion_Suspend
+ * @see emotion_object_suspend_get()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI void emotion_object_suspend_set(Evas_Object *obj, Emotion_Suspend state);
+
+/**
+ * @brief Get the current state of the pipeline
+ *
+ * @param obj The object which the query is being ran on.
+ * @return the current state of the pipeline.
+ *
+ * @see Emotion_Suspend
+ * @see emotion_object_suspend_set()
+ *
+ * @ingroup Emotion_Ressource
+ */
+EAPI Emotion_Suspend emotion_object_suspend_get(Evas_Object *obj);
+
+/**
+ * @brief Load the last known position if available
+ *
+ * @param obj The object which the query is being ran on.
+ *
 * By using Xattr, Emotion is able, if the system permits it, to store and retrieve
 * the latest position. It should trigger some smart callback to let the application
 * know when it succeeds or fails. Every operation is fully asynchronous and not
 * linked to the actual engine used to play the video.
+ *
+ * @see emotion_object_last_position_save()
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI void emotion_object_last_position_load(Evas_Object *obj);
+
+/**
 * @brief Save the latest position if possible
+ *
+ * @param obj The object which the query is being ran on.
+ *
 * By using Xattr, Emotion is able, if the system permits it, to store and retrieve
 * the latest position. It should trigger some smart callback to let the application
 * know when it succeeds or fails. Every operation is fully asynchronous and not
 * linked to the actual engine used to play the video.
+ *
+ * @see emotion_object_last_position_load()
+ *
+ * @ingroup Emotion_Info
+ */
+EAPI void emotion_object_last_position_save(Evas_Object *obj);
+
+/**
+ * @brief Do we have a chance to play that file
+ *
+ * @param file A stringshared filename that we want to know if Emotion can play.
+ *
 * This actually just looks at the extension of the file; it doesn't check the
 * mime-type nor whether the file is actually sane. So this is just a hint for your
 * application.
+ *
+ * @see emotion_object_extension_may_play_get()
+ */
+EAPI Eina_Bool emotion_object_extension_may_play_fast_get(const char *file);
+
+/**
+ * @brief Do we have a chance to play that file
+ *
+ * @param file A filename that we want to know if Emotion can play.
+ *
 * This actually just looks at the extension of the file; it doesn't check the
 * mime-type nor whether the file is actually sane. So this is just a hint for your
 * application.
+ *
+ * @see emotion_object_extension_may_play_fast_get()
+ */
+EAPI Eina_Bool emotion_object_extension_may_play_get(const char *file);
+
+/**
+ * @brief Get the actual image object that contains the pixels of the video stream
+ *
+ * @param obj The object which the query is being ran on.
+ *
 * This function is useful when you want to get direct access to the pixels.
+ *
+ * @see emotion_object_image_get()
+ */
+EAPI Evas_Object *emotion_object_image_get(const Evas_Object *obj);
+
+/**
+ * @defgroup Emotion_Webcam API available for accessing webcam
+ * @ingroup Emotion
+ */
+
+typedef struct _Emotion_Webcam Emotion_Webcam; /**< Webcam description */
+
+EAPI extern int EMOTION_WEBCAM_UPDATE; /**< Ecore_Event triggered when a new webcam is plugged in */
+
+/**
+ * @brief Get a list of active and available webcam
+ *
+ * @return the list of available webcam at the time of the call.
+ *
+ * It will return the current live list of webcam. It is updated before
+ * triggering EMOTION_WEBCAM_UPDATE and should never be modified.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const Eina_List *emotion_webcams_get(void);
+
+/**
+ * @brief Get the human understandable name of a Webcam
+ *
+ * @param ew The webcam to get the name from.
+ * @return the actual human readable name.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const char *emotion_webcam_name_get(const Emotion_Webcam *ew);
+
+/**
+ * @brief Get the uri of a Webcam that will be understood by emotion
+ *
+ * @param ew The webcam to get the uri from.
 * @return the actual uri that emotion will later understand.
+ *
+ * @ingroup Emotion_Webcam
+ */
+EAPI const char *emotion_webcam_device_get(const Emotion_Webcam *ew);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/lib/emotion/emotion_main.c b/src/lib/emotion/emotion_main.c
new file mode 100644
index 0000000000..8416f50b1c
--- /dev/null
+++ b/src/lib/emotion/emotion_main.c
@@ -0,0 +1,464 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+# include <stdlib.h>
+# endif
+#endif
+
+#include <stdio.h>
+
#ifdef HAVE_EEZE
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
# include <unistd.h>
# ifdef HAVE_V4L2
#  include <sys/ioctl.h>
#  include <linux/videodev2.h>
# endif
# include <Eeze.h>
#endif
+
+#include <Ecore.h>
+#include <Eet.h>
+
+#include "Emotion.h"
+#include "emotion_private.h"
+
+static Emotion_Version _version = { VMAJ, VMIN, VMIC, VREV };
+static int emotion_pending_objects = 0;
+EAPI Emotion_Version *emotion_version = &_version;
+
+EAPI int EMOTION_WEBCAM_UPDATE = 0;
+
+struct ext_match_s
+{
+ unsigned int length;
+ const char *extension;
+};
+
+#define MATCHING(Ext) \
+ { sizeof (Ext), Ext }
+
+static const struct ext_match_s matchs[] =
+{ /* map extensions to know if it's a emotion playable content for good first-guess tries */
+ MATCHING(".264"),
+ MATCHING(".3g2"),
+ MATCHING(".3gp"),
+ MATCHING(".3gp2"),
+ MATCHING(".3gpp"),
+ MATCHING(".3gpp2"),
+ MATCHING(".3p2"),
+ MATCHING(".asf"),
+ MATCHING(".avi"),
+ MATCHING(".bdm"),
+ MATCHING(".bdmv"),
+ MATCHING(".clpi"),
+ MATCHING(".clp"),
+ MATCHING(".fla"),
+ MATCHING(".flv"),
+ MATCHING(".m1v"),
+ MATCHING(".m2v"),
+ MATCHING(".m2t"),
+ MATCHING(".m4v"),
+ MATCHING(".mkv"),
+ MATCHING(".mov"),
+ MATCHING(".mp2"),
+ MATCHING(".mp2ts"),
+ MATCHING(".mp4"),
+ MATCHING(".mpe"),
+ MATCHING(".mpeg"),
+ MATCHING(".mpg"),
+ MATCHING(".mpl"),
+ MATCHING(".mpls"),
+ MATCHING(".mts"),
+ MATCHING(".mxf"),
+ MATCHING(".nut"),
+ MATCHING(".nuv"),
+ MATCHING(".ogg"),
+ MATCHING(".ogm"),
+ MATCHING(".ogv"),
+ MATCHING(".rm"),
+ MATCHING(".rmj"),
+ MATCHING(".rmm"),
+ MATCHING(".rms"),
+ MATCHING(".rmx"),
+ MATCHING(".rmvb"),
+ MATCHING(".swf"),
+ MATCHING(".ts"),
+ MATCHING(".weba"),
+ MATCHING(".webm"),
+ MATCHING(".wmv")
+};
+
+Eina_Bool
+_emotion_object_extension_can_play_generic_get(const void *data EINA_UNUSED, const char *file)
+{
+ unsigned int length;
+ unsigned int i;
+
+ length = eina_stringshare_strlen(file) + 1;
+ if (length < 5) return EINA_FALSE;
+
+ for (i = 0; i < sizeof (matchs) / sizeof (struct ext_match_s); ++i)
+ {
+ if (matchs[i].length > length) continue;
+
+ if (!strcasecmp(matchs[i].extension,
+ file + length - matchs[i].length))
+ return EINA_TRUE;
+ }
+
+ return EINA_FALSE;
+}
+
+EAPI Eina_Bool
+emotion_object_extension_may_play_fast_get(const char *file)
+{
+ if (!file) return EINA_FALSE;
+ return _emotion_object_extension_can_play_generic_get(NULL, file);
+}
+
+EAPI Eina_Bool
+emotion_object_extension_may_play_get(const char *file)
+{
+ const char *tmp;
+ Eina_Bool result;
+
+ if (!file) return EINA_FALSE;
+ tmp = eina_stringshare_add(file);
+ result = emotion_object_extension_may_play_fast_get(tmp);
+ eina_stringshare_del(tmp);
+
+ return result;
+}
+
typedef struct _Emotion_Webcams Emotion_Webcams;

/* Container serialized in emotion.cfg and reused as the live registry
 * of detected webcams. */
struct _Emotion_Webcams
{
   Eina_List *webcams; /* list of Emotion_Webcam* */
};

struct _Emotion_Webcam
{
   EINA_REFCOUNT; /* one reference is held by the registry list */

   const char *syspath; /* stringshared udev syspath of the device */
   const char *device;  /* stringshared "v4l2://<node>" URI */
   const char *name;    /* human readable name (sysfs "name" attribute) */

   const char *custom;  /* custom pipeline string from emotion.cfg, if any */

   const char *filename; /* alias into 'device' (device + 7, i.e. the raw
                          * /dev node) — never freed on its own */
};

static int _emotion_webcams_count = 0; /* emotion_init()/shutdown() refcount */
static Eet_Data_Descriptor *_webcam_edd;
static Eet_Data_Descriptor *_webcams_edd;

static Emotion_Webcams *_emotion_webcams = NULL;
/* Kept open after reading: presumably the decoded strings point into the
 * eet dictionary while the file is open — see note in emotion_shutdown(). */
static Eet_File *_emotion_webcams_file = NULL;
+
/* Build (and stash in the file-scope globals) the Eet descriptors used
 * to decode the "config" entry of emotion.cfg.  Returns the descriptor
 * for the top-level Emotion_Webcams container. */
static Eet_Data_Descriptor *
_emotion_webcams_data(void)
{
   Eet_Data_Descriptor_Class eddc;

   /* Per-webcam record: only the string fields stored in the file. */
   EET_EINA_FILE_DATA_DESCRIPTOR_CLASS_SET(&eddc, Emotion_Webcam);
   _webcam_edd = eet_data_descriptor_file_new(&eddc);
   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "device", device, EET_T_STRING);
   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "name", name, EET_T_STRING);
   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "custom", custom, EET_T_STRING);
   EET_DATA_DESCRIPTOR_ADD_BASIC(_webcam_edd, Emotion_Webcam, "filename", filename, EET_T_STRING);

   /* Container: a list of the records above. */
   EET_EINA_FILE_DATA_DESCRIPTOR_CLASS_SET(&eddc, Emotion_Webcams);
   _webcams_edd = eet_data_descriptor_file_new(&eddc);
   EET_DATA_DESCRIPTOR_ADD_LIST(_webcams_edd, Emotion_Webcams, "webcams", webcams, _webcam_edd);

   return _webcams_edd;
}
+
/* Free a webcam record.  Strings are only released for dynamically
 * detected webcams: records with 'custom' set came from eet_data_read()
 * and their strings presumably belong to the still-open eet file's
 * dictionary — verify against _emotion_webcams_data()/emotion_init().
 * 'filename' is an alias into 'device' and is never freed separately. */
static void
emotion_webcam_destroy(Emotion_Webcam *ew)
{
   if (!ew->custom)
     {
        eina_stringshare_del(ew->syspath);
        eina_stringshare_del(ew->device);
        eina_stringshare_del(ew->name);
     }
   free(ew);
}
+
+#ifdef HAVE_EEZE
+static Eeze_Udev_Watch *eeze_watcher = NULL;
+
+static void
+_emotion_check_device(Emotion_Webcam *ew)
+{
+#ifdef HAVE_V4L2
+ Emotion_Webcam *check;
+ Eina_List *l;
+ struct v4l2_capability caps;
+ int fd;
+#endif
+
+ if (!ew) return ;
+#ifdef HAVE_V4L2
+ if (!ew->device) goto on_error;
+
+ fd = open(ew->filename, O_RDONLY);
+ if (fd < 0) goto on_error;
+
+ if (ioctl(fd, VIDIOC_QUERYCAP, &caps) == -1) goto on_error;
+
+ /* Likely not a webcam */
+ if (!caps.capabilities & V4L2_CAP_VIDEO_CAPTURE) goto on_error;
+ if (caps.capabilities & V4L2_CAP_TUNER
+ || caps.capabilities & V4L2_CAP_RADIO
+ || caps.capabilities & V4L2_CAP_MODULATOR)
+ goto on_error;
+
+ EINA_LIST_FOREACH(_emotion_webcams->webcams, l, check)
+ if (check->device == ew->device)
+ goto on_error;
+
+ _emotion_webcams->webcams = eina_list_append(_emotion_webcams->webcams, ew);
+
+ EINA_REFCOUNT_INIT(ew);
+
+ return ;
+
+ on_error:
+#endif
+ EINA_LOG_ERR("'%s' is not a webcam ['%s']", ew->name, strerror(errno));
+ eina_stringshare_del(ew->syspath);
+ eina_stringshare_del(ew->device);
+ eina_stringshare_del(ew->name);
+ free(ew);
+}
+
+static Emotion_Webcam *
+_emotion_webcam_new(const char *syspath)
+{
+ Emotion_Webcam *test;
+ const char *device;
+ char *local;
+
+ test = malloc(sizeof (Emotion_Webcam));
+ if (!test) return NULL;
+
+ test->custom = NULL;
+ test->syspath = eina_stringshare_ref(syspath);
+ test->name = eeze_udev_syspath_get_sysattr(syspath, "name");
+
+ device = eeze_udev_syspath_get_property(syspath, "DEVNAME");
+ local = alloca(eina_stringshare_strlen(device) + 8);
+ snprintf(local, eina_stringshare_strlen(device) + 8, "v4l2://%s", device);
+ test->device = eina_stringshare_add(local);
+ eina_stringshare_del(device);
+ test->filename = test->device + 7;
+
+ return test;
+}
+
+static void
+_emotion_enumerate_all_webcams(void)
+{
+ Eina_List *devices;
+ const char *syspath;
+
+ devices = eeze_udev_find_by_type(EEZE_UDEV_TYPE_V4L, NULL);
+
+ EINA_LIST_FREE(devices, syspath)
+ {
+ Emotion_Webcam *test;
+
+ test = _emotion_webcam_new(syspath);
+ if (test) _emotion_check_device(test);
+
+ eina_stringshare_del(syspath);
+ }
+}
+
/* Udev watch callback: keeps the webcam registry in sync with hotplug
 * add/remove events, then broadcasts EMOTION_WEBCAM_UPDATE. */
static void
_emotion_eeze_events(const char *syspath,
                     Eeze_Udev_Event ev,
                     void *data EINA_UNUSED,
                     Eeze_Udev_Watch *watcher EINA_UNUSED)
{
   if (ev == EEZE_UDEV_EVENT_REMOVE)
     {
        Emotion_Webcam *check;
        Eina_List *l;

        /* NOTE(review): comparing syspath by pointer assumes eeze hands
         * us a stringshared path equal to the one stored at detection
         * time — confirm against the Eeze watch API. */
        EINA_LIST_FOREACH(_emotion_webcams->webcams, l, check)
          if (check->syspath == syspath)
            {
               _emotion_webcams->webcams = eina_list_remove_list(_emotion_webcams->webcams, l);
               /* Drop the registry's reference; frees the record once no
                * one else holds it. */
               EINA_REFCOUNT_UNREF(check)
               emotion_webcam_destroy(check);
               break ;
            }
     }
   else if (ev == EEZE_UDEV_EVENT_ADD)
     {
        Emotion_Webcam *test;

        /* _emotion_check_device() either registers 'test' or frees it. */
        test = _emotion_webcam_new(syspath);
        if (test) _emotion_check_device(test);
     }
   /* Notify listeners that the webcam list changed (both paths). */
   ecore_event_add(EMOTION_WEBCAM_UPDATE, NULL, NULL, NULL);
}
+
+#endif
+
+EAPI Eina_Bool
+emotion_init(void)
+{
+ char buffer[4096];
+
+ if (_emotion_webcams_count++) return EINA_TRUE;
+
+ ecore_init();
+
+ snprintf(buffer, 4096, "%s/emotion.cfg", PACKAGE_DATA_DIR);
+ _emotion_webcams_file = eet_open(buffer, EET_FILE_MODE_READ);
+ if (_emotion_webcams_file)
+ {
+ Eet_Data_Descriptor *edd;
+
+ edd = _emotion_webcams_data();
+
+ _emotion_webcams = eet_data_read(_emotion_webcams_file, edd, "config");
+
+ eet_data_descriptor_free(_webcams_edd); _webcams_edd = NULL;
+ eet_data_descriptor_free(_webcam_edd); _webcam_edd = NULL;
+ }
+
+ if (!_emotion_webcams)
+ {
+ _emotion_webcams = calloc(1, sizeof (Emotion_Webcams));
+ if (!_emotion_webcams) return EINA_FALSE;
+ }
+
+#ifdef HAVE_EEZE
+ EMOTION_WEBCAM_UPDATE = ecore_event_type_new();
+
+ eeze_init();
+
+ _emotion_enumerate_all_webcams();
+
+ eeze_watcher = eeze_udev_watch_add(EEZE_UDEV_TYPE_V4L,
+ (EEZE_UDEV_EVENT_ADD | EEZE_UDEV_EVENT_REMOVE),
+ _emotion_eeze_events, NULL);
+#endif
+
+ return EINA_TRUE;
+}
+
/* Shut the library down (refcounted: only the last call tears down).
 * Destroys the webcam registry, closes the config file, stops the eeze
 * watcher and gives running pipelines a short grace period before
 * releasing ecore. */
EAPI Eina_Bool
emotion_shutdown(void)
{
   Emotion_Webcam *ew;
   double start;

   if (_emotion_webcams_count <= 0)
     {
        EINA_LOG_ERR("Init count not greater than 0 in shutdown.");
        return EINA_FALSE;
     }
   if (--_emotion_webcams_count) return EINA_TRUE;

   /* Drop the registry's reference on every webcam. */
   EINA_LIST_FREE(_emotion_webcams->webcams, ew)
     {
        /* There is currently no way to refcount from the outside, this help, but could lead to some issue */
        EINA_REFCOUNT_UNREF(ew)
        emotion_webcam_destroy(ew);
     }
   free(_emotion_webcams);
   _emotion_webcams = NULL;

   /* Closed only after the records are gone: strings decoded from the
    * file live in its eet dictionary. */
   if (_emotion_webcams_file)
     {
        /* As long as there is no one reference any pointer, you are safe */
        eet_close(_emotion_webcams_file);
        _emotion_webcams_file = NULL;
     }

#ifdef HAVE_EEZE
   eeze_udev_watch_del(eeze_watcher);
   eeze_watcher = NULL;

   eeze_shutdown();
#endif

   /* Give engine pipelines up to half a second to wind down before
    * tearing ecore down underneath them. */
   start = ecore_time_get();
   while (emotion_pending_objects && ecore_time_get() - start < 0.5)
     ecore_main_loop_iterate();

   if (emotion_pending_objects)
     {
        EINA_LOG_ERR("There is still %i Emotion pipeline running", emotion_pending_objects);
     }

   ecore_shutdown();

   return EINA_TRUE;
}
+
+EAPI const Eina_List *
+emotion_webcams_get(void)
+{
+ return _emotion_webcams->webcams;
+}
+
+EAPI const char *
+emotion_webcam_name_get(const Emotion_Webcam *ew)
+{
+ if (!ew) return NULL;
+
+ return ew->name;
+}
+
+EAPI const char *
+emotion_webcam_device_get(const Emotion_Webcam *ew)
+{
+ if (!ew) return NULL;
+
+ return ew->device;
+}
+
+EAPI const char *
+emotion_webcam_custom_get(const char *device)
+{
+ const Emotion_Webcam *ew;
+ const Eina_List *l;
+
+ if (_emotion_webcams)
+ {
+ EINA_LIST_FOREACH(_emotion_webcams->webcams, l, ew)
+ if (ew->device && strcmp(device, ew->device) == 0)
+ return ew->custom;
+ }
+
+ return NULL;
+}
+
/* Count one more live engine pipeline; emotion_shutdown() spins the
 * main loop briefly waiting for this counter to reach zero.
 * NOTE(review): plain int increment — presumably only called from the
 * main loop thread; confirm before using from workers. */
EAPI void
_emotion_pending_object_ref(void)
{
   emotion_pending_objects++;
}
+
/* Count one engine pipeline as finished (counterpart of
 * _emotion_pending_object_ref()). */
EAPI void
_emotion_pending_object_unref(void)
{
   emotion_pending_objects--;
}
diff --git a/src/lib/emotion/emotion_private.h b/src/lib/emotion/emotion_private.h
new file mode 100644
index 0000000000..73a1b7ddf0
--- /dev/null
+++ b/src/lib/emotion/emotion_private.h
@@ -0,0 +1,137 @@
+#ifndef EMOTION_PRIVATE_H
+#define EMOTION_PRIVATE_H
+
+#define META_TRACK_TITLE 1
+#define META_TRACK_ARTIST 2
+#define META_TRACK_GENRE 3
+#define META_TRACK_COMMENT 4
+#define META_TRACK_ALBUM 5
+#define META_TRACK_YEAR 6
+#define META_TRACK_DISCID 7
+#define META_TRACK_COUNT 8
+
+typedef enum _Emotion_Format Emotion_Format;
+typedef struct _Emotion_Video_Module Emotion_Video_Module;
+typedef struct _Emotion_Module_Options Emotion_Module_Options;
+typedef struct _Eina_Emotion_Plugins Eina_Emotion_Plugins;
+
+typedef Eina_Bool (*Emotion_Module_Open)(Evas_Object *, const Emotion_Video_Module **, void **, Emotion_Module_Options *);
+typedef void (*Emotion_Module_Close)(Emotion_Video_Module *module, void *);
+
/* Pixel formats a backend decoder can hand to the smart object. */
enum _Emotion_Format
{
   EMOTION_FORMAT_NONE,
   EMOTION_FORMAT_I420,
   EMOTION_FORMAT_YV12,
   EMOTION_FORMAT_YUY2, /* unused for now since evas does not support yuy2 format */
   EMOTION_FORMAT_BGRA
};
+
/* Options handed to a module at open time. */
struct _Emotion_Module_Options
{
   const char *player;       /* presumably selects the player/engine binary — confirm with module implementations */
   Eina_Bool no_video : 1;   /* skip decoding the video channel */
   Eina_Bool no_audio : 1;   /* skip decoding the audio channel */
};

/* Entry points a backend module registers via _emotion_module_register(). */
struct _Eina_Emotion_Plugins
{
   Emotion_Module_Open open;
   Emotion_Module_Close close;
};
+
/* Virtual table a video backend fills in; the smart object drives the
 * engine exclusively through these hooks.  'ef' is the engine-private
 * handle created at init/file_open time. */
struct _Emotion_Video_Module
{
   /* lifecycle */
   unsigned char (*init) (Evas_Object *obj, void **video, Emotion_Module_Options *opt);
   int (*shutdown) (void *video);
   unsigned char (*file_open) (const char *file, Evas_Object *obj, void *video);
   void (*file_close) (void *ef);
   /* playback control and timing */
   void (*play) (void *ef, double pos);
   void (*stop) (void *ef);
   void (*size_get) (void *ef, int *w, int *h);
   void (*pos_set) (void *ef, double pos);
   double (*len_get) (void *ef);
   double (*buffer_size_get) (void *ef);
   int (*fps_num_get) (void *ef);
   int (*fps_den_get) (void *ef);
   double (*fps_get) (void *ef);
   double (*pos_get) (void *ef);
   /* audio visualization */
   void (*vis_set) (void *ef, Emotion_Vis vis);
   Emotion_Vis (*vis_get) (void *ef);
   Eina_Bool (*vis_supported) (void *ef, Emotion_Vis vis);
   /* stream properties */
   double (*ratio_get) (void *ef);
   int (*video_handled) (void *ef);
   int (*audio_handled) (void *ef);
   int (*seekable) (void *ef);
   /* frame data access */
   void (*frame_done) (void *ef);
   Emotion_Format (*format_get) (void *ef);
   void (*video_data_size_get) (void *ef, int *w, int *h);
   int (*yuv_rows_get) (void *ef, int w, int h, unsigned char **yrows, unsigned char **urows, unsigned char **vrows);
   int (*bgra_data_get) (void *ef, unsigned char **bgra_data);
   /* input event injection (e.g. DVD menus) */
   void (*event_feed) (void *ef, int event);
   void (*event_mouse_button_feed) (void *ef, int button, int x, int y);
   void (*event_mouse_move_feed) (void *ef, int x, int y);
   /* video channel selection and subtitles */
   int (*video_channel_count) (void *ef);
   void (*video_channel_set) (void *ef, int channel);
   int (*video_channel_get) (void *ef);
   void (*video_subtitle_file_set) (void *ef, const char *filepath);
   const char * (*video_subtitle_file_get) (void *ef);
   const char * (*video_channel_name_get) (void *ef, int channel);
   void (*video_channel_mute_set) (void *ef, int mute);
   int (*video_channel_mute_get) (void *ef);
   /* audio channel selection and volume */
   int (*audio_channel_count) (void *ef);
   void (*audio_channel_set) (void *ef, int channel);
   int (*audio_channel_get) (void *ef);
   const char * (*audio_channel_name_get) (void *ef, int channel);
   void (*audio_channel_mute_set) (void *ef, int mute);
   int (*audio_channel_mute_get) (void *ef);
   void (*audio_channel_volume_set) (void *ef, double vol);
   double (*audio_channel_volume_get) (void *ef);
   /* subpicture (SPU) channels */
   int (*spu_channel_count) (void *ef);
   void (*spu_channel_set) (void *ef, int channel);
   int (*spu_channel_get) (void *ef);
   const char * (*spu_channel_name_get) (void *ef, int channel);
   void (*spu_channel_mute_set) (void *ef, int mute);
   int (*spu_channel_mute_get) (void *ef);
   /* chapters, speed and misc */
   int (*chapter_count) (void *ef);
   void (*chapter_set) (void *ef, int chapter);
   int (*chapter_get) (void *ef);
   const char * (*chapter_name_get) (void *ef, int chapter);
   void (*speed_set) (void *ef, double speed);
   double (*speed_get) (void *ef);
   int (*eject) (void *ef);
   const char * (*meta_get) (void *ef, int meta);
   void (*priority_set) (void *ef, Eina_Bool priority);
   Eina_Bool (*priority_get) (void *ef);

   Eina_Emotion_Plugins *plugin; /* back-pointer to the registered module */
};
+
+EAPI void *_emotion_video_get(const Evas_Object *obj);
+EAPI void _emotion_frame_new(Evas_Object *obj);
+EAPI void _emotion_video_pos_update(Evas_Object *obj, double pos, double len);
+EAPI void _emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio);
+EAPI void _emotion_frame_refill(Evas_Object *obj, double w, double h);
+EAPI void _emotion_decode_stop(Evas_Object *obj);
+EAPI void _emotion_open_done(Evas_Object *obj);
+EAPI void _emotion_playback_started(Evas_Object *obj);
+EAPI void _emotion_playback_finished(Evas_Object *obj);
+EAPI void _emotion_audio_level_change(Evas_Object *obj);
+EAPI void _emotion_channels_change(Evas_Object *obj);
+EAPI void _emotion_title_set(Evas_Object *obj, char *title);
+EAPI void _emotion_progress_set(Evas_Object *obj, char *info, double stat);
+EAPI void _emotion_file_ref_set(Evas_Object *obj, const char *file, int num);
+EAPI void _emotion_spu_button_num_set(Evas_Object *obj, int num);
+EAPI void _emotion_spu_button_set(Evas_Object *obj, int button);
+EAPI void _emotion_seek_done(Evas_Object *obj);
+EAPI void _emotion_image_reset(Evas_Object *obj);
+
+EAPI Eina_Bool _emotion_module_register(const char *name, Emotion_Module_Open open, Emotion_Module_Close close);
+EAPI Eina_Bool _emotion_module_unregister(const char *name);
+
+EAPI const char *emotion_webcam_custom_get(const char *device);
+
+EAPI void _emotion_pending_object_ref(void);
+EAPI void _emotion_pending_object_unref(void);
+
+#endif
diff --git a/src/lib/emotion/emotion_smart.c b/src/lib/emotion/emotion_smart.c
new file mode 100644
index 0000000000..709414459c
--- /dev/null
+++ b/src/lib/emotion/emotion_smart.c
@@ -0,0 +1,2133 @@
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <Evas.h>
+#include <Ecore.h>
+
+#ifdef HAVE_EIO
+# include <math.h>
+# include <Eio.h>
+#endif
+
+#include "Emotion.h"
+#include "emotion_private.h"
+
+#ifdef _WIN32
+# define FMT_UCHAR "%c"
+#else
+# define FMT_UCHAR "%hhu"
+#endif
+
/* Fetch the Smart_Data of smart object 'o' into 'smart' and bail out of the
 * enclosing void function unless 'o' is non-NULL, has smart data attached,
 * and its evas type string matches 'type' (E_OBJ_NAME). */
#define E_SMART_OBJ_GET(smart, o, type) \
     { \
	char *_e_smart_str; \
	\
	if (!o) return; \
	smart = evas_object_smart_data_get(o); \
	if (!smart) return; \
	_e_smart_str = (char *)evas_object_type_get(o); \
	if (!_e_smart_str) return; \
	if (strcmp(_e_smart_str, type)) return; \
     }
+
/* Same as E_SMART_OBJ_GET, but for functions with a return value: on any
 * validation failure the enclosing function returns 'ret'. */
#define E_SMART_OBJ_GET_RETURN(smart, o, type, ret) \
   { \
      char *_e_smart_str; \
      \
      if (!o) return ret; \
      smart = evas_object_smart_data_get(o); \
      if (!smart) return ret; \
      _e_smart_str = (char *)evas_object_type_get(o); \
      if (!_e_smart_str) return ret; \
      if (strcmp(_e_smart_str, type)) return ret; \
   }
+
/* Logging shortcuts bound to this file's eina log domain (_log_domain). */
#define DBG(...) EINA_LOG_DOM_DBG(_log_domain, __VA_ARGS__)
#define INF(...) EINA_LOG_DOM_INFO(_log_domain, __VA_ARGS__)
#define WRN(...) EINA_LOG_DOM_WARN(_log_domain, __VA_ARGS__)
#define ERR(...) EINA_LOG_DOM_ERR(_log_domain, __VA_ARGS__)
#define CRITICAL(...) EINA_LOG_DOM_CRIT(_log_domain, __VA_ARGS__)

/* Evas smart object type name, checked by the E_SMART_OBJ_GET* macros. */
#define E_OBJ_NAME "emotion_object"
+
typedef struct _Smart_Data Smart_Data;

/* Per-object private state of an emotion smart object. */
struct _Smart_Data
{
   EINA_REFCOUNT;                   /* kept alive while async (xattr) jobs run */
   Emotion_Video_Module *module;    /* active backend vtable */
   void          *video_data;       /* backend-private handle */

   char          *module_name;      /* name of the loaded backend */

   const char    *file;             /* current media file (stringshare) */
   Evas_Object   *obj;              /* image object showing the video frames */
   Evas_Object   *bg;               /* background rectangle behind the video */

   Ecore_Job     *job;              /* pending deferred position-set job */

   char *title;                     /* last title reported by the backend */

#ifdef HAVE_EIO
   Eio_File *load_xattr;            /* in-flight xattr position load, if any */
   Eio_File *save_xattr;            /* in-flight xattr position save, if any */
#endif

   struct {
      char   *info;                 /* progress description text */
      double  stat;                 /* progress status value */
   } progress;
   struct {
      char *file;                   /* file referenced by the stream */
      int   num;                    /* reference number */
   } ref;
   struct {
      int button_num;               /* number of SPU buttons available */
      int button;                   /* currently selected SPU button */
   } spu;
   struct {
      int l; /* left */
      int r; /* right */
      int t; /* top */
      int b; /* bottom */
      Evas_Object *clipper;         /* crop clipper rectangle, NULL when uncropped */
   } crop;

   struct {
      int w, h;                     /* native video size in pixels */
   } video;
   struct {
      double w, h;                  /* fill spec; negative means "use scale" */
   } fill;

   double         ratio;            /* video aspect ratio */
   double         pos;              /* current playback position (seconds) */
   double         remember_jump;    /* seek requested before open finished */
   double         seek_pos;         /* target of a pending seek */
   double         len;              /* media length (seconds) */

   Emotion_Module_Options module_options;

   Emotion_Suspend state;
   Emotion_Aspect aspect;

   Ecore_Animator *anim;

   Eina_Bool open : 1;              /* backend finished opening the file */
   Eina_Bool play : 1;              /* currently playing */
   Eina_Bool remember_play : 1;     /* play requested before open finished */
   Eina_Bool seek : 1;              /* a seek is queued */
   Eina_Bool seeking : 1;           /* a seek is in progress */
};
+
/* Event/job callbacks implemented further down in this file. */
static void _mouse_move(void *data, Evas *ev, Evas_Object *obj, void *event_info);
static void _mouse_down(void *data, Evas *ev, Evas_Object *obj, void *event_info);
static void _pos_set_job(void *data);
static void _pixels_get(void *data, Evas_Object *obj);

/* Evas smart class hooks. */
static void _smart_init(void);
static void _smart_add(Evas_Object * obj);
static void _smart_del(Evas_Object * obj);
static void _smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y);
static void _smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h);
static void _smart_show(Evas_Object * obj);
static void _smart_hide(Evas_Object * obj);
static void _smart_color_set(Evas_Object * obj, int r, int g, int b, int a);
static void _smart_clip_set(Evas_Object * obj, Evas_Object * clip);
static void _smart_clip_unset(Evas_Object * obj);

/**********************************/
/* Globals for the E Video Object */
/**********************************/
static Evas_Smart  *smart = NULL;       /* lazily created smart class */
static Eina_Hash *_backends = NULL;     /* backend name -> Eina_Emotion_Plugins */
static Eina_Array *_modules = NULL;     /* loaded backend modules */
static int _log_domain = -1;            /* eina log domain for DBG/ERR macros */

/* Fallback order tried by _emotion_module_open when no engine is forced. */
static const char *_backend_priority[] = {
  "gstreamer",
  "xine",
  "generic"
};
+
+static const char SIG_FRAME_DECODE[] = "frame_decode";
+static const char SIG_POSITION_UPDATE[] = "position_update";
+static const char SIG_LENGTH_CHANGE[] = "length_change";
+static const char SIG_FRAME_RESIZE[] = "frame_resize";
+static const char SIG_DECODE_STOP[] = "decode_stop";
+static const char SIG_PLAYBACK_STARTED[] = "playback_started";
+static const char SIG_PLAYBACK_FINISHED[] = "playback_finished";
+static const char SIG_AUDIO_LEVEL_CHANGE[] = "audio_level_change";
+static const char SIG_CHANNELS_CHANGE[] = "channels_change";
+static const char SIG_TITLE_CHANGE[] = "title_change";
+static const char SIG_PROGRESS_CHANGE[] = "progress_change";
+static const char SIG_REF_CHANGE[] = "ref_change";
+static const char SIG_BUTTON_NUM_CHANGE[] = "button_num_change";
+static const char SIG_BUTTON_CHANGE[] = "button_change";
+static const char SIG_OPEN_DONE[] = "open_done";
+static const char SIG_POSITION_SAVE_SUCCEED[] = "position_save,succeed";
+static const char SIG_POSITION_SAVE_FAILED[] = "position_save,failed";
+static const char SIG_POSITION_LOAD_SUCCEED[] = "position_load,succeed";
+static const char SIG_POSITION_LOAD_FAILED[] = "position_load,failed";
+
+static const Evas_Smart_Cb_Description _smart_callbacks[] = {
+ {SIG_FRAME_DECODE, ""},
+ {SIG_POSITION_UPDATE, ""},
+ {SIG_LENGTH_CHANGE, ""},
+ {SIG_FRAME_RESIZE, ""},
+ {SIG_DECODE_STOP, ""},
+ {SIG_PLAYBACK_STARTED, ""},
+ {SIG_PLAYBACK_FINISHED, ""},
+ {SIG_AUDIO_LEVEL_CHANGE, ""},
+ {SIG_CHANNELS_CHANGE, ""},
+ {SIG_TITLE_CHANGE, ""},
+ {SIG_PROGRESS_CHANGE, ""},
+ {SIG_REF_CHANGE, ""},
+ {SIG_BUTTON_NUM_CHANGE, ""},
+ {SIG_BUTTON_CHANGE, ""},
+ {SIG_OPEN_DONE, ""},
+ {NULL, NULL}
+};
+
+static void
+_emotion_image_data_zero(Evas_Object *img)
+{
+ void *data;
+
+ data = evas_object_image_data_get(img, 1);
+ if (data)
+ {
+ int w, h, sz = 0;
+ Evas_Colorspace cs;
+
+ evas_object_image_size_get(img, &w, &h);
+ cs = evas_object_image_colorspace_get(img);
+ if (cs == EVAS_COLORSPACE_ARGB8888)
+ sz = w * h * 4;
+ if ((cs == EVAS_COLORSPACE_YCBCR422P601_PL) ||
+ (cs == EVAS_COLORSPACE_YCBCR422P709_PL))
+ sz = h * 2 * sizeof(unsigned char *);
+ if (sz != 0) memset(data, 0, sz);
+ }
+ evas_object_image_data_set(img, data);
+}
+
+static void
+_emotion_module_close(Emotion_Video_Module *mod, void *video)
+{
+ if (!mod) return;
+ if (mod->plugin->close && video)
+ mod->plugin->close(mod, video);
+ /* FIXME: we can't go dlclosing here as a thread still may be running from
+ * the module - this in theory will leak- but it shouldn't be too bad and
+ * mean that once a module is dlopened() it can't be closed - its refcount
+ * will just keep going up
+ */
+}
+
/* Final teardown of a Smart_Data once its refcount reaches zero (see the
 * EINA_REFCOUNT_UNREF call sites): close the backend, delete the child
 * evas objects and any pending job/animator, free owned strings, then
 * release the ecore reference.
 * NOTE(review): ecore_shutdown() here presumably balances an ecore_init()
 * done at object setup — confirm against _smart_add. */
static void
_smart_data_free(Smart_Data *sd)
{
   if (sd->video_data) sd->module->file_close(sd->video_data);
   _emotion_module_close(sd->module, sd->video_data);
   evas_object_del(sd->obj);
   evas_object_del(sd->crop.clipper);
   evas_object_del(sd->bg);
   eina_stringshare_del(sd->file);
   free(sd->module_name);
   if (sd->job) ecore_job_del(sd->job);
   if (sd->anim) ecore_animator_del(sd->anim);
   free(sd->progress.info);
   free(sd->ref.file);
   free(sd);

   ecore_shutdown();
}
+
+EAPI Eina_Bool
+_emotion_module_register(const char *name, Emotion_Module_Open mod_open, Emotion_Module_Close mod_close)
+{
+ Eina_Emotion_Plugins *plugin;
+
+ plugin = malloc(sizeof (Eina_Emotion_Plugins));
+ if (!plugin) return EINA_FALSE;
+
+ plugin->open = mod_open;
+ plugin->close = mod_close;
+
+ return eina_hash_add(_backends, name, plugin);
+}
+
/* Remove the backend named @p name from the registry.
 * NOTE(review): the Eina_Emotion_Plugins struct allocated in
 * _emotion_module_register is only freed here if _backends was created
 * with a data-free callback — confirm against the hash creation site. */
EAPI Eina_Bool
_emotion_module_unregister(const char *name)
{
   return eina_hash_del(_backends, name, NULL);
}
+
+static const char *
+_emotion_module_open(const char *name, Evas_Object *obj, Emotion_Video_Module **mod, void **video)
+{
+ Eina_Emotion_Plugins *plugin;
+ Smart_Data *sd;
+ unsigned int i = 0;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!_backends)
+ {
+ ERR("No backend loaded");
+ return NULL;
+ }
+
+ if (!name && getenv("EMOTION_ENGINE"))
+ name = getenv("EMOTION_ENGINE");
+
+ /* FIXME: Always look for a working backend. */
+ retry:
+ if (!name || i > 0)
+ name = _backend_priority[i++];
+
+ plugin = eina_hash_find(_backends, name);
+ if (!plugin)
+ {
+ if (i != 0 && i < (sizeof (_backend_priority) / sizeof (char*)))
+ goto retry;
+
+ ERR("No backend loaded");
+ return EINA_FALSE;
+ }
+
+ if (plugin->open(obj, (const Emotion_Video_Module **) mod, video, &(sd->module_options)))
+ {
+ if (*mod)
+ {
+ (*mod)->plugin = plugin;
+ return name;
+ }
+ }
+
+ if (i != 0 && i < (sizeof (_backend_priority) / sizeof (char*)))
+ goto retry;
+
+ ERR("Unable to load module: %s", name);
+
+ return NULL;
+}
+
/* Reposition and rescale the video image and its crop clipper so the
 * cropped region (crop.l/r/t/b, negative = borders) fills the w x h area
 * at (x, y).  vid_w/vid_h are the native video dimensions; when they are
 * zero everything is collapsed to an empty rectangle. */
static void
_clipper_position_size_update(Evas_Object *obj, int x, int y, int w, int h, int vid_w, int vid_h)
{
   Smart_Data *sd;
   double scale_w, scale_h;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);

   if (vid_w == 0 || vid_h == 0)
     {
       evas_object_image_fill_set(sd->obj, 0, 0, 0, 0);
       evas_object_move(sd->obj, x, y);
       evas_object_resize(sd->obj, 0, 0);
       evas_object_move(sd->crop.clipper, x, y);
       evas_object_resize(sd->crop.clipper, 0, 0);
     }
   else
     {
       evas_object_move(sd->crop.clipper, x, y);
       /* scale from cropped source size to the target area */
       scale_w = (double)w / (double)(vid_w - sd->crop.l - sd->crop.r);
       scale_h = (double)h / (double)(vid_h - sd->crop.t - sd->crop.b);

       /* negative fill means "derive fill from the computed scale" */
       if (sd->fill.w < 0 && sd->fill.h < 0)
         evas_object_image_fill_set(sd->obj, 0, 0, vid_w * scale_w, vid_h * scale_h);
       else
         evas_object_image_fill_set(sd->obj, 0, 0, sd->fill.w * w, sd->fill.h * h);
       evas_object_resize(sd->obj, vid_w * scale_w, vid_h * scale_h);
       /* shift the image so the cropped-off top/left lies outside the clipper */
       evas_object_move(sd->obj, x - sd->crop.l * scale_w, y - sd->crop.t * scale_h);
       evas_object_resize(sd->crop.clipper, w, h);
     }
}
+
+/*******************************/
+/* Externally accessible calls */
+/*******************************/
+
+
+
+EAPI Evas_Object *
+emotion_object_add(Evas *evas)
+{
+ _smart_init();
+ return evas_object_smart_add(evas, smart);
+}
+
+EAPI Evas_Object *
+emotion_object_image_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return NULL;
+ return sd->obj;
+}
+
+EAPI void
+emotion_object_module_option_set(Evas_Object *obj, const char *opt, const char *val)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if ((!opt) || (!val)) return;
+
+ if (!strcmp(opt, "player"))
+ eina_stringshare_replace(&sd->module_options.player, val);
+}
+
+EAPI Eina_Bool
+emotion_object_init(Evas_Object *obj, const char *module_filename)
+{
+ Smart_Data *sd;
+ const char *file;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+
+ if ((sd->module_name) && module_filename && (!strcmp(sd->module_name, module_filename)))
+ return EINA_TRUE;
+ free(sd->module_name);
+ sd->module_name = NULL;
+
+ file = sd->file;
+ sd->file = NULL;
+
+ free(sd->title);
+ sd->title = NULL;
+ free(sd->progress.info);
+ sd->progress.info = NULL;
+ sd->progress.stat = 0.0;
+ free(sd->ref.file);
+ sd->ref.file = NULL;
+ sd->ref.num = 0;
+ sd->spu.button_num = 0;
+ sd->spu.button = -1;
+ sd->ratio = 1.0;
+ sd->pos = 0;
+ sd->remember_jump = 0;
+ sd->seek_pos = 0;
+ sd->len = 0;
+ sd->remember_play = 0;
+
+ if (sd->anim) ecore_animator_del(sd->anim);
+ sd->anim = NULL;
+
+ _emotion_module_close(sd->module, sd->video_data);
+ sd->module = NULL;
+ sd->video_data = NULL;
+
+ module_filename = _emotion_module_open(module_filename, obj, &sd->module, &sd->video_data);
+ if (!module_filename)
+ return EINA_FALSE;
+
+ sd->module_name = strdup(module_filename);
+
+ if (file)
+ {
+ emotion_object_file_set(obj, file);
+ eina_stringshare_del(file);
+ }
+
+ return EINA_TRUE;
+}
+
/* Set (or clear, with NULL/"" ) the media file of @p obj.
 * Returns EINA_FALSE when no module is loaded, when the same file is
 * already set, or when the backend fails to open the file. */
EAPI Eina_Bool
emotion_object_file_set(Evas_Object *obj, const char *file)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EINA_FALSE);

   DBG("file=%s", file);
   if (!sd->module) return EINA_FALSE;

   sd->video.w = 0;
   sd->video.h = 0;
   /* pointer compare first: sd->file is a stringshare, so identical
    * strings usually share the same pointer */
   if ((file) && (sd->file) &&
       ((file == sd->file) || (!strcmp(file, sd->file)))) return EINA_FALSE;
   if ((file) && (file[0] != 0))
     {
        eina_stringshare_replace(&sd->file, file);
        sd->module->file_close(sd->video_data);
        evas_object_image_data_set(sd->obj, NULL);
        evas_object_image_size_set(sd->obj, 1, 1);
        _emotion_image_data_zero(sd->obj);
        sd->open = 0;
        if (!sd->module->file_open(sd->file, obj, sd->video_data))
          return EINA_FALSE;
        sd->pos = 0.0;
        if (sd->play) sd->module->play(sd->video_data, 0.0);
     }
   else
     {
        /* clearing: close whatever was open and blank the image */
        if (sd->video_data && sd->module)
          {
             sd->module->file_close(sd->video_data);
             evas_object_image_data_set(sd->obj, NULL);
             evas_object_image_size_set(sd->obj, 1, 1);
             _emotion_image_data_zero(sd->obj);
          }
        eina_stringshare_replace(&sd->file, NULL);
     }

   if (sd->anim) ecore_animator_del(sd->anim);
   sd->anim = NULL;

#ifdef HAVE_EIO
   /* Only cancel the load_xattr or we will loose ref to time_seek stringshare */
   if (sd->load_xattr) eio_file_cancel(sd->load_xattr);
   sd->load_xattr = NULL;
   if (sd->save_xattr) eio_file_cancel(sd->save_xattr);
   sd->save_xattr = NULL;
#endif

   return EINA_TRUE;
}
+
/* Return the currently set media file (stringshare owned by the object),
 * or NULL if none is set or @p obj is not an emotion object. */
EAPI const char *
emotion_object_file_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   return sd->file;
}
+
/* Create or destroy the crop clipper as needed for the current crop
 * borders, preserving any external clip the caller had set, then refresh
 * the clipper geometry via _clipper_position_size_update(). */
static void
_emotion_aspect_borders_apply(Evas_Object *obj, Smart_Data *sd, int w, int h, int iw, int ih)
{
   int x, y;

   evas_object_geometry_get(obj, &x, &y, NULL, NULL);

   /* applying calculated borders */
   if (sd->crop.l == 0 && sd->crop.r == 0 &&
       sd->crop.t == 0 && sd->crop.b == 0)
     {
        /* no cropping: remove the clipper, re-attaching the image to the
         * clip that was chained through the clipper (if any) */
        Evas_Object *old_clipper;
        if (sd->crop.clipper)
          {
             old_clipper = evas_object_clip_get(sd->crop.clipper);
             evas_object_clip_unset(sd->obj);
             evas_object_clip_set(sd->obj, old_clipper);
             evas_object_del(sd->crop.clipper);
             sd->crop.clipper = NULL;
          }
     }
   else
     {
        /* cropping: insert the clipper between the image and its old clip */
        if (!sd->crop.clipper)
          {
             Evas_Object *old_clipper;
             sd->crop.clipper = evas_object_rectangle_add(
                evas_object_evas_get(obj));
             evas_object_color_set(sd->crop.clipper, 255, 255, 255, 255);
             evas_object_smart_member_add(sd->crop.clipper, obj);
             old_clipper = evas_object_clip_get(sd->obj);
             evas_object_clip_set(sd->obj, sd->crop.clipper);
             evas_object_clip_set(sd->crop.clipper, old_clipper);
             if (evas_object_visible_get(sd->obj))
               evas_object_show(sd->crop.clipper);
          }
     }
   _clipper_position_size_update(obj, x, y, w, h, iw, ih);
}
+
/* Recompute the crop borders for the current aspect policy and object
 * size w x h, then apply them.
 * NOTE(review): ir = iw/ih divides by the native video height; if the
 * video size is still 0x0 this yields inf/NaN ratios — confirm callers
 * only reach this after a size is known, or that the downstream guard in
 * _clipper_position_size_update suffices. */
static void
_emotion_object_aspect_border_apply(Evas_Object *obj, Smart_Data *sd, int w, int h)
{
   int iw, ih;
   double ir;
   double r;

   int aspect_opt = 0;

   iw = sd->video.w;
   ih = sd->video.h;

   ir = (double)iw / ih;
   r = (double)w / h;

   /* First check if we should fit the width or height of the video inside the
    * width/height of the object.  This check takes into account the original
    * aspect ratio and the object aspect ratio, if we are keeping both sizes or
    * cropping the exceding area.
    */
   if (sd->aspect == EMOTION_ASPECT_KEEP_NONE)
     {
        sd->crop.l = 0;
        sd->crop.r = 0;
        sd->crop.t = 0;
        sd->crop.b = 0;
        aspect_opt = 0; // just ignore keep_aspect
     }
   else if (sd->aspect == EMOTION_ASPECT_KEEP_WIDTH)
     {
        aspect_opt = 1;
     }
   else if (sd->aspect == EMOTION_ASPECT_KEEP_HEIGHT)
     {
        aspect_opt = 2;
     }
   else if (sd->aspect == EMOTION_ASPECT_KEEP_BOTH)
     {
        if (ir > r)
          aspect_opt = 1;
        else
          aspect_opt = 2;
     }
   else if (sd->aspect == EMOTION_ASPECT_CROP)
     {
        if (ir > r)
          aspect_opt = 2;
        else
          aspect_opt = 1;
     }
   else if (sd->aspect == EMOTION_ASPECT_CUSTOM)
     {
        // nothing to do, just respect the border settings
        aspect_opt = 0;
     }

   /* updating borders based on keep_aspect settings */
   if (aspect_opt == 1) // keep width
     {
        int th, dh;
        double scale;

        sd->crop.l = 0;
        sd->crop.r = 0;
        scale = (double)iw / w;
        th = h * scale;
        dh = ih - th;
        /* split the excess height evenly between top and bottom */
        sd->crop.t = sd->crop.b = dh / 2;
     }
   else if (aspect_opt == 2) // keep height
     {
        int tw, dw;
        double scale;

        sd->crop.t = 0;
        sd->crop.b = 0;
        scale = (double)ih / h;
        tw = w * scale;
        dw = iw - tw;
        /* split the excess width evenly between left and right */
        sd->crop.l = sd->crop.r = dw / 2;
     }

   _emotion_aspect_borders_apply(obj, sd, w, h, iw, ih);
}
+
/* Set custom video borders.  Positive values add a border (stored negated
 * in crop.*), negative values crop.  Switches the aspect policy to
 * EMOTION_ASPECT_CUSTOM and reapplies the geometry. */
EAPI void
emotion_object_border_set(Evas_Object *obj, int l, int r, int t, int b)
{
   Smart_Data *sd;
   int w, h;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);

   sd->aspect = EMOTION_ASPECT_CUSTOM;
   sd->crop.l = -l;
   sd->crop.r = -r;
   sd->crop.t = -t;
   sd->crop.b = -b;
   evas_object_geometry_get(obj, NULL, NULL, &w, &h);
   _emotion_object_aspect_border_apply(obj, sd, w, h);
}
+
+EAPI void
+emotion_object_border_get(const Evas_Object *obj, int *l, int *r, int *t, int *b)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ *l = -sd->crop.l;
+ *r = -sd->crop.r;
+ *t = -sd->crop.t;
+ *b = -sd->crop.b;
+}
+
/* Set the background rectangle color; shows/hides the rectangle depending
 * on the alpha value, but only when the object itself is visible. */
EAPI void
emotion_object_bg_color_set(Evas_Object *obj, int r, int g, int b, int a)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);

   evas_object_color_set(sd->bg, r, g, b, a);

   if (!evas_object_visible_get(obj))
     return;

   if (a > 0)
     evas_object_show(sd->bg);
   else
     evas_object_hide(sd->bg);
}

/* Retrieve the background rectangle color (evas handles NULL out-params). */
EAPI void
emotion_object_bg_color_get(const Evas_Object *obj, int *r, int *g, int *b, int *a)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   evas_object_color_get(sd->bg, r, g, b, a);
}
+
/* Change the aspect policy and reapply borders (no-op if unchanged). */
EAPI void
emotion_object_keep_aspect_set(Evas_Object *obj, Emotion_Aspect a)
{
   Smart_Data *sd;
   int w, h;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);

   if (a == sd->aspect)
     return;

   sd->aspect = a;
   evas_object_geometry_get(obj, NULL, NULL, &w, &h);
   _emotion_object_aspect_border_apply(obj, sd, w, h);
}

/* Return the current aspect policy (KEEP_NONE for invalid objects). */
EAPI Emotion_Aspect
emotion_object_keep_aspect_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_ASPECT_KEEP_NONE);

   return sd->aspect;
}
+
/* Start or stop playback.  If the file is not yet open, the request is
 * remembered (remember_play) and honoured once open completes.  Wakes the
 * object from any suspend state before playing/stopping. */
EAPI void
emotion_object_play_set(Evas_Object *obj, Eina_Bool play)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   /* FMT_UCHAR: Eina_Bool is an unsigned char, %hhu (or %c on Windows) */
   DBG("play=" FMT_UCHAR ", was=" FMT_UCHAR, play, sd->play);
   if (play == sd->play) return;
   if (!sd->module) return;
   if (!sd->video_data) return;
   if (!sd->open)
     {
        sd->remember_play = play;
        return;
     }
   sd->play = play;
   sd->remember_play = play;
   if (sd->state != EMOTION_WAKEUP) emotion_object_suspend_set(obj, EMOTION_WAKEUP);
   if (sd->play) sd->module->play(sd->video_data, sd->pos);
   else sd->module->stop(sd->video_data);
}

/* Return whether playback is active (EINA_FALSE when nothing is loaded). */
EAPI Eina_Bool
emotion_object_play_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->video_data) return EINA_FALSE;

   return sd->play;
}
+
/* Seek to @p sec.  If the file is not open yet the target is remembered
 * (remember_jump) for later; otherwise the actual backend seek is deferred
 * to an ecore job (_pos_set_job), replacing any pending one. */
EAPI void
emotion_object_position_set(Evas_Object *obj, double sec)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("sec=%f", sec);
   if (!sd->module) return;
   if (!sd->video_data) return;
   if (!sd->open)
     {
        sd->remember_jump = sec;
        return ;
     }
   sd->remember_jump = 0;
   sd->seek_pos = sec;
   sd->seek = 1;
   sd->pos = sd->seek_pos;
   if (sd->job) ecore_job_del(sd->job);
   sd->job = ecore_job_add(_pos_set_job, obj);
}

/* Query the backend for the current position (seconds); also caches it
 * in sd->pos. */
EAPI double
emotion_object_position_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
   if (!sd->module) return 0.0;
   if (!sd->video_data) return 0.0;
   if (!sd->module->pos_get) return 0.0;
   sd->pos = sd->module->pos_get(sd->video_data);
   return sd->pos;
}
+
/* Buffering level in [0..1]; defaults to 1.0 ("fully buffered") when no
 * module/file is loaded or the backend does not implement it. */
EAPI double
emotion_object_buffer_size_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
   if (!sd->module) return 1.0;
   if (!sd->video_data) return 1.0;
   if (!sd->module->buffer_size_get) return 1.0;
   return sd->module->buffer_size_get(sd->video_data);
}

/* Whether the current stream supports seeking. */
EAPI Eina_Bool
emotion_object_seekable_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->seekable(sd->video_data);
}

/* Whether the backend handles the video portion of the stream. */
EAPI Eina_Bool
emotion_object_video_handled_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->video_handled(sd->video_data);
}

/* Whether the backend handles the audio portion of the stream. */
EAPI Eina_Bool
emotion_object_audio_handled_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->audio_handled(sd->video_data);
}

/* Media length in seconds (also cached in sd->len); 0.0 when unknown. */
EAPI double
emotion_object_play_length_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
   if (!sd->module) return 0.0;
   if (!sd->video_data) return 0.0;
   sd->len = sd->module->len_get(sd->video_data);
   return sd->len;
}
+
/* Native video size in pixels; out-params are optional and zeroed first. */
EAPI void
emotion_object_size_get(const Evas_Object *obj, int *iw, int *ih)
{
   Smart_Data *sd;

   if (iw) *iw = 0;
   if (ih) *ih = 0;
   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (iw) *iw = sd->video.w;
   if (ih) *ih = sd->video.h;
}

/* Enable/disable smooth scaling on the underlying image object. */
EAPI void
emotion_object_smooth_scale_set(Evas_Object *obj, Eina_Bool smooth)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   evas_object_image_smooth_scale_set(sd->obj, smooth);
}

/* Query smooth scaling state of the underlying image object. */
EAPI Eina_Bool
emotion_object_smooth_scale_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   return evas_object_image_smooth_scale_get(sd->obj);
}

/* Aspect ratio reported by the backend.
 * NOTE(review): returns 1.0 for an invalid object but 0.0 when no
 * module/file is loaded — inconsistent defaults; confirm intent before
 * relying on either. */
EAPI double
emotion_object_ratio_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
   if (!sd->module) return 0.0;
   if (!sd->video_data) return 0.0;
   return sd->ratio;
}
+
/*
 * Send a control event to the DVD.
 */
EAPI void
emotion_object_event_simple_send(Evas_Object *obj, Emotion_Event ev)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->event_feed(sd->video_data, ev);
}

/* Set the audio volume on the current audio channel. */
EAPI void
emotion_object_audio_volume_set(Evas_Object *obj, double vol)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("vol=%f", vol);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->audio_channel_volume_set(sd->video_data, vol);
}

/* Get the audio volume (1.0 for invalid objects, 0.0 when nothing loaded). */
EAPI double
emotion_object_audio_volume_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 1.0);
   if (!sd->module) return 0.0;
   if (!sd->video_data) return 0.0;
   return sd->module->audio_channel_volume_get(sd->video_data);
}

/* Mute/unmute audio. */
EAPI void
emotion_object_audio_mute_set(Evas_Object *obj, Eina_Bool mute)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("mute=" FMT_UCHAR, mute);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->audio_channel_mute_set(sd->video_data, mute);
}

/* Query audio mute state. */
EAPI Eina_Bool
emotion_object_audio_mute_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->audio_channel_mute_get(sd->video_data);
}

/* Number of audio channels in the stream. */
EAPI int
emotion_object_audio_channel_count(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->audio_channel_count(sd->video_data);
}
+
/* Name of audio channel @p channel, or NULL when unavailable. */
EAPI const char *
emotion_object_audio_channel_name_get(const Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   if (!sd->module) return NULL;
   if (!sd->video_data) return NULL;
   return sd->module->audio_channel_name_get(sd->video_data, channel);
}

/* Select the active audio channel. */
EAPI void
emotion_object_audio_channel_set(Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("channel=%d", channel);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->audio_channel_set(sd->video_data, channel);
}

/* Currently selected audio channel. */
EAPI int
emotion_object_audio_channel_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->audio_channel_get(sd->video_data);
}

/* Mute/unmute the video channel. */
EAPI void
emotion_object_video_mute_set(Evas_Object *obj, Eina_Bool mute)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("mute=" FMT_UCHAR, mute);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->video_channel_mute_set(sd->video_data, mute);
}

/* Query the video mute state. */
EAPI Eina_Bool
emotion_object_video_mute_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->video_channel_mute_get(sd->video_data);
}
+
/* Set an external subtitle file for the current stream. */
EAPI void
emotion_object_video_subtitle_file_set(Evas_Object *obj, const char *filepath)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("subtitle=%s", filepath);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->video_subtitle_file_set(sd->video_data, filepath);
}
+
+EAPI const char *
+emotion_object_video_subtitle_file_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->video_subtitle_file_get(sd->video_data);
+}
+
+EAPI int
+emotion_object_video_channel_count(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ return sd->module->video_channel_count(sd->video_data);
+}
+
/* Name of video channel @p channel, or NULL when unavailable. */
EAPI const char *
emotion_object_video_channel_name_get(const Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   if (!sd->module) return NULL;
   if (!sd->video_data) return NULL;
   return sd->module->video_channel_name_get(sd->video_data, channel);
}

/* Select the active video channel. */
EAPI void
emotion_object_video_channel_set(Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("channel=%d", channel);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->video_channel_set(sd->video_data, channel);
}

/* Currently selected video channel. */
EAPI int
emotion_object_video_channel_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->video_channel_get(sd->video_data);
}
+
/* Mute/unmute the SPU (subpicture/subtitle) channel. */
EAPI void
emotion_object_spu_mute_set(Evas_Object *obj, Eina_Bool mute)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("mute=" FMT_UCHAR, mute);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->spu_channel_mute_set(sd->video_data, mute);
}

/* Query the SPU mute state. */
EAPI Eina_Bool
emotion_object_spu_mute_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   return sd->module->spu_channel_mute_get(sd->video_data);
}

/* Number of SPU channels in the stream. */
EAPI int
emotion_object_spu_channel_count(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->spu_channel_count(sd->video_data);
}

/* Name of SPU channel @p channel, or NULL when unavailable. */
EAPI const char *
emotion_object_spu_channel_name_get(const Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   if (!sd->module) return NULL;
   if (!sd->video_data) return NULL;
   return sd->module->spu_channel_name_get(sd->video_data, channel);
}

/* Select the active SPU channel. */
EAPI void
emotion_object_spu_channel_set(Evas_Object *obj, int channel)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("channel=%d", channel);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->spu_channel_set(sd->video_data, channel);
}

/* Currently selected SPU channel. */
EAPI int
emotion_object_spu_channel_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->spu_channel_get(sd->video_data);
}
+
/* Number of chapters in the stream (DVD-style). */
EAPI int
emotion_object_chapter_count(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->chapter_count(sd->video_data);
}

/* Jump to chapter @p chapter. */
EAPI void
emotion_object_chapter_set(Evas_Object *obj, int chapter)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("chapter=%d", chapter);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->chapter_set(sd->video_data, chapter);
}

/* Currently playing chapter. */
EAPI int
emotion_object_chapter_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return 0;
   if (!sd->video_data) return 0;
   return sd->module->chapter_get(sd->video_data);
}

/* Name of chapter @p chapter, or NULL when unavailable. */
EAPI const char *
emotion_object_chapter_name_get(const Evas_Object *obj, int chapter)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   if (!sd->module) return NULL;
   if (!sd->video_data) return NULL;
   return sd->module->chapter_name_get(sd->video_data, chapter);
}
+
/* Set the playback speed multiplier. */
EAPI void
emotion_object_play_speed_set(Evas_Object *obj, double speed)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("speed=%f", speed);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->speed_set(sd->video_data, speed);
}

/* Get the playback speed multiplier (0.0 when nothing loaded). */
EAPI double
emotion_object_play_speed_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
   if (!sd->module) return 0.0;
   if (!sd->video_data) return 0.0;
   return sd->module->speed_get(sd->video_data);
}

/* Ask the backend to eject the media (e.g. a physical disc). */
EAPI void
emotion_object_eject(Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (!sd->module) return;
   if (!sd->video_data) return;
   sd->module->eject(sd->video_data);
}
+
/* Last title reported by the backend (set via _emotion_title_set). */
EAPI const char *
emotion_object_title_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   return sd->title;
}

/* Last progress info string (set via _emotion_progress_set). */
EAPI const char *
emotion_object_progress_info_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   return sd->progress.info;
}

/* Last progress status value (set via _emotion_progress_set). */
EAPI double
emotion_object_progress_status_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0.0);
   return sd->progress.stat;
}

/* File referenced by the stream (set via _emotion_file_ref_set). */
EAPI const char *
emotion_object_ref_file_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   return sd->ref.file;
}

/* Reference number (set via _emotion_file_ref_set). */
EAPI int
emotion_object_ref_num_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   return sd->ref.num;
}

/* Number of SPU buttons (set via _emotion_spu_button_num_set). */
EAPI int
emotion_object_spu_button_count_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   return sd->spu.button_num;
}

/* Currently selected SPU button (set via _emotion_spu_button_set). */
EAPI int
emotion_object_spu_button_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   return sd->spu.button;
}
+
/* Map the public Emotion_Meta_Info enum to the internal META_* id and
 * fetch the metadata string from the backend.  Returns NULL for unknown
 * meta fields or when nothing is loaded. */
EAPI const char *
emotion_object_meta_info_get(const Evas_Object *obj, Emotion_Meta_Info meta)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
   if (!sd->module) return NULL;
   if (!sd->video_data) return NULL;
   switch (meta)
     {
      case EMOTION_META_INFO_TRACK_TITLE:
        return sd->module->meta_get(sd->video_data, META_TRACK_TITLE);
      case EMOTION_META_INFO_TRACK_ARTIST:
        return sd->module->meta_get(sd->video_data, META_TRACK_ARTIST);
      case EMOTION_META_INFO_TRACK_ALBUM:
        return sd->module->meta_get(sd->video_data, META_TRACK_ALBUM);
      case EMOTION_META_INFO_TRACK_YEAR:
        return sd->module->meta_get(sd->video_data, META_TRACK_YEAR);
      case EMOTION_META_INFO_TRACK_GENRE:
        return sd->module->meta_get(sd->video_data, META_TRACK_GENRE);
      case EMOTION_META_INFO_TRACK_COMMENT:
        return sd->module->meta_get(sd->video_data, META_TRACK_COMMENT);
      case EMOTION_META_INFO_TRACK_DISC_ID:
        return sd->module->meta_get(sd->video_data, META_TRACK_DISCID);
      default:
        break;
     }
   return NULL;
}
+
/* Select the audio visualization effect (optional backend feature). */
EAPI void
emotion_object_vis_set(Evas_Object *obj, Emotion_Vis visualization)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   DBG("visualization=%d", visualization);
   if (!sd->module) return;
   if (!sd->video_data) return;
   if (!sd->module->vis_set) return;
   sd->module->vis_set(sd->video_data, visualization);
}

/* Current visualization effect; EMOTION_VIS_NONE when unsupported. */
EAPI Emotion_Vis
emotion_object_vis_get(const Evas_Object *obj)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_VIS_NONE);
   if (!sd->module) return EMOTION_VIS_NONE;
   if (!sd->video_data) return EMOTION_VIS_NONE;
   if (!sd->module->vis_get) return EMOTION_VIS_NONE;
   return sd->module->vis_get(sd->video_data);
}

/* Whether the backend supports a given visualization effect. */
EAPI Eina_Bool
emotion_object_vis_supported(const Evas_Object *obj, Emotion_Vis visualization)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
   if (!sd->module) return EINA_FALSE;
   if (!sd->video_data) return EINA_FALSE;
   if (!sd->module->vis_supported) return EINA_FALSE;
   return sd->module->vis_supported(sd->video_data, visualization);
}
+
+EAPI void
+emotion_object_priority_set(Evas_Object *obj, Eina_Bool priority)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (!sd->module) return ;
+ if (!sd->video_data) return ;
+ if (!sd->module->priority_set) return ;
+ sd->module->priority_set(sd->video_data, priority);
+}
+
+EAPI Eina_Bool
+emotion_object_priority_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, 0);
+ if (!sd->module) return EINA_FALSE;
+ if (!sd->video_data) return EINA_FALSE;
+ if (!sd->module->priority_get) return EINA_FALSE;
+ return sd->module->priority_get(sd->video_data);
+}
+
#ifdef HAVE_EIO
/* Common tail for the async xattr-load callbacks: forget the finished
 * handler and drop the reference taken when the request was started.
 * Dropping the reference frees sd if the object was deleted while the
 * request was in flight. */
static void
_eio_load_xattr_cleanup(Smart_Data *sd, Eio_File *handler)
{
   if (handler == sd->load_xattr) sd->load_xattr = NULL;

   EINA_REFCOUNT_UNREF(sd)
     _smart_data_free(sd);
}

/* EIO success callback: seek to the position read from the
 * "user.e.time_seek" extended attribute and notify listeners.
 * NOTE(review): sd->obj is the internal image object, so the smart
 * parent is the public emotion object the callbacks are emitted on —
 * confirm sd->obj is always a smart member at this point. */
static void
_eio_load_xattr_done(void *data, Eio_File *handler, double xattr_double)
{
   Smart_Data *sd = data;

   emotion_object_position_set(evas_object_smart_parent_get(sd->obj), xattr_double);
   evas_object_smart_callback_call(evas_object_smart_parent_get(sd->obj), SIG_POSITION_LOAD_SUCCEED, NULL);
   _eio_load_xattr_cleanup(sd, handler);
}

/* EIO error callback: attribute missing or unreadable — report failure. */
static void
_eio_load_xattr_error(void *data, Eio_File *handler, int err EINA_UNUSED)
{
   Smart_Data *sd = data;

   evas_object_smart_callback_call(evas_object_smart_parent_get(sd->obj), SIG_POSITION_LOAD_FAILED, NULL);
   _eio_load_xattr_cleanup(sd, handler);
}
#endif
+
/**
 * @brief Restore the last saved playback position of the current file.
 *
 * Reads the "user.e.time_seek" extended attribute of the media file and
 * seeks to that position, emitting SIG_POSITION_LOAD_SUCCEED or
 * SIG_POSITION_LOAD_FAILED.  Only local files (plain paths or file://
 * URIs) are handled; other URI schemes are silently ignored.  With EIO
 * available the read is asynchronous.
 *
 * @param obj The emotion object.
 */
EAPI void
emotion_object_last_position_load(Evas_Object *obj)
{
   Smart_Data *sd;
   const char *tmp;
#ifndef HAVE_EIO
   double xattr;
#endif

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (!sd->file) return ;

   /* Strip an optional file:// prefix; bail out on any other URI scheme
    * since extended attributes only exist on local filesystems. */
   if (!strncmp(sd->file, "file://", 7))
     tmp = sd->file + 7;
   else if (!strstr(sd->file, "://"))
     tmp = sd->file;
   else
     return ;

#ifdef HAVE_EIO
   /* Only one load request may be in flight at a time. */
   if (sd->load_xattr) return ;

   /* Keep sd alive until the async callbacks fire (see
    * _eio_load_xattr_cleanup for the matching unref). */
   EINA_REFCOUNT_REF(sd);

   sd->load_xattr = eio_file_xattr_double_get(tmp,
                                              "user.e.time_seek",
                                              _eio_load_xattr_done,
                                              _eio_load_xattr_error,
                                              sd);
#else
   if (eina_xattr_double_get(tmp, "user.e.time_seek", &xattr))
     {
        emotion_object_position_set(obj, xattr);
        evas_object_smart_callback_call(obj, SIG_POSITION_LOAD_SUCCEED, NULL);
     }
   else
     {
        evas_object_smart_callback_call(obj, SIG_POSITION_LOAD_FAILED, NULL);
     }
#endif
}
+
#ifdef HAVE_EIO
/* Common tail for the async xattr-save callbacks: forget the finished
 * handler and drop the reference taken when the request was started.
 * Dropping the reference frees sd if the object was deleted while the
 * request was in flight. */
static void
_eio_save_xattr_cleanup(Smart_Data *sd, Eio_File *handler)
{
   if (handler == sd->save_xattr) sd->save_xattr = NULL;

   EINA_REFCOUNT_UNREF(sd)
     _smart_data_free(sd);
}

/* EIO success callback: the position was written — report success. */
static void
_eio_save_xattr_done(void *data, Eio_File *handler)
{
   Smart_Data *sd = data;

   evas_object_smart_callback_call(sd->obj, SIG_POSITION_SAVE_SUCCEED, NULL);
   _eio_save_xattr_cleanup(sd, handler);
}

/* EIO error callback: the attribute could not be written — report failure. */
static void
_eio_save_xattr_error(void *data, Eio_File *handler, int err EINA_UNUSED)
{
   Smart_Data *sd = data;

   evas_object_smart_callback_call(sd->obj, SIG_POSITION_SAVE_FAILED, NULL);
   _eio_save_xattr_cleanup(sd, handler);
}
#endif
+
/**
 * @brief Persist the current playback position of the current file.
 *
 * Writes the current position to the "user.e.time_seek" extended
 * attribute of the media file, emitting SIG_POSITION_SAVE_SUCCEED or
 * SIG_POSITION_SAVE_FAILED.  Only local files (plain paths or file://
 * URIs) are handled; other URI schemes are silently ignored.  With EIO
 * available the write is asynchronous.
 *
 * @param obj The emotion object.
 */
EAPI void
emotion_object_last_position_save(Evas_Object *obj)
{
   Smart_Data *sd;
   const char *tmp;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (!sd->file) return ;

   /* Strip an optional file:// prefix; bail out on any other URI scheme
    * since extended attributes only exist on local filesystems. */
   if (!strncmp(sd->file, "file://", 7))
     tmp = sd->file + 7;
   else if (!strstr(sd->file, "://"))
     tmp = sd->file;
   else
     return ;

#ifdef HAVE_EIO
   /* Only one save request may be in flight at a time. */
   if (sd->save_xattr) return ;

   /* Keep sd alive until the async callbacks fire (see
    * _eio_save_xattr_cleanup for the matching unref). */
   EINA_REFCOUNT_REF(sd);

   sd->save_xattr = eio_file_xattr_double_set(tmp,
                                              "user.e.time_seek",
                                              emotion_object_position_get(obj),
                                              0,
                                              _eio_save_xattr_done,
                                              _eio_save_xattr_error,
                                              sd);
#else
   if (eina_xattr_double_set(tmp, "user.e.time_seek", emotion_object_position_get(obj), 0))
     evas_object_smart_callback_call(obj, SIG_POSITION_SAVE_SUCCEED, NULL);
   else
     evas_object_smart_callback_call(obj, SIG_POSITION_SAVE_FAILED, NULL);
#endif
}
+
+EAPI void
+emotion_object_suspend_set(Evas_Object *obj, Emotion_Suspend state)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ switch (state)
+ {
+ case EMOTION_WAKEUP:
+ /* Restore the rendering pipeline, offset and everything back to play again (this will be called automatically by play_set) */
+ case EMOTION_SLEEP:
+ /* This destroy some part of the rendering pipeline */
+ case EMOTION_DEEP_SLEEP:
+ /* This destroy all the rendering pipeline and just keep the last rendered image (fullscreen) */
+ case EMOTION_HIBERNATE:
+ /* This destroy all the rendering pipeline and keep 1/4 of the last rendered image */
+ default:
+ break;
+ }
+
+ sd->state = state;
+}
+
+EAPI Emotion_Suspend
+emotion_object_suspend_get(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EMOTION_WAKEUP);
+ return sd->state;
+}
+
+/*****************************/
+/* Utility calls for modules */
+/*****************************/
+
+EAPI void *
+_emotion_video_get(const Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, NULL);
+ return sd->video_data;
+}
+
+static Eina_Bool
+_emotion_frame_anim(void *data)
+{
+ Evas_Object *obj = data;
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET_RETURN(sd, obj, E_OBJ_NAME, EINA_FALSE);
+
+ evas_object_image_pixels_dirty_set(sd->obj, 1);
+ evas_object_smart_callback_call(obj, SIG_FRAME_DECODE, NULL);
+ sd->anim = NULL;
+
+ return EINA_FALSE;
+}
+
+EAPI void
+_emotion_frame_new(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+
+ if (!sd->anim) sd->anim = ecore_animator_add(_emotion_frame_anim, obj);
+}
+
+EAPI void
+_emotion_video_pos_update(Evas_Object *obj, double pos, double len)
+{
+ Smart_Data *sd;
+ int npos = 0, nlen = 0;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (pos != sd->pos) npos = 1;
+ if (len != sd->len) nlen = 1;
+ sd->pos = pos;
+ sd->len = len;
+ if (npos) evas_object_smart_callback_call(obj, SIG_POSITION_UPDATE, NULL);
+ if (nlen) evas_object_smart_callback_call(obj, SIG_LENGTH_CHANGE, NULL);
+}
+
/* Module utility: the video frame size and/or aspect ratio changed.
 * Caches the new values, emits SIG_FRAME_RESIZE and reapplies the
 * aspect/border policy with the object's current geometry. */
EAPI void
_emotion_frame_resize(Evas_Object *obj, int w, int h, double ratio)
{
   Smart_Data *sd;
   double tmp;
   int changed = 0;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if ((w != sd->video.w) || (h != sd->video.h))
     {
        sd->video.w = w;
        sd->video.h = h;
        /* Invalidate the current pixels: they belong to the old size. */
        _emotion_image_data_zero(sd->obj);
        changed = 1;
     }
   /* Start from the pixel aspect (w/h, or 1.0 for degenerate heights);
    * an engine-supplied ratio that differs from it takes precedence. */
   if (h > 0) tmp = (double)w / (double)h;
   else tmp = 1.0;
   if (ratio != tmp) tmp = ratio;
   if (tmp != sd->ratio)
     {
        sd->ratio = tmp;
        changed = 1;
     }
   if (changed)
     {
        evas_object_size_hint_request_set(obj, w, h);
        evas_object_smart_callback_call(obj, SIG_FRAME_RESIZE, NULL);
        /* w/h are reused here to fetch the object's on-screen geometry. */
        evas_object_geometry_get(obj, NULL, NULL, &w, &h);
        _emotion_object_aspect_border_apply(obj, sd, w, h);
     }
}
+
+EAPI void
+_emotion_image_reset(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ _emotion_image_data_zero(sd->obj);
+}
+
+EAPI void
+_emotion_decode_stop(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (sd->play)
+ {
+ sd->play = 0;
+ evas_object_smart_callback_call(obj, SIG_DECODE_STOP, NULL);
+ }
+}
+
+EAPI void
+_emotion_open_done(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->open = 1;
+
+ if (sd->remember_jump)
+ emotion_object_position_set(obj, sd->remember_jump);
+ if (sd->remember_play != sd->play)
+ emotion_object_play_set(obj, sd->remember_play);
+ evas_object_smart_callback_call(obj, SIG_OPEN_DONE, NULL);
+}
+
+EAPI void
+_emotion_playback_started(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_PLAYBACK_STARTED, NULL);
+}
+
+EAPI void
+_emotion_playback_finished(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_PLAYBACK_FINISHED, NULL);
+}
+
+EAPI void
+_emotion_audio_level_change(Evas_Object *obj)
+{
+ evas_object_smart_callback_call(obj, SIG_AUDIO_LEVEL_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_channels_change(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ evas_object_smart_callback_call(obj, SIG_CHANNELS_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_title_set(Evas_Object *obj, char *title)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ free(sd->title);
+ sd->title = strdup(title);
+ evas_object_smart_callback_call(obj, SIG_TITLE_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_progress_set(Evas_Object *obj, char *info, double st)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ free(sd->progress.info);
+ sd->progress.info = strdup(info);
+ sd->progress.stat = st;
+ evas_object_smart_callback_call(obj, SIG_PROGRESS_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_file_ref_set(Evas_Object *obj, const char *file, int num)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ free(sd->ref.file);
+ sd->ref.file = strdup(file);
+ sd->ref.num = num;
+ evas_object_smart_callback_call(obj, SIG_REF_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_spu_button_num_set(Evas_Object *obj, int num)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button_num = num;
+ evas_object_smart_callback_call(obj, SIG_BUTTON_NUM_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_spu_button_set(Evas_Object *obj, int button)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ sd->spu.button = button;
+ evas_object_smart_callback_call(obj, SIG_BUTTON_CHANGE, NULL);
+}
+
+EAPI void
+_emotion_seek_done(Evas_Object *obj)
+{
+ Smart_Data *sd;
+
+ E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
+ if (sd->seeking)
+ {
+ sd->seeking = 0;
+ if (sd->seek) emotion_object_position_set(obj, sd->seek_pos);
+ }
+}
+
/* Module utility: update how the frame fills the image object.
 * w/h are relative fill factors; non-positive values mean "derive the
 * fill from the video size minus the crop borders". */
EAPI void
_emotion_frame_refill(Evas_Object *obj, double w, double h)
{
   Smart_Data *sd;

   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   if (sd->fill.w != w || sd->fill.h != h)
     {
        Evas_Coord ow, oh;

        evas_object_geometry_get(obj, NULL, NULL, &ow, &oh);
        if (w <= 0 || h <= 0)
          {
             double scale_w, scale_h;

             /* -1 marks "automatic fill" for subsequent calls. */
             sd->fill.w = -1;
             sd->fill.h = -1;

             /* NOTE(review): divides by the cropped video size — assumes
              * video.w/h exceed the crop borders here; a frame where the
              * crop consumes the whole dimension would divide by zero.
              * Verify the engines guarantee this. */
             scale_w = (double) ow / (double)(sd->video.w - sd->crop.l - sd->crop.r);
             scale_h = (double) oh / (double)(sd->video.h - sd->crop.t - sd->crop.b);

             evas_object_image_fill_set(sd->obj, 0, 0, scale_w * sd->video.w, scale_h * sd->video.h);
          }
        else
          {
             sd->fill.w = w;
             sd->fill.h = h;

             /* Explicit fill: scale the object geometry by the factors. */
             evas_object_image_fill_set(sd->obj, 0, 0, w * ow, h * oh);
          }
     }
}
+
+/****************************/
+/* Internal object routines */
+/****************************/
+
+static void
+_mouse_move(void *data, Evas *ev EINA_UNUSED, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Move *e;
+ Smart_Data *sd;
+ int x, y, iw, ih;
+ Evas_Coord ox, oy, ow, oh;
+
+ e = event_info;
+ sd = data;
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+ evas_object_image_size_get(obj, &iw, &ih);
+ if ((iw < 1) || (ih < 1)) return;
+ x = (((int)e->cur.canvas.x - ox) * iw) / ow;
+ y = (((int)e->cur.canvas.y - oy) * ih) / oh;
+ sd->module->event_mouse_move_feed(sd->video_data, x, y);
+}
+
+static void
+_mouse_down(void *data, Evas *ev EINA_UNUSED, Evas_Object *obj, void *event_info)
+{
+ Evas_Event_Mouse_Down *e;
+ Smart_Data *sd;
+ int x, y, iw, ih;
+ Evas_Coord ox, oy, ow, oh;
+
+ e = event_info;
+ sd = data;
+ if (!sd->module) return;
+ if (!sd->video_data) return;
+ evas_object_geometry_get(obj, &ox, &oy, &ow, &oh);
+ evas_object_image_size_get(obj, &iw, &ih);
+ if ((iw < 1) || (ih < 1)) return;
+ x = (((int)e->canvas.x - ox) * iw) / ow;
+ y = (((int)e->canvas.y - oy) * ih) / oh;
+ sd->module->event_mouse_button_feed(sd->video_data, 1, x, y);
+}
+
/* Deferred seek job: issue the pending position change to the engine,
 * unless a seek is already in flight (in which case _emotion_seek_done
 * will re-issue the latest request).
 * NOTE(review): dereferences sd->module/sd->video_data unconditionally —
 * assumes the job is only ever scheduled while an engine is loaded;
 * confirm at the scheduling site. */
static void
_pos_set_job(void *data)
{
   Evas_Object *obj;
   Smart_Data *sd;

   obj = data;
   E_SMART_OBJ_GET(sd, obj, E_OBJ_NAME);
   sd->job = NULL;
   if (sd->seeking) return;
   if (sd->seek)
     {
        sd->seeking = 1;
        sd->module->pos_set(sd->video_data, sd->seek_pos);
        sd->seek = 0;
     }
}
+
+/* called by evas when it needs pixels for the image object */
+static void
+_pixels_get(void *data, Evas_Object *obj)
+{
+ int iw, ih, w, h;
+ Smart_Data *sd;
+ Emotion_Format format;
+ unsigned char *bgra_data;
+
+ sd = data;
+ sd->module->video_data_size_get(sd->video_data, &w, &h);
+ w = (w >> 1) << 1;
+ h = (h >> 1) << 1;
+
+ evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+ evas_object_image_alpha_set(obj, 0);
+ evas_object_image_size_set(obj, w, h);
+ iw = w;
+ ih = h;
+
+ if ((iw <= 1) || (ih <= 1))
+ {
+ _emotion_image_data_zero(sd->obj);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ else
+ {
+ format = sd->module->format_get(sd->video_data);
+ if ((format == EMOTION_FORMAT_YV12) || (format == EMOTION_FORMAT_I420))
+ {
+ unsigned char **rows;
+
+ evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_YCBCR422P601_PL);
+ rows = evas_object_image_data_get(obj, 1);
+ if (rows)
+ {
+ if (sd->module->yuv_rows_get(sd->video_data, iw, ih,
+ rows,
+ &rows[ih],
+ &rows[ih + (ih / 2)]))
+ evas_object_image_data_update_add(obj, 0, 0, iw, ih);
+ }
+ evas_object_image_data_set(obj, rows);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ else if (format == EMOTION_FORMAT_BGRA)
+ {
+ evas_object_image_colorspace_set(obj, EVAS_COLORSPACE_ARGB8888);
+ if (sd->module->bgra_data_get(sd->video_data, &bgra_data))
+ {
+ evas_object_image_data_set(obj, bgra_data);
+ evas_object_image_pixels_dirty_set(obj, 0);
+ }
+ }
+ }
+}
+
+/*******************************************/
+/* Internal smart object required routines */
+/*******************************************/
+#ifdef EMOTION_STATIC_BUILD_XINE
+Eina_Bool xine_module_init(void);
+#endif
+#ifdef EMOTION_STATIC_BUILD_GSTREAMER
+Eina_Bool gstreamer_module_init(void);
+#endif
+#ifdef EMOTION_STATIC_BUILD_GENERIC
+Eina_Bool generic_module_init(void);
+#endif
+
/* One-shot initialization of the emotion smart class: sets up logging,
 * discovers and loads engine modules from the install prefix, the user's
 * ~/.emotion dir, $EMOTION_MODULES_DIR and the library's own path, then
 * registers the Evas smart class. */
static void
_smart_init(void)
{
   char *path;

   if (smart) return;
   {
      eina_init();

      _log_domain = eina_log_domain_register("emotion", EINA_COLOR_LIGHTCYAN);
      if (_log_domain < 0)
        {
           EINA_LOG_CRIT("Could not register log domain 'emotion'");
           eina_shutdown();
           return;
        }

      _backends = eina_hash_string_small_new(free);

      _modules = eina_module_list_get(NULL, PACKAGE_LIB_DIR "/emotion/", 0, NULL, NULL);

      /* Each lookup appends to the same module list. */
      path = eina_module_environment_path_get("HOME", "/.emotion/");
      _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
      if (path) free(path);

      path = eina_module_environment_path_get("EMOTION_MODULES_DIR", "/emotion/");
      _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
      if (path) free(path);

      /* Resolve the directory this library was loaded from. */
      path = eina_module_symbol_path_get(emotion_object_add, "/emotion/");
      _modules = eina_module_list_get(_modules, path, 0, NULL, NULL);
      if (path) free(path);

      if (!_modules)
        {
           ERR("No module found!");
           return;
        }

      eina_module_list_load(_modules);

      /* Init static module */
#ifdef EMOTION_STATIC_BUILD_XINE
      xine_module_init();
#endif
#ifdef EMOTION_STATIC_BUILD_GSTREAMER
      gstreamer_module_init();
#endif
#ifdef EMOTION_STATIC_BUILD_GENERIC
      generic_module_init();
#endif

      /* The static class survives the function; fill in the hooks only on
       * the first pass. */
      static Evas_Smart_Class sc =
        EVAS_SMART_CLASS_INIT_NAME_VERSION(E_OBJ_NAME);
      if (!sc.add)
        {
           sc.add = _smart_add;
           sc.del = _smart_del;
           sc.move = _smart_move;
           sc.resize = _smart_resize;
           sc.show = _smart_show;
           sc.hide = _smart_hide;
           sc.color_set = _smart_color_set;
           sc.clip_set = _smart_clip_set;
           sc.clip_unset = _smart_clip_unset;
           sc.callbacks = _smart_callbacks;
        }
      smart = evas_smart_class_new(&sc);
   }
}
+
+static void
+_smart_add(Evas_Object * obj)
+{
+ Smart_Data *sd;
+ unsigned int *pixel;
+
+ sd = calloc(1, sizeof(Smart_Data));
+ if (!sd) return;
+ EINA_REFCOUNT_INIT(sd);
+ sd->state = EMOTION_WAKEUP;
+ sd->obj = evas_object_image_add(evas_object_evas_get(obj));
+ sd->bg = evas_object_rectangle_add(evas_object_evas_get(obj));
+ evas_object_color_set(sd->bg, 0, 0, 0, 0);
+ evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_MOVE, _mouse_move, sd);
+ evas_object_event_callback_add(sd->obj, EVAS_CALLBACK_MOUSE_DOWN, _mouse_down, sd);
+ evas_object_image_pixels_get_callback_set(sd->obj, _pixels_get, sd);
+ evas_object_smart_member_add(sd->obj, obj);
+ evas_object_smart_member_add(sd->bg, obj);
+ evas_object_lower(sd->bg);
+ sd->ratio = 1.0;
+ sd->spu.button = -1;
+ sd->fill.w = -1;
+ sd->fill.h = -1;
+ evas_object_image_alpha_set(sd->obj, 0);
+ pixel = evas_object_image_data_get(sd->obj, 1);
+ if (pixel)
+ {
+ *pixel = 0xff000000;
+ evas_object_image_data_set(obj, pixel);
+ }
+ evas_object_smart_data_set(obj, sd);
+
+ ecore_init();
+}
+
+static void
+_smart_del(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ EINA_REFCOUNT_UNREF(sd)
+ _smart_data_free(sd);
+}
+
+static void
+_smart_move(Evas_Object * obj, Evas_Coord x, Evas_Coord y)
+{
+ Smart_Data *sd;
+ int w, h;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+
+ evas_object_geometry_get(obj, NULL, NULL, &w, &h);
+ _clipper_position_size_update(obj, x, y, w, h, sd->video.w, sd->video.h);
+ evas_object_move(sd->bg, x, y);
+}
+
+static void
+_smart_resize(Evas_Object * obj, Evas_Coord w, Evas_Coord h)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+
+ _emotion_object_aspect_border_apply(obj, sd, w, h);
+ evas_object_resize(sd->bg, w, h);
+}
+
+static void
+_smart_show(Evas_Object * obj)
+{
+ Smart_Data *sd;
+ int a;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_show(sd->obj);
+ if (sd->crop.clipper)
+ evas_object_show(sd->crop.clipper);
+
+ evas_object_color_get(sd->bg, NULL, NULL, NULL, &a);
+ if (a > 0)
+ evas_object_show(sd->bg);
+}
+
+static void
+_smart_hide(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_hide(sd->obj);
+ if (sd->crop.clipper)
+ evas_object_hide(sd->crop.clipper);
+ evas_object_hide(sd->bg);
+}
+
+static void
+_smart_color_set(Evas_Object * obj, int r, int g, int b, int a)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ evas_object_color_set(sd->obj, r, g, b, a);
+ evas_object_color_set(sd->crop.clipper, r, g, b, a);
+}
+
+static void
+_smart_clip_set(Evas_Object * obj, Evas_Object * clip)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ if (sd->crop.clipper)
+ evas_object_clip_set(sd->crop.clipper, clip);
+ else
+ evas_object_clip_set(sd->obj, clip);
+ evas_object_clip_set(sd->bg, clip);
+}
+
+static void
+_smart_clip_unset(Evas_Object * obj)
+{
+ Smart_Data *sd;
+
+ sd = evas_object_smart_data_get(obj);
+ if (!sd) return;
+ if (sd->crop.clipper)
+ evas_object_clip_unset(sd->crop.clipper);
+ else
+ evas_object_clip_unset(sd->obj);
+ evas_object_clip_unset(sd->bg);
+}
+