servicemp3.cpp: temporarily store ac3/pcm delay for gstreamer-based playbacks (until...
diff --git a/lib/service/servicemp3.cpp b/lib/service/servicemp3.cpp
index 5c982aa1d6841875d808066447320e939ccc7fc0..10a741603f675c538c3caf0d08a99f6b51a7a758 100644
--- a/lib/service/servicemp3.cpp
+++ b/lib/service/servicemp3.cpp
@@ -2,20 +2,23 @@
 
        /* note: this requires gstreamer 0.10.x and a big list of plugins. */
        /* it's currently hardcoded to use a big-endian alsasink as sink. */
+#include <lib/base/ebase.h>
 #include <lib/base/eerror.h>
+#include <lib/base/init_num.h>
+#include <lib/base/init.h>
+#include <lib/base/nconfig.h>
 #include <lib/base/object.h>
-#include <lib/base/ebase.h>
-#include <string>
+#include <lib/dvb/decoder.h>
+#include <lib/components/file_eraser.h>
+#include <lib/gui/esubtitle.h>
 #include <lib/service/servicemp3.h>
 #include <lib/service/service.h>
-#include <lib/components/file_eraser.h>
-#include <lib/base/init_num.h>
-#include <lib/base/init.h>
+
+#include <string>
+
 #include <gst/gst.h>
 #include <gst/pbutils/missing-plugins.h>
 #include <sys/stat.h>
-/* for subtitles */
-#include <lib/gui/esubtitle.h>
 
 // eServiceFactoryMP3
 
@@ -27,6 +30,7 @@ eServiceFactoryMP3::eServiceFactoryMP3()
        if (sc)
        {
                std::list<std::string> extensions;
+               extensions.push_back("mp2");
                extensions.push_back("mp3");
                extensions.push_back("ogg");
                extensions.push_back("mpg");
@@ -35,9 +39,12 @@ eServiceFactoryMP3::eServiceFactoryMP3()
                extensions.push_back("wave");
                extensions.push_back("mkv");
                extensions.push_back("avi");
+               extensions.push_back("divx");
                extensions.push_back("dat");
                extensions.push_back("flac");
                extensions.push_back("mp4");
+               extensions.push_back("mov");
+               extensions.push_back("m4a");
                sc->addServiceFactory(eServiceFactoryMP3::id, this, extensions);
        }
 
@@ -59,7 +66,7 @@ DEFINE_REF(eServiceFactoryMP3)
 RESULT eServiceFactoryMP3::play(const eServiceReference &ref, ePtr<iPlayableService> &ptr)
 {
                // check resources...
-       ptr = new eServiceMP3(ref.path.c_str());
+       ptr = new eServiceMP3(ref);
        return 0;
 }
 
@@ -90,6 +97,7 @@ public:
        
        RESULT deleteFromDisk(int simulate);
        RESULT getListOfFilenames(std::list<std::string> &);
+       RESULT reindex();
 };
 
 DEFINE_REF(eMP3ServiceOfflineOperations);
@@ -132,6 +140,11 @@ RESULT eMP3ServiceOfflineOperations::getListOfFilenames(std::list<std::string> &
        return 0;
 }
 
+RESULT eMP3ServiceOfflineOperations::reindex()
+{
+       return -1;
+}
+
 
 RESULT eServiceFactoryMP3::offlineOperations(const eServiceReference &ref, ePtr<iServiceOfflineOperations> &ptr)
 {
@@ -157,11 +170,16 @@ eStaticServiceMP3Info::eStaticServiceMP3Info()
 
 RESULT eStaticServiceMP3Info::getName(const eServiceReference &ref, std::string &name)
 {
-       size_t last = ref.path.rfind('/');
-       if (last != std::string::npos)
-               name = ref.path.substr(last+1);
+       if ( ref.name.length() )
+               name = ref.name;
        else
-               name = ref.path;
+       {
+               size_t last = ref.path.rfind('/');
+               if (last != std::string::npos)
+                       name = ref.path.substr(last+1);
+               else
+                       name = ref.path;
+       }
        return 0;
 }
 
@@ -171,308 +189,189 @@ int eStaticServiceMP3Info::getLength(const eServiceReference &ref)
 }
 
 // eServiceMP3
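+/* the last used ac3/pcm delay is kept in statics so it survives across gstreamer playbacks
+   and is re-applied on every READY -> PAUSED transition (see setAC3Delay/setPCMDelay) */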
+int eServiceMP3::ac3_delay,
+    eServiceMP3::pcm_delay;
 
-eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eApp, 1)
+eServiceMP3::eServiceMP3(eServiceReference ref)
+       :m_ref(ref), m_pump(eApp, 1)
 {
        m_seekTimeout = eTimer::create(eApp);
+       m_subtitle_sync_timer = eTimer::create(eApp);
        m_stream_tags = 0;
-       m_currentAudioStream = 0;
+       m_currentAudioStream = -1;
        m_currentSubtitleStream = 0;
        m_subtitle_widget = 0;
        m_currentTrickRatio = 0;
+       m_subs_to_pull = 0;
+       m_buffer_size = 1*1024*1024;
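+       // 1 MiB default buffer, handed to setBufferSize() at the end of the constructor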
        CONNECT(m_seekTimeout->timeout, eServiceMP3::seekTimeoutCB);
+       CONNECT(m_subtitle_sync_timer->timeout, eServiceMP3::pushSubtitles);
        CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
-       GstElement *source = 0;
-       
-       GstElement *decoder = 0, *conv = 0, *flt = 0, *sink = 0; /* for audio */
-       
-       GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0;
-       
+       m_aspect = m_width = m_height = m_framerate = m_progressive = -1;
+
        m_state = stIdle;
-       eDebug("SERVICEMP3 construct!");
-       
-               /* FIXME: currently, decodebin isn't possible for 
-                  video streams. in that case, make a manual pipeline. */
+       eDebug("eServiceMP3::construct!");
 
+       const char *filename = m_ref.path.c_str();
        const char *ext = strrchr(filename, '.');
        if (!ext)
                ext = filename;
 
        sourceStream sourceinfo;
+       sourceinfo.is_video = FALSE;
+       sourceinfo.audiotype = atUnknown;
        if ( (strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat") ) == 0 )
+       {
                sourceinfo.containertype = ctMPEGPS;
+               sourceinfo.is_video = TRUE;
+       }
        else if ( strcasecmp(ext, ".ts") == 0 )
+       {
                sourceinfo.containertype = ctMPEGTS;
+               sourceinfo.is_video = TRUE;
+       }
        else if ( strcasecmp(ext, ".mkv") == 0 )
+       {
                sourceinfo.containertype = ctMKV;
-       else if ( strcasecmp(ext, ".avi") == 0 )
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".avi") == 0 || strcasecmp(ext, ".divx") == 0)
+       {
                sourceinfo.containertype = ctAVI;
-       else if ( strcasecmp(ext, ".mp4") == 0 )
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".mp4") == 0 || strcasecmp(ext, ".mov") == 0)
+       {
                sourceinfo.containertype = ctMP4;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".m4a") == 0 )
+       {
+               sourceinfo.containertype = ctMP4;
+               sourceinfo.audiotype = atAAC;
+       }
+       else if ( strcasecmp(ext, ".mp3") == 0 )
+               sourceinfo.audiotype = atMP3;
        else if ( (strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav")) == 0 )
                sourceinfo.containertype = ctCDA;
        if ( strcasecmp(ext, ".dat") == 0 )
+       {
                sourceinfo.containertype = ctVCD;
-       if ( (strncmp(filename, "http://", 7)) == 0 )
+               sourceinfo.is_video = TRUE;
+       }
+       if ( (strncmp(filename, "http://", 7)) == 0 || (strncmp(filename, "udp://", 6)) == 0 || (strncmp(filename, "rtp://", 6)) == 0  || (strncmp(filename, "https://", 8)) == 0 || (strncmp(filename, "mms://", 6)) == 0 || (strncmp(filename, "rtsp://", 7)) == 0 )
                sourceinfo.is_streaming = TRUE;
 
-       sourceinfo.is_video = ( sourceinfo.containertype && sourceinfo.containertype != ctCDA );
-
-       eDebug("filename=%s, containertype=%d, is_video=%d, is_streaming=%d", filename, sourceinfo.containertype, sourceinfo.is_video, sourceinfo.is_streaming);
-
-       int all_ok = 0;
+       gchar *uri;
 
-       m_gst_pipeline = gst_pipeline_new ("mediaplayer");
-       if (!m_gst_pipeline)
-               m_error_message = "failed to create GStreamer pipeline!\n";
-
-       if ( sourceinfo.containertype == ctCDA )
+       if ( sourceinfo.is_streaming )
        {
-               source = gst_element_factory_make ("cdiocddasrc", "cda-source");
-               if (source)
-                       g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
-               else
-                       sourceinfo.containertype = ctNone;
+               uri = g_strdup_printf ("%s", filename);
        }
-       if ( !sourceinfo.is_streaming && sourceinfo.containertype != ctCDA )
+       else if ( sourceinfo.containertype == ctCDA )
        {
-               source = gst_element_factory_make ("filesrc", "file-source");
-               if (source)
-                       g_object_set (G_OBJECT (source), "location", filename, NULL);
-               else
-                       m_error_message = "GStreamer can't open filesrc " + (std::string)filename + "!\n";
+               int i_track = atoi(filename+18);
+               uri = g_strdup_printf ("cdda://%i", i_track);
        }
-       else if ( sourceinfo.is_streaming ) 
+       else if ( sourceinfo.containertype == ctVCD )
        {
-               source = gst_element_factory_make ("neonhttpsrc", "http-source");
-               if (source)
-                       g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+               int fd = open(filename,O_RDONLY);
+               char tmp[128*1024];
+               int ret = read(fd, tmp, 128*1024);
+               close(fd);
+               if ( ret == -1 ) // this is a "REAL" VCD
+                       uri = g_strdup_printf ("vcd://");
                else
-                       m_error_message = "GStreamer plugin neonhttpsrc not available!\n";
+                       uri = g_strdup_printf ("file://%s", filename);
        }
        else
-       { 
-               int track = atoi(filename+18);
-               eDebug("play audio CD track #%i",track);
-               if (track > 0)
-                       g_object_set (G_OBJECT (source), "track", track, NULL);
-       }
-       if ( sourceinfo.is_video )
-       {
-                       /* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
-                                                  | queue_video -> dvbvideosink */
-
-               audio = gst_element_factory_make("dvbaudiosink", "audiosink");
-               if (!audio)
-                       m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
-               
-               video = gst_element_factory_make("dvbvideosink", "videosink");
-               if (!video)
-                       m_error_message += "failed to create Gstreamer element dvbvideosink\n";
-
-               queue_audio = gst_element_factory_make("queue", "queue_audio");
-               queue_video = gst_element_factory_make("queue", "queue_video");
 
-               std::string demux_type;
-               switch (sourceinfo.containertype)
-               {
-                       case ctMPEGTS:
-                               demux_type = "flutsdemux";
-                               break;
-                       case ctMPEGPS:
-                       case ctVCD:
-                               demux_type = "flupsdemux";
-                               break;
-                       case ctMKV:
-                               demux_type = "matroskademux";
-                               break;
-                       case ctAVI:
-                               demux_type = "avidemux";
-                               break;
-                       case ctMP4:
-                               demux_type = "qtdemux";
-                               break;
-                       default:
-                               break;
-               }
-               videodemux = gst_element_factory_make(demux_type.c_str(), "videodemux");
-               if (!videodemux)
-                       m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+               uri = g_strdup_printf ("file://%s", filename);
 
-               switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
-               if (!switch_audio)
-                       m_error_message = "GStreamer plugin input-selector not available!\n";
+       eDebug("eServiceMP3::playbin2 uri=%s", uri);
 
-               if (audio && queue_audio && video && queue_video && videodemux && switch_audio)
-               {
-                       g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
-                       g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
-                       g_object_set (G_OBJECT (queue_audio), "max-size-time", (guint64)0, NULL);
-                       g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
-                       g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
-                       g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
-                       g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
-                       all_ok = 1;
-               }
-       } else /* is audio */
-       {
-
-                       /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
-               decoder = gst_element_factory_make ("decodebin", "decoder");
-               if (!decoder)
-                       m_error_message += "failed to create Gstreamer element decodebin\n";
+       m_gst_playbin = gst_element_factory_make("playbin2", "playbin");
+       if (!m_gst_playbin)
+               m_error_message = "failed to create GStreamer pipeline!\n";
 
-               conv = gst_element_factory_make ("audioconvert", "converter");
-               if (!conv)
-                       m_error_message += "failed to create Gstreamer element audioconvert\n";
+       g_object_set (G_OBJECT (m_gst_playbin), "uri", uri, NULL);
 
-               flt = gst_element_factory_make ("capsfilter", "flt");
-               if (!flt)
-                       m_error_message += "failed to create Gstreamer element capsfilter\n";
+       int flags = 0x47; // ( == GST_PLAY_FLAG_VIDEO | GST_PLAY_FLAG_AUDIO | GST_PLAY_FLAG_NATIVE_VIDEO | GST_PLAY_FLAG_TEXT )
+       g_object_set (G_OBJECT (m_gst_playbin), "flags", flags, NULL);
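+       // GST_PLAY_FLAG_NATIVE_VIDEO (0x40) tells playbin2 not to insert software colorspace/scale converters in front of the video sink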
 
-                       /* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
-                       /* endianness, however, is not required to be set anymore. */
-               if (flt)
-               {
-                       GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */NULL);
-                       g_object_set (G_OBJECT (flt), "caps", caps, NULL);
-                       gst_caps_unref(caps);
-               }
+       g_free(uri);
 
-               sink = gst_element_factory_make ("alsasink", "alsa-output");
-               if (!sink)
-                       m_error_message += "failed to create Gstreamer element alsasink\n";
-
-               if (source && decoder && conv && sink)
-                       all_ok = 1;
-       }
-       if (m_gst_pipeline && all_ok)
+       GstElement *subsink = gst_element_factory_make("appsink", "subtitle_sink");
+       if (!subsink)
+               eDebug("eServiceMP3::sorry, can't play: missing gst-plugin-appsink");
+       else
        {
-               gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
-
-               if ( sourceinfo.containertype == ctCDA )
-               {
-                       queue_audio = gst_element_factory_make("queue", "queue_audio");
-                       g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
-                       gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
-                       gst_element_link_many(source, queue_audio, conv, sink, NULL);
-               }
-               else if ( sourceinfo.is_video )
-               {
-                       char srt_filename[strlen(filename)+1];
-                       strncpy(srt_filename,filename,strlen(filename)-3);
-                       srt_filename[strlen(filename)-3]='\0';
-                       strcat(srt_filename, "srt");
-                       struct stat buffer;
-                       if (stat(srt_filename, &buffer) == 0)
-                       {
-                               eDebug("subtitle file found: %s",srt_filename);
-                               GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
-                               g_object_set (G_OBJECT (subsource), "location", srt_filename, NULL);
-                               gst_bin_add(GST_BIN (m_gst_pipeline), subsource);
-                               GstPad *switchpad = gstCreateSubtitleSink(this, stSRT);
-                               gst_pad_link(gst_element_get_pad (subsource, "src"), switchpad);
-                               subtitleStream subs;
-                               subs.pad = switchpad;
-                               subs.type = stSRT;
-                               subs.language_code = std::string("und");
-                               m_subtitleStreams.push_back(subs);
-                       }
-                       gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, switch_audio, NULL);
-
-                       if ( sourceinfo.containertype == ctVCD )
-                       {
-                               GstElement *cdxaparse = gst_element_factory_make("cdxaparse", "cdxaparse");
-                               gst_bin_add(GST_BIN(m_gst_pipeline), cdxaparse);
-                               gst_element_link(source, cdxaparse);
-                               gst_element_link(cdxaparse, videodemux);
-                       }
-                       else
-                               gst_element_link(source, videodemux);
-
-                       gst_element_link(switch_audio, queue_audio);
-                       gst_element_link(queue_audio, audio);
-                       gst_element_link(queue_video, video);
-                       g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
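+               // subtitle buffers arrive through the appsink's "new-buffer" signal (gstCBsubtitleAvail) and are pushed out later by pushSubtitles()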
+               m_subs_to_pull_handler_id = g_signal_connect (subsink, "new-buffer", G_CALLBACK (gstCBsubtitleAvail), this);
+               g_object_set (G_OBJECT (m_gst_playbin), "text-sink", subsink, NULL);
+       }
 
-               } else /* is audio*/
+       if ( m_gst_playbin )
+       {
+               gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_playbin)), gstBusSyncHandler, this);
+               char srt_filename[strlen(filename)+1];
+               strncpy(srt_filename,filename,strlen(filename)-3);
+               srt_filename[strlen(filename)-3]='\0';
+               strcat(srt_filename, "srt");
+               struct stat buffer;
+               if (stat(srt_filename, &buffer) == 0)
                {
-                       queue_audio = gst_element_factory_make("queue", "queue_audio");
-
-                       g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
-                       g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
-
-                       g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
-
-                               /* gst_bin will take the 'floating references' */
-                       gst_bin_add_many (GST_BIN (m_gst_pipeline),
-                                               source, queue_audio, decoder, NULL);
-
-                               /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
-                       gst_element_link_many(source, queue_audio, decoder, NULL);
-
-                               /* create audio bin with the audioconverter, the capsfilter and the audiosink */
-                       audio = gst_bin_new ("audiobin");
-
-                       GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
-                       gst_bin_add_many(GST_BIN(audio), conv, flt, sink, NULL);
-                       gst_element_link_many(conv, flt, sink, NULL);
-                       gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
-                       gst_object_unref(audiopad);
-                       gst_bin_add (GST_BIN(m_gst_pipeline), audio);
+                       std::string suburi = "file://" + (std::string)srt_filename;
+                       eDebug("eServiceMP3::subtitle uri: %s",suburi.c_str());
+                       g_object_set (G_OBJECT (m_gst_playbin), "suburi", suburi.c_str(), NULL);
+                       subtitleStream subs;
+                       subs.type = stSRT;
+                       subs.language_code = std::string("und");
+                       m_subtitleStreams.push_back(subs);
                }
        } else
        {
                m_event((iPlayableService*)this, evUser+12);
 
-               if (m_gst_pipeline)
-                       gst_object_unref(GST_OBJECT(m_gst_pipeline));
-               if (source)
-                       gst_object_unref(GST_OBJECT(source));
-               if (decoder)
-                       gst_object_unref(GST_OBJECT(decoder));
-               if (conv)
-                       gst_object_unref(GST_OBJECT(conv));
-               if (sink)
-                       gst_object_unref(GST_OBJECT(sink));
-
-               if (audio)
-                       gst_object_unref(GST_OBJECT(audio));
-               if (queue_audio)
-                       gst_object_unref(GST_OBJECT(queue_audio));
-               if (video)
-                       gst_object_unref(GST_OBJECT(video));
-               if (queue_video)
-                       gst_object_unref(GST_OBJECT(queue_video));
-               if (videodemux)
-                       gst_object_unref(GST_OBJECT(videodemux));
-               if (switch_audio)
-                       gst_object_unref(GST_OBJECT(switch_audio));
-
-               eDebug("sorry, can't play: %s",m_error_message.c_str());
-               m_gst_pipeline = 0;
+               if (m_gst_playbin)
+                       gst_object_unref(GST_OBJECT(m_gst_playbin));
+
+               eDebug("eServiceMP3::sorry, can't play: %s",m_error_message.c_str());
+               m_gst_playbin = 0;
        }
 
-       gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
+       setBufferSize(m_buffer_size);
 }
 
 eServiceMP3::~eServiceMP3()
 {
+       // disconnect subtitle callback
+       GstElement *sink;
+       g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+       if (sink)
+       {
+               g_signal_handler_disconnect (sink, m_subs_to_pull_handler_id);
+               gst_object_unref(sink);
+       }
+
        delete m_subtitle_widget;
+
+       // disconnect sync handler callback
+       gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_playbin)), NULL, NULL);
+
        if (m_state == stRunning)
                stop();
-       
+
        if (m_stream_tags)
                gst_tag_list_free(m_stream_tags);
        
-       if (m_gst_pipeline)
+       if (m_gst_playbin)
        {
-               gst_object_unref (GST_OBJECT (m_gst_pipeline));
-               eDebug("SERVICEMP3 destruct!");
+               gst_object_unref (GST_OBJECT (m_gst_playbin));
+               eDebug("eServiceMP3::destruct!");
        }
 }
 
-DEFINE_REF(eServiceMP3);       
+DEFINE_REF(eServiceMP3);
 
 RESULT eServiceMP3::connectEvent(const Slot2<void,iPlayableService*,int> &event, ePtr<eConnection> &connection)
 {
@@ -482,26 +381,31 @@ RESULT eServiceMP3::connectEvent(const Slot2<void,iPlayableService*,int> &event,
 
 RESULT eServiceMP3::start()
 {
-       assert(m_state == stIdle);
-       
+       ASSERT(m_state == stIdle);
+
        m_state = stRunning;
-       if (m_gst_pipeline)
+       if (m_gst_playbin)
        {
-               eDebug("starting pipeline");
-               gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
+               eDebug("eServiceMP3::starting pipeline");
+               gst_element_set_state (m_gst_playbin, GST_STATE_PLAYING);
        }
+
        m_event(this, evStart);
+
        return 0;
 }
 
 RESULT eServiceMP3::stop()
 {
-       assert(m_state != stIdle);
+       ASSERT(m_state != stIdle);
+
        if (m_state == stStopped)
                return -1;
-       eDebug("MP3: %s stop\n", m_filename.c_str());
-       gst_element_set_state(m_gst_pipeline, GST_STATE_NULL);
+
+       eDebug("eServiceMP3::stop %s", m_ref.path.c_str());
+       gst_element_set_state(m_gst_playbin, GST_STATE_NULL);
        m_state = stStopped;
+
        return 0;
 }
 
@@ -518,18 +422,16 @@ RESULT eServiceMP3::pause(ePtr<iPauseableService> &ptr)
 
 RESULT eServiceMP3::setSlowMotion(int ratio)
 {
-       /* we can't do slomo yet */
-       return -1;
+       if (!ratio)
+               return 0;
+       eDebug("eServiceMP3::setSlowMotion ratio=%f",1/(float)ratio);
+       return trickSeek(1/(float)ratio);
 }
 
 RESULT eServiceMP3::setFastForward(int ratio)
 {
-       m_currentTrickRatio = ratio;
-       if (ratio)
-               m_seekTimeout->start(1000, 0);
-       else
-               m_seekTimeout->stop();
-       return 0;
+       eDebug("eServiceMP3::setFastForward ratio=%i",ratio);
+       return trickSeek(ratio);
 }
 
 void eServiceMP3::seekTimeoutCB()
@@ -557,25 +459,21 @@ void eServiceMP3::seekTimeoutCB()
                // iPausableService
 RESULT eServiceMP3::pause()
 {
-       if (!m_gst_pipeline)
+       if (!m_gst_playbin || m_state != stRunning)
                return -1;
-       GstStateChangeReturn res = gst_element_set_state(m_gst_pipeline, GST_STATE_PAUSED);
-       if (res == GST_STATE_CHANGE_ASYNC)
-       {
-               pts_t ppos;
-               getPlayPosition(ppos);
-               seekTo(ppos);
-       }
+
+       gst_element_set_state(m_gst_playbin, GST_STATE_PAUSED);
+
        return 0;
 }
 
 RESULT eServiceMP3::unpause()
 {
-       if (!m_gst_pipeline)
+       if (!m_gst_playbin || m_state != stRunning)
                return -1;
 
-       GstStateChangeReturn res;
-       res = gst_element_set_state(m_gst_pipeline, GST_STATE_PLAYING);
+       gst_element_set_state(m_gst_playbin, GST_STATE_PLAYING);
+
        return 0;
 }
 
@@ -588,43 +486,100 @@ RESULT eServiceMP3::seek(ePtr<iSeekableService> &ptr)
 
 RESULT eServiceMP3::getLength(pts_t &pts)
 {
-       if (!m_gst_pipeline)
+       if (!m_gst_playbin)
                return -1;
+
        if (m_state != stRunning)
                return -1;
-       
+
        GstFormat fmt = GST_FORMAT_TIME;
        gint64 len;
        
-       if (!gst_element_query_duration(m_gst_pipeline, &fmt, &len))
+       if (!gst_element_query_duration(m_gst_playbin, &fmt, &len))
                return -1;
-       
                /* len is in nanoseconds. we have 90 000 pts per second. */
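+                       /* 1 000 000 000 ns / 90 000 pts = 11111.1, hence the division by 11111 below */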
        
        pts = len / 11111;
        return 0;
 }
 
-RESULT eServiceMP3::seekTo(pts_t to)
+RESULT eServiceMP3::seekToImpl(pts_t to)
 {
-       if (!m_gst_pipeline)
-               return -1;
-
                /* convert pts to nanoseconds */
        gint64 time_nanoseconds = to * 11111LL;
-       if (!gst_element_seek (m_gst_pipeline, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
+       if (!gst_element_seek (m_gst_playbin, 1.0, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH,
                GST_SEEK_TYPE_SET, time_nanoseconds,
                GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE))
        {
-               eDebug("SEEK failed");
+               eDebug("eServiceMP3::seekTo failed");
+               return -1;
+       }
+
+       return 0;
+}
+
+RESULT eServiceMP3::seekTo(pts_t to)
+{
+       RESULT ret = -1;
+
+       if (m_gst_playbin) {
+               eSingleLocker l(m_subs_to_pull_lock); // needed so we don't handle incoming subtitles during the seek
+               if (!(ret = seekToImpl(to)))
+               {
+                       m_subtitle_pages.clear();
+                       m_subs_to_pull = 0;
+               }
+       }
+
+       return ret;
+}
+
+
+RESULT eServiceMP3::trickSeek(gdouble ratio)
+{
+       if (!m_gst_playbin)
+               return -1;
+       if (!ratio)
+               return seekRelative(0, 0);
+
+       GstEvent *s_event;
+       int flags;
+       flags = GST_SEEK_FLAG_NONE;
+       flags |= GST_SEEK_FLAG_FLUSH;
+//     flags |= GstSeekFlags (GST_SEEK_FLAG_ACCURATE);
+       flags |= GST_SEEK_FLAG_KEY_UNIT;
+//     flags |= GstSeekFlags (GST_SEEK_FLAG_SEGMENT);
+//     flags |= GstSeekFlags (GST_SEEK_FLAG_SKIP);
+
+       GstFormat fmt = GST_FORMAT_TIME;
+       gint64 pos, len;
+       gst_element_query_duration(m_gst_playbin, &fmt, &len);
+       gst_element_query_position(m_gst_playbin, &fmt, &pos);
+
+       if ( ratio >= 0 )
+       {
+               s_event = gst_event_new_seek (ratio, GST_FORMAT_TIME, (GstSeekFlags)flags, GST_SEEK_TYPE_SET, pos, GST_SEEK_TYPE_SET, len);
+
+               eDebug("eServiceMP3::trickSeek with rate %lf to %" GST_TIME_FORMAT " ", ratio, GST_TIME_ARGS (pos));
+       }
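+       // negative rates: only the playback rate is changed; start/stop positions are left untouched (GST_SEEK_TYPE_NONE)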
+       else
+       {
+               s_event = gst_event_new_seek (ratio, GST_FORMAT_TIME, (GstSeekFlags)(GST_SEEK_FLAG_SKIP|GST_SEEK_FLAG_FLUSH), GST_SEEK_TYPE_NONE, -1, GST_SEEK_TYPE_NONE, -1);
+       }
+
+       if (!gst_element_send_event ( GST_ELEMENT (m_gst_playbin), s_event))
+       {
+               eDebug("eServiceMP3::trickSeek failed");
                return -1;
        }
+
        return 0;
 }
 
+
 RESULT eServiceMP3::seekRelative(int direction, pts_t to)
 {
-       if (!m_gst_pipeline)
+       if (!m_gst_playbin)
                return -1;
 
        pts_t ppos;
@@ -639,19 +594,40 @@ RESULT eServiceMP3::seekRelative(int direction, pts_t to)
 
 RESULT eServiceMP3::getPlayPosition(pts_t &pts)
 {
-       if (!m_gst_pipeline)
+       GstFormat fmt = GST_FORMAT_TIME;
+       gint64 pos;
+       GstElement *sink;
+       pts = 0;
+
+       if (!m_gst_playbin)
                return -1;
        if (m_state != stRunning)
                return -1;
-       
-       GstFormat fmt = GST_FORMAT_TIME;
-       gint64 len;
-       
-       if (!gst_element_query_position(m_gst_pipeline, &fmt, &len))
+
+       g_object_get (G_OBJECT (m_gst_playbin), "audio-sink", &sink, NULL);
+
+       if (!sink)
+               g_object_get (G_OBJECT (m_gst_playbin), "video-sink", &sink, NULL);
+
+       if (!sink)
                return -1;
-       
-               /* len is in nanoseconds. we have 90 000 pts per second. */
-       pts = len / 11111;
+
+       gchar *name = gst_element_get_name(sink);
+       gboolean use_get_decoder_time = strstr(name, "dvbaudiosink") || strstr(name, "dvbvideosink");
+       g_free(name);
+
+       if (use_get_decoder_time)
+               g_signal_emit_by_name(sink, "get-decoder-time", &pos);
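+               /* dvbaudiosink/dvbvideosink report the decoder's current timestamp via their
+                  "get-decoder-time" action signal, which is used instead of a pipeline position query */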
+
+       gst_object_unref(sink);
+
+       if (!use_get_decoder_time && !gst_element_query_position(m_gst_playbin, &fmt, &pos)) {
+               eDebug("gst_element_query_position failed in getPlayPosition");
+               return -1;
+       }
+
+       /* pos is in nanoseconds. we have 90 000 pts per second. */
+       pts = pos / 11111;
        return 0;
 }
 
@@ -674,35 +650,105 @@ RESULT eServiceMP3::info(ePtr<iServiceInformation>&i)
 
 RESULT eServiceMP3::getName(std::string &name)
 {
-       name = m_filename;
-       size_t n = name.rfind('/');
-       if (n != std::string::npos)
-               name = name.substr(n + 1);
+       std::string title = m_ref.getName();
+       if (title.empty())
+       {
+               name = m_ref.path;
+               size_t n = name.rfind('/');
+               if (n != std::string::npos)
+                       name = name.substr(n + 1);
+       }
+       else
+               name = title;
        return 0;
 }
 
 int eServiceMP3::getInfo(int w)
 {
-       gchar *tag = 0;
+       const gchar *tag = 0;
 
        switch (w)
        {
-       case sTitle:
-       case sArtist:
-       case sAlbum:
-       case sComment:
-       case sTracknumber:
-       case sGenre:
-       case sVideoType:
-       case sTimeCreate:
+       case sServiceref: return m_ref;
+       case sVideoHeight: return m_height;
+       case sVideoWidth: return m_width;
+       case sFrameRate: return m_framerate;
+       case sProgressive: return m_progressive;
+       case sAspect: return m_aspect;
+       case sTagTitle:
+       case sTagArtist:
+       case sTagAlbum:
+       case sTagTitleSortname:
+       case sTagArtistSortname:
+       case sTagAlbumSortname:
+       case sTagDate:
+       case sTagComposer:
+       case sTagGenre:
+       case sTagComment:
+       case sTagExtendedComment:
+       case sTagLocation:
+       case sTagHomepage:
+       case sTagDescription:
+       case sTagVersion:
+       case sTagISRC:
+       case sTagOrganization:
+       case sTagCopyright:
+       case sTagCopyrightURI:
+       case sTagContact:
+       case sTagLicense:
+       case sTagLicenseURI:
+       case sTagCodec:
+       case sTagAudioCodec:
+       case sTagVideoCodec:
+       case sTagEncoder:
+       case sTagLanguageCode:
+       case sTagKeywords:
+       case sTagChannelMode:
        case sUser+12:
                return resIsString;
-       case sCurrentTitle:
+       case sTagTrackGain:
+       case sTagTrackPeak:
+       case sTagAlbumGain:
+       case sTagAlbumPeak:
+       case sTagReferenceLevel:
+       case sTagBeatsPerMinute:
+       case sTagImage:
+       case sTagPreviewImage:
+       case sTagAttachment:
+               return resIsPyObject;
+       case sTagTrackNumber:
                tag = GST_TAG_TRACK_NUMBER;
                break;
-       case sTotalTitles:
+       case sTagTrackCount:
                tag = GST_TAG_TRACK_COUNT;
                break;
+       case sTagAlbumVolumeNumber:
+               tag = GST_TAG_ALBUM_VOLUME_NUMBER;
+               break;
+       case sTagAlbumVolumeCount:
+               tag = GST_TAG_ALBUM_VOLUME_COUNT;
+               break;
+       case sTagBitrate:
+               tag = GST_TAG_BITRATE;
+               break;
+       case sTagNominalBitrate:
+               tag = GST_TAG_NOMINAL_BITRATE;
+               break;
+       case sTagMinimumBitrate:
+               tag = GST_TAG_MINIMUM_BITRATE;
+               break;
+       case sTagMaximumBitrate:
+               tag = GST_TAG_MAXIMUM_BITRATE;
+               break;
+       case sTagSerial:
+               tag = GST_TAG_SERIAL;
+               break;
+       case sTagEncoderVersion:
+               tag = GST_TAG_ENCODER_VERSION;
+               break;
+       case sTagCRC:
+               tag = "has-crc";
+               break;
        default:
                return resNA;
        }
@@ -713,48 +759,110 @@ int eServiceMP3::getInfo(int w)
        guint value;
        if (gst_tag_list_get_uint(m_stream_tags, tag, &value))
                return (int) value;
-       
-       return 0;
 
+       return 0;
 }
 
 std::string eServiceMP3::getInfoString(int w)
 {
-       if ( !m_stream_tags )
+       if ( !m_stream_tags && w < sUser && w > 26 )
                return "";
-       gchar *tag = 0;
+       const gchar *tag = 0;
        switch (w)
        {
-       case sTitle:
+       case sTagTitle:
                tag = GST_TAG_TITLE;
                break;
-       case sArtist:
+       case sTagArtist:
                tag = GST_TAG_ARTIST;
                break;
-       case sAlbum:
+       case sTagAlbum:
                tag = GST_TAG_ALBUM;
                break;
-       case sComment:
-               tag = GST_TAG_COMMENT;
-               break;
-       case sTracknumber:
-               tag = GST_TAG_TRACK_NUMBER;
+       case sTagTitleSortname:
+               tag = GST_TAG_TITLE_SORTNAME;
                break;
-       case sGenre:
-               tag = GST_TAG_GENRE;
+       case sTagArtistSortname:
+               tag = GST_TAG_ARTIST_SORTNAME;
                break;
-       case sVideoType:
-               tag = GST_TAG_VIDEO_CODEC;
+       case sTagAlbumSortname:
+               tag = GST_TAG_ALBUM_SORTNAME;
                break;
-       case sTimeCreate:
+       case sTagDate:
                GDate *date;
                if (gst_tag_list_get_date(m_stream_tags, GST_TAG_DATE, &date))
                {
-                       gchar res[5];
-                       g_date_strftime (res, sizeof(res), "%Y", date); 
+                       gchar res[11];
+                       g_date_strftime (res, sizeof(res), "%Y-%m-%d", date); 
                        return (std::string)res;
                }
                break;
+       case sTagComposer:
+               tag = GST_TAG_COMPOSER;
+               break;
+       case sTagGenre:
+               tag = GST_TAG_GENRE;
+               break;
+       case sTagComment:
+               tag = GST_TAG_COMMENT;
+               break;
+       case sTagExtendedComment:
+               tag = GST_TAG_EXTENDED_COMMENT;
+               break;
+       case sTagLocation:
+               tag = GST_TAG_LOCATION;
+               break;
+       case sTagHomepage:
+               tag = GST_TAG_HOMEPAGE;
+               break;
+       case sTagDescription:
+               tag = GST_TAG_DESCRIPTION;
+               break;
+       case sTagVersion:
+               tag = GST_TAG_VERSION;
+               break;
+       case sTagISRC:
+               tag = GST_TAG_ISRC;
+               break;
+       case sTagOrganization:
+               tag = GST_TAG_ORGANIZATION;
+               break;
+       case sTagCopyright:
+               tag = GST_TAG_COPYRIGHT;
+               break;
+       case sTagCopyrightURI:
+               tag = GST_TAG_COPYRIGHT_URI;
+               break;
+       case sTagContact:
+               tag = GST_TAG_CONTACT;
+               break;
+       case sTagLicense:
+               tag = GST_TAG_LICENSE;
+               break;
+       case sTagLicenseURI:
+               tag = GST_TAG_LICENSE_URI;
+               break;
+       case sTagCodec:
+               tag = GST_TAG_CODEC;
+               break;
+       case sTagAudioCodec:
+               tag = GST_TAG_AUDIO_CODEC;
+               break;
+       case sTagVideoCodec:
+               tag = GST_TAG_VIDEO_CODEC;
+               break;
+       case sTagEncoder:
+               tag = GST_TAG_ENCODER;
+               break;
+       case sTagLanguageCode:
+               tag = GST_TAG_LANGUAGE_CODE;
+               break;
+       case sTagKeywords:
+               tag = GST_TAG_KEYWORDS;
+               break;
+       case sTagChannelMode:
+               tag = "channel-mode";
+               break;
        case sUser+12:
                return m_error_message;
        default:
@@ -772,6 +880,66 @@ std::string eServiceMP3::getInfoString(int w)
        return "";
 }
 
+PyObject *eServiceMP3::getInfoObject(int w)
+{
+       const gchar *tag = 0;
+       bool isBuffer = false;
+       switch (w)
+       {
+               case sTagTrackGain:
+                       tag = GST_TAG_TRACK_GAIN;
+                       break;
+               case sTagTrackPeak:
+                       tag = GST_TAG_TRACK_PEAK;
+                       break;
+               case sTagAlbumGain:
+                       tag = GST_TAG_ALBUM_GAIN;
+                       break;
+               case sTagAlbumPeak:
+                       tag = GST_TAG_ALBUM_PEAK;
+                       break;
+               case sTagReferenceLevel:
+                       tag = GST_TAG_REFERENCE_LEVEL;
+                       break;
+               case sTagBeatsPerMinute:
+                       tag = GST_TAG_BEATS_PER_MINUTE;
+                       break;
+               case sTagImage:
+                       tag = GST_TAG_IMAGE;
+                       isBuffer = true;
+                       break;
+               case sTagPreviewImage:
+                       tag = GST_TAG_PREVIEW_IMAGE;
+                       isBuffer = true;
+                       break;
+               case sTagAttachment:
+                       tag = GST_TAG_ATTACHMENT;
+                       isBuffer = true;
+                       break;
+               default:
+                       break;
+       }
+
+       if ( !m_stream_tags || !tag )
+               return 0;
+
+       if ( isBuffer )
+       {
+               const GValue *gv_buffer = gst_tag_list_get_value_index(m_stream_tags, tag, 0);
+               if ( gv_buffer )
+               {
+                       GstBuffer *buffer;
+                       buffer = gst_value_get_buffer (gv_buffer);
+                       return PyBuffer_FromMemory(GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE(buffer));
+               }
+       }
+       else
+       {
+               gdouble value = 0.0;
+               gst_tag_list_get_double(m_stream_tags, tag, &value);
+               return PyFloat_FromDouble(value);
+       }
+
+       return 0;
+}
+
 RESULT eServiceMP3::audioChannel(ePtr<iAudioChannelSelection> &ptr)
 {
        ptr = this;
@@ -790,6 +958,12 @@ RESULT eServiceMP3::subtitle(ePtr<iSubtitleOutput> &ptr)
        return 0;
 }
 
+RESULT eServiceMP3::audioDelay(ePtr<iAudioDelay> &ptr)
+{
+       ptr = this;
+       return 0;
+}
+
 int eServiceMP3::getNumberOfTracks()
 {
        return m_audioStreams.size();
@@ -797,43 +971,40 @@ int eServiceMP3::getNumberOfTracks()
 
 int eServiceMP3::getCurrentTrack()
 {
+       if (m_currentAudioStream == -1)
+               g_object_get (G_OBJECT (m_gst_playbin), "current-audio", &m_currentAudioStream, NULL);
        return m_currentAudioStream;
 }
 
 RESULT eServiceMP3::selectTrack(unsigned int i)
 {
-       int ret = selectAudioStream(i);
-       /* flush */
        pts_t ppos;
        getPlayPosition(ppos);
-       seekTo(ppos);
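+       // rewind 1 second (90000 pts) before the flush so playback resumes just before the audio switch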
+       ppos -= 90000;
+       if (ppos < 0)
+               ppos = 0;
+
+       int ret = selectAudioStream(i);
+       if (!ret) {
+               /* flush */
+               seekTo(ppos);
+       }
 
        return ret;
 }
 
 int eServiceMP3::selectAudioStream(int i)
 {
-       gint nb_sources;
-       GstPad *active_pad;
-       GstElement *switch_audio = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_audio");
-       if ( !switch_audio )
+       int current_audio;
+       g_object_set (G_OBJECT (m_gst_playbin), "current-audio", i, NULL);
+       g_object_get (G_OBJECT (m_gst_playbin), "current-audio", &current_audio, NULL);
+       if ( current_audio == i )
        {
-               eDebug("can't switch audio tracks! gst-plugin-selector needed");
-               return -1;
+               eDebug ("eServiceMP3::switched to audio stream %i", current_audio);
+               m_currentAudioStream = i;
+               return 0;
        }
-       g_object_get (G_OBJECT (switch_audio), "n-pads", &nb_sources, NULL);
-       if ( (unsigned int)i >= m_audioStreams.size() || i >= nb_sources || (unsigned int)m_currentAudioStream >= m_audioStreams.size() )
-               return -2;
-       char sinkpad[8];
-       sprintf(sinkpad, "sink%d", i);
-       g_object_set (G_OBJECT (switch_audio), "active-pad", gst_element_get_pad (switch_audio, sinkpad), NULL);
-       g_object_get (G_OBJECT (switch_audio), "active-pad", &active_pad, NULL);
-       gchar *name;
-       name = gst_pad_get_name (active_pad);
-       eDebug ("switched audio to (%s)", name);
-       g_free(name);
-       m_currentAudioStream = i;
-       return 0;
+       return -1;
 }
 
 int eServiceMP3::getCurrentChannel()
@@ -849,10 +1020,10 @@ RESULT eServiceMP3::selectChannel(int i)
 
 RESULT eServiceMP3::getTrackInfo(struct iAudioTrackInfo &info, unsigned int i)
 {
-//     eDebug("eServiceMP3::getTrackInfo(&info, %i)",i);
        if (i >= m_audioStreams.size())
                return -2;
-       if (m_audioStreams[i].type == atMPEG)
+       info.m_description = m_audioStreams[i].codec;
+/*     if (m_audioStreams[i].type == atMPEG)
                info.m_description = "MPEG";
        else if (m_audioStreams[i].type == atMP3)
                info.m_description = "MP3";
@@ -866,8 +1037,10 @@ RESULT eServiceMP3::getTrackInfo(struct iAudioTrackInfo &info, unsigned int i)
                info.m_description = "PCM";
        else if (m_audioStreams[i].type == atOGG)
                info.m_description = "OGG";
+       else if (m_audioStreams[i].type == atFLAC)
+               info.m_description = "FLAC";
        else
-               info.m_description = "???";
+               info.m_description = "???";*/
        if (info.m_language.empty())
                info.m_language = m_audioStreams[i].language_code;
        return 0;
@@ -886,111 +1059,256 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
        if (gst_message_get_structure(msg))
        {
                gchar *string = gst_structure_to_string(gst_message_get_structure(msg));
-               eDebug("gst_message from %s: %s", sourceName, string);
+               eDebug("eServiceMP3::gst_message from %s: %s", sourceName, string);
                g_free(string);
        }
        else
-               eDebug("gst_message from %s: %s (without structure)", sourceName, GST_MESSAGE_TYPE_NAME(msg));
+               eDebug("eServiceMP3::gst_message from %s: %s (without structure)", sourceName, GST_MESSAGE_TYPE_NAME(msg));
 #endif
        switch (GST_MESSAGE_TYPE (msg))
        {
-       case GST_MESSAGE_EOS:
-               m_event((iPlayableService*)this, evEOF);
-               break;
-       case GST_MESSAGE_ERROR:
-       {
-               gchar *debug;
-               GError *err;
+               case GST_MESSAGE_EOS:
+                       m_event((iPlayableService*)this, evEOF);
+                       break;
+               case GST_MESSAGE_STATE_CHANGED:
+               {
+                       if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
+                               break;
 
-               gst_message_parse_error (msg, &err, &debug);
-               g_free (debug);
-               eWarning("Gstreamer error: %s (%i)", err->message, err->code );
-               if ( err->domain == GST_STREAM_ERROR && err->code == GST_STREAM_ERROR_DECODE )
+                       GstState old_state, new_state;
+                       gst_message_parse_state_changed(msg, &old_state, &new_state, NULL);
+               
+                       if(old_state == new_state)
+                               break;
+       
+                       eDebug("eServiceMP3::state transition %s -> %s", gst_element_state_get_name(old_state), gst_element_state_get_name(new_state));
+       
+                       GstStateChange transition = (GstStateChange)GST_STATE_TRANSITION(old_state, new_state);
+       
+                       switch(transition)
+                       {
+                               case GST_STATE_CHANGE_NULL_TO_READY:
+                               {
+                               }       break;
+                               case GST_STATE_CHANGE_READY_TO_PAUSED:
+                               {
+                                       GstElement *sink;
+                                       g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+                                       if (sink)
+                                       {
+                                               g_object_set (G_OBJECT (sink), "max-buffers", 2, NULL);
+                                               g_object_set (G_OBJECT (sink), "sync", FALSE, NULL);
+                                               g_object_set (G_OBJECT (sink), "async", FALSE, NULL);
+                                               g_object_set (G_OBJECT (sink), "emit-signals", TRUE, NULL);
+                                               gst_object_unref(sink);
+                                       }
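+                                       // re-apply the remembered ac3/pcm delays once the sinks are in place (READY -> PAUSED)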
+                                       setAC3Delay(ac3_delay);
+                                       setPCMDelay(pcm_delay);
+                               }       break;
+                               case GST_STATE_CHANGE_PAUSED_TO_PLAYING:
+                               {
+                               }       break;
+                               case GST_STATE_CHANGE_PLAYING_TO_PAUSED:
+                               {
+                               }       break;
+                               case GST_STATE_CHANGE_PAUSED_TO_READY:
+                               {
+                               }       break;
+                               case GST_STATE_CHANGE_READY_TO_NULL:
+                               {
+                               }       break;
+                       }
+                       break;
+               }
+               case GST_MESSAGE_ERROR:
                {
-                       if ( g_strrstr(sourceName, "videosink") )
-                               m_event((iPlayableService*)this, evUser+11);
+                       gchar *debug;
+                       GError *err;
+                       gst_message_parse_error (msg, &err, &debug);
+                       g_free (debug);
+                       eWarning("Gstreamer error: %s (%i) from %s", err->message, err->code, sourceName );
+                       if ( err->domain == GST_STREAM_ERROR )
+                       {
+                               if ( err->code == GST_STREAM_ERROR_CODEC_NOT_FOUND )
+                               {
+                                       if ( g_strrstr(sourceName, "videosink") )
+                                               m_event((iPlayableService*)this, evUser+11);
+                                       else if ( g_strrstr(sourceName, "audiosink") )
+                                               m_event((iPlayableService*)this, evUser+10);
+                               }
+                       }
+                       g_error_free(err);
+                       break;
                }
-               g_error_free(err);
-                       /* TODO: signal error condition to user */
-               break;
-       }
-       case GST_MESSAGE_TAG:
-       {
-               GstTagList *tags, *result;
-               gst_message_parse_tag(msg, &tags);
-
-               result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_PREPEND);
-               if (result)
+               case GST_MESSAGE_INFO:
+               {
+                       gchar *debug;
+                       GError *inf;
+       
+                       gst_message_parse_info (msg, &inf, &debug);
+                       g_free (debug);
+                       if ( inf->domain == GST_STREAM_ERROR && inf->code == GST_STREAM_ERROR_DECODE )
+                       {
+                               if ( g_strrstr(sourceName, "videosink") )
+                                       m_event((iPlayableService*)this, evUser+14);
+                       }
+                       g_error_free(inf);
+                       break;
+               }
+               case GST_MESSAGE_TAG:
                {
-                       if (m_stream_tags)
-                               gst_tag_list_free(m_stream_tags);
-                       m_stream_tags = result;
+                       GstTagList *tags, *result;
+                       gst_message_parse_tag(msg, &tags);
+       
+                       result = gst_tag_list_merge(m_stream_tags, tags, GST_TAG_MERGE_REPLACE);
+                       if (result)
+                       {
+                               if (m_stream_tags)
+                                       gst_tag_list_free(m_stream_tags);
+                               m_stream_tags = result;
+                       }
+       
+                       const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+                       if ( gv_image )
+                       {
+                               GstBuffer *buf_image;
+                               buf_image = gst_value_get_buffer (gv_image);
+                               int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+                               int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+                               close(fd);
+                               eDebug("eServiceMP3::/tmp/.id3coverart %d bytes written ", ret);
+                               m_event((iPlayableService*)this, evUser+13);
+                       }
+                       gst_tag_list_free(tags);
+                       m_event((iPlayableService*)this, evUpdatedInfo);
+                       break;
                }
-
-               gchar *g_audiocodec;
-               if ( gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_audiocodec) && m_audioStreams.size() == 0 )
+               case GST_MESSAGE_ASYNC_DONE:
                {
-                       GstPad* pad = gst_element_get_pad (GST_ELEMENT(source), "src");
-                       GstCaps* caps = gst_pad_get_caps(pad);
-                       GstStructure* str = gst_caps_get_structure(caps, 0);
-                       if ( !str )
+                       if(GST_MESSAGE_SRC(msg) != GST_OBJECT(m_gst_playbin))
                                break;
-                       audioStream audio;
-                       audio.type = gstCheckAudioPad(str);
-                       m_audioStreams.push_back(audio);
-               }
 
-               gst_tag_list_free(tags);
-               m_event((iPlayableService*)this, evUpdatedInfo);
-               break;
-       }
-       case GST_MESSAGE_ASYNC_DONE:
-       {
-               GstTagList *tags;
-               for (std::vector<audioStream>::iterator IterAudioStream(m_audioStreams.begin()); IterAudioStream != m_audioStreams.end(); ++IterAudioStream)
-               {
-                       if ( IterAudioStream->pad )
+                       GstTagList *tags;
+                       gint i, active_idx, n_video = 0, n_audio = 0, n_text = 0;
+
+                       g_object_get (m_gst_playbin, "n-video", &n_video, NULL);
+                       g_object_get (m_gst_playbin, "n-audio", &n_audio, NULL);
+                       g_object_get (m_gst_playbin, "n-text", &n_text, NULL);
+
+                       eDebug("eServiceMP3::async-done - %d video, %d audio, %d subtitle", n_video, n_audio, n_text);
+
+                       if ( n_video + n_audio <= 0 )
+                               stop();
+
+                       active_idx = 0;
+
+                       m_audioStreams.clear();
+                       m_subtitleStreams.clear();
+
+                       for (i = 0; i < n_audio; i++)
                        {
-                               g_object_get(IterAudioStream->pad, "tags", &tags, NULL);
-                               gchar *g_language;
-                               if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+                               audioStream audio;
+                               gchar *g_codec, *g_lang;
+                               GstPad* pad = 0;
+                               g_signal_emit_by_name (m_gst_playbin, "get-audio-pad", i, &pad);
+                               GstCaps* caps = gst_pad_get_negotiated_caps(pad);
+                               if (!caps)
+                                       continue;
+                               GstStructure* str = gst_caps_get_structure(caps, 0);
+                               const gchar *g_type = gst_structure_get_name(str);
+                               eDebug("AUDIO STRUCT=%s", g_type);
+                               audio.type = gstCheckAudioPad(str);
+                               g_codec = g_strdup(g_type);
+                               g_lang = g_strdup_printf ("und");
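+                               // "und" = ISO 639-2 "undetermined"; replaced below if a language tag is present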
+                               g_signal_emit_by_name (m_gst_playbin, "get-audio-tags", i, &tags);
+                               if ( tags && gst_is_tag_list(tags) )
                                {
-                                       eDebug("found audio language %s",g_language);
-                                       IterAudioStream->language_code = std::string(g_language);
-                                       g_free (g_language);
+                                       gst_tag_list_get_string(tags, GST_TAG_AUDIO_CODEC, &g_codec);
+                                       gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
+                                       gst_tag_list_free(tags);
                                }
+                               audio.language_code = std::string(g_lang);
+                               audio.codec = std::string(g_codec);
+                               eDebug("eServiceMP3::audio stream=%i codec=%s language=%s", i, g_codec, g_lang);
+                               m_audioStreams.push_back(audio);
+                               g_free (g_lang);
+                               g_free (g_codec);
+                               gst_caps_unref(caps);
                        }
+
+                       for (i = 0; i < n_text; i++)
+                       {       
+                               gchar *g_lang;
+//                             gchar *g_type;
+//                             GstPad* pad = 0;
+//                             g_signal_emit_by_name (m_gst_playbin, "get-text-pad", i, &pad);
+//                             GstCaps* caps = gst_pad_get_negotiated_caps(pad);
+//                             GstStructure* str = gst_caps_get_structure(caps, 0);
+//                             g_type = gst_structure_get_name(str);
+//                             g_signal_emit_by_name (m_gst_playbin, "get-text-tags", i, &tags);
+                               subtitleStream subs;
+                               subs.type = stPlainText;
+                               g_lang = g_strdup_printf ("und");
+                               tags = NULL; /* the get-text-tags query above is disabled; don't reuse the (already freed) audio tags */
+                               if ( tags && gst_is_tag_list(tags) )
+                                       gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_lang);
+                               subs.language_code = std::string(g_lang);
+                               eDebug("eServiceMP3::subtitle stream=%i language=%s"/* type=%s*/, i, g_lang/*, g_type*/);
+                               m_subtitleStreams.push_back(subs);
+                               g_free (g_lang);
+//                             g_free (g_type);
+                       }
+                       m_event((iPlayableService*)this, evUpdatedEventInfo);
+                       break;
                }
-               for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
+               case GST_MESSAGE_ELEMENT:
                {
-                       if ( IterSubtitleStream->pad )
+                       if ( gst_is_missing_plugin_message(msg) )
                        {
-                               g_object_get(IterSubtitleStream->pad, "tags", &tags, NULL);
-                               gchar *g_language;
-                               if ( gst_is_tag_list(tags) && gst_tag_list_get_string(tags, GST_TAG_LANGUAGE_CODE, &g_language) )
+                               gchar *description = gst_missing_plugin_message_get_description(msg);
+                               if ( description )
                                {
-                                       eDebug("found subtitle language %s",g_language);
-                                       IterSubtitleStream->language_code = std::string(g_language);
-                                       g_free (g_language);
+                                       m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
+                                       g_free(description);
+                                       m_event((iPlayableService*)this, evUser+12);
                                }
                        }
-               }
-       }
-        case GST_MESSAGE_ELEMENT:
-       {
-               if ( gst_is_missing_plugin_message(msg) )
-               {
-                       gchar *description = gst_missing_plugin_message_get_description(msg);                   
-                       if ( description )
+                       else if (const GstStructure *msgstruct = gst_message_get_structure(msg))
                        {
-                               m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
-                               g_free(description);
-                               m_event((iPlayableService*)this, evUser+12);
+                               const gchar *eventname = gst_structure_get_name(msgstruct);
+                               if ( eventname )
+                               {
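+                                       /* custom element messages (presumably posted by the video sink)
+                                          carrying video size, framerate and scan-type updates */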
+                                       if (!strcmp(eventname, "eventSizeChanged") || !strcmp(eventname, "eventSizeAvail"))
+                                       {
+                                               gst_structure_get_int (msgstruct, "aspect_ratio", &m_aspect);
+                                               gst_structure_get_int (msgstruct, "width", &m_width);
+                                               gst_structure_get_int (msgstruct, "height", &m_height);
+                                               if (strstr(eventname, "Changed"))
+                                                       m_event((iPlayableService*)this, evVideoSizeChanged);
+                                       }
+                                       else if (!strcmp(eventname, "eventFrameRateChanged") || !strcmp(eventname, "eventFrameRateAvail"))
+                                       {
+                                               gst_structure_get_int (msgstruct, "frame_rate", &m_framerate);
+                                               if (strstr(eventname, "Changed"))
+                                                       m_event((iPlayableService*)this, evVideoFramerateChanged);
+                                       }
+                                       else if (!strcmp(eventname, "eventProgressiveChanged") || !strcmp(eventname, "eventProgressiveAvail"))
+                                       {
+                                               gst_structure_get_int (msgstruct, "progressive", &m_progressive);
+                                               if (strstr(eventname, "Changed"))
+                                                       m_event((iPlayableService*)this, evVideoProgressiveChanged);
+                                       }
+                               }
                        }
+                       break;
                }
-       }
-       default:
-               break;
+               case GST_MESSAGE_BUFFERING:
+               {
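+                       /* cache the buffering statistics so getBufferCharge() can hand them to Python */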
+                       GstBufferingMode mode;
+                       gst_message_parse_buffering(msg, &(m_bufferInfo.bufferPercent));
+                       gst_message_parse_buffering_stats(msg, &mode, &(m_bufferInfo.avgInRate), &(m_bufferInfo.avgOutRate), &(m_bufferInfo.bufferingLeft));
+                       m_event((iPlayableService*)this, evBuffering);
+               }
+               default:
+                       break;
        }
        g_free (sourceName);
 }
@@ -1005,252 +1323,170 @@ GstBusSyncReply eServiceMP3::gstBusSyncHandler(GstBus *bus, GstMessage *message,
 
 audiotype_t eServiceMP3::gstCheckAudioPad(GstStructure* structure)
 {
-       const gchar* type;
-       type = gst_structure_get_name(structure);
+       if (!structure)
+               return atUnknown;
+
+       if ( gst_structure_has_name (structure, "audio/mpeg"))
+       {
+               gint mpegversion, layer = -1;
+               if (!gst_structure_get_int (structure, "mpegversion", &mpegversion))
+                       return atUnknown;
 
-       if (!strcmp(type, "audio/mpeg")) {
-                       gint mpegversion, layer = 0;
-                       gst_structure_get_int (structure, "mpegversion", &mpegversion);
-                       gst_structure_get_int (structure, "layer", &layer);
-                       eDebug("mime audio/mpeg version %d layer %d", mpegversion, layer);
-                       switch (mpegversion) {
-                               case 1:
+               switch (mpegversion) {
+                       case 1:
                                {
+                                       gst_structure_get_int (structure, "layer", &layer);
                                        if ( layer == 3 )
                                                return atMP3;
                                        else
                                                return atMPEG;
+                                       break;
                                }
-                               case 2:
-                                       return atMPEG;
-                               case 4:
-                                       return atAAC;
-                               default:
-                                       return atUnknown;
-                       }
+                       case 2:
+                               return atAAC;
+                       case 4:
+                               return atAAC;
+                       default:
+                               return atUnknown;
                }
-       else
-       {
-               eDebug("mime %s", type);
-               if (!strcmp(type, "audio/x-ac3") || !strcmp(type, "audio/ac3"))
-                       return atAC3;
-               else if (!strcmp(type, "audio/x-dts") || !strcmp(type, "audio/dts"))
-                       return atDTS;
-               else if (!strcmp(type, "audio/x-raw-int"))
-                       return atPCM;
        }
+
+       else if ( gst_structure_has_name (structure, "audio/x-ac3") || gst_structure_has_name (structure, "audio/ac3") )
+               return atAC3;
+       else if ( gst_structure_has_name (structure, "audio/x-dts") || gst_structure_has_name (structure, "audio/dts") )
+               return atDTS;
+       else if ( gst_structure_has_name (structure, "audio/x-raw-int") )
+               return atPCM;
+
        return atUnknown;
 }
 
-void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer user_data)
+void eServiceMP3::gstPoll(const int &msg)
 {
-       const gchar* type;
-       GstCaps* caps;
-       GstStructure* str;
-       caps = gst_pad_get_caps(pad);
-       str = gst_caps_get_structure(caps, 0);
-       type = gst_structure_get_name(str);
-
-       eDebug("A new pad %s:%s was created", GST_OBJECT_NAME (decodebin), GST_OBJECT_NAME (pad));
-
-       eServiceMP3 *_this = (eServiceMP3*)user_data;
-       GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
-       if (g_strrstr(type,"audio"))
+               /* ok, we have a serious problem here. gstBusSyncHandler sends
+                  us the wakeup signal, but likely before it was posted.
+                  the usleep, an EVIL HACK (DON'T DO THAT!!!), works around this.
+
+                  I need to understand the API a bit more to make this work
+                  properly. */
+       if (msg == 1)
        {
-               audioStream audio;
-               audio.type = _this->gstCheckAudioPad(str);
-               GstElement *switch_audio = gst_bin_get_by_name(pipeline , "switch_audio");
-               if ( switch_audio )
-               {
-                       GstPad *sinkpad = gst_element_get_request_pad (switch_audio, "sink%d");
-                       gst_pad_link(pad, sinkpad);
-                       audio.pad = sinkpad;
-                       _this->m_audioStreams.push_back(audio);
-               
-                       if ( _this->m_audioStreams.size() == 1 )
-                       {
-                               _this->selectAudioStream(0);
-                               gst_element_set_state (_this->m_gst_pipeline, GST_STATE_PLAYING);
-                       }
-                       else
-                               g_object_set (G_OBJECT (switch_audio), "select-all", FALSE, NULL);
-               }
-               else
+               GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (m_gst_playbin));
+               GstMessage *message;
+               usleep(1);
+               while ((message = gst_bus_pop (bus)))
                {
-                       gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_audio"), "sink"));
-                       _this->m_audioStreams.push_back(audio);
+                       gstBusCall(bus, message);
+                       gst_message_unref (message);
                }
        }
-       if (g_strrstr(type,"video"))
-       {
-               gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_video"), "sink"));
-       }
-       if (g_strrstr(type,"application/x-ssa") || g_strrstr(type,"application/x-ass"))
-       {
-               GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stSSA);
-               gst_pad_link(pad, switchpad);
-               subtitleStream subs;
-               subs.pad = switchpad;
-               subs.type = stSSA;
-               _this->m_subtitleStreams.push_back(subs);
-       }
-       if (g_strrstr(type,"text/plain"))
-       {
-               GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stPlainText);
-               gst_pad_link(pad, switchpad);
-               subtitleStream subs;
-               subs.pad = switchpad;
-               subs.type = stPlainText;
-               _this->m_subtitleStreams.push_back(subs);
-       }
-}
-
-GstPad* eServiceMP3::gstCreateSubtitleSink(eServiceMP3* _this, subtype_t type)
-{
-       GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
-       GstElement *switch_subparse = gst_bin_get_by_name(pipeline,"switch_subparse");
-       if ( !switch_subparse )
-       {
-               switch_subparse = gst_element_factory_make ("input-selector", "switch_subparse");
-               GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
-               gst_bin_add_many(pipeline, switch_subparse, sink, NULL);
-               gst_element_link(switch_subparse, sink);
-               g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
-               g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
-               g_object_set (G_OBJECT(sink), "async", FALSE, NULL);
-               g_signal_connect(sink, "handoff", G_CALLBACK(_this->gstCBsubtitleAvail), _this);
-       
-               // order is essential since requested sink pad names can't be explicitely chosen
-               GstElement *switch_substream_plain = gst_element_factory_make ("input-selector", "switch_substream_plain");
-               gst_bin_add(pipeline, switch_substream_plain);
-               GstPad *sinkpad_plain = gst_element_get_request_pad (switch_subparse, "sink%d");
-               gst_pad_link(gst_element_get_pad (switch_substream_plain, "src"), sinkpad_plain);
-       
-               GstElement *switch_substream_ssa = gst_element_factory_make ("input-selector", "switch_substream_ssa");
-               GstElement *ssaparse = gst_element_factory_make("ssaparse", "ssaparse");
-               gst_bin_add_many(pipeline, switch_substream_ssa, ssaparse, NULL);
-               GstPad *sinkpad_ssa = gst_element_get_request_pad (switch_subparse, "sink%d");
-               gst_element_link(switch_substream_ssa, ssaparse);
-               gst_pad_link(gst_element_get_pad (ssaparse, "src"), sinkpad_ssa);
-       
-               GstElement *switch_substream_srt = gst_element_factory_make ("input-selector", "switch_substream_srt");
-               GstElement *srtparse = gst_element_factory_make("subparse", "srtparse");
-               gst_bin_add_many(pipeline, switch_substream_srt, srtparse, NULL);
-               GstPad *sinkpad_srt = gst_element_get_request_pad (switch_subparse, "sink%d");
-               gst_element_link(switch_substream_srt, srtparse);
-               gst_pad_link(gst_element_get_pad (srtparse, "src"), sinkpad_srt);
-               g_object_set (G_OBJECT(srtparse), "subtitle-encoding", "ISO-8859-15", NULL);
-       }
-
-       switch (type)
-       {
-               case stSSA:
-                       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_ssa"), "sink%d");
-               case stSRT:
-                       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_srt"), "sink%d");
-               case stPlainText:
-               default:
-                       break;
-       }
-       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_plain"), "sink%d");
+       else
+               pullSubtitle();
 }
 
-void eServiceMP3::gstCBfilterPadAdded(GstElement *filter, GstPad *pad, gpointer user_data)
-{
-       eServiceMP3 *_this = (eServiceMP3*)user_data;
-       GstElement *decoder = gst_bin_get_by_name(GST_BIN(_this->m_gst_pipeline),"decoder");
-       gst_pad_link(pad, gst_element_get_static_pad (decoder, "sink"));
-}
+eAutoInitPtr<eServiceFactoryMP3> init_eServiceFactoryMP3(eAutoInitNumbers::service+1, "eServiceFactoryMP3");
 
-void eServiceMP3::gstCBnewPad(GstElement *decodebin, GstPad *pad, gboolean last, gpointer user_data)
+void eServiceMP3::gstCBsubtitleAvail(GstElement *appsink, gpointer user_data)
 {
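+       /* called from a GStreamer streaming thread: just count the pending subtitle
+          buffer and wake the main thread via the message pump */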
        eServiceMP3 *_this = (eServiceMP3*)user_data;
-       GstCaps *caps;
-       GstStructure *str;
-       GstPad *audiopad;
-
-       /* only link once */
-       GstElement *audiobin = gst_bin_get_by_name(GST_BIN(_this->m_gst_pipeline),"audiobin");
-       audiopad = gst_element_get_static_pad (audiobin, "sink");
-       if ( !audiopad || GST_PAD_IS_LINKED (audiopad)) {
-               eDebug("audio already linked!");
-               g_object_unref (audiopad);
-               return;
-       }
-
-       /* check media type */
-       caps = gst_pad_get_caps (pad);
-       str = gst_caps_get_structure (caps, 0);
-       eDebug("gst new pad! %s", gst_structure_get_name (str));
-
-       if (!g_strrstr (gst_structure_get_name (str), "audio")) {
-               gst_caps_unref (caps);
-               gst_object_unref (audiopad);
-               return;
-       }
-       
-       gst_caps_unref (caps);
-       gst_pad_link (pad, audiopad);
+       eSingleLocker l(_this->m_subs_to_pull_lock);
+       ++_this->m_subs_to_pull;
+       _this->m_pump.send(2);
 }
 
-void eServiceMP3::gstCBunknownType(GstElement *decodebin, GstPad *pad, GstCaps *caps, gpointer user_data)
+void eServiceMP3::pullSubtitle()
 {
-       GstStructure *str;
-
-       /* check media type */
-       caps = gst_pad_get_caps (pad);
-       str = gst_caps_get_structure (caps, 0);
-       eDebug("unknown type: %s - this can't be decoded.", gst_structure_get_name (str));
-       gst_caps_unref (caps);
-}
-
-void eServiceMP3::gstPoll(const int&)
-{
-               /* ok, we have a serious problem here. gstBusSyncHandler sends 
-                  us the wakup signal, but likely before it was posted.
-                  the usleep, an EVIL HACK (DON'T DO THAT!!!) works around this.
-                  
-                  I need to understand the API a bit more to make this work 
-                  proplerly. */
-       usleep(1);
-       
-       GstBus *bus = gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline));
-       GstMessage *message;
-       while ((message = gst_bus_pop (bus)))
+       GstElement *sink;
+       g_object_get (G_OBJECT (m_gst_playbin), "text-sink", &sink, NULL);
+       if (sink)
        {
-               gstBusCall(bus, message);
-               gst_message_unref (message);
+               while (m_subs_to_pull && m_subtitle_pages.size() < 2)
+               {
+                       GstBuffer *buffer;
+                       {
+                               eSingleLocker l(m_subs_to_pull_lock);
+                               --m_subs_to_pull;
+                               g_signal_emit_by_name (sink, "pull-buffer", &buffer);
+                       }
+                       if (buffer)
+                       {
+                               gint64 buf_pos = GST_BUFFER_TIMESTAMP(buffer);
+                               gint64 duration_ns = GST_BUFFER_DURATION(buffer);
+                               size_t len = GST_BUFFER_SIZE(buffer);
+                               unsigned char line[len+1];
+                               memcpy(line, GST_BUFFER_DATA(buffer), len);
+                               line[len] = 0;
+                               eDebug("got new subtitle @ buf_pos = %lld ns (in pts=%lld): '%s' ", buf_pos, buf_pos/11111, line);
+                               ePangoSubtitlePage page;
+                               gRGB rgbcol(0xD0,0xD0,0xD0);
+                               page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)line));
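+                               /* buffer timestamps are in nanoseconds; one tick of the 90kHz
+                                  PTS clock is ~11111 ns, hence the division */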
+                               page.show_pts = buf_pos / 11111L;
+                               page.m_timeout = duration_ns / 1000000;
+                               m_subtitle_pages.push_back(page);
+                               pushSubtitles();
+                               gst_buffer_unref(buffer);
+                       }
+               }
+               gst_object_unref(sink);
        }
+       else
+               eDebug("no subtitle sink!");
 }
 
-eAutoInitPtr<eServiceFactoryMP3> init_eServiceFactoryMP3(eAutoInitNumbers::service+1, "eServiceFactoryMP3");
-
-void eServiceMP3::gstCBsubtitleAvail(GstElement *element, GstBuffer *buffer, GstPad *pad, gpointer user_data)
+void eServiceMP3::pushSubtitles()
 {
-       gint64 duration_ns = GST_BUFFER_DURATION(buffer);
-       const unsigned char *text = (unsigned char *)GST_BUFFER_DATA(buffer);
-       eDebug("gstCBsubtitleAvail: %s",text);
-       eServiceMP3 *_this = (eServiceMP3*)user_data;
-       if ( _this->m_subtitle_widget )
+       ePangoSubtitlePage page;
+       pts_t running_pts;
+       while ( !m_subtitle_pages.empty() )
        {
-               ePangoSubtitlePage page;
-               gRGB rgbcol(0xD0,0xD0,0xD0);
-               page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)text));
-               page.m_timeout = duration_ns / 1000000;
-               (_this->m_subtitle_widget)->setPage(page);
+               getPlayPosition(running_pts);
+               page = m_subtitle_pages.front();
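+               /* show_pts and running_pts are 90kHz PTS ticks, so dividing the
+                  difference by 90 yields milliseconds */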
+               gint64 diff_ms = ( page.show_pts - running_pts ) / 90;
+               eDebug("eServiceMP3::pushSubtitles show_pts = %lld  running_pts = %lld  diff = %lld", page.show_pts, running_pts, diff_ms);
+               if (diff_ms < -100)
+               {
+                       GstFormat fmt = GST_FORMAT_TIME;
+                       gint64 now;
+                       if (gst_element_query_position(m_gst_playbin, &fmt, &now))
+                       {
+                               now /= 11111;
+                               diff_ms = abs((now - running_pts) / 90);
+                               eDebug("diff < -100ms check decoder/pipeline diff: decoder: %lld, pipeline: %lld, diff: %lld", running_pts, now, diff_ms);
+                               if (diff_ms > 100000)
+                               {
+                                       eDebug("high decoder/pipeline difference.. assume decoder has not started yet.. check again in 1 sec");
+                                       m_subtitle_sync_timer->start(1000, true);
+                                       break;
+                               }
+                       }
+                       else
+                               eDebug("query position for decoder/pipeline check failed!");
+                       eDebug("subtitle too late... drop");
+                       m_subtitle_pages.pop_front();
+               }
+               else if ( diff_ms > 20 )
+               {
+//                     eDebug("start recheck timer");
+                       m_subtitle_sync_timer->start(diff_ms > 1000 ? 1000 : diff_ms, true);
+                       break;
+               }
+               else // immediate show
+               {
+                       if (m_subtitle_widget)
+                               m_subtitle_widget->setPage(page);
+                       m_subtitle_pages.pop_front();
+               }
        }
+       if (m_subtitle_pages.empty())
+               pullSubtitle();
 }
 
 RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
 {
        ePyObject entry;
        int tuplesize = PyTuple_Size(tuple);
-       int pid;
-       int type;
-       gint nb_sources;
-       GstPad *active_pad;
-       GstElement *switch_substream = NULL;
-       GstElement *switch_subparse = gst_bin_get_by_name (GST_BIN(m_gst_pipeline), "switch_subparse");
+       int pid, type;
+       gint text_pid = 0;
 
        if (!PyTuple_Check(tuple))
                goto error_out;
@@ -1265,46 +1501,27 @@ RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
                goto error_out;
        type = PyInt_AsLong(entry);
 
-       switch ((subtype_t)type)
+       if (m_currentSubtitleStream != pid)
        {
-               case stPlainText:
-                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_plain");
-                       break;
-               case stSSA:
-                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_ssa");
-                       break;
-               case stSRT:
-                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_srt");
-                       break;
-               default:
-                       goto error_out;
+               eSingleLocker l(m_subs_to_pull_lock);
+               g_object_set (G_OBJECT (m_gst_playbin), "current-text", pid, NULL);
+               m_currentSubtitleStream = pid;
+               m_subs_to_pull = 0;
+               m_subtitle_pages.clear();
        }
 
+       m_subtitle_widget = 0;
        m_subtitle_widget = new eSubtitleWidget(parent);
        m_subtitle_widget->resize(parent->size()); /* full size */
 
-       if ( !switch_substream )
-       {
-               eDebug("can't switch subtitle tracks! gst-plugin-selector needed");
-               return -2;
-       }
-       g_object_get (G_OBJECT (switch_substream), "n-pads", &nb_sources, NULL);
-       if ( (unsigned int)pid >= m_subtitleStreams.size() || pid >= nb_sources || (unsigned int)m_currentSubtitleStream >= m_subtitleStreams.size() )
-               return -2;
-       g_object_get (G_OBJECT (switch_subparse), "n-pads", &nb_sources, NULL);
-       if ( type < 0 || type >= nb_sources )
-               return -2;
+       g_object_get (G_OBJECT (m_gst_playbin), "current-text", &text_pid, NULL);
 
-       char sinkpad[6];
-       sprintf(sinkpad, "sink%d", type);
-       g_object_set (G_OBJECT (switch_subparse), "active-pad", gst_element_get_pad (switch_subparse, sinkpad), NULL);
-       sprintf(sinkpad, "sink%d", pid);
-       g_object_set (G_OBJECT (switch_substream), "active-pad", gst_element_get_pad (switch_substream, sinkpad), NULL);
-       m_currentSubtitleStream = pid;
+       eDebug ("eServiceMP3::switched to subtitle stream %i", text_pid);
 
        return 0;
+
 error_out:
-       eDebug("enableSubtitles needs a tuple as 2nd argument!\n"
+       eDebug("eServiceMP3::enableSubtitles needs a tuple as 2nd argument!\n"
                "for gst subtitles (2, subtitle_stream_count, subtitle_type)");
        return -1;
 }
@@ -1312,6 +1529,7 @@ error_out:
 RESULT eServiceMP3::disableSubtitles(eWidget *parent)
 {
        eDebug("eServiceMP3::disableSubtitles");
+       m_subtitle_pages.clear();
        delete m_subtitle_widget;
        m_subtitle_widget = 0;
        return 0;
@@ -1319,7 +1537,7 @@ RESULT eServiceMP3::disableSubtitles(eWidget *parent)
 
 PyObject *eServiceMP3::getCachedSubtitle()
 {
-       eDebug("eServiceMP3::getCachedSubtitle");
+//     eDebug("eServiceMP3::getCachedSubtitle");
        Py_RETURN_NONE;
 }
 
@@ -1348,6 +1566,102 @@ PyObject *eServiceMP3::getSubtitleList()
        return l;
 }
 
+RESULT eServiceMP3::streamed(ePtr<iStreamedService> &ptr)
+{
+       ptr = this;
+       return 0;
+}
+
+PyObject *eServiceMP3::getBufferCharge()
+{
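+       /* expose the last GST_MESSAGE_BUFFERING stats plus the configured buffer size as a 5-tuple */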
+       ePyObject tuple = PyTuple_New(5);
+       PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(m_bufferInfo.bufferPercent));
+       PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(m_bufferInfo.avgInRate));
+       PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(m_bufferInfo.avgOutRate));
+       PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(m_bufferInfo.bufferingLeft));
+       PyTuple_SET_ITEM(tuple, 4, PyInt_FromLong(m_buffer_size));
+       return tuple;
+}
+
+int eServiceMP3::setBufferSize(int size)
+{
+       m_buffer_size = size;
+       g_object_set (G_OBJECT (m_gst_playbin), "buffer-size", m_buffer_size, NULL);
+       return 0;
+}
+
+int eServiceMP3::getAC3Delay()
+{
+       return ac3_delay;
+}
+
+int eServiceMP3::getPCMDelay()
+{
+       return pcm_delay;
+}
+
+void eServiceMP3::setAC3Delay(int delay)
+{
+       ac3_delay = delay;
+       if (!m_gst_playbin || m_state != stRunning)
+               return;
+       else
+       {
+               GstElement *sink;
+               std::string config_delay;
+               int config_delay_int = delay;
+               if(ePythonConfigQuery::getConfigValue("config.av.generalAC3delay", config_delay) == 0)
+                       config_delay_int += atoi(config_delay.c_str());
+
+               g_object_get (G_OBJECT (m_gst_playbin), "audio-sink", &sink, NULL);
+
+               if (!sink)
+                       return;
+               else {
+                       gchar *name = gst_element_get_name(sink);
+
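+                       /* a dvbaudiosink means the delay is applied in hardware via the MPEG decoder */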
+                       if (strstr(name, "dvbaudiosink"))
+                               eTSMPEGDecoder::setHwAC3Delay(config_delay_int);
+                       g_free(name);
+                       gst_object_unref(sink);
+               }
+       }
+}
+
+void eServiceMP3::setPCMDelay(int delay)
+{
+       pcm_delay = delay;
+       if (!m_gst_playbin || m_state != stRunning)
+               return;
+       else
+       {
+               GstElement *sink;
+               std::string config_delay;
+               int config_delay_int = delay;
+               if(ePythonConfigQuery::getConfigValue("config.av.generalPCMdelay", config_delay) == 0)
+                       config_delay_int += atoi(config_delay.c_str());
+
+               g_object_get (G_OBJECT (m_gst_playbin), "audio-sink", &sink, NULL);
+
+               if (!sink)
+                       return;
+               else {
+                       gchar *name = gst_element_get_name(sink);
+
+                       if (strstr(name, "dvbaudiosink"))
+                               eTSMPEGDecoder::setHwPCMDelay(config_delay_int);
+                       else {
+                               // this is really untested.. and not used yet
+                               gint64 offset = config_delay_int;
+                               offset *= 1000000; // milli to nano
+                               g_object_set (G_OBJECT (m_gst_playbin), "ts-offset", offset, NULL);
+                       }
+                       g_free(name);
+                       gst_object_unref(sink);
+               }
+       }
+}
+
 #else
 #warning gstreamer not available, not building media player
 #endif