allow hardware playback of M4A (AAC) and MP3 audio streams.
[enigma2.git] / lib / service / servicemp3.cpp
index 225d90f69938145f8e00a156efd428ea782c9d2b..2d21773167b3bab282cdb6c3be1d91e5c2badba8 100644
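
Note: the audio-only paths this patch wires up reduce to two GStreamer 0.10 chains: filesrc ! mp3parse ! dvbaudiosink for a raw MP3 stream, and filesrc ! qtdemux ! dvbaudiosink for AAC inside an MP4/M4A container, where the demuxer's audio pad is only linked once it appears. The snippet below is a minimal standalone sketch of the .m4a case built from the same element names the diff uses; build_m4a_pipeline, on_pad_added and the path argument are illustrative, the real code lives in the eServiceMP3 constructor and gstCBpadAdded.

        #include <gst/gst.h>

        /* link qtdemux's dynamic audio pad to the hardware sink -- the job
         * eServiceMP3::gstCBpadAdded does in the patch */
        static void on_pad_added(GstElement *demux, GstPad *pad, gpointer sinkelem)
        {
                GstPad *sinkpad = gst_element_get_static_pad(GST_ELEMENT(sinkelem), "sink");
                gst_pad_link(pad, sinkpad);
                gst_object_unref(sinkpad);
        }

        /* sketch of the .m4a chain; the .mp3 chain is fully static:
         * filesrc ! mp3parse ! dvbaudiosink */
        static GstElement *build_m4a_pipeline(const char *path)
        {
                GstElement *pipeline = gst_pipeline_new("mediaplayer");
                GstElement *source = gst_element_factory_make("filesrc", "file-source");
                GstElement *demux  = gst_element_factory_make("qtdemux", "audiodemux");
                GstElement *sink   = gst_element_factory_make("dvbaudiosink", "audiosink");
                g_object_set(G_OBJECT(source), "location", path, NULL);
                gst_bin_add_many(GST_BIN(pipeline), source, demux, sink, NULL);
                gst_element_link(source, demux);      /* sink is linked from on_pad_added */
                g_signal_connect(demux, "pad-added", G_CALLBACK(on_pad_added), sink);
                return pipeline; /* caller sets GST_STATE_PLAYING, as the constructor does */
        }
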
@@ -27,6 +27,7 @@ eServiceFactoryMP3::eServiceFactoryMP3()
        if (sc)
        {
                std::list<std::string> extensions;
+               extensions.push_back("mp2");
                extensions.push_back("mp3");
                extensions.push_back("ogg");
                extensions.push_back("mpg");
@@ -35,8 +36,11 @@ eServiceFactoryMP3::eServiceFactoryMP3()
                extensions.push_back("wave");
                extensions.push_back("mkv");
                extensions.push_back("avi");
+               extensions.push_back("divx");
                extensions.push_back("dat");
                extensions.push_back("flac");
+               extensions.push_back("mp4");
+               extensions.push_back("m4a");
                sc->addServiceFactory(eServiceFactoryMP3::id, this, extensions);
        }
 
@@ -173,20 +177,17 @@ int eStaticServiceMP3Info::getLength(const eServiceReference &ref)
 
 eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eApp, 1)
 {
+       m_seekTimeout = eTimer::create(eApp);
        m_stream_tags = 0;
-       m_audioStreams.clear();
-       m_subtitleStreams.clear();
        m_currentAudioStream = 0;
        m_currentSubtitleStream = 0;
        m_subtitle_widget = 0;
        m_currentTrickRatio = 0;
-       CONNECT(m_seekTimeout.timeout, eServiceMP3::seekTimeoutCB);
+       CONNECT(m_seekTimeout->timeout, eServiceMP3::seekTimeoutCB);
        CONNECT(m_pump.recv_msg, eServiceMP3::gstPoll);
        GstElement *source = 0;
-       
-       GstElement *decoder = 0, *conv = 0, *flt = 0, *sink = 0; /* for audio */
-       
-       GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0;
+       GstElement *decoder = 0, *conv = 0, *flt = 0, *parser = 0, *sink = 0; /* for audio */
+       GstElement *audio = 0, *switch_audio = 0, *queue_audio = 0, *video = 0, *queue_video = 0, *videodemux = 0, *audiodemux = 0;
        
        m_state = stIdle;
        eDebug("SERVICEMP3 construct!");
@@ -198,116 +199,149 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
        if (!ext)
                ext = filename;
 
-       int is_mpeg_ps = !(strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat"));
-       int is_mpeg_ts = !strcasecmp(ext, ".ts");
-       int is_matroska = !strcasecmp(ext, ".mkv");
-       int is_avi = !strcasecmp(ext, ".avi");
-       int is_mp3 = !strcasecmp(ext, ".mp3"); /* force mp3 instead of decodebin */
-       int is_video = is_mpeg_ps || is_mpeg_ts || is_matroska || is_avi;
-       int is_streaming = !strncmp(filename, "http://", 7);
-       int is_AudioCD = !(strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav"));
-       int is_VCD = !strcasecmp(ext, ".dat");
-       
-       eDebug("filename: %s, is_mpeg_ps: %d, is_mpeg_ts: %d, is_video: %d, is_streaming: %d, is_mp3: %d, is_matroska: %d, is_avi: %d, is_AudioCD: %d, is_VCD: %d", filename, is_mpeg_ps, is_mpeg_ts, is_video, is_streaming, is_mp3, is_matroska, is_avi, is_AudioCD, is_VCD);
-       
-       int is_audio = !is_video;
+       sourceStream sourceinfo;
+       sourceinfo.is_video = sourceinfo.is_streaming = FALSE;
+       sourceinfo.audiotype = atUnknown;
+       if ( (strcasecmp(ext, ".mpeg") && strcasecmp(ext, ".mpg") && strcasecmp(ext, ".vob") && strcasecmp(ext, ".bin") && strcasecmp(ext, ".dat") ) == 0 )
+       {
+               sourceinfo.containertype = ctMPEGPS;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".ts") == 0 )
+       {
+               sourceinfo.containertype = ctMPEGTS;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".mkv") == 0 )
+       {
+               sourceinfo.containertype = ctMKV;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".avi") == 0 || strcasecmp(ext, ".divx") == 0)
+       {
+               sourceinfo.containertype = ctAVI;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".mp4") == 0 )
+       {
+               sourceinfo.containertype = ctMP4;
+               sourceinfo.is_video = TRUE;
+       }
+       else if ( strcasecmp(ext, ".m4a") == 0 )
+       {
+               sourceinfo.containertype = ctMP4;
+               sourceinfo.audiotype = atAAC;
+       }
+       else if ( strcasecmp(ext, ".mp3") == 0 )
+               sourceinfo.audiotype = atMP3;
+       else if ( (strncmp(filename, "/autofs/", 8) || strncmp(filename+strlen(filename)-13, "/track-", 7) || strcasecmp(ext, ".wav")) == 0 )
+               sourceinfo.containertype = ctCDA;
+       if ( strcasecmp(ext, ".dat") == 0 )
+       {
+               sourceinfo.containertype = ctVCD;
+               sourceinfo.is_video = TRUE;
+       }
+       if ( (strncmp(filename, "http://", 7)) == 0 )
+               sourceinfo.is_streaming = TRUE;
+
+       eDebug("filename=%s, containertype=%d, is_video=%d, is_streaming=%d", filename, sourceinfo.containertype, sourceinfo.is_video, sourceinfo.is_streaming);
 
        int all_ok = 0;
 
        m_gst_pipeline = gst_pipeline_new ("mediaplayer");
        if (!m_gst_pipeline)
-               eWarning("failed to create pipeline");
+               m_error_message = "failed to create GStreamer pipeline!\n";
 
-       if (is_AudioCD)
+       if ( sourceinfo.is_streaming )
        {
-               source = gst_element_factory_make ("cdiocddasrc", "cda-source");
+               eDebug("play webradio!");
+               source = gst_element_factory_make ("neonhttpsrc", "http-source");
                if (source)
-                       g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+               {
+                       g_object_set (G_OBJECT (source), "location", filename, NULL);
+                       g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+               }
                else
-                       is_AudioCD = 0;
+                       m_error_message = "GStreamer plugin neonhttpsrc not available!\n";
        }
-       if ( !is_streaming && !is_AudioCD )
-               source = gst_element_factory_make ("filesrc", "file-source");
-       else if ( is_streaming ) 
+       else if ( sourceinfo.containertype == ctCDA )
        {
-               source = gst_element_factory_make ("neonhttpsrc", "http-source");
+               source = gst_element_factory_make ("cdiocddasrc", "cda-source");
                if (source)
-                       g_object_set (G_OBJECT (source), "automatic-redirect", TRUE, NULL);
+               {
+                       g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
+                       int track = atoi(filename+18);
+                       eDebug("play audio CD track #%i",track);
+                       if (track > 0)
+                               g_object_set (G_OBJECT (source), "track", track, NULL);
+               }
        }
-
-       if (!source)
-               eWarning("failed to create %s", is_streaming ? "neonhttpsrc" : "filesrc");
-                               /* configure source */
-       else if (!is_AudioCD)
-               g_object_set (G_OBJECT (source), "location", filename, NULL);
-       else
-       { 
-               int track = atoi(filename+18);
-               eDebug("play audio CD track #%i",track);
-               if (track > 0)
-                       g_object_set (G_OBJECT (source), "track", track, NULL);
+       else if ( sourceinfo.containertype == ctVCD )
+       {
+               int fd = open(filename,O_RDONLY);
+               char tmp[128*1024];
+               int ret = read(fd, tmp, 128*1024);
+               close(fd);
+               if ( ret == -1 ) // this is a "REAL" VCD
+                       source = gst_element_factory_make ("vcdsrc", "vcd-source");
+               if (source)
+                       g_object_set (G_OBJECT (source), "device", "/dev/cdroms/cdrom0", NULL);
        }
-
-       if (is_audio)
+       if ( !source && !sourceinfo.is_streaming )
        {
-                       /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
-               const char *decodertype = "decodebin";
-
-               decoder = gst_element_factory_make (decodertype, "decoder");
-               if (!decoder)
-                       eWarning("failed to create %s decoder", decodertype);
-
-               conv = gst_element_factory_make ("audioconvert", "converter");
-               if (!conv)
-                       eWarning("failed to create audioconvert");
-
-               flt = gst_element_factory_make ("capsfilter", "flt");
-               if (!flt)
-                       eWarning("failed to create capsfilter");
-
-                       /* for some reasons, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
-                       /* endianness, however, is not required to be set anymore. */
-               if (flt)
-               {
-                       GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */(char*)0);
-                       g_object_set (G_OBJECT (flt), "caps", caps, (char*)0);
-                       gst_caps_unref(caps);
-               }
-
-               sink = gst_element_factory_make ("alsasink", "alsa-output");
-               if (!sink)
-                       eWarning("failed to create osssink");
-
-               if (source && decoder && conv && sink)
-                       all_ok = 1;
-       } else /* is_video */
+               source = gst_element_factory_make ("filesrc", "file-source");
+               if (source)
+                       g_object_set (G_OBJECT (source), "location", filename, NULL);
+               else
+                       m_error_message = "GStreamer can't open filesrc " + (std::string)filename + "!\n";
+       }
+       if ( sourceinfo.is_video )
        {
                        /* filesrc -> mpegdemux -> | queue_audio -> dvbaudiosink
                                                   | queue_video -> dvbvideosink */
 
                audio = gst_element_factory_make("dvbaudiosink", "audiosink");
-               queue_audio = gst_element_factory_make("queue", "queue_audio");
-               
+               if (!audio)
+                       m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+
                video = gst_element_factory_make("dvbvideosink", "videosink");
+               if (!video)
+                       m_error_message += "failed to create Gstreamer element dvbvideosink\n";
+
+               queue_audio = gst_element_factory_make("queue", "queue_audio");
                queue_video = gst_element_factory_make("queue", "queue_video");
-               
-               if (is_mpeg_ps)
-                       videodemux = gst_element_factory_make("flupsdemux", "videodemux");
-               else if (is_mpeg_ts)
-                       videodemux = gst_element_factory_make("flutsdemux", "videodemux");
-               else if (is_matroska)
-                       videodemux = gst_element_factory_make("matroskademux", "videodemux");
-               else if (is_avi)
-                       videodemux = gst_element_factory_make("avidemux", "videodemux");
 
-               if (!videodemux)
+               std::string demux_type;
+               switch (sourceinfo.containertype)
                {
-                       eDebug("fluendo mpegdemux not available, falling back to mpegdemux\n");
-                       videodemux = gst_element_factory_make("mpegdemux", "videodemux");
+                       case ctMPEGTS:
+                               demux_type = "flutsdemux";
+                               break;
+                       case ctMPEGPS:
+                       case ctVCD:
+                               demux_type = "flupsdemux";
+                               break;
+                       case ctMKV:
+                               demux_type = "matroskademux";
+                               break;
+                       case ctAVI:
+                               demux_type = "avidemux";
+                               break;
+                       case ctMP4:
+                               demux_type = "qtdemux";
+                               break;
+                       default:
+                               break;
                }
+               videodemux = gst_element_factory_make(demux_type.c_str(), "videodemux");
+               if (!videodemux)
+                       m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
 
-               eDebug("audio: %p, queue_audio %p, video %p, queue_video %p, videodemux %p", audio, queue_audio, video, queue_video, videodemux);
-               if (audio && queue_audio && video && queue_video && videodemux)
+               switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
+               if (!switch_audio)
+                       m_error_message = "GStreamer plugin input-selector not available!\n";
+
+               if (audio && queue_audio && video && queue_video && videodemux && switch_audio)
                {
                        g_object_set (G_OBJECT (queue_audio), "max-size-bytes", 256*1024, NULL);
                        g_object_set (G_OBJECT (queue_audio), "max-size-buffers", 0, NULL);
@@ -315,52 +349,120 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
                        g_object_set (G_OBJECT (queue_video), "max-size-buffers", 0, NULL);
                        g_object_set (G_OBJECT (queue_video), "max-size-bytes", 2*1024*1024, NULL);
                        g_object_set (G_OBJECT (queue_video), "max-size-time", (guint64)0, NULL);
+                       g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
                        all_ok = 1;
                }
+       } else /* is audio */
+       {
+               std::string demux_type;
+               switch ( sourceinfo.containertype )
+               {
+                       case ctMP4:
+                               demux_type = "qtdemux";
+                               break;
+                       default:
+                               break;
+               }
+               if ( demux_type.length() )
+               {
+                       audiodemux = gst_element_factory_make(demux_type.c_str(), "audiodemux");
+                       if (!audiodemux)
+                               m_error_message = "GStreamer plugin " + demux_type + " not available!\n";
+               }
+               switch ( sourceinfo.audiotype )
+               {
+                       case atMP3:
+                       {
+                               if ( !audiodemux )
+                               {
+                                       parser = gst_element_factory_make("mp3parse", "audioparse");
+                                       if (!parser)
+                                       {
+                                               m_error_message += "failed to create Gstreamer element mp3parse\n";
+                                               break;
+                                       }
+                               }
+                               sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+                               if ( !sink )
+                                       m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+                               else
+                                       all_ok = 1;
+                               break;
+                       }
+                       case atAAC:
+                       {
+                               if ( !audiodemux )
+                               {
+                                       m_error_message += "cannot parse raw AAC audio\n";
+                                       break;
+                               }
+                               sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+                               if (!sink)
+                                       m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+                               else
+                                       all_ok = 1;
+                               break;
+                       }
+                       case atAC3:
+                       {
+                               if ( !audiodemux )
+                               {
+                                       m_error_message += "cannot parse raw AC3 audio\n";
+                                       break;
+                               }
+                               sink = gst_element_factory_make("dvbaudiosink", "audiosink");
+                               if ( !sink )
+                                       m_error_message += "failed to create Gstreamer element dvbaudiosink\n";
+                               else
+                                       all_ok = 1;
+                               break;
+                       }
+                       default:
+                       {       /* filesrc -> decodebin -> audioconvert -> capsfilter -> alsasink */
+                               decoder = gst_element_factory_make ("decodebin", "decoder");
+                               if (!decoder)
+                                       m_error_message += "failed to create Gstreamer element decodebin\n";
+               
+                               conv = gst_element_factory_make ("audioconvert", "converter");
+                               if (!conv)
+                                       m_error_message += "failed to create Gstreamer element audioconvert\n";
+               
+                               flt = gst_element_factory_make ("capsfilter", "flt");
+                               if (!flt)
+                                       m_error_message += "failed to create Gstreamer element capsfilter\n";
+               
+                                       /* for some reason, we need to set the sample format to depth/width=16, because auto negotiation doesn't work. */
+                                       /* endianness, however, is not required to be set anymore. */
+                               if (flt)
+                               {
+                                       GstCaps *caps = gst_caps_new_simple("audio/x-raw-int", /* "endianness", G_TYPE_INT, 4321, */ "depth", G_TYPE_INT, 16, "width", G_TYPE_INT, 16, /*"channels", G_TYPE_INT, 2, */NULL);
+                                       g_object_set (G_OBJECT (flt), "caps", caps, NULL);
+                                       gst_caps_unref(caps);
+                               }
+               
+                               sink = gst_element_factory_make ("alsasink", "alsa-output");
+                               if (!sink)
+                                       m_error_message += "failed to create Gstreamer element alsasink\n";
+               
+                               if (source && decoder && conv && sink)
+                                       all_ok = 1;
+                               break;
+                       }
+               }
+
        }
-       
        if (m_gst_pipeline && all_ok)
        {
                gst_bus_set_sync_handler(gst_pipeline_get_bus (GST_PIPELINE (m_gst_pipeline)), gstBusSyncHandler, this);
 
-               if (is_AudioCD)
+               if ( sourceinfo.containertype == ctCDA )
                {
                        queue_audio = gst_element_factory_make("queue", "queue_audio");
                        g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
                        gst_bin_add_many (GST_BIN (m_gst_pipeline), source, queue_audio, conv, sink, NULL);
                        gst_element_link_many(source, queue_audio, conv, sink, NULL);
                }
-               else if (is_audio)
-               {
-                       queue_audio = gst_element_factory_make("queue", "queue_audio");
-
-                       g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
-                       g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
-
-                       if (!is_mp3)
-                               g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
-
-                               /* gst_bin will take the 'floating references' */
-                       gst_bin_add_many (GST_BIN (m_gst_pipeline),
-                                               source, queue_audio, decoder, NULL);
-
-                               /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
-                       gst_element_link_many(source, queue_audio, decoder, NULL);
-
-                               /* create audio bin with the audioconverter, the capsfilter and the audiosink */
-                       audio = gst_bin_new ("audiobin");
-
-                       GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
-                       gst_bin_add_many(GST_BIN(audio), conv, flt, sink, (char*)0);
-                       gst_element_link_many(conv, flt, sink, (char*)0);
-                       gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
-                       gst_object_unref(audiopad);
-                       gst_bin_add (GST_BIN(m_gst_pipeline), audio);
-                               /* in mad's case, we can directly connect the decoder to the audiobin. otherwise, we do this in gstCBnewPad */
-                       if (is_mp3)
-                               gst_element_link(decoder, audio);
-
-               } else /* is_video */
+               else if ( sourceinfo.is_video )
                {
                        char srt_filename[strlen(filename)+1];
                        strncpy(srt_filename,filename,strlen(filename)-3);
@@ -372,33 +474,20 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
                                eDebug("subtitle file found: %s",srt_filename);
                                GstElement *subsource = gst_element_factory_make ("filesrc", "srt_source");
                                g_object_set (G_OBJECT (subsource), "location", srt_filename, NULL);
-                               GstElement *parser = gst_element_factory_make("subparse", "parse_subtitles");
-                               GstElement *switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
-                               GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
-                               gst_bin_add_many(GST_BIN (m_gst_pipeline), subsource, switch_subtitles, parser, sink, NULL);
-                               gst_element_link(subsource, switch_subtitles);
-                               gst_element_link(switch_subtitles, parser);
-                               gst_element_link(parser, sink);
-                               g_object_set (G_OBJECT(switch_subtitles), "select-all", TRUE, NULL);
-                               g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
-                               g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
-                               g_object_set (G_OBJECT(parser), "subtitle-encoding", "ISO-8859-15", NULL);
-                               g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), this);
+                               gst_bin_add(GST_BIN (m_gst_pipeline), subsource);
+                               GstPad *switchpad = gstCreateSubtitleSink(this, stSRT);
+                               gst_pad_link(gst_element_get_pad (subsource, "src"), switchpad);
                                subtitleStream subs;
-                               subs.language_code = std::string(".srt file");
+                               subs.pad = switchpad;
+                               subs.type = stSRT;
+                               subs.language_code = std::string("und");
                                m_subtitleStreams.push_back(subs);
                        }
-                       gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, NULL);
-                       switch_audio = gst_element_factory_make ("input-selector", "switch_audio");
-                       if (switch_audio)
-                       {
-                               g_object_set (G_OBJECT (switch_audio), "select-all", TRUE, NULL);
-                               gst_bin_add(GST_BIN(m_gst_pipeline), switch_audio);
-                               gst_element_link(switch_audio, queue_audio);
-                       }
+                       gst_bin_add_many(GST_BIN(m_gst_pipeline), source, videodemux, audio, queue_audio, video, queue_video, switch_audio, NULL);
 
-                       if (is_VCD)
+                       if ( sourceinfo.containertype == ctVCD && gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"file-source") )
                        {
+                               eDebug("this is a fake video cd... we use filesrc ! cdxaparse !");
                                GstElement *cdxaparse = gst_element_factory_make("cdxaparse", "cdxaparse");
                                gst_bin_add(GST_BIN(m_gst_pipeline), cdxaparse);
                                gst_element_link(source, cdxaparse);
@@ -406,12 +495,64 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
                        }
                        else
                                gst_element_link(source, videodemux);
+
+                       gst_element_link(switch_audio, queue_audio);
                        gst_element_link(queue_audio, audio);
                        gst_element_link(queue_video, video);
                        g_signal_connect(videodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+
+               } else /* is audio*/
+               {
+                       if ( decoder )
+                       {
+                               queue_audio = gst_element_factory_make("queue", "queue_audio");
+       
+                               g_signal_connect (decoder, "new-decoded-pad", G_CALLBACK(gstCBnewPad), this);
+                               g_signal_connect (decoder, "unknown-type", G_CALLBACK(gstCBunknownType), this);
+       
+                               g_object_set (G_OBJECT (sink), "preroll-queue-len", 80, NULL);
+       
+                                       /* gst_bin will take the 'floating references' */
+                               gst_bin_add_many (GST_BIN (m_gst_pipeline),
+                                                       source, queue_audio, decoder, NULL);
+       
+                                       /* in decodebin's case we can just connect the source with the decodebin, and decodebin will take care about id3demux (or whatever is required) */
+                               gst_element_link_many(source, queue_audio, decoder, NULL);
+       
+                                       /* create audio bin with the audioconverter, the capsfilter and the audiosink */
+                               audio = gst_bin_new ("audiobin");
+       
+                               GstPad *audiopad = gst_element_get_static_pad (conv, "sink");
+                               gst_bin_add_many(GST_BIN(audio), conv, flt, sink, NULL);
+                               gst_element_link_many(conv, flt, sink, NULL);
+                               gst_element_add_pad(audio, gst_ghost_pad_new ("sink", audiopad));
+                               gst_object_unref(audiopad);
+                               gst_bin_add (GST_BIN(m_gst_pipeline), audio);
+                       }
+                       else
+                       {
+                               gst_bin_add_many (GST_BIN (m_gst_pipeline), source, sink, NULL);
+                               if ( parser )
+                               {
+                                       gst_bin_add (GST_BIN (m_gst_pipeline), parser);
+                                       gst_element_link_many(source, parser, sink, NULL);
+                               }
+                               if ( audiodemux )
+                               {
+                                       gst_bin_add (GST_BIN (m_gst_pipeline), audiodemux);
+                                       g_signal_connect(audiodemux, "pad-added", G_CALLBACK (gstCBpadAdded), this);
+                                       gst_element_link(source, audiodemux);
+                                       eDebug("linked source, audiodemux, sink");
+                               }
+                               audioStream audio;
+                               audio.type = sourceinfo.audiotype;
+                               m_audioStreams.push_back(audio);
+                       }
                }
        } else
        {
+               m_event((iPlayableService*)this, evUser+12);
+
                if (m_gst_pipeline)
                        gst_object_unref(GST_OBJECT(m_gst_pipeline));
                if (source)
@@ -436,10 +577,10 @@ eServiceMP3::eServiceMP3(const char *filename): m_filename(filename), m_pump(eAp
                if (switch_audio)
                        gst_object_unref(GST_OBJECT(switch_audio));
 
-               eDebug("sorry, can't play.");
+               eDebug("sorry, can't play: %s",m_error_message.c_str());
                m_gst_pipeline = 0;
        }
-       
+
        gst_element_set_state (m_gst_pipeline, GST_STATE_PLAYING);
 }
 
@@ -513,9 +654,9 @@ RESULT eServiceMP3::setFastForward(int ratio)
 {
        m_currentTrickRatio = ratio;
        if (ratio)
-               m_seekTimeout.start(1000, 0);
+               m_seekTimeout->start(1000, 0);
        else
-               m_seekTimeout.stop();
+               m_seekTimeout->stop();
        return 0;
 }
 
@@ -529,13 +670,13 @@ void eServiceMP3::seekTimeoutCB()
        if (ppos < 0)
        {
                ppos = 0;
-               m_seekTimeout.stop();
+               m_seekTimeout->stop();
        }
        if (ppos > len)
        {
                ppos = 0;
                stop();
-               m_seekTimeout.stop();
+               m_seekTimeout->stop();
                return;
        }
        seekTo(ppos);
@@ -927,6 +1068,17 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
                        m_audioStreams.push_back(audio);
                }
 
+               const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
+               if ( gv_image )
+               {
+                       GstBuffer *buf_image;
+                       buf_image = gst_value_get_buffer (gv_image);
+                       int fd = open("/tmp/.id3coverart", O_CREAT|O_WRONLY|O_TRUNC, 0644);
+                       int ret = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
+                       close(fd);
+                       m_event((iPlayableService*)this, evUser+13);
+               }
+
                gst_tag_list_free(tags);
                m_event((iPlayableService*)this, evUpdatedInfo);
                break;
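
Note: the GST_TAG_IMAGE branch added above dumps embedded cover art to /tmp/.id3coverart and raises evUser+13 so the UI can pick it up. Below is a hedged sketch of the same extraction with the file descriptor and write result checked; saveCoverArt and its path argument are illustrative names, not part of the patch, while the GStreamer 0.10 calls and buffer accessors are the ones used in the diff.

        #include <fcntl.h>
        #include <unistd.h>
        #include <gst/gst.h>

        /* write the first GST_TAG_IMAGE buffer of a tag list to disk;
         * GST_BUFFER_DATA/GST_BUFFER_SIZE are the 0.10 accessors */
        static bool saveCoverArt(const GstTagList *tags, const char *path)
        {
                const GValue *gv_image = gst_tag_list_get_value_index(tags, GST_TAG_IMAGE, 0);
                if (!gv_image)
                        return false;
                GstBuffer *buf_image = gst_value_get_buffer(gv_image);
                int fd = open(path, O_CREAT|O_WRONLY|O_TRUNC, 0644);
                if (fd < 0)
                        return false;
                ssize_t written = write(fd, GST_BUFFER_DATA(buf_image), GST_BUFFER_SIZE(buf_image));
                close(fd);
                return written == (ssize_t)GST_BUFFER_SIZE(buf_image);
        }
        /* usage in the handler would then be roughly:
         *   if (saveCoverArt(tags, "/tmp/.id3coverart"))
         *           m_event((iPlayableService*)this, evUser+13);  */
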
@@ -967,10 +1119,10 @@ void eServiceMP3::gstBusCall(GstBus *bus, GstMessage *msg)
        {
                if ( gst_is_missing_plugin_message(msg) )
                {
-                       gchar *description = gst_missing_plugin_message_get_description(msg);                   
+                       gchar *description = gst_missing_plugin_message_get_description(msg);
                        if ( description )
                        {
-                               m_error_message = description;
+                               m_error_message = "GStreamer plugin " + (std::string)description + " not available!\n";
                                g_free(description);
                                m_event((iPlayableService*)this, evUser+12);
                        }
@@ -1064,8 +1216,14 @@ void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer use
                }
                else
                {
-                       gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline,"queue_audio"), "sink"));
-                       _this->m_audioStreams.push_back(audio);
+                       GstElement *queue_audio = gst_bin_get_by_name(pipeline , "queue_audio");
+                       if ( queue_audio)
+                       {
+                               gst_pad_link(pad, gst_element_get_static_pad(queue_audio, "sink"));
+                               _this->m_audioStreams.push_back(audio);
+                       }
+                       else
+                               gst_pad_link(pad, gst_element_get_static_pad(gst_bin_get_by_name(pipeline , "audiosink"), "sink"));
                }
        }
        if (g_strrstr(type,"video"))
@@ -1074,28 +1232,74 @@ void eServiceMP3::gstCBpadAdded(GstElement *decodebin, GstPad *pad, gpointer use
        }
        if (g_strrstr(type,"application/x-ssa") || g_strrstr(type,"application/x-ass"))
        {
-               GstElement *switch_subtitles = gst_bin_get_by_name(pipeline,"switch_subtitles");
-               if ( !switch_subtitles )
-               {
-                       switch_subtitles = gst_element_factory_make ("input-selector", "switch_subtitles");
-                       if ( !switch_subtitles )
-                               return;
-                       GstElement *parser = gst_element_factory_make("ssaparse", "parse_subtitles");
-                       GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
-                       gst_bin_add_many(pipeline, switch_subtitles, parser, sink, NULL);
-                       gst_element_link(switch_subtitles, parser);
-                       gst_element_link(parser, sink);
-                       g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
-                       g_signal_connect(sink, "handoff", G_CALLBACK(gstCBsubtitleAvail), _this);
-               }
-               GstPad *sinkpad = gst_element_get_request_pad (switch_subtitles, "sink%d");
-               gst_pad_link(pad, sinkpad);
+               GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stSSA);
+               gst_pad_link(pad, switchpad);
+               subtitleStream subs;
+               subs.pad = switchpad;
+               subs.type = stSSA;
+               _this->m_subtitleStreams.push_back(subs);
+       }
+       if (g_strrstr(type,"text/plain"))
+       {
+               GstPad *switchpad = _this->gstCreateSubtitleSink(_this, stPlainText);
+               gst_pad_link(pad, switchpad);
                subtitleStream subs;
-               subs.pad = sinkpad;
+               subs.pad = switchpad;
+               subs.type = stPlainText;
                _this->m_subtitleStreams.push_back(subs);
        }
 }
 
+GstPad* eServiceMP3::gstCreateSubtitleSink(eServiceMP3* _this, subtype_t type)
+{
+       GstBin *pipeline = GST_BIN(_this->m_gst_pipeline);
+       GstElement *switch_subparse = gst_bin_get_by_name(pipeline,"switch_subparse");
+       if ( !switch_subparse )
+       {
+               switch_subparse = gst_element_factory_make ("input-selector", "switch_subparse");
+               GstElement *sink = gst_element_factory_make("fakesink", "sink_subtitles");
+               gst_bin_add_many(pipeline, switch_subparse, sink, NULL);
+               gst_element_link(switch_subparse, sink);
+               g_object_set (G_OBJECT(sink), "signal-handoffs", TRUE, NULL);
+               g_object_set (G_OBJECT(sink), "sync", TRUE, NULL);
+               g_object_set (G_OBJECT(sink), "async", FALSE, NULL);
+               g_signal_connect(sink, "handoff", G_CALLBACK(_this->gstCBsubtitleAvail), _this);
+       
+               // order is essential since requested sink pad names can't be explicitly chosen
+               GstElement *switch_substream_plain = gst_element_factory_make ("input-selector", "switch_substream_plain");
+               gst_bin_add(pipeline, switch_substream_plain);
+               GstPad *sinkpad_plain = gst_element_get_request_pad (switch_subparse, "sink%d");
+               gst_pad_link(gst_element_get_pad (switch_substream_plain, "src"), sinkpad_plain);
+       
+               GstElement *switch_substream_ssa = gst_element_factory_make ("input-selector", "switch_substream_ssa");
+               GstElement *ssaparse = gst_element_factory_make("ssaparse", "ssaparse");
+               gst_bin_add_many(pipeline, switch_substream_ssa, ssaparse, NULL);
+               GstPad *sinkpad_ssa = gst_element_get_request_pad (switch_subparse, "sink%d");
+               gst_element_link(switch_substream_ssa, ssaparse);
+               gst_pad_link(gst_element_get_pad (ssaparse, "src"), sinkpad_ssa);
+       
+               GstElement *switch_substream_srt = gst_element_factory_make ("input-selector", "switch_substream_srt");
+               GstElement *srtparse = gst_element_factory_make("subparse", "srtparse");
+               gst_bin_add_many(pipeline, switch_substream_srt, srtparse, NULL);
+               GstPad *sinkpad_srt = gst_element_get_request_pad (switch_subparse, "sink%d");
+               gst_element_link(switch_substream_srt, srtparse);
+               gst_pad_link(gst_element_get_pad (srtparse, "src"), sinkpad_srt);
+               g_object_set (G_OBJECT(srtparse), "subtitle-encoding", "ISO-8859-15", NULL);
+       }
+
+       switch (type)
+       {
+               case stSSA:
+                       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_ssa"), "sink%d");
+               case stSRT:
+                       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_srt"), "sink%d");
+               case stPlainText:
+               default:
+                       break;
+       }
+       return gst_element_get_request_pad (gst_bin_get_by_name(pipeline,"switch_substream_plain"), "sink%d");
+}
+
 void eServiceMP3::gstCBfilterPadAdded(GstElement *filter, GstPad *pad, gpointer user_data)
 {
        eServiceMP3 *_this = (eServiceMP3*)user_data;
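
Note: gstCreateSubtitleSink above builds a two-stage selector tree on first use; roughly, with the element names from the diff:

        /*   demuxer subtitle pads / external .srt filesrc
         *        |                        |                        |
         *   switch_substream_plain   switch_substream_ssa   switch_substream_srt
         *        |                        |                        |
         *        |                     ssaparse             subparse ("srtparse")
         *        \________________________|________________________/
         *                          switch_subparse
         *                                 |
         *               fakesink "sink_subtitles" (handoff -> gstCBsubtitleAvail)
         */

A subtitle pad of a given type is then attached by requesting a fresh sink pad on the matching inner selector, e.g. for SRT (m_gst_pipeline being the member used throughout the file):

        GstPad *srtpad = gst_element_get_request_pad(
                gst_bin_get_by_name(GST_BIN(m_gst_pipeline), "switch_substream_srt"), "sink%d");
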
@@ -1168,28 +1372,30 @@ eAutoInitPtr<eServiceFactoryMP3> init_eServiceFactoryMP3(eAutoInitNumbers::servi
 
 void eServiceMP3::gstCBsubtitleAvail(GstElement *element, GstBuffer *buffer, GstPad *pad, gpointer user_data)
 {
+       gint64 duration_ns = GST_BUFFER_DURATION(buffer);
        const unsigned char *text = (unsigned char *)GST_BUFFER_DATA(buffer);
        eDebug("gstCBsubtitleAvail: %s",text);
        eServiceMP3 *_this = (eServiceMP3*)user_data;
        if ( _this->m_subtitle_widget )
        {
-               eDVBTeletextSubtitlePage page;
+               ePangoSubtitlePage page;
                gRGB rgbcol(0xD0,0xD0,0xD0);
-               page.m_elements.push_back(eDVBTeletextSubtitlePageElement(rgbcol, (const char*)text));
+               page.m_elements.push_back(ePangoSubtitlePageElement(rgbcol, (const char*)text));
+               page.m_timeout = duration_ns / 1000000;
                (_this->m_subtitle_widget)->setPage(page);
        }
 }
 
 RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
 {
-       eDebug("eServiceMP3::enableSubtitles");
-
        ePyObject entry;
        int tuplesize = PyTuple_Size(tuple);
        int pid;
+       int type;
        gint nb_sources;
        GstPad *active_pad;
-       GstElement *switch_subtitles = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_subtitles");
+       GstElement *switch_substream = NULL;
+       GstElement *switch_subparse = gst_bin_get_by_name (GST_BIN(m_gst_pipeline), "switch_subparse");
 
        if (!PyTuple_Check(tuple))
                goto error_out;
@@ -1199,32 +1405,52 @@ RESULT eServiceMP3::enableSubtitles(eWidget *parent, ePyObject tuple)
        if (!PyInt_Check(entry))
                goto error_out;
        pid = PyInt_AsLong(entry);
+       entry = PyTuple_GET_ITEM(tuple, 2);
+       if (!PyInt_Check(entry))
+               goto error_out;
+       type = PyInt_AsLong(entry);
+
+       switch ((subtype_t)type)
+       {
+               case stPlainText:
+                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_plain");
+                       break;
+               case stSSA:
+                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_ssa");
+                       break;
+               case stSRT:
+                       switch_substream = gst_bin_get_by_name(GST_BIN(m_gst_pipeline),"switch_substream_srt");
+                       break;
+               default:
+                       goto error_out;
+       }
 
        m_subtitle_widget = new eSubtitleWidget(parent);
        m_subtitle_widget->resize(parent->size()); /* full size */
 
-       if ( !switch_subtitles )
+       if ( !switch_substream )
        {
                eDebug("can't switch subtitle tracks! gst-plugin-selector needed");
                return -2;
        }
-       g_object_get (G_OBJECT (switch_subtitles), "n-pads", &nb_sources, NULL);
+       g_object_get (G_OBJECT (switch_substream), "n-pads", &nb_sources, NULL);
        if ( (unsigned int)pid >= m_subtitleStreams.size() || pid >= nb_sources || (unsigned int)m_currentSubtitleStream >= m_subtitleStreams.size() )
                return -2;
-       char sinkpad[8];
+       g_object_get (G_OBJECT (switch_subparse), "n-pads", &nb_sources, NULL);
+       if ( type < 0 || type >= nb_sources )
+               return -2;
+
+       char sinkpad[16];
+       sprintf(sinkpad, "sink%d", type);
+       g_object_set (G_OBJECT (switch_subparse), "active-pad", gst_element_get_pad (switch_subparse, sinkpad), NULL);
        sprintf(sinkpad, "sink%d", pid);
-       g_object_set (G_OBJECT (switch_subtitles), "active-pad", gst_element_get_pad (switch_subtitles, sinkpad), NULL);
-       g_object_get (G_OBJECT (switch_subtitles), "active-pad", &active_pad, NULL);
-       gchar *name;
-       name = gst_pad_get_name (active_pad);
-       eDebug ("switched subtitles to (%s)", name);
-       g_free(name);
+       g_object_set (G_OBJECT (switch_substream), "active-pad", gst_element_get_pad (switch_substream, sinkpad), NULL);
        m_currentSubtitleStream = pid;
 
        return 0;
 error_out:
        eDebug("enableSubtitles needs a tuple as 2nd argument!\n"
-               "for gst subtitles (2, subtitle_stream_count)");
+               "for gst subtitles (2, subtitle_stream_count, subtitle_type)");
        return -1;
 }
 
@@ -1247,21 +1473,23 @@ PyObject *eServiceMP3::getSubtitleList()
        eDebug("eServiceMP3::getSubtitleList");
 
        ePyObject l = PyList_New(0);
-       int stream_count = 0;
+       int stream_count[sizeof(subtype_t)];
+       for ( unsigned int i = 0; i < sizeof(subtype_t); i++ )
+               stream_count[i] = 0;
 
        for (std::vector<subtitleStream>::iterator IterSubtitleStream(m_subtitleStreams.begin()); IterSubtitleStream != m_subtitleStreams.end(); ++IterSubtitleStream)
        {
+               subtype_t type = IterSubtitleStream->type;
                ePyObject tuple = PyTuple_New(5);
                PyTuple_SET_ITEM(tuple, 0, PyInt_FromLong(2));
-               PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count));
-               PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(0));
+               PyTuple_SET_ITEM(tuple, 1, PyInt_FromLong(stream_count[type]));
+               PyTuple_SET_ITEM(tuple, 2, PyInt_FromLong(int(type)));
                PyTuple_SET_ITEM(tuple, 3, PyInt_FromLong(0));
                PyTuple_SET_ITEM(tuple, 4, PyString_FromString((IterSubtitleStream->language_code).c_str()));
                PyList_Append(l, tuple);
                Py_DECREF(tuple);
-               stream_count++;
+               stream_count[type]++;
        }
-
        return l;
 }